From 05dad1b01be766d4244965caeb93e33ecee3bb59 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu <maltanar@gmail.com> Date: Thu, 14 May 2020 18:27:04 +0100 Subject: [PATCH] [Test] use load_test_checkpoint_or_skip in multistep tests --- tests/end2end/test_end2end_cnv_w1a1.py | 63 ++++++++++++------ .../test_end2end_tfc_w1a1_throughput_test.py | 65 +++++++++++++------ tests/end2end/test_end2end_tfc_w1a2.py | 63 ++++++++++++------ tests/end2end/test_end2end_tfc_w2a2.py | 63 ++++++++++++------ .../test_create_dataflow_partition.py | 8 +-- .../test_fpgadataflow_ip_stitch.py | 29 ++++++--- 6 files changed, 200 insertions(+), 91 deletions(-) diff --git a/tests/end2end/test_end2end_cnv_w1a1.py b/tests/end2end/test_end2end_cnv_w1a1.py index 47d69b3e9..eb4693b0f 100644 --- a/tests/end2end/test_end2end_cnv_w1a1.py +++ b/tests/end2end/test_end2end_cnv_w1a1.py @@ -36,7 +36,6 @@ import onnx # NOQA import pytest import pkg_resources as pk -from finn.core.modelwrapper import ModelWrapper from finn.custom_op.registry import getCustomOp from finn.core.onnx_exec import execute_onnx from finn.transformation.double_to_single_float import DoubleToSingleFloat @@ -69,7 +68,7 @@ from finn.transformation.fpgadataflow.make_pynq_proj import MakePYNQProject from finn.transformation.fpgadataflow.synth_pynq_proj import SynthPYNQProject from finn.transformation.fpgadataflow.make_deployment import DeployToPYNQ from finn.util.basic import pynq_part_map -from finn.util.test import get_test_model_trained +from finn.util.test import get_test_model_trained, load_test_checkpoint_or_skip from finn.transformation.fpgadataflow.annotate_resources import AnnotateResources from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO @@ -91,7 +90,7 @@ def test_end2end_cnv_w1a1_export(): def test_end2end_cnv_w1a1_import_and_tidy(): - model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_export.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_cnv_w1a1_export.onnx") model = model.transform(DoubleToSingleFloat()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) @@ -101,7 +100,7 @@ def test_end2end_cnv_w1a1_import_and_tidy(): def test_end2end_cnv_w1a1_streamline(): - model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_tidy.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_cnv_w1a1_tidy.onnx") model = model.transform(Streamline()) model = model.transform(LowerConvsToMatMul()) model = model.transform(MakeMaxPoolNHWC()) @@ -112,7 +111,9 @@ def test_end2end_cnv_w1a1_streamline(): def test_end2end_cnv_w1a1_convert_to_hls_layers(): - model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_streamlined.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_cnv_w1a1_streamlined.onnx" + ) model = model.transform(to_hls.InferBinaryStreamingFCLayer(mem_mode)) model = model.transform(to_hls.InferQuantizedStreamingFCLayer(mem_mode)) model = model.transform(to_hls.InferConvInpGen()) @@ -122,18 +123,22 @@ def test_end2end_cnv_w1a1_convert_to_hls_layers(): def test_end2end_cnv_w1a1_create_dataflow_partition(): - model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_hls_layers.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_cnv_w1a1_hls_layers.onnx" + ) parent_model = model.transform(CreateDataflowPartition()) parent_model.save(build_dir + "/end2end_cnv_w1a1_dataflow_parent.onnx") sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = 
getCustomOp(sdp_node) dataflow_model_filename = sdp_node.get_nodeattr("model") - dataflow_model = ModelWrapper(dataflow_model_filename) + dataflow_model = load_test_checkpoint_or_skip(dataflow_model_filename) dataflow_model.save(build_dir + "/end2end_cnv_w1a1_dataflow_model.onnx") def test_end2end_cnv_w1a1_fold_and_tlastmarker(): - model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_dataflow_model.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_cnv_w1a1_dataflow_model.onnx" + ) fc_layers = model.get_nodes_by_op_type("StreamingFCLayer_Batch") # each tuple is (PE, SIMD, in_fifo_depth) for a layer folding = [ @@ -169,7 +174,7 @@ def test_end2end_cnv_w1a1_fold_and_tlastmarker(): @pytest.mark.slow def test_end2end_cnv_w1a1_gen_hls_ip(): - model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_folded.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_cnv_w1a1_folded.onnx") model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) model = model.transform(HLSSynthIP()) model = model.transform(AnnotateResources("hls")) @@ -177,14 +182,14 @@ def test_end2end_cnv_w1a1_gen_hls_ip(): def test_end2end_cnv_w1a1_ip_stitch(): - model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_ipgen.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_cnv_w1a1_ipgen.onnx") model = model.transform(ReplaceVerilogRelPaths()) model = model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns)) model.save(build_dir + "/end2end_cnv_w1a1_ipstitch.onnx") def test_end2end_cnv_w1a1_verify_dataflow_part(): - model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_ipstitch.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_cnv_w1a1_ipstitch.onnx") x = np.zeros((1, 32, 32, 3), dtype=np.float32) inp_name = model.graph.input[0].name out_name = model.graph.output[0].name @@ -215,7 +220,9 @@ def test_end2end_cnv_w1a1_verify_dataflow_part(): def test_end2end_cnv_w1a1_verify_all(): # use the streamlined model as the "golden" model for right answers - golden = ModelWrapper(build_dir + "/end2end_cnv_w1a1_streamlined.onnx") + golden = load_test_checkpoint_or_skip( + build_dir + "/end2end_cnv_w1a1_streamlined.onnx" + ) iname = golden.graph.input[0].name oname = golden.graph.output[0].name # load one of the test vectors @@ -229,22 +236,31 @@ def test_end2end_cnv_w1a1_verify_all(): y_golden = ret_golden[oname] # set up parent+child graph to test # we'll use models from the previous step as the child model - parent_model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_dataflow_parent.onnx") + parent_model = load_test_checkpoint_or_skip( + build_dir + "/end2end_cnv_w1a1_dataflow_parent.onnx" + ) iname = parent_model.graph.input[0].name oname = parent_model.graph.output[0].name # produce results with cppsim sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = getCustomOp(sdp_node) + load_test_checkpoint_or_skip(build_dir + "/end2end_cnv_w1a1_ipgen_cppsim.onnx") sdp_node.set_nodeattr("model", build_dir + "/end2end_cnv_w1a1_ipgen_cppsim.onnx") ret_cppsim = execute_onnx(parent_model, {iname: x}, True) y_cppsim = ret_cppsim[oname] # produce results with node-by-node rtlsim + load_test_checkpoint_or_skip( + build_dir + "/end2end_cnv_w1a1_ipgen_nodebynode_rtlsim.onnx" + ) sdp_node.set_nodeattr( "model", build_dir + "/end2end_cnv_w1a1_ipgen_nodebynode_rtlsim.onnx" ) ret_nodebynode_rtlsim = execute_onnx(parent_model, {iname: x}, True) y_nodebynode_rtlsim = ret_nodebynode_rtlsim[oname] # produce results with whole-network (stitched ip) rtlsim + 
load_test_checkpoint_or_skip( + build_dir + "/end2end_cnv_w1a1_ipstitch_whole_rtlsim.onnx" + ) sdp_node.set_nodeattr( "model", build_dir + "/end2end_cnv_w1a1_ipstitch_whole_rtlsim.onnx" ) @@ -259,27 +275,31 @@ def test_end2end_cnv_w1a1_verify_all(): def test_end2end_cnv_w1a1_make_pynq_proj(): - model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_ipstitch.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_cnv_w1a1_ipstitch.onnx") model = model.transform(MakePYNQProject(test_pynq_board)) model.save(build_dir + "/end2end_cnv_w1a1_pynq_project.onnx") @pytest.mark.slow def test_end2end_cnv_w1a1_synth_pynq_project(): - model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_pynq_project.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_cnv_w1a1_pynq_project.onnx" + ) model = model.transform(SynthPYNQProject()) model = model.transform(AnnotateResources("synth")) model.save(build_dir + "/end2end_cnv_w1a1_synth.onnx") def test_end2end_cnv_w1a1_make_driver(): - model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_synth.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_cnv_w1a1_synth.onnx") model = model.transform(MakePYNQDriver()) model.save(build_dir + "/end2end_cnv_w1a1_pynq_driver.onnx") def test_end2end_cnv_w1a1_deploy_on_pynq(): - model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_pynq_driver.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_cnv_w1a1_pynq_driver.onnx" + ) try: ip = os.environ["PYNQ_IP"] # no fault for this one; skip if not defined if ip == "": @@ -297,7 +317,9 @@ def test_end2end_cnv_w1a1_deploy_on_pynq(): def test_end2end_cnv_w1a1_run_on_pynq(): # use the streamlined model as the "golden" model for right answers - golden = ModelWrapper(build_dir + "/end2end_cnv_w1a1_streamlined.onnx") + golden = load_test_checkpoint_or_skip( + build_dir + "/end2end_cnv_w1a1_streamlined.onnx" + ) iname = golden.graph.input[0].name oname = golden.graph.output[0].name # load one of the test vectors @@ -311,7 +333,9 @@ def test_end2end_cnv_w1a1_run_on_pynq(): y_golden = ret_golden[oname] # set up parent+child graph to test # we'll use models from the previous step as the child model - parent_model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_dataflow_parent.onnx") + parent_model = load_test_checkpoint_or_skip( + build_dir + "/end2end_cnv_w1a1_dataflow_parent.onnx" + ) iname = parent_model.graph.input[0].name oname = parent_model.graph.output[0].name try: @@ -321,6 +345,7 @@ def test_end2end_cnv_w1a1_run_on_pynq(): # produce results with cppsim sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = getCustomOp(sdp_node) + load_test_checkpoint_or_skip(build_dir + "/end2end_cnv_w1a1_pynq_deploy.onnx") sdp_node.set_nodeattr("model", build_dir + "/end2end_cnv_w1a1_pynq_deploy.onnx") ret = execute_onnx(parent_model, {iname: x}, True) y = ret[oname] diff --git a/tests/end2end/test_end2end_tfc_w1a1_throughput_test.py b/tests/end2end/test_end2end_tfc_w1a1_throughput_test.py index df53524ff..cba6983f6 100644 --- a/tests/end2end/test_end2end_tfc_w1a1_throughput_test.py +++ b/tests/end2end/test_end2end_tfc_w1a1_throughput_test.py @@ -40,7 +40,6 @@ import onnx.numpy_helper as nph import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls import finn.transformation.streamline.absorb as absorb -from finn.core.modelwrapper import ModelWrapper from finn.core.onnx_exec import execute_onnx from finn.core.throughput_test import throughput_test from finn.custom_op.registry import getCustomOp @@ 
-71,7 +70,7 @@ from finn.transformation.infer_shapes import InferShapes from finn.transformation.streamline import Streamline from finn.transformation.streamline.round_thresholds import RoundAndClipThresholds from finn.util.basic import pynq_part_map -from finn.util.test import get_test_model_trained +from finn.util.test import get_test_model_trained, load_test_checkpoint_or_skip from finn.transformation.fpgadataflow.annotate_resources import AnnotateResources from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim @@ -92,7 +91,7 @@ def test_end2end_tfc_w1a1_export(): def test_end2end_tfc_w1a1_import_and_tidy(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_export.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a1_export.onnx") model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) @@ -102,13 +101,15 @@ def test_end2end_tfc_w1a1_import_and_tidy(): def test_end2end_tfc_w1a1_streamline(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_tidy.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a1_tidy.onnx") model = model.transform(Streamline()) model.save(build_dir + "/end2end_tfc_w1a1_streamlined.onnx") def test_end2end_tfc_w1a1_convert_to_hls_layers(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_streamlined.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a1_streamlined.onnx" + ) model = model.transform(ConvertBipolarMatMulToXnorPopcount()) model = model.transform(absorb.AbsorbAddIntoMultiThreshold()) model = model.transform(absorb.AbsorbMulIntoMultiThreshold()) @@ -118,18 +119,22 @@ def test_end2end_tfc_w1a1_convert_to_hls_layers(): def test_end2end_tfc_w1a1_create_dataflow_partition(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_hls_layers.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a1_hls_layers.onnx" + ) parent_model = model.transform(CreateDataflowPartition()) parent_model.save(build_dir + "/end2end_tfc_w1a1_dataflow_parent.onnx") sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = getCustomOp(sdp_node) dataflow_model_filename = sdp_node.get_nodeattr("model") - dataflow_model = ModelWrapper(dataflow_model_filename) + dataflow_model = load_test_checkpoint_or_skip(dataflow_model_filename) dataflow_model.save(build_dir + "/end2end_tfc_w1a1_dataflow_model.onnx") def test_end2end_tfc_w1a1_fold_and_tlastmarker(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_dataflow_model.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a1_dataflow_model.onnx" + ) fc_layers = model.get_nodes_by_op_type("StreamingFCLayer_Batch") # (PE, SIMD, in_fifo_depth, out_fifo_depth, ramstyle) for each layer config = [ @@ -155,7 +160,7 @@ def test_end2end_tfc_w1a1_fold_and_tlastmarker(): @pytest.mark.slow def test_end2end_tfc_w1a1_gen_hls_ip(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_folded.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a1_folded.onnx") model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) model = model.transform(HLSSynthIP()) model = model.transform(AnnotateResources("hls")) @@ -163,14 +168,14 @@ def test_end2end_tfc_w1a1_gen_hls_ip(): def test_end2end_tfc_w1a1_ip_stitch(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_ipgen.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a1_ipgen.onnx") model = 
model.transform(ReplaceVerilogRelPaths()) model = model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns)) model.save(build_dir + "/end2end_tfc_w1a1_ipstitch.onnx") def test_end2end_tfc_w1a1_verify_dataflow_part(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_ipstitch.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a1_ipstitch.onnx") x = np.zeros((1, 784), dtype=np.float32) inp_name = model.graph.input[0].name out_name = model.graph.output[0].name @@ -199,7 +204,9 @@ def test_end2end_tfc_w1a1_verify_dataflow_part(): def test_end2end_tfc_w1a1_verify_all(): # use the streamlined model as the "golden" model for right answers - golden = ModelWrapper(build_dir + "/end2end_tfc_w1a1_streamlined.onnx") + golden = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a1_streamlined.onnx" + ) iname = golden.graph.input[0].name oname = golden.graph.output[0].name raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb") @@ -210,22 +217,31 @@ def test_end2end_tfc_w1a1_verify_all(): y_golden = ret_golden[oname] # set up parent+child graph to test # we'll use models from the previous step as the child model - parent_model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_dataflow_parent.onnx") + parent_model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a1_dataflow_parent.onnx" + ) iname = parent_model.graph.input[0].name oname = parent_model.graph.output[0].name # produce results with cppsim sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = getCustomOp(sdp_node) + load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a1_ipstitch_cppsim.onnx") sdp_node.set_nodeattr("model", build_dir + "/end2end_tfc_w1a1_ipstitch_cppsim.onnx") ret_cppsim = execute_onnx(parent_model, {iname: x}, True) y_cppsim = ret_cppsim[oname] # produce results with node-by-node rtlsim + load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a1_ipstitch_nodebynode_rtlsim.onnx" + ) sdp_node.set_nodeattr( "model", build_dir + "/end2end_tfc_w1a1_ipstitch_nodebynode_rtlsim.onnx" ) ret_nodebynode_rtlsim = execute_onnx(parent_model, {iname: x}, True) y_nodebynode_rtlsim = ret_nodebynode_rtlsim[oname] # produce results with whole-network (stitched ip) rtlsim + load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a1_ipstitch_whole_rtlsim.onnx" + ) sdp_node.set_nodeattr( "model", build_dir + "/end2end_tfc_w1a1_ipstitch_whole_rtlsim.onnx" ) @@ -237,27 +253,31 @@ def test_end2end_tfc_w1a1_verify_all(): def test_end2end_tfc_w1a1_make_pynq_proj(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_ipstitch.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a1_ipstitch.onnx") model = model.transform(MakePYNQProject(test_pynq_board)) model.save(build_dir + "/end2end_tfc_w1a1_pynq_project.onnx") @pytest.mark.slow def test_end2end_tfc_w1a1_synth_pynq_project(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_pynq_project.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a1_pynq_project.onnx" + ) model = model.transform(SynthPYNQProject()) model = model.transform(AnnotateResources("synth")) model.save(build_dir + "/end2end_tfc_w1a1_synth.onnx") def test_end2end_tfc_w1a1_make_driver(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_synth.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a1_synth.onnx") model = model.transform(MakePYNQDriver()) model.save(build_dir + "/end2end_tfc_w1a1_pynq_driver.onnx") def 
test_end2end_tfc_w1a1_deploy_on_pynq(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_pynq_driver.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a1_pynq_driver.onnx" + ) try: ip = os.environ["PYNQ_IP"] # no fault for this one; skip if not defined if ip == "": @@ -275,7 +295,9 @@ def test_end2end_tfc_w1a1_deploy_on_pynq(): def test_end2end_tfc_w1a1_run_on_pynq(): # use the streamlined model as the "golden" model for right answers - golden = ModelWrapper(build_dir + "/end2end_tfc_w1a1_streamlined.onnx") + golden = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a1_streamlined.onnx" + ) iname = golden.graph.input[0].name oname = golden.graph.output[0].name raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb") @@ -287,7 +309,9 @@ def test_end2end_tfc_w1a1_run_on_pynq(): y_golden = ret_golden[oname] # set up parent+child graph to test # we'll use models from the previous step as the child model - parent_model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_dataflow_parent.onnx") + parent_model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a1_dataflow_parent.onnx" + ) iname = parent_model.graph.input[0].name oname = parent_model.graph.output[0].name try: @@ -297,11 +321,12 @@ def test_end2end_tfc_w1a1_run_on_pynq(): # produce results with cppsim sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = getCustomOp(sdp_node) + load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a1_pynq_deploy.onnx") sdp_node.set_nodeattr("model", build_dir + "/end2end_tfc_w1a1_pynq_deploy.onnx") ret = execute_onnx(parent_model, {iname: x}, True) y = ret[oname] assert np.isclose(y, y_golden).all() - child_model = ModelWrapper(sdp_node.get_nodeattr("model")) + child_model = load_test_checkpoint_or_skip(sdp_node.get_nodeattr("model")) res = throughput_test(child_model) assert res is not None diff --git a/tests/end2end/test_end2end_tfc_w1a2.py b/tests/end2end/test_end2end_tfc_w1a2.py index 716a72a69..027bbdd73 100644 --- a/tests/end2end/test_end2end_tfc_w1a2.py +++ b/tests/end2end/test_end2end_tfc_w1a2.py @@ -39,7 +39,6 @@ import onnx # NOQA import onnx.numpy_helper as nph import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls -from finn.core.modelwrapper import ModelWrapper from finn.core.onnx_exec import execute_onnx from finn.custom_op.registry import getCustomOp from finn.transformation.fold_constants import FoldConstants @@ -67,7 +66,7 @@ from finn.transformation.infer_datatypes import InferDataTypes from finn.transformation.infer_shapes import InferShapes from finn.transformation.streamline import Streamline from finn.util.basic import pynq_part_map -from finn.util.test import get_test_model_trained +from finn.util.test import get_test_model_trained, load_test_checkpoint_or_skip from finn.transformation.fpgadataflow.annotate_resources import AnnotateResources from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim @@ -88,7 +87,7 @@ def test_end2end_tfc_w1a2_export(): def test_end2end_tfc_w1a2_import_and_tidy(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_export.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a2_export.onnx") model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) @@ -98,30 +97,36 @@ def test_end2end_tfc_w1a2_import_and_tidy(): def test_end2end_tfc_w1a2_streamline(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_tidy.onnx") + 
model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a2_tidy.onnx") model = model.transform(Streamline()) model.save(build_dir + "/end2end_tfc_w1a2_streamlined.onnx") def test_end2end_tfc_w1a2_convert_to_hls_layers(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_streamlined.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a2_streamlined.onnx" + ) model = model.transform(to_hls.InferQuantizedStreamingFCLayer(mem_mode)) model.save(build_dir + "/end2end_tfc_w1a2_hls_layers.onnx") def test_end2end_tfc_w1a2_create_dataflow_partition(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_hls_layers.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a2_hls_layers.onnx" + ) parent_model = model.transform(CreateDataflowPartition()) parent_model.save(build_dir + "/end2end_tfc_w1a2_dataflow_parent.onnx") sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = getCustomOp(sdp_node) dataflow_model_filename = sdp_node.get_nodeattr("model") - dataflow_model = ModelWrapper(dataflow_model_filename) + dataflow_model = load_test_checkpoint_or_skip(dataflow_model_filename) dataflow_model.save(build_dir + "/end2end_tfc_w1a2_dataflow_model.onnx") def test_end2end_tfc_w1a2_fold_and_tlastmarker(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_dataflow_model.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a2_dataflow_model.onnx" + ) fc_layers = model.get_nodes_by_op_type("StreamingFCLayer_Batch") # (PE, SIMD, in_fifo_depth, out_fifo_depth, ramstyle) for each layer config = [ @@ -147,7 +152,7 @@ def test_end2end_tfc_w1a2_fold_and_tlastmarker(): @pytest.mark.slow def test_end2end_tfc_w1a2_gen_hls_ip(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_folded.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a2_folded.onnx") model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) model = model.transform(HLSSynthIP()) model = model.transform(AnnotateResources("hls")) @@ -155,14 +160,14 @@ def test_end2end_tfc_w1a2_gen_hls_ip(): def test_end2end_tfc_w1a2_ip_stitch(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_ipgen.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a2_ipgen.onnx") model = model.transform(ReplaceVerilogRelPaths()) model = model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns)) model.save(build_dir + "/end2end_tfc_w1a2_ipstitch.onnx") def test_end2end_tfc_w1a2_verify_dataflow_part(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_ipstitch.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a2_ipstitch.onnx") x = np.zeros((1, 784), dtype=np.float32) inp_name = model.graph.input[0].name out_name = model.graph.output[0].name @@ -191,7 +196,9 @@ def test_end2end_tfc_w1a2_verify_dataflow_part(): def test_end2end_tfc_w1a2_verify_all(): # use the streamlined model as the "golden" model for right answers - golden = ModelWrapper(build_dir + "/end2end_tfc_w1a2_streamlined.onnx") + golden = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a2_streamlined.onnx" + ) iname = golden.graph.input[0].name oname = golden.graph.output[0].name raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb") @@ -202,22 +209,31 @@ def test_end2end_tfc_w1a2_verify_all(): y_golden = ret_golden[oname] # set up parent+child graph to test # we'll use models from the previous step as the child model - parent_model = ModelWrapper(build_dir + 
"/end2end_tfc_w1a2_dataflow_parent.onnx") + parent_model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a2_dataflow_parent.onnx" + ) iname = parent_model.graph.input[0].name oname = parent_model.graph.output[0].name # produce results with cppsim sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = getCustomOp(sdp_node) + load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a2_ipstitch_cppsim.onnx") sdp_node.set_nodeattr("model", build_dir + "/end2end_tfc_w1a2_ipstitch_cppsim.onnx") ret_cppsim = execute_onnx(parent_model, {iname: x}, True) y_cppsim = ret_cppsim[oname] # produce results with node-by-node rtlsim + load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a2_ipstitch_nodebynode_rtlsim.onnx" + ) sdp_node.set_nodeattr( "model", build_dir + "/end2end_tfc_w1a2_ipstitch_nodebynode_rtlsim.onnx" ) ret_nodebynode_rtlsim = execute_onnx(parent_model, {iname: x}, True) y_nodebynode_rtlsim = ret_nodebynode_rtlsim[oname] # produce results with whole-network (stitched ip) rtlsim + load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a2_ipstitch_whole_rtlsim.onnx" + ) sdp_node.set_nodeattr( "model", build_dir + "/end2end_tfc_w1a2_ipstitch_whole_rtlsim.onnx" ) @@ -229,27 +245,31 @@ def test_end2end_tfc_w1a2_verify_all(): def test_end2end_tfc_w1a2_make_pynq_proj(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_ipstitch.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a2_ipstitch.onnx") model = model.transform(MakePYNQProject(test_pynq_board)) model.save(build_dir + "/end2end_tfc_w1a2_pynq_project.onnx") @pytest.mark.slow def test_end2end_tfc_w1a2_synth_pynq_project(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_pynq_project.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a2_pynq_project.onnx" + ) model = model.transform(SynthPYNQProject()) model = model.transform(AnnotateResources("synth")) model.save(build_dir + "/end2end_tfc_w1a2_synth.onnx") def test_end2end_tfc_w1a2_make_driver(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_synth.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a2_synth.onnx") model = model.transform(MakePYNQDriver()) model.save(build_dir + "/end2end_tfc_w1a2_pynq_driver.onnx") def test_end2end_tfc_w1a2_deploy_on_pynq(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_pynq_driver.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a2_pynq_driver.onnx" + ) try: ip = os.environ["PYNQ_IP"] # no fault for this one; skip if not defined if ip == "": @@ -267,7 +287,9 @@ def test_end2end_tfc_w1a2_deploy_on_pynq(): def test_end2end_tfc_w1a2_run_on_pynq(): # use the streamlined model as the "golden" model for right answers - golden = ModelWrapper(build_dir + "/end2end_tfc_w1a2_streamlined.onnx") + golden = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a2_streamlined.onnx" + ) iname = golden.graph.input[0].name oname = golden.graph.output[0].name raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb") @@ -279,7 +301,9 @@ def test_end2end_tfc_w1a2_run_on_pynq(): y_golden = ret_golden[oname] # set up parent+child graph to test # we'll use models from the previous step as the child model - parent_model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_dataflow_parent.onnx") + parent_model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w1a2_dataflow_parent.onnx" + ) iname = parent_model.graph.input[0].name oname = parent_model.graph.output[0].name 
try: @@ -289,6 +313,7 @@ def test_end2end_tfc_w1a2_run_on_pynq(): # produce results with cppsim sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = getCustomOp(sdp_node) + load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w1a2_pynq_deploy.onnx") sdp_node.set_nodeattr("model", build_dir + "/end2end_tfc_w1a2_pynq_deploy.onnx") ret = execute_onnx(parent_model, {iname: x}, True) y = ret[oname] diff --git a/tests/end2end/test_end2end_tfc_w2a2.py b/tests/end2end/test_end2end_tfc_w2a2.py index f11d065a9..f21ea2e60 100644 --- a/tests/end2end/test_end2end_tfc_w2a2.py +++ b/tests/end2end/test_end2end_tfc_w2a2.py @@ -39,7 +39,6 @@ import onnx # NOQA import onnx.numpy_helper as nph import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls -from finn.core.modelwrapper import ModelWrapper from finn.core.onnx_exec import execute_onnx from finn.custom_op.registry import getCustomOp from finn.transformation.fold_constants import FoldConstants @@ -67,7 +66,7 @@ from finn.transformation.infer_datatypes import InferDataTypes from finn.transformation.infer_shapes import InferShapes from finn.transformation.streamline import Streamline from finn.util.basic import pynq_part_map -from finn.util.test import get_test_model_trained +from finn.util.test import get_test_model_trained, load_test_checkpoint_or_skip from finn.transformation.fpgadataflow.annotate_resources import AnnotateResources from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim @@ -88,7 +87,7 @@ def test_end2end_tfc_w2a2_export(): def test_end2end_tfc_w2a2_import_and_tidy(): - model = ModelWrapper(build_dir + "/end2end_tfc_w2a2_export.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w2a2_export.onnx") model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) @@ -98,30 +97,36 @@ def test_end2end_tfc_w2a2_import_and_tidy(): def test_end2end_tfc_w2a2_streamline(): - model = ModelWrapper(build_dir + "/end2end_tfc_w2a2_tidy.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w2a2_tidy.onnx") model = model.transform(Streamline()) model.save(build_dir + "/end2end_tfc_w2a2_streamlined.onnx") def test_end2end_tfc_w2a2_convert_to_hls_layers(): - model = ModelWrapper(build_dir + "/end2end_tfc_w2a2_streamlined.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w2a2_streamlined.onnx" + ) model = model.transform(to_hls.InferQuantizedStreamingFCLayer(mem_mode)) model.save(build_dir + "/end2end_tfc_w2a2_hls_layers.onnx") def test_end2end_tfc_w2a2_create_dataflow_partition(): - model = ModelWrapper(build_dir + "/end2end_tfc_w2a2_hls_layers.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w2a2_hls_layers.onnx" + ) parent_model = model.transform(CreateDataflowPartition()) parent_model.save(build_dir + "/end2end_tfc_w2a2_dataflow_parent.onnx") sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = getCustomOp(sdp_node) dataflow_model_filename = sdp_node.get_nodeattr("model") - dataflow_model = ModelWrapper(dataflow_model_filename) + dataflow_model = load_test_checkpoint_or_skip(dataflow_model_filename) dataflow_model.save(build_dir + "/end2end_tfc_w2a2_dataflow_model.onnx") def test_end2end_tfc_w2a2_fold_and_tlastmarker(): - model = ModelWrapper(build_dir + "/end2end_tfc_w2a2_dataflow_model.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w2a2_dataflow_model.onnx" + ) 
fc_layers = model.get_nodes_by_op_type("StreamingFCLayer_Batch") # (PE, SIMD, in_fifo_depth, out_fifo_depth, ramstyle) for each layer config = [ @@ -147,7 +152,7 @@ def test_end2end_tfc_w2a2_fold_and_tlastmarker(): @pytest.mark.slow def test_end2end_tfc_w2a2_gen_hls_ip(): - model = ModelWrapper(build_dir + "/end2end_tfc_w2a2_folded.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w2a2_folded.onnx") model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) model = model.transform(HLSSynthIP()) model = model.transform(AnnotateResources("hls")) @@ -155,14 +160,14 @@ def test_end2end_tfc_w2a2_gen_hls_ip(): def test_end2end_tfc_w2a2_ip_stitch(): - model = ModelWrapper(build_dir + "/end2end_tfc_w2a2_ipgen.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w2a2_ipgen.onnx") model = model.transform(ReplaceVerilogRelPaths()) model = model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns)) model.save(build_dir + "/end2end_tfc_w2a2_ipstitch.onnx") def test_end2end_tfc_w2a2_verify_dataflow_part(): - model = ModelWrapper(build_dir + "/end2end_tfc_w2a2_ipstitch.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w2a2_ipstitch.onnx") x = np.zeros((1, 784), dtype=np.float32) inp_name = model.graph.input[0].name out_name = model.graph.output[0].name @@ -191,7 +196,9 @@ def test_end2end_tfc_w2a2_verify_dataflow_part(): def test_end2end_tfc_w2a2_verify_all(): # use the streamlined model as the "golden" model for right answers - golden = ModelWrapper(build_dir + "/end2end_tfc_w2a2_streamlined.onnx") + golden = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w2a2_streamlined.onnx" + ) iname = golden.graph.input[0].name oname = golden.graph.output[0].name raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb") @@ -202,22 +209,31 @@ def test_end2end_tfc_w2a2_verify_all(): y_golden = ret_golden[oname] # set up parent+child graph to test # we'll use models from the previous step as the child model - parent_model = ModelWrapper(build_dir + "/end2end_tfc_w2a2_dataflow_parent.onnx") + parent_model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w2a2_dataflow_parent.onnx" + ) iname = parent_model.graph.input[0].name oname = parent_model.graph.output[0].name # produce results with cppsim sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = getCustomOp(sdp_node) + load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w2a2_ipstitch_cppsim.onnx") sdp_node.set_nodeattr("model", build_dir + "/end2end_tfc_w2a2_ipstitch_cppsim.onnx") ret_cppsim = execute_onnx(parent_model, {iname: x}, True) y_cppsim = ret_cppsim[oname] # produce results with node-by-node rtlsim + load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w2a2_ipstitch_nodebynode_rtlsim.onnx" + ) sdp_node.set_nodeattr( "model", build_dir + "/end2end_tfc_w2a2_ipstitch_nodebynode_rtlsim.onnx" ) ret_nodebynode_rtlsim = execute_onnx(parent_model, {iname: x}, True) y_nodebynode_rtlsim = ret_nodebynode_rtlsim[oname] # produce results with whole-network (stitched ip) rtlsim + load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w2a2_ipstitch_whole_rtlsim.onnx" + ) sdp_node.set_nodeattr( "model", build_dir + "/end2end_tfc_w2a2_ipstitch_whole_rtlsim.onnx" ) @@ -229,27 +245,31 @@ def test_end2end_tfc_w2a2_verify_all(): def test_end2end_tfc_w2a2_make_pynq_proj(): - model = ModelWrapper(build_dir + "/end2end_tfc_w2a2_ipstitch.onnx") + model = load_test_checkpoint_or_skip(build_dir + 
"/end2end_tfc_w2a2_ipstitch.onnx") model = model.transform(MakePYNQProject(test_pynq_board)) model.save(build_dir + "/end2end_tfc_w2a2_pynq_project.onnx") @pytest.mark.slow def test_end2end_tfc_w2a2_synth_pynq_project(): - model = ModelWrapper(build_dir + "/end2end_tfc_w2a2_pynq_project.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w2a2_pynq_project.onnx" + ) model = model.transform(SynthPYNQProject()) model = model.transform(AnnotateResources("synth")) model.save(build_dir + "/end2end_tfc_w2a2_synth.onnx") def test_end2end_tfc_w2a2_make_driver(): - model = ModelWrapper(build_dir + "/end2end_tfc_w2a2_synth.onnx") + model = load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w2a2_synth.onnx") model = model.transform(MakePYNQDriver()) model.save(build_dir + "/end2end_tfc_w2a2_pynq_driver.onnx") def test_end2end_tfc_w2a2_deploy_on_pynq(): - model = ModelWrapper(build_dir + "/end2end_tfc_w2a2_pynq_driver.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w2a2_pynq_driver.onnx" + ) try: ip = os.environ["PYNQ_IP"] # no fault for this one; skip if not defined if ip == "": @@ -267,7 +287,9 @@ def test_end2end_tfc_w2a2_deploy_on_pynq(): def test_end2end_tfc_w2a2_run_on_pynq(): # use the streamlined model as the "golden" model for right answers - golden = ModelWrapper(build_dir + "/end2end_tfc_w2a2_streamlined.onnx") + golden = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w2a2_streamlined.onnx" + ) iname = golden.graph.input[0].name oname = golden.graph.output[0].name raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb") @@ -279,7 +301,9 @@ def test_end2end_tfc_w2a2_run_on_pynq(): y_golden = ret_golden[oname] # set up parent+child graph to test # we'll use models from the previous step as the child model - parent_model = ModelWrapper(build_dir + "/end2end_tfc_w2a2_dataflow_parent.onnx") + parent_model = load_test_checkpoint_or_skip( + build_dir + "/end2end_tfc_w2a2_dataflow_parent.onnx" + ) iname = parent_model.graph.input[0].name oname = parent_model.graph.output[0].name try: @@ -289,6 +313,7 @@ def test_end2end_tfc_w2a2_run_on_pynq(): # produce results with cppsim sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = getCustomOp(sdp_node) + load_test_checkpoint_or_skip(build_dir + "/end2end_tfc_w2a2_pynq_deploy.onnx") sdp_node.set_nodeattr("model", build_dir + "/end2end_tfc_w2a2_pynq_deploy.onnx") ret = execute_onnx(parent_model, {iname: x}, True) y = ret[oname] diff --git a/tests/fpgadataflow/test_create_dataflow_partition.py b/tests/fpgadataflow/test_create_dataflow_partition.py index 77e0ddeeb..c4f748051 100644 --- a/tests/fpgadataflow/test_create_dataflow_partition.py +++ b/tests/fpgadataflow/test_create_dataflow_partition.py @@ -29,7 +29,6 @@ import os.path from pkgutil import get_data -import pytest from finn.core.modelwrapper import ModelWrapper from finn.custom_op.registry import getCustomOp @@ -38,11 +37,11 @@ from finn.transformation.fpgadataflow.create_dataflow_partition import ( ) from finn.transformation.fpgadataflow.insert_tlastmarker import InsertTLastMarker from finn.util.basic import make_build_dir +from finn.util.test import load_test_checkpoint_or_skip build_dir = make_build_dir("test_dataflow_partition_") -@pytest.mark.dependency() def test_dataflow_partition_create(): # load the onnx model raw_m = get_data( @@ -57,9 +56,10 @@ def test_dataflow_partition_create(): model.save(build_dir + "/test_dataflow_partition_create.onnx") 
-@pytest.mark.dependency(depends=["test_dataflow_partition_create"]) def test_dataflow_partition_tlastmarker(): - model = ModelWrapper(build_dir + "/test_dataflow_partition_create.onnx") + model = load_test_checkpoint_or_skip( + build_dir + "/test_dataflow_partition_create.onnx" + ) model_path = getCustomOp(model.graph.node[2]).get_nodeattr("model") model = ModelWrapper(model_path) model = model.transform(InsertTLastMarker()) diff --git a/tests/fpgadataflow/test_fpgadataflow_ip_stitch.py b/tests/fpgadataflow/test_fpgadataflow_ip_stitch.py index 1aaf71ffa..9bc824d1d 100644 --- a/tests/fpgadataflow/test_fpgadataflow_ip_stitch.py +++ b/tests/fpgadataflow/test_fpgadataflow_ip_stitch.py @@ -52,6 +52,7 @@ import finn.transformation.fpgadataflow.replace_verilog_relpaths as rvp from finn.transformation.general import GiveUniqueNodeNames from finn.util.basic import gen_finn_dt_tensor, pynq_part_map from finn.util.fpgadataflow import pyverilate_stitched_ip +from finn.util.test import load_test_checkpoint_or_skip test_pynq_board = os.getenv("PYNQ_BOARD", default="Pynq-Z1") test_fpga_part = pynq_part_map[test_pynq_board] @@ -204,7 +205,7 @@ def test_fpgadataflow_ipstitch_gen_model(): # exec_mode): sdp_node = getCustomOp(model.graph.node[0]) assert sdp_node.__class__.__name__ == "StreamingDataflowPartition" assert os.path.isfile(sdp_node.get_nodeattr("model")) - model = ModelWrapper(sdp_node.get_nodeattr("model")) + model = load_test_checkpoint_or_skip(sdp_node.get_nodeattr("model")) model.set_metadata_prop("exec_mode", "remote_pynq") model = model.transform(InsertTLastMarker()) model = model.transform(GiveUniqueNodeNames()) @@ -216,7 +217,7 @@ def test_fpgadataflow_ipstitch_gen_model(): # exec_mode): def test_fpgadataflow_ipstitch_do_stitch(): - model = ModelWrapper( + model = load_test_checkpoint_or_skip( ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_gen_model.onnx" ) model = model.transform(rvp.ReplaceVerilogRelPaths()) @@ -232,7 +233,9 @@ def test_fpgadataflow_ipstitch_do_stitch(): def test_fpgadataflow_ipstitch_rtlsim(): - model = ModelWrapper(ip_stitch_model_dir + "/test_fpgadataflow_ip_stitch.onnx") + model = load_test_checkpoint_or_skip( + ip_stitch_model_dir + "/test_fpgadataflow_ip_stitch.onnx" + ) model.set_metadata_prop("rtlsim_trace", "whole_trace.vcd") sim = pyverilate_stitched_ip(model) exp_io = [ @@ -276,7 +279,9 @@ def test_fpgadataflow_ipstitch_rtlsim(): def test_fpgadataflow_ipstitch_pynq_projgen(): - model = ModelWrapper(ip_stitch_model_dir + "/test_fpgadataflow_ip_stitch.onnx") + model = load_test_checkpoint_or_skip( + ip_stitch_model_dir + "/test_fpgadataflow_ip_stitch.onnx" + ) model = model.transform(MakePYNQProject(test_pynq_board)) vivado_pynq_proj_dir = model.get_metadata_prop("vivado_pynq_proj") assert vivado_pynq_proj_dir is not None @@ -286,7 +291,9 @@ def test_fpgadataflow_ipstitch_pynq_projgen(): @pytest.mark.slow def test_fpgadataflow_ipstitch_pynq_synth(): - model = ModelWrapper(ip_stitch_model_dir + "/test_fpgadataflow_pynq_projgen.onnx") + model = load_test_checkpoint_or_skip( + ip_stitch_model_dir + "/test_fpgadataflow_pynq_projgen.onnx" + ) model = model.transform(SynthPYNQProject()) bitfile = model.get_metadata_prop("vivado_pynq_bitfile") assert bitfile is not None @@ -295,7 +302,9 @@ def test_fpgadataflow_ipstitch_pynq_synth(): def test_fpgadataflow_ipstitch_pynq_driver(): - model = ModelWrapper(ip_stitch_model_dir + "/test_fpgadataflow_pynq_projgen.onnx") + model = load_test_checkpoint_or_skip( + ip_stitch_model_dir + 
"/test_fpgadataflow_ipstitch_pynq_synth.onnx" + ) model = model.transform(MakePYNQDriver()) driver_dir = model.get_metadata_prop("pynq_driver_dir") assert driver_dir is not None @@ -304,13 +313,13 @@ def test_fpgadataflow_ipstitch_pynq_driver(): def test_fpgadataflow_ipstitch_pynq_deployment_folder(): + model = load_test_checkpoint_or_skip( + ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_pynq_driver.onnx" + ) try: ip = os.environ["PYNQ_IP"] # no default for this one; skip if not defined if ip == "": pytest.skip("PYNQ board IP address not specified") - model = ModelWrapper( - ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_pynq_driver.onnx" - ) username = os.getenv("PYNQ_USERNAME", "xilinx") password = os.getenv("PYNQ_PASSWORD", "xilinx") port = os.getenv("PYNQ_PORT", 22) @@ -342,7 +351,7 @@ def test_fpgadataflow_ipstitch_remote_execution(): ip = os.environ["PYNQ_IP"] # NOQA if ip == "": pytest.skip("PYNQ board IP address not specified") - model = ModelWrapper( + model = load_test_checkpoint_or_skip( ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_pynq_deployment.onnx" ) iname = "inp" -- GitLab