diff --git a/tests/fpgadataflow/test_code_gen_trafo.py b/tests/fpgadataflow/test_code_gen_trafo.py
index 0f153dfa59158f9cf3990e849690831d8de73858..414d7b42cc89400bbf6271f4c8aaa39fc83179c7 100644
--- a/tests/fpgadataflow/test_code_gen_trafo.py
+++ b/tests/fpgadataflow/test_code_gen_trafo.py
@@ -5,7 +5,6 @@ from onnx import TensorProto, helper
 import finn.util.basic as util
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.fpgadataflow.cleanup import CleanUp
 from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
 
 
@@ -66,4 +65,3 @@ def test_code_gen_trafo():
             op type {} is empty!""".format(
             node.op_type
         )
-    model = model.transform(CleanUp())
diff --git a/tests/fpgadataflow/test_compilation_trafo.py b/tests/fpgadataflow/test_compilation_trafo.py
index 3579485f37fb84a4bed6b5ca575e27c689a6521d..91f4b549911dc1a6cf8078d87208d4d603cc59a9 100644
--- a/tests/fpgadataflow/test_compilation_trafo.py
+++ b/tests/fpgadataflow/test_compilation_trafo.py
@@ -5,7 +5,6 @@ from onnx import TensorProto, helper
 import finn.util.basic as util
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.fpgadataflow.cleanup import CleanUp
 from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
 from finn.transformation.fpgadataflow.compile import Compile
 
@@ -63,4 +62,3 @@ def test_compilation_trafo():
             op type {} does not exist!""".format(
             node.op_type
         )
-    model = model.transform(CleanUp())
diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py
index 7b08e4ae8629c74776380b95a9d0ba26558bf95b..8e160248c08c65ffcc0b3f5090b34c21ceacd2bd 100644
--- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py
+++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py
@@ -6,7 +6,6 @@ from onnx import TensorProto, helper
 import finn.core.onnx_exec as oxe
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.fpgadataflow.cleanup import CleanUp
 from finn.transformation.fpgadataflow.codegen_ipgen import CodeGen_ipgen
 from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
 from finn.transformation.fpgadataflow.compile import Compile
@@ -167,4 +166,3 @@ def test_fpgadataflow_slidingwindow(idt, k, ifm_dim, ifm_ch, stride):
     model = model.transform(HLSSynth_IPGen())
     y_produced = oxe.execute_onnx(model, input_dict)["outp"]
     assert (y_produced == y_expected).all(), "rtlsim failed"
-    model = model.transform(CleanUp())
diff --git a/tests/fpgadataflow/test_fpgadataflow_fclayer.py b/tests/fpgadataflow/test_fpgadataflow_fclayer.py
index 42a3484667adf0bf8b2abe6a4d91226acb6043fe..ed13b09b0bf299c741bbaa7b50c4bd836ca0ca4a 100644
--- a/tests/fpgadataflow/test_fpgadataflow_fclayer.py
+++ b/tests/fpgadataflow/test_fpgadataflow_fclayer.py
@@ -9,7 +9,6 @@ from finn.analysis.fpgadataflow.hls_synth_res_estimation import hls_synth_res_es
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
 from finn.custom_op.multithreshold import multithreshold
-from finn.transformation.fpgadataflow.cleanup import CleanUp
 from finn.transformation.fpgadataflow.codegen_ipgen import CodeGen_ipgen
 from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
 from finn.transformation.fpgadataflow.compile import Compile
@@ -175,7 +174,6 @@ def test_fpgadataflow_fclayer_npysim(idt, wdt, act, nf, sf, mw, mh):
     # execute model
     y_produced = oxe.execute_onnx(model, input_dict)["outp"]
     assert (y_produced.reshape(y_expected.shape) == y_expected).all(), "npysim failed"
-    model = model.transform(CleanUp())
 
 
 # activation: None or DataType
@@ -257,5 +255,3 @@ def test_fpgadataflow_fclayer_rtlsim(idt, wdt, act, nf, sf, mw, mh):
 
     hls_synt_res_est = model.analysis(hls_synth_res_estimation)
     assert "StreamingFCLayer_Batch_0" in hls_synt_res_est
-
-    model = model.transform(CleanUp())
diff --git a/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py b/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py
index a7a096d6a69de743a318c0a514a2f24da5d7a29f..c929ca4953766479e594dd302ef5942d1941f887 100644
--- a/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py
+++ b/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py
@@ -4,7 +4,6 @@ from onnx import TensorProto, helper
 import finn.core.onnx_exec as oxe
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.fpgadataflow.cleanup import CleanUp
 from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
 from finn.transformation.fpgadataflow.compile import Compile
 from finn.transformation.fpgadataflow.set_sim_mode import SetSimMode
@@ -111,7 +110,6 @@ def test_layer_streaming_maxpool_batch():
         ],
         dtype=np.float32,
     ).reshape(2, 2, 4, 4)
-    print(input_tensor)
 
     model = model.transform(SetSimMode("npysim"))
     model = model.transform(CodeGen_npysim())
@@ -119,5 +117,3 @@
 
     input_dict = {"in": input_tensor}
     output_dict = oxe.execute_onnx(model, input_dict)
-    print(output_dict)
-    model = model.transform(CleanUp())
diff --git a/tests/transformation/test_batchnorm_to_affine.py b/tests/transformation/test_batchnorm_to_affine.py
index d23934ce2b24531e13f106abe2d3108406ac8cb4..b55b88d6f4e031237bc384ef3f28d9c6b0703693 100644
--- a/tests/transformation/test_batchnorm_to_affine.py
+++ b/tests/transformation/test_batchnorm_to_affine.py
@@ -30,16 +30,18 @@ def test_batchnorm_to_affine_lfc_w1a1():
     os.remove(export_onnx_path)
 
 
-def test_batchnorm_to_affine_cnv_w1a1():
-    lfc = get_test_model_trained("CNV", 1, 1)
-    bo.export_finn_onnx(lfc, (1, 3, 32, 32), export_onnx_path)
-    model = ModelWrapper(export_onnx_path)
-    model = model.transform(InferShapes())
-    model = model.transform(FoldConstants())
-    # TODO shape inference failing on transformed model below -- needs debug
-    new_model = model.transform(BatchNormToAffine())
-    # check that there are no BN nodes left
-    # TODO replace this with execution test
-    op_types = list(map(lambda x: x.op_type, new_model.graph.node))
-    assert "BatchNormalization" not in op_types
-    os.remove(export_onnx_path)
+# cnv batchnorm to affine not yet supported
+
+# def test_batchnorm_to_affine_cnv_w1a1():
+#     lfc = get_test_model_trained("CNV", 1, 1)
+#     bo.export_finn_onnx(lfc, (1, 3, 32, 32), export_onnx_path)
+#     model = ModelWrapper(export_onnx_path)
+#     model = model.transform(InferShapes())
+#     model = model.transform(FoldConstants())
+#     # TODO shape inference failing on transformed model below -- needs debug
+#     new_model = model.transform(BatchNormToAffine())
+#     # check that there are no BN nodes left
+#     # TODO replace this with execution test
+#     op_types = list(map(lambda x: x.op_type, new_model.graph.node))
+#     assert "BatchNormalization" not in op_types
+#     os.remove(export_onnx_path)