diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py
index d0a9aa1c073d4f93e48b0d7d1ece17e1c288f56a..a6e7ad642222936775293ec145e845ef111dd4d3 100644
--- a/tests/end2end/test_end2end_bnn_pynq.py
+++ b/tests/end2end/test_end2end_bnn_pynq.py
@@ -347,6 +347,8 @@ class TestEnd2End:
         assert os.path.isfile(chkpt_preproc_name)
         # join preprocessing and core model
         pre_model = ModelWrapper(chkpt_preproc_name)
+        pre_model = pre_model.transform(InferShapes())
+        pre_model = pre_model.transform(FoldConstants())
         model = model.transform(MergeONNXModels(pre_model))
         # add input quantization annotation: UINT8 for all BNN-PYNQ models
         global_inp_name = model.graph.input[0].name
diff --git a/tests/end2end/test_end2end_mobilenet_v1.py b/tests/end2end/test_end2end_mobilenet_v1.py
index 6e11a30a410c5a76e1b056277346a6b88a7ec1bf..79263a7099b91fb0dbaa10871f7859690ab9e4c2 100644
--- a/tests/end2end/test_end2end_mobilenet_v1.py
+++ b/tests/end2end/test_end2end_mobilenet_v1.py
@@ -101,6 +101,7 @@ def test_end2end_mobilenet_export():
     # set input finn datatype to UINT8
     preproc_model.set_tensor_datatype(preproc_model.graph.input[0].name, DataType.UINT8)
     preproc_model = preproc_model.transform(InferShapes())
+    preproc_model = preproc_model.transform(FoldConstants())
     preproc_model = preproc_model.transform(GiveUniqueNodeNames())
     preproc_model = preproc_model.transform(GiveUniqueParameterTensors())
     preproc_model = preproc_model.transform(GiveReadableTensorNames())
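
For context, below is a minimal standalone sketch (not part of this PR) of the pattern the diff introduces: running InferShapes and FoldConstants on an exported preprocessing model before merging it into the core model. The file paths are hypothetical placeholders, and the import paths follow the FINN version these tests target; newer releases may expose the same transformations under qonnx instead.

    # sketch only; paths "preproc.onnx" / "core.onnx" are placeholders
    from finn.core.modelwrapper import ModelWrapper
    from finn.transformation.infer_shapes import InferShapes
    from finn.transformation.fold_constants import FoldConstants
    from finn.transformation.merge_onnx_models import MergeONNXModels

    # clean up the preprocessing model first so the merge sees
    # fully shape-annotated tensors and no unfolded constant subgraphs
    pre_model = ModelWrapper("preproc.onnx")
    pre_model = pre_model.transform(InferShapes())    # annotate all tensor shapes
    pre_model = pre_model.transform(FoldConstants())  # pre-compute constant-only subgraphs

    # prepend the cleaned preprocessing graph to the core model
    model = ModelWrapper("core.onnx")
    model = model.transform(MergeONNXModels(pre_model))
    model.save("merged.onnx")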