diff --git a/tests/brevitas/test_brevitas_cnv.py b/tests/brevitas/test_brevitas_cnv.py
index 8d21a33f78ca6f1229bdc11e753fc17cdf170242..c04e16ad1923609c81240235057cc7a190c90ffb 100644
--- a/tests/brevitas/test_brevitas_cnv.py
+++ b/tests/brevitas/test_brevitas_cnv.py
@@ -59,6 +59,7 @@ def test_brevitas_cnv_export_exec(wbits, abits):
     model = model.transform(FoldConstants())
     fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
     input_tensor = np.load(fn)["arr_0"].astype(np.float32)
+    input_tensor = input_tensor / 255
     assert input_tensor.shape == (1, 3, 32, 32)
     # run using FINN-based execution
     input_dict = {model.graph.input[0].name: input_tensor}
@@ -68,4 +69,5 @@ def test_brevitas_cnv_export_exec(wbits, abits):
     input_tensor = torch.from_numpy(input_tensor).float()
     expected = cnv.forward(input_tensor).detach().numpy()
     assert np.isclose(produced, expected, atol=1e-3).all()
+    assert np.argmax(produced) == 3
     os.remove(export_onnx_path)
diff --git a/tests/end2end/test_end2end_cnv_w1a1.py b/tests/end2end/test_end2end_cnv_w1a1.py
index 34e0df8402ea0d1b880781185cd17e3ccb1a0ae0..1725eb3915b692e8f419924856eecb5f85faacf1 100644
--- a/tests/end2end/test_end2end_cnv_w1a1.py
+++ b/tests/end2end/test_end2end_cnv_w1a1.py
@@ -220,6 +220,7 @@ def test_end2end_cnv_w1a1_verify_all():
     # load one of the test vectors
     fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
     input_tensor = np.load(fn)["arr_0"].astype(np.float32)
+    input_tensor = input_tensor / 255
     assert input_tensor.shape == (1, 3, 32, 32)
     x = input_tensor
     # x = np.zeros(ishape, dtype=np.float32)
@@ -253,6 +254,7 @@ def test_end2end_cnv_w1a1_verify_all():
     assert np.isclose(y_golden, y_npysim).all()
     assert np.isclose(y_golden, y_nodebynode_rtlsim).all()
     assert np.isclose(y_golden, y_whole_rtlsim).all()
+    assert np.argmax(y_golden) == 3
 
 
 def test_end2end_cnv_w1a1_make_pynq_proj():
@@ -299,6 +301,7 @@ def test_end2end_cnv_w1a1_run_on_pynq():
     # load one of the test vectors
     fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
     input_tensor = np.load(fn)["arr_0"].astype(np.float32)
+    input_tensor = input_tensor / 255
     assert input_tensor.shape == (1, 3, 32, 32)
     x = input_tensor
     # run using FINN-based execution
@@ -320,6 +323,7 @@ def test_end2end_cnv_w1a1_run_on_pynq():
         ret = execute_onnx(parent_model, {iname: x}, True)
         y = ret[oname]
         assert np.isclose(y, y_golden).all()
+        assert np.argmax(y) == 3
 
     except KeyError:
         pytest.skip("PYNQ board IP address not specified")
diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py
index 4effc0da9702850565d2651819d64f1ab3489877..46c39e45abe88c0a980228655eed7f2e31833a81 100644
--- a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py
+++ b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py
@@ -72,6 +72,7 @@ def test_convert_to_hls_layers_cnv_w1a1():
     # load one of the test vectors
     fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
     input_tensor = np.load(fn)["arr_0"].astype(np.float32)
+    input_tensor = input_tensor / 255
     assert input_tensor.shape == (1, 3, 32, 32)
     # generate expected value from streamlined net
     input_dict = {"global_in": input_tensor}
@@ -119,4 +120,5 @@ def test_convert_to_hls_layers_cnv_w1a1():
     produced_ctx = oxe.execute_onnx(model, input_dict, True)
     produced = produced_ctx[model.graph.output[0].name]
     assert np.isclose(expected, produced, atol=1e-3).all()
+    assert np.argmax(produced) == 3
     os.remove(export_onnx_path_cnv)
diff --git a/tests/transformation/streamline/test_streamline_cnv.py b/tests/transformation/streamline/test_streamline_cnv.py
index ec5bf441b736b5faed0024749b5b77f213949029..cfa5abe58b74c7f5ac2b708a753893fea0769d9b 100644
--- a/tests/transformation/streamline/test_streamline_cnv.py
+++ b/tests/transformation/streamline/test_streamline_cnv.py
@@ -65,6 +65,7 @@ def test_streamline_cnv(size, wbits, abits):
     # load one of the test vectors
     fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
     input_tensor = np.load(fn)["arr_0"].astype(np.float32)
+    input_tensor = input_tensor / 255
     assert input_tensor.shape == (1, 3, 32, 32)
     # run using FINN-based execution
     input_dict = {"global_in": input_tensor}
@@ -77,3 +78,4 @@ def test_streamline_cnv(size, wbits, abits):
     produced = produced_ctx[model.graph.output[0].name]
     assert np.isclose(expected, produced, atol=1e-3).all()
     assert model.graph.node[0].op_type == "MultiThreshold"
+    assert np.argmax(produced) == 3
diff --git a/tests/transformation/test_batchnorm_to_affine.py b/tests/transformation/test_batchnorm_to_affine.py
index 997ca5ab110bf3612a4db1152ac844180daf7d43..43110c6bf9e5469b2ca21ac667d7f92808017fb8 100644
--- a/tests/transformation/test_batchnorm_to_affine.py
+++ b/tests/transformation/test_batchnorm_to_affine.py
@@ -55,6 +55,7 @@ def test_batchnorm_to_affine_cnv_w1a1():
     model = model.transform(FoldConstants())
     fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
     input_tensor = np.load(fn)["arr_0"].astype(np.float32)
+    input_tensor = input_tensor / 255
     assert input_tensor.shape == (1, 3, 32, 32)
     input_dict = {"0": input_tensor}
     output_dict = oxe.execute_onnx(model, input_dict)
@@ -66,6 +67,7 @@ def test_batchnorm_to_affine_cnv_w1a1():
     output_dict_p = oxe.execute_onnx(new_model, input_dict)
     produced = output_dict_p[list(output_dict_p.keys())[0]]
     assert np.isclose(expected, produced).all()
+    assert np.argmax(produced) == 3
     os.remove(export_onnx_path)
 
 
diff --git a/tests/transformation/test_conv_lowering.py b/tests/transformation/test_conv_lowering.py
index 647a2b454a1a609c41707707e7fc9cf90dadd59c..2cbc8e558940517168678b05c3bb46af8170abce 100644
--- a/tests/transformation/test_conv_lowering.py
+++ b/tests/transformation/test_conv_lowering.py
@@ -52,6 +52,7 @@ def test_conv_lowering_cnv_w1a1():
     model = model.transform(FoldConstants())
     fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
     input_tensor = np.load(fn)["arr_0"].astype(np.float32)
+    input_tensor = input_tensor / 255
     assert input_tensor.shape == (1, 3, 32, 32)
     # execute imported model to get expected answer
     input_dict = {"0": input_tensor}
@@ -62,4 +63,5 @@ def test_conv_lowering_cnv_w1a1():
     output_dict_p = oxe.execute_onnx(model, input_dict)
     produced = output_dict_p[list(output_dict_p.keys())[0]]
     assert np.isclose(produced, expected).all()
+    assert np.argmax(produced) == 3
     os.remove(export_onnx_path)
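Every hunk above applies the same three-step pattern: load the bundled class-3 CIFAR-10 test vector, divide the raw 0-255 pixel values by 255 (presumably to match the [0, 1] input range the Brevitas CNV networks were trained on), and assert that the executed graph's top-1 prediction is class 3. The sketch below is only an illustration of that shared pattern, not part of the change set; the helper name check_cnv_class3 and the input_name default are hypothetical, while pk.resource_filename, oxe.execute_onnx, and the .npz path are taken from the tests themselves.

import numpy as np
import pkg_resources as pk

import finn.core.onnx_exec as oxe


def check_cnv_class3(model, input_name="global_in"):
    # Load the bundled class-3 CIFAR-10 test vector (raw pixel values 0..255).
    fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
    input_tensor = np.load(fn)["arr_0"].astype(np.float32)
    # Scale to [0, 1]; assumption: the exported Brevitas CNV expects normalized input.
    input_tensor = input_tensor / 255
    assert input_tensor.shape == (1, 3, 32, 32)
    # Execute the ONNX graph and require the top-1 prediction to be class 3.
    output_dict = oxe.execute_onnx(model, {input_name: input_tensor})
    produced = output_dict[model.graph.output[0].name]
    assert np.argmax(produced) == 3
    return produced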