From 63f52f19db7078b23c7a2e0516c1cdaccaa4dc35 Mon Sep 17 00:00:00 2001
From: Yaman Umuroglu <yamanu@xilinx.com>
Date: Wed, 24 Nov 2021 11:27:16 +0100
Subject: [PATCH] [Test] enable 1D tests + restructure
 convert_to_hls_pool_batch

---
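Note (patch commentary, not part of the commit message): the new "MaxPool1D"
parameter keeps using ONNX's 2D MaxPool op by giving the input a singleton
height dimension, (1, ifm_ch, 1, ifm_dim), and passing a use_1d flag through to
make_single_maxpool_modelwrapper. That helper is not touched by this diff, so
the sketch below is only an illustration of the assumed 1D-as-2D mapping; the
function name, kernel_shape, pads and strides values are assumptions, not the
helper's actual code.

    # Illustrative sketch only, not part of this patch.
    from onnx import TensorProto, helper

    def sketch_maxpool_node(k, stride, pad, ifm_ch, ifm_dim, ofm_dim, use_1d=False):
        if use_1d:
            # 1D pooling expressed on a (1, C, 1, W) tensor: pool only along W
            ishape = [1, ifm_ch, 1, ifm_dim]
            oshape = [1, ifm_ch, 1, ofm_dim]
            kshape = [1, k]
            pads = [0, pad, 0, pad]
            strides = [1, stride]
        else:
            ishape = [1, ifm_ch, ifm_dim, ifm_dim]
            oshape = [1, ifm_ch, ofm_dim, ofm_dim]
            kshape = [k, k]
            pads = [pad, pad, pad, pad]
            strides = [stride, stride]
        inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, ishape)
        outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, oshape)
        node = helper.make_node(
            "MaxPool", ["inp"], ["outp"],
            kernel_shape=kshape, pads=pads, strides=strides,
        )
        return node, inp, outp
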
 .../test_convert_to_hls_pool_batch.py         | 64 +++++++++++++------
 1 file changed, 44 insertions(+), 20 deletions(-)

diff --git a/tests/fpgadataflow/test_convert_to_hls_pool_batch.py b/tests/fpgadataflow/test_convert_to_hls_pool_batch.py
index fa6848990..0dd9991b2 100644
--- a/tests/fpgadataflow/test_convert_to_hls_pool_batch.py
+++ b/tests/fpgadataflow/test_convert_to_hls_pool_batch.py
@@ -137,7 +137,7 @@ def prepare_inputs(input_tensor):
 # number of out channel computed in parallel
 @pytest.mark.parametrize("pe", [1, 2, 4])
 # pool type
-@pytest.mark.parametrize("op_type", ["QuantAvgPool2d", "MaxPool"])
+@pytest.mark.parametrize("op_type", ["QuantAvgPool2d", "MaxPool", "MaxPool1D"])
 # execution mode
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
 @pytest.mark.slow
@@ -156,7 +156,14 @@ def test_convert_to_hls_pool_batch(
     np.random.seed(0)
     ofm_dim = int(((ifm_dim + 2 * pad - k) / stride) + 1)
 
-    x = gen_finn_dt_tensor(idt, (1, ifm_ch, ifm_dim, ifm_dim))
+    ishape = (1, ifm_ch, ifm_dim, ifm_dim)
+    use_1d = False
+    if op_type == "MaxPool1D":
+        use_1d = True
+        ishape = (1, ifm_ch, 1, ifm_dim)
+        op_type = "MaxPool"
+
+    x = gen_finn_dt_tensor(idt, ishape)
     # prepare input data
     input_dict = prepare_inputs(x)
     if op_type == "MaxPool":
@@ -168,7 +175,7 @@ def test_convert_to_hls_pool_batch(
             pytest.skip("Skipping Maxpool with idt != odt")
 
         model = make_single_maxpool_modelwrapper(
-            k, stride, pad, ifm_ch, ifm_dim, ofm_dim, idt
+            k, stride, pad, ifm_ch, ifm_dim, ofm_dim, idt, use_1d
         )
     elif op_type == "QuantAvgPool2d":
         if pad != 0:
@@ -187,16 +194,40 @@ def test_convert_to_hls_pool_batch(
     new_model = model.transform(to_hls.InferPool_Batch())
     new_model = new_model.transform(GiveUniqueNodeNames())
 
-    if ifm_ch != pe:
-        new_model = new_model.transform(to_hls.InferConvInpGen())
-        # Folding
-        for n in new_model.graph.node:
-            if n.op_type == "ConvolutionInputGenerator":
-                inst = getCustomOp(n)
-                inst.set_nodeattr("SIMD", pe)
-            elif n.op_type == "Pool_Batch":
-                inst = getCustomOp(n)
-                inst.set_nodeattr("PE", pe)
+    new_model = new_model.transform(to_hls.InferConvInpGen())
+    # Folding: set SIMD/PE parallelism on the converted dataflow nodes
+    for n in new_model.graph.node:
+        if n.op_type.startswith("ConvolutionInputGenerator"):
+            inst = getCustomOp(n)
+            inst.set_nodeattr("SIMD", pe)
+        elif n.op_type == "Pool_Batch":
+            inst = getCustomOp(n)
+            inst.set_nodeattr("PE", pe)
+
+    if stride <= k:
+        if pad == 0:
+            assert len(new_model.graph.node) == 4
+            assert new_model.graph.node[0].op_type == "Transpose"
+            assert new_model.graph.node[1].op_type.startswith(
+                "ConvolutionInputGenerator"
+            )
+            assert new_model.graph.node[2].op_type == "Pool_Batch"
+            assert new_model.graph.node[3].op_type == "Transpose"
+        else:
+            assert len(new_model.graph.node) == 5
+            assert new_model.graph.node[0].op_type == "Transpose"
+            assert new_model.graph.node[1].op_type == "FMPadding_Batch"
+            assert new_model.graph.node[2].op_type.startswith(
+                "ConvolutionInputGenerator"
+            )
+            assert new_model.graph.node[3].op_type == "Pool_Batch"
+            assert new_model.graph.node[4].op_type == "Transpose"
+    else:
+        # not currently converted to HLS, node stays as-is
+        assert len(new_model.graph.node) == 1
+        assert new_model.graph.node[0].op_type in ["MaxPool", "QuantAvgPool2d"]
+        # no need to exec; nothing was converted to HLS
+        return
 
     if exec_mode == "cppsim":
         new_model = new_model.transform(SetExecMode("cppsim"))
@@ -214,13 +245,6 @@ def test_convert_to_hls_pool_batch(
     # execute new_model
     y_produced = oxe.execute_onnx(new_model, input_dict)["outp"]
     assert (y_produced == y_expected).all()
-    if stride <= k:
-        if pad == 0 or ifm_ch == pe:
-            assert len(new_model.graph.node) == 4
-        else:
-            assert len(new_model.graph.node) == 5
-    else:
-        assert len(new_model.graph.node) == 1
 
     if exec_mode == "rtlsim":
         node = new_model.get_nodes_by_op_type("Pool_Batch")[0]
-- 
GitLab