Commit 63f52f19 authored by Yaman Umuroglu

[Test] enable 1D tests + restructure convert_to_hls_pool_batch

parent 123892ff
@@ -137,7 +137,7 @@ def prepare_inputs(input_tensor):
 # number of out channel computed in parallel
 @pytest.mark.parametrize("pe", [1, 2, 4])
 # pool type
-@pytest.mark.parametrize("op_type", ["QuantAvgPool2d", "MaxPool"])
+@pytest.mark.parametrize("op_type", ["QuantAvgPool2d", "MaxPool", "MaxPool1D"])
 # execution mode
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
 @pytest.mark.slow
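Aside: pytest expands stacked parametrize decorators into their full cross-product, so the new "MaxPool1D" value multiplies every other parametrized combination (pe, exec_mode, and the datatype/shape parameters defined above this hunk) by one more op_type. A minimal toy illustration of that mechanism (hypothetical standalone test, not from this file):

import pytest

@pytest.mark.parametrize("pe", [1, 2, 4])
@pytest.mark.parametrize("op_type", ["QuantAvgPool2d", "MaxPool", "MaxPool1D"])
def test_toy_matrix(pe, op_type):
    # pytest collects 3 x 3 = 9 cases from these two decorators alone
    assert pe in (1, 2, 4) and op_type != ""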
@@ -156,7 +156,14 @@ def test_convert_to_hls_pool_batch(
     np.random.seed(0)
     ofm_dim = int(((ifm_dim + 2 * pad - k) / stride) + 1)
-    x = gen_finn_dt_tensor(idt, (1, ifm_ch, ifm_dim, ifm_dim))
+    ishape = (1, ifm_ch, ifm_dim, ifm_dim)
+    use_1d = False
+    if op_type == "MaxPool1D":
+        use_1d = True
+        ishape = (1, ifm_ch, 1, ifm_dim)
+        op_type = "MaxPool"
+    x = gen_finn_dt_tensor(idt, ishape)
     # prepare input data
     input_dict = prepare_inputs(x)
     if op_type == "MaxPool":
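The new branch models the 1D max pool as a height-1 NCHW tensor, so the rest of the 2D test path is reused unchanged. A minimal numpy sketch of why that is equivalent (illustration only, not FINN code; shapes and pooling parameters chosen arbitrarily):

import numpy as np

def maxpool2d(x, kh, kw, sh, sw):
    # naive (N, C, H, W) max pooling, no padding
    n, c, h, w = x.shape
    ho, wo = (h - kh) // sh + 1, (w - kw) // sw + 1
    out = np.empty((n, c, ho, wo), dtype=x.dtype)
    for i in range(ho):
        for j in range(wo):
            win = x[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw]
            out[:, :, i, j] = win.max(axis=(2, 3))
    return out

x1d = np.random.randint(-8, 8, size=(1, 3, 8))        # (N, C, L)
ref = x1d.reshape(1, 3, 4, 2).max(axis=3)             # 1D pool, k=2, stride=2
out = maxpool2d(x1d.reshape(1, 3, 1, 8), 1, 2, 1, 2)  # same pool on a height-1 2D view
assert (out.reshape(ref.shape) == ref).all()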
@@ -168,7 +175,7 @@ def test_convert_to_hls_pool_batch(
             pytest.skip("Skipping Maxpool with idt != odt")
         model = make_single_maxpool_modelwrapper(
-            k, stride, pad, ifm_ch, ifm_dim, ofm_dim, idt
+            k, stride, pad, ifm_ch, ifm_dim, ofm_dim, idt, use_1d
         )
     elif op_type == "QuantAvgPool2d":
         if pad != 0:
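The model builder now receives the use_1d flag. A hypothetical sketch of the attribute choice such a flag plausibly controls (make_single_maxpool_modelwrapper is defined earlier in this test file and not shown in this diff; the helper below is illustrative only):

from onnx import helper

def make_maxpool_node(k, stride, pad, use_1d):
    # a 1D pool expressed as 2D: pool only along the width axis
    kernel = [1, k] if use_1d else [k, k]
    strides = [1, stride] if use_1d else [stride, stride]
    # ONNX MaxPool pads order: [H_begin, W_begin, H_end, W_end]
    pads = [0, pad, 0, pad] if use_1d else [pad, pad, pad, pad]
    return helper.make_node(
        "MaxPool", ["inp"], ["outp"],
        kernel_shape=kernel, strides=strides, pads=pads,
    )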
@@ -187,16 +194,40 @@ def test_convert_to_hls_pool_batch(
     new_model = model.transform(to_hls.InferPool_Batch())
     new_model = new_model.transform(GiveUniqueNodeNames())
-    if ifm_ch != pe:
-        new_model = new_model.transform(to_hls.InferConvInpGen())
-        # Folding
-        for n in new_model.graph.node:
-            if n.op_type == "ConvolutionInputGenerator":
-                inst = getCustomOp(n)
-                inst.set_nodeattr("SIMD", pe)
-            elif n.op_type == "Pool_Batch":
-                inst = getCustomOp(n)
-                inst.set_nodeattr("PE", pe)
+    new_model = new_model.transform(to_hls.InferConvInpGen())
+    # Folding
+    for n in new_model.graph.node:
+        if n.op_type.startswith("ConvolutionInputGenerator"):
+            inst = getCustomOp(n)
+            inst.set_nodeattr("SIMD", pe)
+        elif n.op_type == "Pool_Batch":
+            inst = getCustomOp(n)
+            inst.set_nodeattr("PE", pe)
+
+    if stride <= k:
+        if pad == 0:
+            assert len(new_model.graph.node) == 4
+            assert new_model.graph.node[0].op_type == "Transpose"
+            assert new_model.graph.node[1].op_type.startswith(
+                "ConvolutionInputGenerator"
+            )
+            assert new_model.graph.node[2].op_type == "Pool_Batch"
+            assert new_model.graph.node[3].op_type == "Transpose"
+        else:
+            assert len(new_model.graph.node) == 5
+            assert new_model.graph.node[0].op_type == "Transpose"
+            assert new_model.graph.node[1].op_type == "FMPadding_Batch"
+            assert new_model.graph.node[2].op_type.startswith(
+                "ConvolutionInputGenerator"
+            )
+            assert new_model.graph.node[3].op_type == "Pool_Batch"
+            assert new_model.graph.node[4].op_type == "Transpose"
+    else:
+        # not currently converted to HLS, node stays as-is
+        assert len(new_model.graph.node) == 1
+        assert new_model.graph.node[0].op_type in ["MaxPool", "QuantAvgPool2d"]
+        # no need to exec
+        return
+
     if exec_mode == "cppsim":
         new_model = new_model.transform(SetExecMode("cppsim"))
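Restructured this way, the graph shape is checked right after lowering instead of at the end of the test (the old end-of-test node counts are removed in the next hunk). The Transpose pair appears because FINN's HLS layers work on NHWC data while the ONNX model is NCHW. The three cases the new assertions encode, summarized from this hunk (a trailing * marks a startswith() match):

EXPECTED = {
    "stride <= k, pad == 0": [
        "Transpose", "ConvolutionInputGenerator*", "Pool_Batch", "Transpose",
    ],
    "stride <= k, pad != 0": [
        "Transpose", "FMPadding_Batch", "ConvolutionInputGenerator*",
        "Pool_Batch", "Transpose",
    ],
    "stride > k": ["MaxPool or QuantAvgPool2d, left unconverted"],
}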
@@ -214,13 +245,6 @@ def test_convert_to_hls_pool_batch(
     # execute new_model
     y_produced = oxe.execute_onnx(new_model, input_dict)["outp"]
     assert (y_produced == y_expected).all()
-    if stride <= k:
-        if pad == 0 or ifm_ch == pe:
-            assert len(new_model.graph.node) == 4
-        else:
-            assert len(new_model.graph.node) == 5
-    else:
-        assert len(new_model.graph.node) == 1
     if exec_mode == "rtlsim":
         node = new_model.get_nodes_by_op_type("Pool_Batch")[0]
......