diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
index 94305b861cbe0c5e6b641c9dccee7976c73c236f..39b17e3e6eb006ea49a9f00a82c14ef76de0df96 100644
--- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
+++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
@@ -65,8 +65,17 @@ class InferConvInpGen(Transformation):
                     continue
                 i2c_inst = getCustomOp(n)
                 stride = i2c_inst.get_nodeattr("stride")
-                k = i2c_inst.get_nodeattr("kernel_size")
-                pad = i2c_inst.get_nodeattr("pad_amount")
+                k_attr = i2c_inst.get_nodeattr("kernel_size")
+                k_h = k_attr[0]
+                k_w = k_attr[1]
+                pad_attr = i2c_inst.get_nodeattr("pad_amount")
+                pad_h = pad_attr[0] + pad_attr[2]
+                pad_w = pad_attr[1] + pad_attr[3]
+                # temporary checks until non-square conv support is finalized
+                assert pad_h == pad_w, "Non-square images not yet supported."
+                assert k_h == k_w, "Non-square kernels not yet supported."
+                k = k_h
+                pad = pad_attr[0]
                 pad_val = i2c_inst.get_nodeattr("pad_value")
                 depthwise = i2c_inst.get_nodeattr("depthwise")
                 ifm_ch = i2c_in_shape[-1]
@@ -330,8 +339,8 @@ class InferPool_Batch(Transformation):
                     [im2col_out],
                     domain="finn.custom_op.general",
                     stride=stride,
-                    kernel_size=k,
-                    pad_amount=pad,
+                    kernel_size=[k, k],
+                    pad_amount=[pad, pad, pad, pad],
                     pad_value=pad_value,
                     depthwise=1,
                     input_shape="(1,{},{},{})".format(ifm_dim, ifm_dim, ifm_ch),
diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py
index 9d350a9342e3de56cbbb5b3fc4abec69bfc254dc..d88576583eaacb7579b02bc00e4e0f9b77b16f7e 100644
--- a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py
+++ b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py
@@ -77,7 +77,10 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, exec_mode):
     out_chn = 20
     conv_param_shape = [out_chn, in_chn, kernel_size, kernel_size]
 
-    out_feature_dim = compute_conv_output_dim(in_feature_dim, kernel_size, stride, pad)
+    total_pad = 2 * pad
+    out_feature_dim = compute_conv_output_dim(
+        in_feature_dim, kernel_size, stride, total_pad
+    )
 
     input_shape = [1, in_chn, in_feature_dim, in_feature_dim]
     output_shape = [1, out_chn, out_feature_dim, out_feature_dim]
diff --git a/tests/fpgadataflow/test_depthwise_convolution.py b/tests/fpgadataflow/test_depthwise_convolution.py
index 7c608fc3863ab72d1097f49b793af73664b2be48..c406d78158c52226fea881c48bc178139653fea5 100644
--- a/tests/fpgadataflow/test_depthwise_convolution.py
+++ b/tests/fpgadataflow/test_depthwise_convolution.py
@@ -57,7 +57,8 @@ def set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding):
 
     # set up reference model consisting of Im2Col + MatMul (+ MultiThreshold)
     ofm_ch = ifm_ch
-    ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, pad=padding)
+    total_pad = 2 * padding
+    ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, total_pad=total_pad)
 
     if act is None:
         odt = DataType.INT32
@@ -96,9 +97,9 @@ def set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding):
         domain="finn.custom_op.general",
         inputs=["inp"],
         outputs=["im2col_out"],
-        kernel_size=k,
+        kernel_size=[k, k],
         stride=stride,
-        pad_amount=padding,
+        pad_amount=[padding, padding, padding, padding],
         input_shape="(1, {}, {}, {})".format(ifm_dim, ifm_dim, ifm_ch),
         depthwise=1,
     )
diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py
index 0e2e60534bcc871592128fdbbd5ca52b3cc0fe4f..4e0e8c7c35a8fc8a30e0ba4c27a7c0d637e24d1f 100644
--- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py
+++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py
@@ -63,9 +63,9 @@ def make_single_im2col_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, simd, stride, i
         domain="finn.custom_op.general",
         backend="fpgadataflow",
         stride=stride,
-        kernel_size=k,
+        kernel_size=[k, k],
         input_shape=str((1, ifm_dim, ifm_dim, ifm_ch)),
-        pad_amount=0,
+        pad_amount=[0, 0, 0, 0],
         pad_value=0,
     )
     graph = helper.make_graph(
diff --git a/tests/transformation/streamline/test_move_mul_past_dw_conv.py b/tests/transformation/streamline/test_move_mul_past_dw_conv.py
index 5e96d15867b087fbb5f4f1b467aea34cb33e3ff4..ce0cbcd0405f8a09efabbadd5555de1bd6b89e43 100644
--- a/tests/transformation/streamline/test_move_mul_past_dw_conv.py
+++ b/tests/transformation/streamline/test_move_mul_past_dw_conv.py
@@ -32,8 +32,8 @@ def test_move_mul_past_dw_conv(ifm_dim, ifm_ch, k, stride, pad_amt, dw):
     ofm_ch = ifm_ch + 2
     groups = 1
     W_shape = [ofm_ch, ifm_ch, k, k]
-
-    ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, pad_amt)
+    total_pad = 2 * pad_amt
+    ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, total_pad)
 
     # set up onnx model
     inp = helper.make_tensor_value_info(
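
For context, the patch changes the Im2Col attributes kernel_size and pad_amount from scalars to lists and makes compute_conv_output_dim take the total padding along an axis instead of the per-side amount, which is why every updated call site passes total_pad = 2 * pad. The sketch below is illustrative only: conv_output_dim is a stand-in for finn.custom_op.general.im2col.compute_conv_output_dim, and the [H_begin, W_begin, H_end, W_end] ordering of pad_amount is inferred from the pad_attr index arithmetic in InferConvInpGen above, not stated elsewhere in this diff.

# Illustrative sketch only; conv_output_dim is a local stand-in, not FINN's API.
def conv_output_dim(ifm_dim, k, stride, total_pad):
    """Output size along one axis, given the *total* padding on that axis."""
    return (ifm_dim + total_pad - k) // stride + 1

# Old scalar attributes map onto the new list-valued ones like this.
# pad_amount ordering [H_begin, W_begin, H_end, W_end] is assumed, based on
# the pad_attr[0] + pad_attr[2] / pad_attr[1] + pad_attr[3] sums above.
k, pad = 3, 1
kernel_size = [k, k]                    # [k_h, k_w]
pad_amount = [pad, pad, pad, pad]       # symmetric padding on both axes

pad_h = pad_amount[0] + pad_amount[2]   # total padding along height
pad_w = pad_amount[1] + pad_amount[3]   # total padding along width
assert pad_h == pad_w == 2 * pad        # square-only restriction for now

# Call sites now pass the total padding, not the per-side amount:
assert conv_output_dim(32, k, stride=1, total_pad=2 * pad) == 32

Passing the per-axis total rather than a single per-side value keeps the output-dimension formula valid even once asymmetric padding (pad_attr[0] != pad_attr[2]) is supported, since only the sum of the two sides enters the computation.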