Skip to content
Snippets Groups Projects
Commit ca235cf6 authored by Yaman Umuroglu's avatar Yaman Umuroglu
Browse files

Merge branch 'dev' into feature/vitis_hls

parents af2ae401 93f1c925
No related branches found
No related tags found
No related merge requests found
......@@ -65,8 +65,17 @@ class InferConvInpGen(Transformation):
continue
i2c_inst = getCustomOp(n)
stride = i2c_inst.get_nodeattr("stride")
k = i2c_inst.get_nodeattr("kernel_size")
pad = i2c_inst.get_nodeattr("pad_amount")
k_attr = i2c_inst.get_nodeattr("kernel_size")
k_h = k_attr[0]
k_w = k_attr[1]
pad_attr = i2c_inst.get_nodeattr("pad_amount")
pad_h = pad_attr[0] + pad_attr[2]
pad_w = pad_attr[1] + pad_attr[3]
# temporary checks until non-square conv support is finalized
assert pad_h == pad_w, "Non-square images not yet supported."
assert k_h == k_w, "Non-square kernels not yet supported."
k = k_h
pad = pad_attr[0]
pad_val = i2c_inst.get_nodeattr("pad_value")
depthwise = i2c_inst.get_nodeattr("depthwise")
ifm_ch = i2c_in_shape[-1]
......@@ -330,8 +339,8 @@ class InferPool_Batch(Transformation):
[im2col_out],
domain="finn.custom_op.general",
stride=stride,
kernel_size=k,
pad_amount=pad,
kernel_size=[k, k],
pad_amount=[pad, pad, pad, pad],
pad_value=pad_value,
depthwise=1,
input_shape="(1,{},{},{})".format(ifm_dim, ifm_dim, ifm_ch),
......
......@@ -77,7 +77,10 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, exec_mode):
out_chn = 20
conv_param_shape = [out_chn, in_chn, kernel_size, kernel_size]
out_feature_dim = compute_conv_output_dim(in_feature_dim, kernel_size, stride, pad)
total_pad = 2 * pad
out_feature_dim = compute_conv_output_dim(
in_feature_dim, kernel_size, stride, total_pad
)
input_shape = [1, in_chn, in_feature_dim, in_feature_dim]
output_shape = [1, out_chn, out_feature_dim, out_feature_dim]
......
......@@ -57,7 +57,8 @@ def set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding):
# set up reference model consisting of Im2Col + MatMul (+ MultiThreshold)
ofm_ch = ifm_ch
ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, pad=padding)
total_pad = 2 * padding
ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, total_pad=total_pad)
if act is None:
odt = DataType.INT32
......@@ -96,9 +97,9 @@ def set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding):
domain="finn.custom_op.general",
inputs=["inp"],
outputs=["im2col_out"],
kernel_size=k,
kernel_size=[k, k],
stride=stride,
pad_amount=padding,
pad_amount=[padding, padding, padding, padding],
input_shape="(1, {}, {}, {})".format(ifm_dim, ifm_dim, ifm_ch),
depthwise=1,
)
......
......@@ -63,9 +63,9 @@ def make_single_im2col_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, simd, stride, i
domain="finn.custom_op.general",
backend="fpgadataflow",
stride=stride,
kernel_size=k,
kernel_size=[k, k],
input_shape=str((1, ifm_dim, ifm_dim, ifm_ch)),
pad_amount=0,
pad_amount=[0, 0, 0, 0],
pad_value=0,
)
graph = helper.make_graph(
......
......@@ -32,8 +32,8 @@ def test_move_mul_past_dw_conv(ifm_dim, ifm_ch, k, stride, pad_amt, dw):
ofm_ch = ifm_ch + 2
groups = 1
W_shape = [ofm_ch, ifm_ch, k, k]
ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, pad_amt)
total_pad = 2 * pad_amt
ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, total_pad)
# set up onnx model
inp = helper.make_tensor_value_info(
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment