diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py
index ee65326ec57fb7fa7fa0490a8980dbabb8efc13c..22c356a5869b25fcc7ae3ef0164ed61b53ef232c 100644
--- a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py
+++ b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py
@@ -5,10 +5,15 @@ import pytest
 from finn.core.datatype import DataType
 from finn.transformation.infer_shapes import InferShapes
 from finn.transformation.infer_datatypes import InferDataTypes
-from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames
-from finn.transformation.infer_data_layouts import InferDataLayouts
+from finn.transformation.general import GiveUniqueNodeNames
 from finn.transformation.lower_convs_to_matmul import LowerConvsToMatMul
 
+from finn.transformation.fpgadataflow.prepare_ip import PrepareIP
+from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim
+from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP
+from finn.transformation.fpgadataflow.replace_verilog_relpaths import (
+    ReplaceVerilogRelPaths,
+)
 import finn.core.onnx_exec as oxe
 from finn.core.modelwrapper import ModelWrapper
 from finn.util.basic import gen_finn_dt_tensor
@@ -17,47 +22,40 @@ import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls
 from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim
 from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim
 from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode
+from finn.custom_op.im2col import compute_conv_output_dim
 
+# conv_config: (kernel_size, stride, pad)
 
-@pytest.mark.parametrize("padding", [True, False])
-@pytest.mark.parametrize("kernel_size", [3, 5])
+
+@pytest.mark.parametrize(
+    "conv_config", [(1, 2, 0), (1, 3, 0), (3, 2, 1), (3, 1, 0), (3, 1, 1), (5, 2, 1)]
+)
+@pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
 @pytest.mark.slow
 @pytest.mark.vivado
-def test_convert_to_hls_conv_layer(padding, kernel_size):
-
-    assert (
-        kernel_size % 2 != 0
-    ), """test_convert_to_hls_conv_layer test only
-    supports odd kernel_size"""
-
+def test_convert_to_hls_conv_layer(conv_config, exec_mode):
+    kernel_size, stride, pad = conv_config
     np.random.seed(0)
-    padding = True
     idt = DataType.UINT4
 
     in_feature_dim = 7
-    in_chn = 3
+    in_chn = 16
+    out_chn = 20
 
-    stages = 1  # just one convolution
-
-    out_feature_dim = (
-        in_feature_dim if padding else in_feature_dim - (kernel_size // 2 * 2) * stages
-    )
+    out_feature_dim = compute_conv_output_dim(in_feature_dim, kernel_size, stride, pad)
 
     input_shape = [1, in_chn, in_feature_dim, in_feature_dim]
-    output_shape = [1, in_chn, out_feature_dim, out_feature_dim]
+    output_shape = [1, out_chn, out_feature_dim, out_feature_dim]
 
-    conv_param_shape = [in_chn, in_chn, kernel_size, kernel_size]
+    conv_param_shape = [out_chn, in_chn, kernel_size, kernel_size]
+    conv_weight_dt = DataType.UINT4
 
     conv_config = {}
     conv_config["dilations"] = [1, 1]
     conv_config["group"] = 1
     conv_config["kernel_shape"] = [kernel_size, kernel_size]
-    if padding:
-        pad = kernel_size // 2
-        conv_config["pads"] = [pad, pad, pad, pad]
-    else:
-        conv_config["pads"] = [0, 0, 0, 0]
-    conv_config["strides"] = [1, 1]
+    conv_config["pads"] = [pad, pad, pad, pad]
+    conv_config["strides"] = [stride, stride]
 
     top_in = helper.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape)
     top_out = helper.make_tensor_value_info("top_out", TensorProto.FLOAT, output_shape)
@@ -80,27 +78,39 @@ def test_convert_to_hls_conv_layer(padding, kernel_size):
     model = ModelWrapper(modelproto)
     model.set_tensor_datatype("top_in", idt)
     model.set_tensor_datatype("top_out", idt)
-    model.set_tensor_datatype("p1", DataType.UINT4)
+    model.set_tensor_datatype("p1", conv_weight_dt)
+    model.set_initializer("p1", gen_finn_dt_tensor(conv_weight_dt, conv_param_shape))
 
     model = model.transform(InferShapes())
-    model.set_initializer(
-        "p1", np.round(np.random.rand(*conv_param_shape).astype(np.float32) * 16)
-    )
-
-    model.set_tensor_datatype(model.graph.input[0].name, idt)
-    model = model.transform(InferShapes())
-    model = model.transform(InferDataLayouts())
-    model = model.transform(GiveUniqueNodeNames())
-    model = model.transform(GiveReadableTensorNames())
     model = model.transform(InferDataTypes())
 
     new_model = model.transform(LowerConvsToMatMul())
     new_model = new_model.transform(to_hls.InferConvInpGen())
 
-    new_model = new_model.transform(PrepareCppSim())
-    new_model = new_model.transform(CompileCppSim())
-    new_model = new_model.transform(SetExecMode("cppsim"))
+    new_model = new_model.transform(GiveUniqueNodeNames())
+    new_model = new_model.transform(InferShapes())
+    new_model = new_model.transform(InferDataTypes())
+
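+    # prepare the model for the chosen execution backend
+    # (C++ simulation of the HLS code or node-by-node RTL simulation)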
+    if exec_mode == "cppsim":
+        new_model = new_model.transform(PrepareCppSim())
+        new_model = new_model.transform(CompileCppSim())
+        new_model = new_model.transform(SetExecMode("cppsim"))
+    elif exec_mode == "rtlsim":
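+        # generate the HLS IP, synthesize it, fix up Verilog paths and build the rtlsim model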
+        new_model = new_model.transform(SetExecMode("rtlsim"))
+        new_model = new_model.transform(GiveUniqueNodeNames())
+        new_model = new_model.transform(PrepareIP("xc7z020clg400-1", 5))
+        new_model = new_model.transform(HLSSynthIP())
+        new_model = new_model.transform(ReplaceVerilogRelPaths())
+        new_model = new_model.transform(PrepareRTLSim())
+    else:
+        raise Exception("Unknown exec_mode")
 
     x = gen_finn_dt_tensor(idt, input_shape)
     inp_dict = {model.graph.input[0].name: x}
     assert oxe.compare_execution(model, new_model, inp_dict)
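+    # a 1x1 kernel with stride > 1 and no padding needs no sliding-window buffer,
+    # so InferConvInpGen should have lowered the Im2Col to a DownSampler node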
+    if kernel_size == 1 and stride > 1 and pad == 0:
+        assert new_model.graph.node[1].op_type == "DownSampler"