diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py
index ecfbe92b4b83e2bbc9143b92bee903c2f02b378b..5650d218857a7c7ff86c15ac057c4ebbc18df5ca 100644
--- a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py
+++ b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py
@@ -68,10 +68,6 @@ class FMPadding_rtl(HLSCustomOp):
             "SIMD": ("i", False, 1),
             # FINN input datatype
             "inputDataType": ("s", True, ""),
-            # controls distribution of padded pixels
-            # in case of uneven padding -- see FMPadding fxn
-            # in hlslib
-            "PaddingStyle": ("i", False, 2, {2, 1}),
             # shape describing input vecs per execution
             "numInputVectors": ("i", False, 1),
             # Enable reprogrammable implementation to change FM dimensions,
@@ -136,7 +132,7 @@ class FMPadding_rtl(HLSCustomOp):
         exp_ishape = self.get_normal_input_shape()
         oshape = self.get_normal_output_shape()
         ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
-        assert ishape == exp_ishape, "Unexpect input shape for SameResize."
+        assert ishape == exp_ishape, "Unexpected input shape for FMPadding_rtl."
         return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
@@ -160,7 +156,7 @@ class FMPadding_rtl(HLSCustomOp):
         ret = DataType[self.get_nodeattr("inputDataType")]
         # the hlslib op always pads with zeros, so ensure that the DataType
         # is able to represent zeros
-        assert ret.allowed(0), "FMPadding_Batch DataType must support zero"
+        assert ret.allowed(0), "FMPadding_rtl DataType must support zero"
         return ret
 
     def get_output_datatype(self, ind=0):
diff --git a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py
index 090a0d1e65205f766c7169c1ada8d02c41f0240e..8ab8a7aa4df2554422f9e43319e7e3acc7aaa666 100644
--- a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py
+++ b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py
@@ -125,12 +125,6 @@ def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, idt, mode, impl_style):
     pad_h = pad[0] + pad[2]
     pad_w = pad[1] + pad[3]
 
-    if idim_h == idim_w and pad_h != pad_w and impl_style != "rtl":
-        pytest.skip(
-            """Only equal padding along the dimensions for square images
-            is supported for HLS, skipping"""
-        )
-
     # generate input data
     x = gen_finn_dt_tensor(idt, [1, idim_h, idim_w, num_ch])
     input_dict = {"inp": x}
@@ -150,8 +144,6 @@ def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, idt, mode, impl_style):
         model = model.transform(PrepareIP(test_fpga_part, target_clk_ns))
         model = model.transform(HLSSynthIP())
         model = model.transform(PrepareRTLSim())
-        node = model.get_nodes_by_op_type(optype)[0]
-        inst = getCustomOp(node)
 
     y_produced = oxe.execute_onnx(model, input_dict)["outp"]
     expected_oshape = (1, odim_h, odim_w, num_ch)