From a02215fc5fb7a34462caf01e222bf1061def3ed0 Mon Sep 17 00:00:00 2001
From: Yaman Umuroglu <maltanar@gmail.com>
Date: Fri, 5 Jun 2020 20:12:54 +0100
Subject: [PATCH] [HLSCustomOp] remove OutputDim as FMPadding attribute, can compute

---
 src/finn/custom_op/fpgadataflow/fmpadding.py  | 23 +++++++++++++++----
 .../fpgadataflow/convert_to_hls_layers.py     |  1 -
 .../test_fpgadataflow_fmpadding.py            |  1 -
 3 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/src/finn/custom_op/fpgadataflow/fmpadding.py b/src/finn/custom_op/fpgadataflow/fmpadding.py
index 675bbfee6..fa321dfa6 100644
--- a/src/finn/custom_op/fpgadataflow/fmpadding.py
+++ b/src/finn/custom_op/fpgadataflow/fmpadding.py
@@ -7,26 +7,39 @@ from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy
 
 
 class FMPadding_Batch(HLSCustomOp):
-    """Class that corresponds to finn-hlslib FMPadding_Batch function.
-    Implements 'same' padding on a given input image."""
+    """Corresponds to finn-hlslib FMPadding_Batch function.
+    Pads input image by given amount."""
 
     def __init__(self, onnx_node):
         super().__init__(onnx_node)
 
     def get_nodeattr_types(self):
         my_attrs = {
+            # spatial size of input images
             "ImgDim": ("i", True, 0),
-            "OutputDim": ("i", True, 0),
+            # total padding (per dimension) to apply
             "Padding": ("i", True, 2),
+            # number of channels in input image
             "NumChannels": ("i", True, 0),
             # FINN input datatype
             "inputDataType": ("s", True, ""),
+            # controls distribution of padded pixels
+            # in case of uneven padding -- see FMPadding fxn
+            # in hlslib
             "PaddingStyle": ("i", False, 2),
+            # shape describing input vecs per execution
             "numInputVectors": ("i", False, 1),
         }
         my_attrs.update(super().get_nodeattr_types())
         return my_attrs
 
+    def get_padded_odim(self):
+        "Return the padded spatial size of the output."
+
+        idim = self.get_nodeattr("ImgDim")
+        pad = self.get_nodeattr("Padding")
+        return idim + pad
+
     def get_normal_input_shape(self):
         idim = self.get_nodeattr("ImgDim")
         num_ch = self.get_nodeattr("NumChannels")
@@ -35,7 +48,7 @@ class FMPadding_Batch(HLSCustomOp):
         return ishape
 
     def get_normal_output_shape(self):
-        odim = self.get_nodeattr("OutputDim")
+        odim = self.get_padded_odim()
         num_ch = self.get_nodeattr("NumChannels")
 
         oshape = (1, odim, odim, num_ch)
@@ -124,7 +137,7 @@ class FMPadding_Batch(HLSCustomOp):
             #define Padding1 {}\n#define NumChannels1 {}\n
             #define PaddingStyle1 {}\n#define numReps {}\n""".format(
                 self.get_nodeattr("ImgDim"),
-                self.get_nodeattr("OutputDim"),
+                self.get_padded_odim(),
                 self.get_nodeattr("Padding"),
                 self.get_nodeattr("NumChannels"),
                 self.get_nodeattr("PaddingStyle"),
diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
index 6f45d498c..d421a5f3e 100644
--- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
+++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
@@ -91,7 +91,6 @@ class InferConvInpGen(Transformation):
                     domain="finn",
                     backend="fpgadataflow",
                     ImgDim=ifm_dim,
-                    OutputDim=odim_padding,
                     Padding=2 * pad,
                     NumChannels=ifm_ch,
                     inputDataType=dt.name,
diff --git a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py
index 76ba9ced5..9d6390b26 100644
--- a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py
+++ b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py
@@ -42,7 +42,6 @@ def make_single_fmpadding_modelwrapper(idim, padding, num_ch, idt, pad_style):
         domain="finn",
         backend="fpgadataflow",
         ImgDim=idim,
-        OutputDim=odim,
         Padding=padding,
         NumChannels=num_ch,
         inputDataType=str(idt.name),
--
GitLab
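For reference, the patch replaces the stored OutputDim node attribute with an on-the-fly computation, odim = ImgDim + Padding, provided by the new get_padded_odim() helper; callers (shape inference, defines generation, the HLS converter, the test) then no longer need to pass a value that could drift out of sync. The snippet below is a minimal standalone sketch of that relationship, assuming a hypothetical SimpleFMPadding class; it is not the FINN FMPadding_Batch implementation, only an illustration of the derived-attribute idea.

# Hypothetical illustration of deriving the padded output dimension
# from ImgDim and Padding; not part of FINN.


class SimpleFMPadding:
    """Toy padding op that stores only the input size and total padding."""

    def __init__(self, img_dim, padding, num_channels):
        self.img_dim = img_dim            # spatial size of the square input image
        self.padding = padding            # total padding per spatial dimension
        self.num_channels = num_channels  # channels in the input image

    def get_padded_odim(self):
        # Same arithmetic as FMPadding_Batch.get_padded_odim in the patch.
        return self.img_dim + self.padding

    def get_normal_output_shape(self):
        # Output shape follows from the derived dimension, so no separate
        # "OutputDim" attribute is needed.
        odim = self.get_padded_odim()
        return (1, odim, odim, self.num_channels)


if __name__ == "__main__":
    pad_op = SimpleFMPadding(img_dim=6, padding=2, num_channels=4)
    assert pad_op.get_padded_odim() == 8
    print(pad_op.get_normal_output_shape())  # (1, 8, 8, 4)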