From 56f0fc123aafe868a80d9d1722e1db1ce95329dd Mon Sep 17 00:00:00 2001
From: auphelia <jakobapk@web.de>
Date: Fri, 8 May 2020 10:46:53 +0100
Subject: [PATCH] [HLSCustomOp] Refactoring fpgadataflow custom ops by adding
 comments and two functions to base class

---
 src/finn/custom_op/fpgadataflow/__init__.py                | 8 ++++++++
 .../fpgadataflow/streamingdatawidthconverter_batch.py      | 4 ----
 src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py  | 3 +++
 src/finn/custom_op/fpgadataflow/streamingfifo.py           | 4 ----
 4 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/src/finn/custom_op/fpgadataflow/__init__.py b/src/finn/custom_op/fpgadataflow/__init__.py
index b3e30a07a..d47b687b6 100644
--- a/src/finn/custom_op/fpgadataflow/__init__.py
+++ b/src/finn/custom_op/fpgadataflow/__init__.py
@@ -496,6 +496,14 @@ compilation transformations?
         HLSCustomOp class but has to be filled by every node."""
         pass
 
+    def get_normal_input_shape(self):
+        """Returns normal input shape if implemented."""
+        raise Exception("get_normal_input_shape not implemented for this op")
+
+    def get_normal_output_shape(self):
+        """Returns normal output shape if implemented."""
+        raise Exception("get_normal_output_shape not implemented for this op")
+
     def get_folded_input_shape(self):
         """Returns folded input shape (according to synapse folding), if implemented."""
         raise Exception("get_folded_input_shape not implemented for this op")
diff --git a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
index 1ca2c6d29..f666becdb 100644
--- a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
@@ -146,10 +146,6 @@ class StreamingDataWidthConverter_Batch(HLSCustomOp):
         folded_oshape = self.get_folded_output_shape()
         return np.prod(folded_oshape[:-1])
 
-    def get_number_input_values(self):
-        folded_ishape = self.get_folded_input_shape()
-        return np.prod(folded_ishape[:-1])
-
     def get_instream_width(self):
         in_width = self.get_nodeattr("inWidth")
         return in_width
diff --git a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
index 3757e3a5f..f65044240 100644
--- a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
@@ -290,6 +290,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         return out_width
 
     def get_weightstream_width(self):
+        """Returns weight stream width. Used in decoupled mode."""
         pe = self.get_nodeattr("PE")
         simd = self.get_nodeattr("SIMD")
         wp = self.get_weight_datatype().bitwidth()
@@ -297,6 +298,8 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         return w_width
 
     def get_weightstream_width_padded(self):
+        """Returns weight stream width padded to a multiple of 8. This is required
+        by the AXI Stream spec. Used in decoupled mode."""
         weight_width = self.get_weightstream_width()
         return roundup_to_integer_multiple(weight_width, 8)
 
diff --git a/src/finn/custom_op/fpgadataflow/streamingfifo.py b/src/finn/custom_op/fpgadataflow/streamingfifo.py
index 586d38a03..66190333c 100644
--- a/src/finn/custom_op/fpgadataflow/streamingfifo.py
+++ b/src/finn/custom_op/fpgadataflow/streamingfifo.py
@@ -278,10 +278,6 @@ class StreamingFIFO(HLSCustomOp):
         folded_oshape = self.get_folded_output_shape()
         return np.prod(folded_oshape[:-1])
 
-    def get_number_input_values(self):
-        folded_ishape = self.get_folded_input_shape()
-        return np.prod(folded_ishape[:-1])
-
     def global_includes(self):
         pass
 
--
GitLab
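
For context on the padding introduced in get_weightstream_width_padded: AXI Stream
interfaces carry data in whole bytes, so the raw weight stream width
(PE * SIMD * weight bitwidth) is rounded up to the next multiple of 8 before it
reaches the interface. Below is a minimal standalone sketch of that arithmetic,
assuming roundup_to_integer_multiple behaves as its name suggests; the function
definition is a stand-in for FINN's utility helper, and the PE/SIMD/bitwidth
values are hypothetical examples, not taken from the patch.

import math

def roundup_to_integer_multiple(x, factor):
    # Stand-in for FINN's helper: round x up to the nearest multiple of factor.
    return int(math.ceil(x / factor)) * factor

# Hypothetical layer configuration: PE=4, SIMD=3, 1-bit weights.
pe, simd, wbits = 4, 3, 1
weight_width = pe * simd * wbits                            # 12 bits per cycle
padded_width = roundup_to_integer_multiple(weight_width, 8)
print(weight_width, "->", padded_width)                     # 12 -> 16 (byte-aligned)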