diff --git a/src/finn/custom_op/fpgadataflow/__init__.py b/src/finn/custom_op/fpgadataflow/__init__.py
index b3e30a07a96a5590fdb755766c235d2ba99f4caf..d47b687b65d93ec45d936afd91c08c117cf8dbc8 100644
--- a/src/finn/custom_op/fpgadataflow/__init__.py
+++ b/src/finn/custom_op/fpgadataflow/__init__.py
@@ -496,6 +496,14 @@ compilation transformations?
         HLSCustomOp class but has to be filled by every node."""
         pass
 
+    def get_normal_input_shape(self):
+        """Returns normal input shape if implemented."""
+        raise Exception("get_normal_input_shape not implemented for this op")
+
+    def get_normal_output_shape(self):
+        """Returns normal output shape if implemented."""
+        raise Exception("get_normal_output_shape not implemented for this op")
+
     def get_folded_input_shape(self):
         """Returns folded input shape (according to synapse folding), if implemented."""
         raise Exception("get_folded_input_shape not implemented for this op")
diff --git a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
index 1ca2c6d29313eb9d978a6ac0454b9226802f55a5..f666becdbcceca6ca202907610595f8c0069c5a0 100644
--- a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
@@ -146,10 +146,6 @@ class StreamingDataWidthConverter_Batch(HLSCustomOp):
         folded_oshape = self.get_folded_output_shape()
         return np.prod(folded_oshape[:-1])
 
-    def get_number_input_values(self):
-        folded_ishape = self.get_folded_input_shape()
-        return np.prod(folded_ishape[:-1])
-
     def get_instream_width(self):
         in_width = self.get_nodeattr("inWidth")
         return in_width
diff --git a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
index 3757e3a5f1f29a1d6c88ccc73ce3f3715611cbc0..f650442401b49f1ad0a602b6b2ad3e50fbb5e5c2 100644
--- a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
@@ -290,6 +290,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         return out_width
 
     def get_weightstream_width(self):
+        """Returns weight stream width. Used in decoupled mode."""
         pe = self.get_nodeattr("PE")
         simd = self.get_nodeattr("SIMD")
         wp = self.get_weight_datatype().bitwidth()
@@ -297,6 +298,8 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         return w_width
 
     def get_weightstream_width_padded(self):
+        """Returns weight stream width padded to a multiple of 8. This is required
+        by the AXI Stream spec. Used in decoupled mode."""
         weight_width = self.get_weightstream_width()
         return roundup_to_integer_multiple(weight_width, 8)
 
diff --git a/src/finn/custom_op/fpgadataflow/streamingfifo.py b/src/finn/custom_op/fpgadataflow/streamingfifo.py
index 586d38a03f3717d1ea2cffcf7474ca434c9ea505..66190333ce8d71dafba99aaeae4fb2c973d67410 100644
--- a/src/finn/custom_op/fpgadataflow/streamingfifo.py
+++ b/src/finn/custom_op/fpgadataflow/streamingfifo.py
@@ -278,10 +278,6 @@ class StreamingFIFO(HLSCustomOp):
         folded_oshape = self.get_folded_output_shape()
         return np.prod(folded_oshape[:-1])
 
-    def get_number_input_values(self):
-        folded_ishape = self.get_folded_input_shape()
-        return np.prod(folded_ishape[:-1])
-
     def global_includes(self):
         pass