diff --git a/src/finn/custom_op/fpgadataflow/__init__.py b/src/finn/custom_op/fpgadataflow/__init__.py
index e0cf22bcb7cd98843cb2ab254e9c74027aed533f..f9d2c6ba5e837990d9a1995f0dba5660b5ea273e 100644
--- a/src/finn/custom_op/fpgadataflow/__init__.py
+++ b/src/finn/custom_op/fpgadataflow/__init__.py
@@ -41,14 +41,13 @@ class HLSCustomOp(CustomOp):
         self.code_gen_dir = util.get_by_name(onnx_node.attribute, "code_gen_dir")
         self.executable_path = ""
 
+    def get_nodeattr_types(self):
+        return {"code_gen_dir": ("s", False, ""), "executable_path": ("s", False, "")}
+
     def code_generation(self, model):
         node = self.onnx_node
-        if node.op_type == "StreamingFCLayer_Batch":
-            self.generate_weights(model)
-            try:
-                self.generate_thresholds(model)
-            except:
-                pass
+        self.generate_weights(model)
+        self.generate_thresholds(model)
         self.global_includes()
         self.defines()
         self.read_npy_data()
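
The hunk above drops the StreamingFCLayer_Batch special case (and its bare except) from code_generation and moves the shared code_gen_dir/executable_path attributes into the base class. A minimal sketch of the resulting template-method shape, assuming generate_weights and generate_thresholds are hooks that every subclass implements; the class names below are illustrative, not FINN code:

    from abc import ABC, abstractmethod

    class HLSCustomOpSketch(ABC):
        def code_generation(self, model):
            # same sequence for every op type; no more op_type special cases
            self.generate_weights(model)
            self.generate_thresholds(model)
            # ... followed by global_includes(), defines(), read_npy_data(), etc.

        @abstractmethod
        def generate_weights(self, model):
            pass

        @abstractmethod
        def generate_thresholds(self, model):
            pass

    class PoolLikeOpSketch(HLSCustomOpSketch):
        # an op with no weights and no activation provides empty hooks
        def generate_weights(self, model):
            pass

        def generate_thresholds(self, model):
            pass
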
diff --git a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
index 0487b9a2bfb6716156b4ba37ffcbc4c042523c4f..3e06a3e516ab0fde9a8da8fd319fb598ff806d0b 100644
--- a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
@@ -15,7 +15,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         super().__init__(onnx_node)
 
     def get_nodeattr_types(self):
-        return {
+        my_attrs = {
             "WMEM": ("i", True, 0),
             "TMEM": ("i", True, 0),
             "PE": ("i", True, 0),
@@ -29,6 +29,8 @@ class StreamingFCLayer_Batch(HLSCustomOp):
             "weightDataType": ("s", True, ""),
             "outputDataType": ("s", True, ""),
         }
+        my_attrs.update(super().get_nodeattr_types())
+        return my_attrs
 
     def make_shape_compatible_op(self):
         pass
@@ -186,32 +188,33 @@ class StreamingFCLayer_Batch(HLSCustomOp):
 
     def generate_thresholds(self, model):
         thresholds = model.get_initializer(self.onnx_node.input[2])
-        threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds)
-        tdt = DataType.INT32
-        # use UINT32 threshold export for bipolar times bipolar
-        inp_is_bipolar = self.get_input_datatype() == DataType.BIPOLAR
-        wt_is_bipolar = self.get_weight_datatype() == DataType.BIPOLAR
-        if inp_is_bipolar and wt_is_bipolar:
-            tdt = DataType.UINT32
-        thresholds_hls_code = numpy_to_hls_code(
-            threshold_tensor, tdt, "thresholds", False, True
-        )
-        # write thresholds into thresh.h
-        f_thresh = open("{}/thresh.h".format(self.tmp_dir), "w")
-        tdt_hls = tdt.get_hls_datatype_str()
-        odt_hls = self.get_output_datatype().get_hls_datatype_str()
-        f_thresh.write(
-            "static ThresholdsActivation<{},{},{},{},{},{}> threshs = ".format(
-                self.get_nodeattr("TMEM"),
-                self.get_nodeattr("PE"),
-                threshold_tensor.shape[-1],
-                tdt_hls,
-                odt_hls,
-                self.get_nodeattr("ActVal"),
+        if thresholds is not None:
+            threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds)
+            tdt = DataType.INT32
+            # use UINT32 threshold export for bipolar times bipolar
+            inp_is_bipolar = self.get_input_datatype() == DataType.BIPOLAR
+            wt_is_bipolar = self.get_weight_datatype() == DataType.BIPOLAR
+            if inp_is_bipolar and wt_is_bipolar:
+                tdt = DataType.UINT32
+            thresholds_hls_code = numpy_to_hls_code(
+                threshold_tensor, tdt, "thresholds", False, True
             )
-        )
-        f_thresh.write(thresholds_hls_code)
-        f_thresh.close()
+            # write thresholds into thresh.h
+            f_thresh = open("{}/thresh.h".format(self.tmp_dir), "w")
+            tdt_hls = tdt.get_hls_datatype_str()
+            odt_hls = self.get_output_datatype().get_hls_datatype_str()
+            f_thresh.write(
+                "static ThresholdsActivation<{},{},{},{},{},{}> threshs = ".format(
+                    self.get_nodeattr("TMEM"),
+                    self.get_nodeattr("PE"),
+                    threshold_tensor.shape[-1],
+                    tdt_hls,
+                    odt_hls,
+                    self.get_nodeattr("ActVal"),
+                )
+            )
+            f_thresh.write(thresholds_hls_code)
+            f_thresh.close()
 
     def execute_node(self, context, graph):
         node = self.onnx_node
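
The rewritten generate_thresholds above only emits thresh.h when the node actually carries a thresholds initializer, so nodes without an activation no longer depend on the bare except that was removed from the base class. The same guard written as an early return with a context manager, as a sketch only; it mirrors the names in the hunk but is not the code this diff applies:

    def generate_thresholds(self, model):
        thresholds = model.get_initializer(self.onnx_node.input[2])
        if thresholds is None:
            # node has no threshold initializer (no activation), nothing to emit
            return
        threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds)
        tdt = DataType.INT32
        # use UINT32 threshold export for bipolar times bipolar
        inp_is_bipolar = self.get_input_datatype() == DataType.BIPOLAR
        wt_is_bipolar = self.get_weight_datatype() == DataType.BIPOLAR
        if inp_is_bipolar and wt_is_bipolar:
            tdt = DataType.UINT32
        thresholds_hls_code = numpy_to_hls_code(
            threshold_tensor, tdt, "thresholds", False, True
        )
        # write thresholds into thresh.h, closing the file even on error
        with open("{}/thresh.h".format(self.tmp_dir), "w") as f_thresh:
            f_thresh.write(
                "static ThresholdsActivation<{},{},{},{},{},{}> threshs = ".format(
                    self.get_nodeattr("TMEM"),
                    self.get_nodeattr("PE"),
                    threshold_tensor.shape[-1],
                    tdt.get_hls_datatype_str(),
                    self.get_output_datatype().get_hls_datatype_str(),
                    self.get_nodeattr("ActVal"),
                )
            )
            f_thresh.write(thresholds_hls_code)
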
diff --git a/src/finn/custom_op/fpgadataflow/streamingmaxpool.py b/src/finn/custom_op/fpgadataflow/streamingmaxpool.py
index c0878ded1888d0a45f051c69a8e22d8b5a9dda91..ee35b476a32fe1f28316e355cd523e288325f944 100644
--- a/src/finn/custom_op/fpgadataflow/streamingmaxpool.py
+++ b/src/finn/custom_op/fpgadataflow/streamingmaxpool.py
@@ -9,11 +9,13 @@ from finn.custom_op.fpgadataflow import HLSCustomOp
 
 class StreamingMaxPool(HLSCustomOp):
     def get_nodeattr_types(self):
-        return {
+        my_attrs = {
             "ImgDim": ("i", True, 0),
             "PoolDim": ("i", True, 0),
             "NumChannels": ("i", True, 0),
         }
+        my_attrs.update(super().get_nodeattr_types())
+        return my_attrs
 
     def make_shape_compatible_op(self):
         pass
diff --git a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
index 3185e0502a37d19d7659af48482c0be21079509d..e21ffeae195957ba4fc73cd3a62fa90bd6e3fd50 100644
--- a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
@@ -9,11 +9,13 @@ from finn.custom_op.fpgadataflow import HLSCustomOp
 
 class StreamingMaxPool_Batch(HLSCustomOp):
     def get_nodeattr_types(self):
-        return {
+        my_attrs = {
             "ImgDim": ("i", True, 0),
             "PoolDim": ("i", True, 0),
             "NumChannels": ("i", True, 0),
         }
+        my_attrs.update(super().get_nodeattr_types())
+        return my_attrs
 
     def make_shape_compatible_op(self):
         pass
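
All three op classes now build their own attribute dictionary and merge in the base attributes via dict.update, so every fpgadataflow op also exposes code_gen_dir and executable_path. Since update lets the base entries win on a key collision, op-specific attribute names must stay disjoint from the base ones. A quick illustration of the merged dictionary for StreamingMaxPool, using only the keys visible in this diff:

    base_attrs = {
        "code_gen_dir": ("s", False, ""),
        "executable_path": ("s", False, ""),
    }
    my_attrs = {
        "ImgDim": ("i", True, 0),
        "PoolDim": ("i", True, 0),
        "NumChannels": ("i", True, 0),
    }
    my_attrs.update(base_attrs)
    assert set(my_attrs) == {
        "ImgDim",
        "PoolDim",
        "NumChannels",
        "code_gen_dir",
        "executable_path",
    }
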