diff --git a/docker/finn_entrypoint.sh b/docker/finn_entrypoint.sh
index bfb09f86b8a493a44182450d09029de6486b8fbd..8117a251c4c92a0d19ce8f5fbeba6849a93f8e8f 100644
--- a/docker/finn_entrypoint.sh
+++ b/docker/finn_entrypoint.sh
@@ -12,11 +12,11 @@ gecho () {
 
 # checkout the correct dependency repo commits
 # the repos themselves are cloned in the Dockerfile
-FINN_BASE_COMMIT=8908c6a3f6674c4fa790954bd41c23ee5bf053df
+FINN_BASE_COMMIT=2c08044c5e9011c19911e731a18ac20d775bbf46
 FINN_EXP_COMMIT=e9f97dcdb4db2f889b0f36af079a6a1792b7d4de
 BREVITAS_COMMIT=14abbe1e7ef82485d79415871fcf5766b0a40a00
 CNPY_COMMIT=4e8810b1a8637695171ed346ce68f6984e585ef4
-HLSLIB_COMMIT=2e49322d1bbc4969ca293843bda1f3f9c05456fc
+HLSLIB_COMMIT=4d74baefa79df48b5a0348d63f39a26df075de51
 PYVERILATOR_COMMIT=e2ff74030de3992dcac54bf1b6aad2915946e8cb
 OMX_COMMIT=1bae737669901e762f581af73348332b5c4b2ada
 
diff --git a/src/finn/custom_op/fpgadataflow/__init__.py b/src/finn/custom_op/fpgadataflow/__init__.py
index 068950b89ae543f5a37c28d83d87ecfa605eaab4..a68a2975501806d662e8f0e5fe6519c2fe0f3944 100644
--- a/src/finn/custom_op/fpgadataflow/__init__.py
+++ b/src/finn/custom_op/fpgadataflow/__init__.py
@@ -49,6 +49,9 @@ from finn.custom_op.fpgadataflow.vector_vector_activate_batch import (
 )
 from finn.custom_op.fpgadataflow.channelwise_op_batch import ChannelwiseOp_Batch
 from finn.custom_op.fpgadataflow.iodma import IODMA
+from finn.custom_op.fpgadataflow.streamingdataflowpartition import (
+    StreamingDataflowPartition,
+)
 
 custom_op = dict()
 
@@ -71,3 +74,4 @@ custom_op["DuplicateStreams_Batch"] = DuplicateStreams_Batch
 custom_op["Vector_Vector_Activate_Batch"] = Vector_Vector_Activate_Batch
 custom_op["ChannelwiseOp_Batch"] = ChannelwiseOp_Batch
 custom_op["IODMA"] = IODMA
+custom_op["StreamingDataflowPartition"] = StreamingDataflowPartition
diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py
index 2ab070b2fdc059a554930345a81abc368c29bfa7..c07188430244b635ab6b1ec192337da74550d57d 100644
--- a/src/finn/custom_op/fpgadataflow/hlscustomop.py
+++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py
@@ -38,11 +38,11 @@ from finn.util.basic import (
     roundup_to_integer_multiple,
     get_rtlsim_trace_depth,
 )
-from finn.util.fpgadataflow import (
-    IPGenBuilder,
+from finn.util.pyverilator import (
     pyverilate_get_liveness_threshold_cycles,
     rtlsim_multi_io,
 )
+from finn.util.hls import CallHLS
 from . import templates
 
 try:
@@ -310,11 +310,11 @@ class HLSCustomOp(CustomOp):
         return []
 
     def ipgen_singlenode_code(self):
-        """Builds the bash script for ip generation using the IPGenBuilder from
-        finn.util.fpgadataflow."""
+        """Builds the bash script for ip generation using the CallHLS from
+        finn.util.hls."""
         node = self.onnx_node
         code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
-        builder = IPGenBuilder()
+        builder = CallHLS()
         builder.append_tcl(code_gen_dir + "/hls_syn_{}.tcl".format(node.name))
         builder.set_ipgen_path(code_gen_dir + "/project_{}".format(node.name))
         builder.build(code_gen_dir)
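
The swap from IPGenBuilder to CallHLS keeps the same three-step call sequence: queue the generated Tcl script, point at the expected IP output directory, then run the build. A standalone sketch of that pattern with placeholder paths; the CallHLS implementation itself lives in finn-base, pinned by the FINN_BASE_COMMIT bump at the top of this diff.

```python
# Sketch of the CallHLS usage pattern from ipgen_singlenode_code(); the directory
# and node name below are hypothetical placeholders.
from finn.util.hls import CallHLS

code_gen_dir = "/tmp/code_gen_dir_ipgen_example"
node_name = "StreamingFCLayer_Batch_0"

builder = CallHLS()
builder.append_tcl(code_gen_dir + "/hls_syn_{}.tcl".format(node_name))
builder.set_ipgen_path(code_gen_dir + "/project_{}".format(node_name))
builder.build(code_gen_dir)  # generates and runs the bash script that drives HLS IP generation
```
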
diff --git a/src/finn/custom_op/fpgadataflow/streamingdataflowpartition.py b/src/finn/custom_op/fpgadataflow/streamingdataflowpartition.py
new file mode 100644
index 0000000000000000000000000000000000000000..53446ff1f2aba30e69bf188c1673c738440567fb
--- /dev/null
+++ b/src/finn/custom_op/fpgadataflow/streamingdataflowpartition.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2020 Xilinx, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# * Neither the name of Xilinx nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from finn.custom_op.base import CustomOp
+
+# TODO move StreamingDataflowPartition to HLSCustomOp base class
+
+
+class StreamingDataflowPartition(CustomOp):
+    """Class that corresponds to the meta/container node StreamingDataflowPartition
+    which is a placeholder for a group of fpgadataflow nodes that have been separated
+    out into a FINN-ONNX model of its own. Note that is does not produce any HLS or
+    bitfile by itself."""
+
+    def get_nodeattr_types(self):
+        return {
+            "model": ("s", True, ""),
+            "res_estimate": ("s", False, ""),
+            "res_hls": ("s", False, ""),
+            "res_synth": ("s", False, ""),
+            "slr": ("i", False, -1),
+            "partition_id": ("i", False, 0),
+            "device_id": ("i", False, 0),
+            "mem_port": ("s", False, ""),
+        }
+
+    def make_shape_compatible_op(self, model):
+        pass
+
+    def infer_node_datatype(self, model):
+        pass
+
+    def execute_node(self, context, graph):
+        # TODO add RPC execution with synthesized bitfile?
+        # whole-design rtlsim with PyVerilator may also be an alternative
+        pass
+
+    def verify_node(self):
+        info_messages = []
+
+        # verify number of attributes
+        num_of_attr = 1
+        if len(self.onnx_node.attribute) == num_of_attr:
+            info_messages.append("The number of attributes is correct")
+        else:
+            info_messages.append(
+                """The number of attributes is incorrect,
+            {} should have {} attributes""".format(
+                    self.onnx_node.op_type, num_of_attr
+                )
+            )
+        # verify that all necessary attributes exist
+        try:
+            self.get_nodeattr("model")
+            info_messages.append("All necessary attributes exist")
+        except Exception:
+            info_messages.append(
+                """The necessary attributes do not exist.
+                StreamingDataflowPartition needs the following attribute(s):
+                model"""
+            )
+
+        # verify the number of inputs
+        if len(self.onnx_node.input) >= 1:
+            info_messages.append("The number of inputs is correct")
+        else:
+            info_messages.append("StreamingDataflowPartition needs 1 data input")
+
+        return info_messages
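
Since this container op does no code generation of its own, the quickest way to see what it expects is to wrap a hand-built node and run its checks. A minimal sketch, assuming FINN and onnx are installed; the model path is a placeholder and only the "model" attribute is required per get_nodeattr_types():

```python
# Sketch: wrap a minimal StreamingDataflowPartition node and exercise its checks.
from onnx import helper
from finn.custom_op.fpgadataflow.streamingdataflowpartition import (
    StreamingDataflowPartition,
)

node = helper.make_node(
    "StreamingDataflowPartition",
    ["df_in"],
    ["df_out"],
    domain="finn.custom_op.fpgadataflow",
    model="/tmp/df_model.onnx",  # placeholder path to the partitioned FINN-ONNX model
)
sdp = StreamingDataflowPartition(node)
print(sdp.get_nodeattr("model"))  # -> /tmp/df_model.onnx
for msg in sdp.verify_node():
    print(msg)
```
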
diff --git a/src/finn/transformation/fpgadataflow/create_dataflow_partition.py b/src/finn/transformation/fpgadataflow/create_dataflow_partition.py
index 56bfb4306e555c716a9156d6f0949c339193eb38..419a6d8c494651862f55e63e6829a61fe8040599 100644
--- a/src/finn/transformation/fpgadataflow/create_dataflow_partition.py
+++ b/src/finn/transformation/fpgadataflow/create_dataflow_partition.py
@@ -148,7 +148,7 @@ class CreateDataflowPartition(Transformation):
                     [df_out],
                     # use the model attribute to mark the df model
                     model=df_model_filename,
-                    domain="finn.custom_op.general",
+                    domain="finn.custom_op.fpgadataflow",
                     partition_id=target_partition_id,
                     slr=slr,
                     mem_port=mem_port,
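
The domain string now has to name the module where the op class is registered, i.e. finn.custom_op.fpgadataflow from the __init__.py change above, rather than finn.custom_op.general. An illustrative sketch of that resolution, assuming the finn-base registry imports node.domain as a module path and indexes its custom_op dict (the actual lookup code lives in finn-base, not in this diff):

```python
# Illustration only: why the domain change matters for op resolution.
import importlib
from onnx import helper

node = helper.make_node(
    "StreamingDataflowPartition",
    ["df_in"],
    ["df_out"],
    domain="finn.custom_op.fpgadataflow",  # the value this hunk switches to
    model="/tmp/df_model.onnx",            # placeholder; other attrs omitted
)
opset = importlib.import_module(node.domain)
inst = opset.custom_op[node.op_type](node)  # resolves to StreamingDataflowPartition
```
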
diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py
index f7d59978d8f8866aefb3028d570bb6b434df33b4..ea27eee04db6f90b50a58296ceaf6f6ed58602ac 100644
--- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py
+++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py
@@ -39,8 +39,8 @@ from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP
 from finn.transformation.fpgadataflow.insert_dwc import InsertDWC
 from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO
 from finn.transformation.general import GiveUniqueNodeNames, GiveReadableTensorNames
-from finn.util.fpgadataflow import pyverilate_stitched_ip, is_fpgadataflow_node
-from finn.util.pyverilator import reset_rtlsim, toggle_clk
+from finn.util.fpgadataflow import is_fpgadataflow_node
+from finn.util.pyverilator import pyverilate_stitched_ip, reset_rtlsim, toggle_clk
 
 
 def reset_implementation(node):
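
pyverilate_stitched_ip now comes from finn.util.pyverilator together with reset_rtlsim and toggle_clk, while is_fpgadataflow_node stays in finn.util.fpgadataflow. A rough sketch of how these rtlsim helpers are typically combined; treat the call signatures as assumptions beyond what the imports above show:

```python
# Sketch (assumed usage): elaborate the stitched IP, reset it, and clock it a few times.
from finn.util.pyverilator import pyverilate_stitched_ip, reset_rtlsim, toggle_clk


def warm_up_rtlsim(model, n_cycles=10):
    """model: a ModelWrapper whose stitched IP has already been built."""
    sim = pyverilate_stitched_ip(model)
    reset_rtlsim(sim)
    for _ in range(n_cycles):
        toggle_clk(sim)
    return sim
```
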
diff --git a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py
index 23d7610dfdf434602f326e1117b072f312962295..4fa780548a544d92e02b28486ae1e325ff1f9a9b 100644
--- a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py
+++ b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py
@@ -52,7 +52,7 @@ from finn.util.basic import (
     alveo_part_map,
     alveo_default_platform,
 )
-from finn.util.fpgadataflow import pyverilate_stitched_ip
+from finn.util.pyverilator import pyverilate_stitched_ip
 from finn.util.test import load_test_checkpoint_or_skip
 from finn.transformation.fpgadataflow.synth_ooc import SynthOutOfContext
 from finn.transformation.infer_data_layouts import InferDataLayouts