From 87127a994f45da9f2e0620d197f24655e769abfb Mon Sep 17 00:00:00 2001
From: Yaman Umuroglu <maltanar@gmail.com>
Date: Thu, 24 Sep 2020 22:27:57 +0200
Subject: [PATCH] [Refactor] remove unused init files + those moved to
 finn-base

---
 src/__init__.py                               |  27 -
 src/finn/__init__.py                          |  39 --
 src/finn/analysis/__init__.py                 |  36 --
 src/finn/analysis/fpgadataflow/__init__.py    |   0
 src/finn/analysis/topology.py                 | 104 ----
 src/finn/core/__init__.py                     |  27 -
 src/finn/core/data_layout.py                  |  35 --
 src/finn/core/datatype.py                     | 229 -------
 src/finn/core/execute_custom_node.py          |  42 --
 src/finn/core/modelwrapper.py                 | 585 ------------------
 src/finn/core/onnx_exec.py                    | 266 --------
 src/finn/core/remote_exec.py                  | 119 ----
 src/finn/core/rtlsim_exec.py                  | 194 ------
 src/finn/core/throughput_test.py              | 157 -----
 src/finn/custom_op/debugmarker.py             |  66 --
 src/finn/custom_op/im2col.py                  | 203 ------
 src/finn/custom_op/maxpoolnhwc.py             | 126 ----
 src/finn/custom_op/multithreshold.py          | 176 ------
 src/finn/custom_op/quantavgpool2d.py          | 136 ----
 src/finn/custom_op/registry.py                | 100 ---
 .../custom_op/streamingdataflowpartition.py   |  96 ---
 src/finn/custom_op/xnorpopcount.py            | 131 ----
 src/finn/data/onnx/mnist-conv/model.onnx      | Bin 26454 -> 0 bytes
 .../mnist-conv/test_data_set_0/input_0.pb     | Bin 3149 -> 0 bytes
 .../mnist-conv/test_data_set_0/output_0.pb    |   2 -
 src/finn/transformation/__init__.py           | 115 ----
 .../transformation/batchnorm_to_affine.py     | 114 ----
 src/finn/transformation/bipolar_to_xnor.py    | 153 -----
 src/finn/transformation/change_datalayout.py  | 110 ----
 .../transformation/double_to_single_float.py  |  45 --
 src/finn/transformation/fold_constants.py     |  62 --
 .../transformation/fpgadataflow/__init__.py   |  27 -
 src/finn/transformation/general.py            | 257 --------
 src/finn/transformation/infer_data_layouts.py | 127 ----
 src/finn/transformation/infer_datatypes.py    |  96 ---
 src/finn/transformation/infer_shapes.py       |  90 ---
 src/finn/transformation/insert_topk.py        |  96 ---
 .../transformation/lower_convs_to_matmul.py   | 171 -----
 src/finn/transformation/merge_onnx_models.py  | 164 -----
 src/finn/util/__init__.py                     |  27 -
 src/finn/util/basic.py                        | 442 -------------
 src/finn/util/create.py                       | 178 ------
 src/finn/util/data_packing.py                 | 397 ------------
 src/finn/util/fpgadataflow.py                 | 218 -------
 src/finn/util/onnx.py                         |  75 ---
 tests/analysis/test_is_linear.py              |  85 ---
 tests/analysis/test_topology_checks.py        | 205 ------
 tests/core/test_basic_onnx_exec.py            |  98 ---
 tests/core/test_custom_onnx_exec.py           | 277 ---------
 tests/core/test_datatypes.py                  |  75 ---
 tests/core/test_mixed_onnx_exec.py            | 252 --------
 tests/core/test_modelwrapper.py               | 176 ------
 tests/custom_op/test_im2col.py                | 320 ----------
 tests/custom_op/test_multithreshold.py        | 322 ----------
 tests/custom_op/test_xnorpopcountmatmul.py    | 108 ----
 .../transformation/test_change_datalayout.py  | 112 ----
 .../test_general_transformation.py            | 120 ----
 tests/transformation/test_infer_shapes.py     |  89 ---
 .../transformation/test_merge_onnx_models.py  | 126 ----
 tests/transformation/test_renaming.py         |  75 ---
 tests/transformation/test_sort_graph.py       | 150 -----
 tests/util/test_create.py                     |  64 --
 tests/util/test_gen_finn_dt_tensor.py         | 105 ----
 tests/util/test_padding.py                    |  57 --
 tests/util/test_rtlsim2npy.py                 | 107 ----
 tests/util/test_shape_utils.py                |  41 --
 66 files changed, 8824 deletions(-)
 delete mode 100644 src/__init__.py
 delete mode 100644 src/finn/__init__.py
 delete mode 100644 src/finn/analysis/__init__.py
 delete mode 100644 src/finn/analysis/fpgadataflow/__init__.py
 delete mode 100644 src/finn/analysis/topology.py
 delete mode 100644 src/finn/core/__init__.py
 delete mode 100644 src/finn/core/data_layout.py
 delete mode 100644 src/finn/core/datatype.py
 delete mode 100644 src/finn/core/execute_custom_node.py
 delete mode 100644 src/finn/core/modelwrapper.py
 delete mode 100644 src/finn/core/onnx_exec.py
 delete mode 100644 src/finn/core/remote_exec.py
 delete mode 100644 src/finn/core/rtlsim_exec.py
 delete mode 100644 src/finn/core/throughput_test.py
 delete mode 100644 src/finn/custom_op/debugmarker.py
 delete mode 100644 src/finn/custom_op/im2col.py
 delete mode 100644 src/finn/custom_op/maxpoolnhwc.py
 delete mode 100644 src/finn/custom_op/multithreshold.py
 delete mode 100644 src/finn/custom_op/quantavgpool2d.py
 delete mode 100644 src/finn/custom_op/registry.py
 delete mode 100644 src/finn/custom_op/streamingdataflowpartition.py
 delete mode 100644 src/finn/custom_op/xnorpopcount.py
 delete mode 100644 src/finn/data/onnx/mnist-conv/model.onnx
 delete mode 100644 src/finn/data/onnx/mnist-conv/test_data_set_0/input_0.pb
 delete mode 100644 src/finn/data/onnx/mnist-conv/test_data_set_0/output_0.pb
 delete mode 100644 src/finn/transformation/__init__.py
 delete mode 100644 src/finn/transformation/batchnorm_to_affine.py
 delete mode 100644 src/finn/transformation/bipolar_to_xnor.py
 delete mode 100644 src/finn/transformation/change_datalayout.py
 delete mode 100644 src/finn/transformation/double_to_single_float.py
 delete mode 100644 src/finn/transformation/fold_constants.py
 delete mode 100644 src/finn/transformation/fpgadataflow/__init__.py
 delete mode 100644 src/finn/transformation/general.py
 delete mode 100644 src/finn/transformation/infer_data_layouts.py
 delete mode 100644 src/finn/transformation/infer_datatypes.py
 delete mode 100644 src/finn/transformation/infer_shapes.py
 delete mode 100644 src/finn/transformation/insert_topk.py
 delete mode 100644 src/finn/transformation/lower_convs_to_matmul.py
 delete mode 100644 src/finn/transformation/merge_onnx_models.py
 delete mode 100644 src/finn/util/__init__.py
 delete mode 100644 src/finn/util/basic.py
 delete mode 100644 src/finn/util/create.py
 delete mode 100644 src/finn/util/data_packing.py
 delete mode 100644 src/finn/util/fpgadataflow.py
 delete mode 100644 src/finn/util/onnx.py
 delete mode 100644 tests/analysis/test_is_linear.py
 delete mode 100644 tests/analysis/test_topology_checks.py
 delete mode 100644 tests/core/test_basic_onnx_exec.py
 delete mode 100644 tests/core/test_custom_onnx_exec.py
 delete mode 100644 tests/core/test_datatypes.py
 delete mode 100644 tests/core/test_mixed_onnx_exec.py
 delete mode 100644 tests/core/test_modelwrapper.py
 delete mode 100644 tests/custom_op/test_im2col.py
 delete mode 100644 tests/custom_op/test_multithreshold.py
 delete mode 100644 tests/custom_op/test_xnorpopcountmatmul.py
 delete mode 100644 tests/transformation/test_change_datalayout.py
 delete mode 100644 tests/transformation/test_general_transformation.py
 delete mode 100644 tests/transformation/test_infer_shapes.py
 delete mode 100644 tests/transformation/test_merge_onnx_models.py
 delete mode 100644 tests/transformation/test_renaming.py
 delete mode 100644 tests/transformation/test_sort_graph.py
 delete mode 100644 tests/util/test_create.py
 delete mode 100644 tests/util/test_gen_finn_dt_tensor.py
 delete mode 100644 tests/util/test_padding.py
 delete mode 100644 tests/util/test_rtlsim2npy.py
 delete mode 100644 tests/util/test_shape_utils.py

diff --git a/src/__init__.py b/src/__init__.py
deleted file mode 100644
index 83c8e8bed..000000000
--- a/src/__init__.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/finn/__init__.py b/src/finn/__init__.py
deleted file mode 100644
index 76b4dacdd..000000000
--- a/src/finn/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# -*- coding: utf-8 -*-
-from pkg_resources import get_distribution, DistributionNotFound
-
-try:
-    # Change here if project is renamed and does not equal the package name
-    dist_name = "FINN"
-    __version__ = get_distribution(dist_name).version
-except DistributionNotFound:
-    __version__ = "unknown"
-finally:
-    del get_distribution, DistributionNotFound
diff --git a/src/finn/analysis/__init__.py b/src/finn/analysis/__init__.py
deleted file mode 100644
index c3f810e6f..000000000
--- a/src/finn/analysis/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""
-How to write an analysis pass for FINN
---------------------------------------
-
-An analysis pass traverses the graph structure and produces information about
-certain properties. The convention is to take in a ModelWrapper, and return
-a dictionary of named properties that the analysis extracts.
-"""
diff --git a/src/finn/analysis/fpgadataflow/__init__.py b/src/finn/analysis/fpgadataflow/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/src/finn/analysis/topology.py b/src/finn/analysis/topology.py
deleted file mode 100644
index acdb8ed7f..000000000
--- a/src/finn/analysis/topology.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-
-
-def is_linear(model):
-    """Checks whether the given model graph is linear. This is done by looking
-    at the fan-out of each tensor. All tensors have a fan-out <= 1 in a linear
-    graph.
-
-    Returns {"is_linear": Bool}."""
-    per_tensor_fanouts = get_per_tensor_fanouts(model)
-    # check for tensors that have fanout > 1
-    multi_fanouts = list(filter(lambda x: x[1] > 1, per_tensor_fanouts.items()))
-    return {"is_linear": len(multi_fanouts) == 0}
-
-
-def get_per_tensor_fanouts(model):
-    """Returns a dictionary of {tensor_name: tensor_fanout} for the model."""
-    # make execution context to get a list of tensors
-    per_tensor_fanouts = model.make_empty_exec_context()
-    # replace every tensor with its fanout
-    for tensor_name in per_tensor_fanouts.keys():
-        per_tensor_fanouts[tensor_name] = model.get_tensor_fanout(tensor_name)
-    return per_tensor_fanouts
-
-
-def all_tensors_f32(model):
-    """Checks whether all tensors have a float32 dtype, extra quantization
-    annotations notwithstanding.
-
-    Returns {"all_tensors_f32": Bool}."""
-    all_tensors = model.make_empty_exec_context().items()
-    non_f32_tensors = filter(lambda x: x[1].dtype != np.float32, all_tensors)
-    return {"all_tensors_f32": len(list(non_f32_tensors)) == 0}
-
-
-def node_inputs_in_expected_order(model):
-    """Verifies that the node inputs are ordered in the way that FINN expects
-    them. When a node has a mixture of static (= constant, initialized) inputs
-    and dynamic inputs, the dynamic input should come first, followed by the
-    static one. Only verifiable for a small subset of op_types for now.
-
-    Returns {"node_inputs_in_expected_order": Bool}."""
-    op_types = ["MatMul", "Conv", "Add", "Mul"]
-    nodes = filter(lambda x: x.op_type in op_types, model.graph.node)
-    all_OK = True
-    for n in nodes:
-        all_OK = all_OK and len(list(n.input)) == 2
-        # input 0 should be dynamic, no initializer
-        all_OK = all_OK and (model.get_initializer(n.input[0]) is None)
-        # input 1 should be static (unless eltwise add)
-        if n.op_type != "Add":
-            all_OK = all_OK and (model.get_initializer(n.input[1]) is not None)
-    return {"node_inputs_in_expected_order": all_OK}
-
-
-def nodes_topologically_sorted(model):
-    """Verifies that graph.node is topologically sorted. This is required by the
-    ONNX specification.
-
-    Returns {"nodes_topologically_sorted": Bool}."""
-
-    # get successors of every node and check that
-    # successor index > current node index
-
-    all_OK = True
-    for n in model.graph.node:
-        successors = model.find_direct_successors(n)
-        if successors is not None:
-            for successor in successors:
-                # check the condition by looking for a violation
-                index_n = model.get_node_index(n)
-                index_suc = model.get_node_index(successor)
-                if index_n > index_suc:
-                    all_OK = False
-
-    return {"nodes_topologically_sorted": all_OK}
diff --git a/src/finn/core/__init__.py b/src/finn/core/__init__.py
deleted file mode 100644
index 83c8e8bed..000000000
--- a/src/finn/core/__init__.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/finn/core/data_layout.py b/src/finn/core/data_layout.py
deleted file mode 100644
index 3971d2215..000000000
--- a/src/finn/core/data_layout.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# predefined lists of strings to have a canonical way of expressing data
-# layout annotations
-
-NHWC = ["N", "H", "W", "C"]
-NCHW = ["N", "C", "H", "W"]
-NC = ["N", "C"]
-UNKNOWN = []
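These constants pair with the layout annotation helpers on ModelWrapper (set_tensor_layout / get_tensor_layout further down in this patch). Illustrative use, with a hypothetical tensor name:

import finn.core.data_layout as data_layout

model.set_tensor_layout("act0", data_layout.NHWC)  # annotate as [N, H, W, C]
assert model.get_tensor_layout("act0") == ["N", "H", "W", "C"]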
diff --git a/src/finn/core/datatype.py b/src/finn/core/datatype.py
deleted file mode 100644
index df895a1ad..000000000
--- a/src/finn/core/datatype.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-from enum import Enum, auto
-
-import numpy as np
-
-
-class DataType(Enum):
-    """Enum class that contains FINN data types to set the quantization annotation.
-    ONNX does not support data types smaller than 8-bit integers, whereas in FINN we are
-    interested in smaller integers down to ternary and bipolar.
-
-    Assignment of DataTypes to indices based on following ordering:
-
-    * unsigned to signed
-
-    * fewer to more bits
-
-    Currently supported DataTypes: """
-
-    # important: the get_smallest_possible() member function is dependent on ordering.
-    BINARY = auto()
-    UINT2 = auto()
-    UINT3 = auto()
-    UINT4 = auto()
-    UINT5 = auto()
-    UINT6 = auto()
-    UINT7 = auto()
-    UINT8 = auto()
-    UINT9 = auto()
-    UINT10 = auto()
-    UINT11 = auto()
-    UINT12 = auto()
-    UINT13 = auto()
-    UINT14 = auto()
-    UINT15 = auto()
-    UINT16 = auto()
-    UINT17 = auto()
-    UINT18 = auto()
-    UINT19 = auto()
-    UINT20 = auto()
-    UINT21 = auto()
-    UINT22 = auto()
-    UINT23 = auto()
-    UINT24 = auto()
-    UINT25 = auto()
-    UINT26 = auto()
-    UINT27 = auto()
-    UINT28 = auto()
-    UINT29 = auto()
-    UINT30 = auto()
-    UINT31 = auto()
-    UINT32 = auto()
-    UINT64 = auto()
-    BIPOLAR = auto()
-    TERNARY = auto()
-    INT2 = auto()
-    INT3 = auto()
-    INT4 = auto()
-    INT5 = auto()
-    INT6 = auto()
-    INT7 = auto()
-    INT8 = auto()
-    INT9 = auto()
-    INT10 = auto()
-    INT11 = auto()
-    INT12 = auto()
-    INT13 = auto()
-    INT14 = auto()
-    INT15 = auto()
-    INT16 = auto()
-    INT17 = auto()
-    INT18 = auto()
-    INT19 = auto()
-    INT20 = auto()
-    INT21 = auto()
-    INT22 = auto()
-    INT23 = auto()
-    INT24 = auto()
-    INT25 = auto()
-    INT26 = auto()
-    INT27 = auto()
-    INT28 = auto()
-    INT29 = auto()
-    INT30 = auto()
-    INT31 = auto()
-    INT32 = auto()
-    INT64 = auto()
-    FLOAT32 = auto()
-
-    def bitwidth(self):
-        """Returns the number of bits required for this DataType."""
-
-        if self.name.startswith("UINT"):
-            return int(self.name.strip("UINT"))
-        elif self.name.startswith("INT"):
-            return int(self.name.strip("INT"))
-        elif "FLOAT" in self.name:
-            return int(self.name.strip("FLOAT"))
-        elif self.name in ["BINARY", "BIPOLAR"]:
-            return 1
-        elif self.name == "TERNARY":
-            return 2
-        else:
-            raise Exception("Unrecognized data type: %s" % self.name)
-
-    def min(self):
-        """Returns the smallest possible value allowed by this DataType."""
-
-        if self.name.startswith("UINT") or self.name == "BINARY":
-            return 0
-        elif self.name.startswith("INT"):
-            return -(2 ** (self.bitwidth() - 1))
-        elif self.name == "FLOAT32":
-            return np.finfo(np.float32).min
-        elif self.name == "BIPOLAR":
-            return -1
-        elif self.name == "TERNARY":
-            return -1
-        else:
-            raise Exception("Unrecognized data type: %s" % self.name)
-
-    def max(self):
-        """Returns the largest possible value allowed by this DataType."""
-
-        if self.name.startswith("UINT"):
-            return (2 ** (self.bitwidth())) - 1
-        elif self.name == "BINARY":
-            return +1
-        elif self.name.startswith("INT"):
-            return (2 ** (self.bitwidth() - 1)) - 1
-        elif self.name == "FLOAT32":
-            return np.finfo(np.float32).max
-        elif self.name == "BIPOLAR":
-            return +1
-        elif self.name == "TERNARY":
-            return +1
-        else:
-            raise Exception("Unrecognized data type: %s" % self.name)
-
-    def allowed(self, value):
-        """Check whether given value is allowed for this DataType.
-
-        * value (float32): value to be checked"""
-
-        if "FLOAT" in self.name:
-            return True
-        elif "INT" in self.name:
-            return (
-                (self.min() <= value)
-                and (value <= self.max())
-                and float(value).is_integer()
-            )
-        elif self.name == "BINARY":
-            return value in [0, 1]
-        elif self.name == "BIPOLAR":
-            return value in [-1, +1]
-        elif self.name == "TERNARY":
-            return value in [-1, 0, +1]
-        else:
-            raise Exception("Unrecognized data type: %s" % self.name)
-
-    def get_num_possible_values(self):
-        """Returns the number of possible values this DataType can take. Only
-        implemented for integer types for now."""
-        assert self.is_integer(), """This function only works for integers for now,
-        not for the DataType you used this function with."""
-        if "INT" in self.name:
-            return abs(self.min()) + abs(self.max()) + 1
-        elif self.name == "BINARY" or self.name == "BIPOLAR":
-            return 2
-        elif self.name == "TERNARY":
-            return 3
-
-    def get_smallest_possible(value):
-        """Returns smallest (fewest bits) possible DataType that can represent
-        value. Prefers unsigned integers where possible."""
-        if not int(value) == value:
-            return DataType["FLOAT32"]
-        for k in DataType.__members__:
-            dt = DataType[k]
-            if (dt.min() <= value) and (value <= dt.max()):
-                return dt
-
-    def signed(self):
-        """Returns whether this DataType can represent negative numbers."""
-        return self.min() < 0
-
-    def is_integer(self):
-        """Returns whether this DataType represents integer values only."""
-        # only FLOAT32 is noninteger for now
-        return self != DataType.FLOAT32
-
-    def get_hls_datatype_str(self):
-        """Returns the corresponding Vivado HLS datatype name."""
-        if self.is_integer():
-            if self.signed():
-                return "ap_int<%d>" % self.bitwidth()
-            else:
-                return "ap_uint<%d>" % self.bitwidth()
-        else:
-            return "float"
diff --git a/src/finn/core/execute_custom_node.py b/src/finn/core/execute_custom_node.py
deleted file mode 100644
index 86f7114a7..000000000
--- a/src/finn/core/execute_custom_node.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import finn.custom_op.registry as registry
-
-
-def execute_custom_node(node, context, graph):
-    """Call custom implementation to execute a single custom node.
-    Input/output provided via context."""
-    op_type = node.op_type
-    try:
-        # lookup op_type in registry of CustomOps
-        inst = registry.custom_op[op_type](node)
-        inst.execute_node(context, graph)
-    except KeyError:
-        # exception if op_type is not supported
-        raise Exception("Custom op_type %s is currently not supported." % op_type)
diff --git a/src/finn/core/modelwrapper.py b/src/finn/core/modelwrapper.py
deleted file mode 100644
index 11fda3da5..000000000
--- a/src/finn/core/modelwrapper.py
+++ /dev/null
@@ -1,585 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import copy
-import os
-import onnx
-import onnx.helper as oh
-import onnx.numpy_helper as np_helper
-from onnx import TensorProto
-
-import finn.util.basic as util
-import finn.util.onnx as onnxutil
-from finn.core.datatype import DataType
-from finn.transformation.general import (
-    RemoveUnusedTensors,
-    RemoveStaticGraphInputs,
-    SortGraph,
-)
-from finn.transformation.double_to_single_float import DoubleToSingleFloat
-
-
-class ModelWrapper:
-    """A wrapper around ONNX ModelProto that exposes some useful utility
-    functions for graph manipulation and exploration."""
-
-    def __init__(self, onnx_model_proto, make_deepcopy=False):
-        """Creates a ModelWrapper instance.
-        onnx_model_proto can be either a ModelProto instance, or a string
-        with the path to a stored .onnx file on disk, or serialized bytes.
-
-        - make_deepcopy : controls whether a deep copy of the ModelProto
-        is made internally.
-        """
-        if isinstance(onnx_model_proto, str):
-            assert os.path.isfile(onnx_model_proto)
-            self._model_proto = onnx.load(onnx_model_proto)
-        elif isinstance(onnx_model_proto, bytes):
-            self._model_proto = onnx.load_from_string(onnx_model_proto)
-        else:
-            if make_deepcopy:
-                self._model_proto = copy.deepcopy(onnx_model_proto)
-            else:
-                self._model_proto = onnx_model_proto
-
-    @property
-    def graph(self):
-        """Returns the graph of the model."""
-        return self._model_proto.graph
-
-    @graph.setter
-    def graph(self, value):
-        """Sets the graph of the model according to value"""
-        self._model_proto.graph = value
-
-    @property
-    def model(self):
-        """Returns the model."""
-        return self._model_proto
-
-    @model.setter
-    def model(self, value):
-        """Sets the model according to value."""
-        self._model_proto = value
-
-    def save(self, filename):
-        """Saves the wrapper ONNX ModelProto into a file with given name."""
-        onnx.save(self._model_proto, filename)
-
-    def analysis(self, analysis_fxn):
-        """Runs given anaylsis_fxn on this model and return resulting dict."""
-        return analysis_fxn(self)
-
-    def transform(
-        self, transformation, make_deepcopy=True, cleanup=True, fix_float64=True
-    ):
-        """Applies given Transformation repeatedly until no more changes can be made
-        and returns a transformed ModelWrapper instance.
-
-        - make_deepcopy : operates on a new (deep)copy of model.
-        - fix_float64 : DoubleToSingleFloat correction before starting
-        - cleanup : execute cleanup transformations before returning
-        """
-        transformed_model = self
-        if make_deepcopy:
-            transformed_model = copy.deepcopy(self)
-        if fix_float64:
-            (transformed_model, model_was_changed) = DoubleToSingleFloat().apply(
-                transformed_model
-            )
-        model_was_changed = True
-        while model_was_changed:
-            (transformed_model, model_was_changed) = transformation.apply(
-                transformed_model
-            )
-        if cleanup:
-            transformed_model.cleanup()
-        return transformed_model
-
-    def cleanup(self):
-        "Run cleanup transformations on the model."
-        transformed_model = self
-        cleanup_transforms = [
-            RemoveUnusedTensors(),
-            RemoveStaticGraphInputs(),
-            SortGraph(),
-        ]
-        for trn in cleanup_transforms:
-            transformed_model = transformed_model.transform(
-                trn, cleanup=False, make_deepcopy=False
-            )
-        return transformed_model
-
-    def check_compatibility(self):
-        """Checks this model for FINN compatibility:
-
-        * no embedded subgraphs
-
-        * all tensor shapes are specified, including activations
-
-        * all constants are initializers
-        """
-        # TODO check for no embedded subgraphs
-        # TODO check that all shapes are inferred
-        # TODO check that all constants are initializers
-        return True
-
-    def get_tensor_datatype(self, tensor_name):
-        """Returns the FINN DataType of tensor with given name."""
-        graph = self._model_proto.graph
-        qnt_annotations = graph.quantization_annotation
-        ret = util.get_by_name(qnt_annotations, tensor_name, "tensor_name")
-        if ret is not None:
-            ret = util.get_by_name(
-                ret.quant_parameter_tensor_names, "finn_datatype", "key"
-            )
-            if ret is not None:
-                return DataType[ret.value]
-        # TODO maybe use native ONNX tensor type instead of assuming fp32?
-        return DataType["FLOAT32"]
-
-    def set_tensor_datatype(self, tensor_name, datatype):
-        """Sets the FINN DataType of tensor with given name."""
-        graph = self._model_proto.graph
-        qnt_annotations = graph.quantization_annotation
-        ret = util.get_by_name(qnt_annotations, tensor_name, "tensor_name")
-        if ret is not None:
-            ret_dt = util.get_by_name(
-                ret.quant_parameter_tensor_names, "finn_datatype", "key"
-            )
-            if ret_dt is not None:
-                ret_dt.value = datatype.name
-            else:
-                dt = onnx.StringStringEntryProto()
-                dt.key = "finn_datatype"
-                dt.value = datatype.name
-                ret.quant_parameter_tensor_names.append(dt)
-        else:
-            qa = onnx.TensorAnnotation()
-            dt = onnx.StringStringEntryProto()
-            dt.key = "finn_datatype"
-            dt.value = datatype.name
-            qa.tensor_name = tensor_name
-            qa.quant_parameter_tensor_names.append(dt)
-            qnt_annotations.append(qa)
-
-    def get_tensor_valueinfo(self, tensor_name):
-        """Returns ValueInfoProto of tensor with given name, if it has one."""
-        graph = self._model_proto.graph
-        vi_names = [(x.name, x) for x in graph.input]
-        vi_names += [(x.name, x) for x in graph.output]
-        vi_names += [(x.name, x) for x in graph.value_info]
-        try:
-            vi_ind = [x[0] for x in vi_names].index(tensor_name)
-            vi = vi_names[vi_ind][1]
-            return vi
-        except ValueError:
-            return None
-
-    def get_tensor_shape(self, tensor_name):
-        """Returns the shape of tensor with given name, if it has ValueInfoProto."""
-        graph = self._model_proto.graph
-        vi_names = [(x.name, x) for x in graph.input]
-        vi_names += [(x.name, x) for x in graph.output]
-        vi_names += [(x.name, x) for x in graph.value_info]
-        try:
-            vi_ind = [x[0] for x in vi_names].index(tensor_name)
-            vi = vi_names[vi_ind][1]
-            dims = [x.dim_value for x in vi.type.tensor_type.shape.dim]
-            return dims
-        except ValueError:
-            return None
-
-    def set_tensor_shape(self, tensor_name, tensor_shape, dtype=TensorProto.FLOAT):
-        """Assigns shape in ValueInfoProto for tensor with given name."""
-        new_vi = oh.make_tensor_value_info(tensor_name, dtype, tensor_shape)
-        # find which container this tensor's ValueInfo lives in
-        # if not found anywhere, we assume it's a new value_info
-        target_container = self.graph.value_info
-        if util.get_by_name(self.graph.input, tensor_name) is not None:
-            target_container = self.graph.input
-        if util.get_by_name(self.graph.output, tensor_name) is not None:
-            target_container = self.graph.output
-        # remove from target container and add new
-        util.remove_by_name(target_container, tensor_name)
-        target_container.append(new_vi)
-
-    def set_initializer(self, tensor_name, tensor_value):
-        """Sets the initializer value for tensor with given name."""
-        graph = self._model_proto.graph
-        # convert tensor_value (numpy array) into TensorProto w/ correct name
-        tensor_init_proto = np_helper.from_array(tensor_value)
-        tensor_init_proto.name = tensor_name
-        # first, remove if an initializer already exists
-        init_names = [x.name for x in graph.initializer]
-        try:
-            init_ind = init_names.index(tensor_name)
-            init_old = graph.initializer[init_ind]
-            graph.initializer.remove(init_old)
-        except ValueError:
-            pass
-        # create and insert new initializer
-        graph.initializer.append(tensor_init_proto)
-        # set shape
-        dtype = tensor_init_proto.data_type
-        self.set_tensor_shape(tensor_name, list(tensor_value.shape), dtype)
-
-    def rename_tensor(self, old_name, new_name):
-        """Renames a tensor from old_name to new_name."""
-        graph = self.graph
-        # sweep over inputs
-        if util.get_by_name(graph.input, old_name) is not None:
-            util.get_by_name(graph.input, old_name).name = new_name
-        # sweep over outputs
-        if util.get_by_name(graph.output, old_name) is not None:
-            util.get_by_name(graph.output, old_name).name = new_name
-        # sweep over value_info
-        if util.get_by_name(graph.value_info, old_name) is not None:
-            util.get_by_name(graph.value_info, old_name).name = new_name
-        # sweep over initializers
-        if util.get_by_name(graph.initializer, old_name) is not None:
-            util.get_by_name(graph.initializer, old_name).name = new_name
-        # sweep over quantization annotations
-        if (
-            util.get_by_name(graph.quantization_annotation, old_name, "tensor_name")
-            is not None
-        ):
-            util.get_by_name(
-                graph.quantization_annotation, old_name, "tensor_name"
-            ).tensor_name = new_name
-        # sweep over node i/o
-        for n in graph.node:
-            if old_name in n.input:
-                n.input[list(n.input).index(old_name)] = new_name
-            if old_name in n.output:
-                n.output[list(n.output).index(old_name)] = new_name
-
-    def get_initializer(self, tensor_name):
-        """Gets the initializer value for tensor with given name, if any."""
-        graph = self._model_proto.graph
-        init_names = [x.name for x in graph.initializer]
-        try:
-            init_ind = init_names.index(tensor_name)
-            return np_helper.to_array(graph.initializer[init_ind])
-        except ValueError:
-            return None
-
-    def find_producer(self, tensor_name):
-        """Finds and returns the node that produces the tensor with given name."""
-        for x in self._model_proto.graph.node:
-            if tensor_name in x.output:
-                return x
-        return None
-
-    def find_upstream(self, tensor_name, finder_fxn):
-        """Follow the producer chain upstream, calling finder_fxn on each upstream
-        node until it returns True or there are no nodes left. Returns the list
-        of nodes visited, or None if finder_fxn did not return True."""
-        visit_list = []
-        current_tensor = tensor_name
-        while True:
-            current_producer = self.find_producer(current_tensor)
-            if current_producer is None:
-                return []
-            else:
-                found = finder_fxn(current_producer)
-                visit_list.append(current_producer)
-                if found:
-                    return visit_list
-                else:
-                    current_tensor = current_producer.input[0]
-
-    def find_consumer(self, tensor_name):
-        """Finds and returns the node that consumes the tensor with given name.
-        Currently only works for linear graphs."""
-        all_inputs = [x.input[0] for x in self._model_proto.graph.node]
-        try:
-            consumer_ind = all_inputs.index(tensor_name)
-            return self._model_proto.graph.node[consumer_ind]
-        except ValueError:
-            return None
-
-    def find_consumers(self, tensor_name):
-        """Finds and returns a list of the nodes that consume tensor with
-        given name."""
-        consumers = []
-        for n in self._model_proto.graph.node:
-            for inp_tensor in n.input:
-                if inp_tensor == tensor_name:
-                    consumers.append(n)
-        if consumers != []:
-            return consumers
-        else:
-            return None
-
-    def find_direct_successors(self, node):
-        """Finds and returns a list of the nodes that are successors of
-        given node."""
-        successors = []
-        for outp_tensor in node.output:
-            tensor_consumer_list = self.find_consumers(outp_tensor)
-            if tensor_consumer_list is not None:
-                for consumer in tensor_consumer_list:
-                    successors.append(consumer)
-        if successors != []:
-            return successors
-        else:
-            return None
-
-    def find_direct_predecessors(self, node):
-        """Finds and returns a list of the nodes that are predecessors of
-        given node."""
-        predecessors = []
-        for inp_tensor in node.input:
-            producer = self.find_producer(inp_tensor)
-            if producer is not None:
-                predecessors.append(producer)
-        if predecessors != []:
-            return predecessors
-        else:
-            return None
-
-    def is_fork_node(self, node):
-        """Checks if the given node is a fork, that is, the node has multiple
-        direct successors"""
-        direct_successors = self.find_direct_successors(node)
-        is_fork = False if direct_successors is None else (len(direct_successors) > 1)
-        return is_fork
-
-    def is_join_node(self, node):
-        """Checks if the given node is a join, that is, the node has multiple
-        direct predecessors"""
-        direct_predecessors = self.find_direct_predecessors(node)
-        is_join = (
-            False if direct_predecessors is None else (len(direct_predecessors) > 1)
-        )
-        return is_join
-
-    def get_all_tensor_names(self):
-        """Returns a list of all (input, output and value_info) tensor names
-        in the graph."""
-        graph = self.graph
-        names = [x.name for x in graph.value_info]
-        names += [x.name for x in graph.input]
-        names += [x.name for x in graph.output]
-        return names
-
-    def make_new_valueinfo_name(self):
-        """Returns a name that can be used for a new value_info."""
-        names = self.get_all_tensor_names()
-        candidate = util.random_string()
-        while candidate in names:
-            candidate = util.random_string()
-        return candidate
-
-    def make_empty_exec_context(self):
-        """Creates an empty execution context for this model.
-
-        The execution context is a dictionary of all tensors used for the
-        inference computation. Any initializer values will be taken into
-        account, all other tensors will be zero."""
-        execution_context = dict()
-        graph = self._model_proto.graph
-        # make empty tensors for all the graph inputs and outputs
-        for vi in graph.input:
-            new_tensor = onnxutil.valueinfo_to_tensor(vi)
-            execution_context[vi.name] = new_tensor
-        for vi in graph.output:
-            new_tensor = onnxutil.valueinfo_to_tensor(vi)
-            execution_context[vi.name] = new_tensor
-        # make empty tensors for all intermediate buffers
-        for vi in graph.value_info:
-            new_tensor = onnxutil.valueinfo_to_tensor(vi)
-            execution_context[vi.name] = new_tensor
-        # fill in the constants provided by the initializers (TensorProto to npy)
-        for t in graph.initializer:
-            execution_context[t.name] = np_helper.to_array(t)
-        return execution_context
-
-    def check_all_tensor_shapes_specified(self):
-        """Checks whether all tensors have a specified shape (ValueInfo).
-        The ONNX standard allows for intermediate activations to have no
-        associated ValueInfo, but FINN expects all shapes to be specified."""
-        graph = self._model_proto.graph
-        ret = True
-        for n in graph.node:
-            for i in n.input:
-                ret = ret and (self.get_tensor_shape(i) is not None)
-            for o in n.output:
-                ret = ret and (self.get_tensor_shape(o) is not None)
-        return ret
-
-    def get_tensor_fanout(self, tensor_name):
-        """Returns the number of nodes for which the tensor with given name is
-        as input."""
-        graph = self.graph
-        fanout = 0
-        for n in graph.node:
-            if tensor_name in n.input:
-                fanout += 1
-        return fanout
-
-    def get_metadata_prop(self, key):
-        """Returns the value associated with metadata_prop with given key,
-        or None otherwise."""
-        metadata_prop = util.get_by_name(self.model.metadata_props, key, "key")
-        if metadata_prop is None:
-            return None
-        else:
-            return metadata_prop.value
-
-    def set_metadata_prop(self, key, value):
-        """Sets metadata property with given key to the given value."""
-        metadata_prop = util.get_by_name(self.model.metadata_props, key, "key")
-        if metadata_prop is None:
-            metadata_prop = onnx.StringStringEntryProto()
-            metadata_prop.key = key
-            metadata_prop.value = value
-            self.model.metadata_props.append(metadata_prop)
-        else:
-            metadata_prop.value = value
-
-    def get_nodes_by_name(self, op_name):
-        """Returns a list of nodes with specified name."""
-        return list(filter(lambda x: x.name == op_name, self.graph.node))
-
-    def get_nodes_by_op_type(self, op_type):
-        """Returns a list of nodes with specified op_type."""
-        return list(filter(lambda x: x.op_type == op_type, self.graph.node))
-
-    def get_finn_nodes(self):
-        """Returns a list of nodes where domain == 'finn'."""
-        return list(filter(lambda x: x.domain == "finn", self.graph.node))
-
-    def get_non_finn_nodes(self):
-        """Returns a list of nodes where domain != 'finn'."""
-        return list(filter(lambda x: x.domain != "finn", self.graph.node))
-
-    def get_node_index(self, node):
-        """Returns current index of given node."""
-        n_ind = 0
-        try:
-            for n in self.graph.node:
-                if n == node:
-                    return n_ind
-                n_ind += 1
-        except ValueError:
-            return None
-
-    def get_tensor_layout(self, tensor_name):
-        """Returns the data layout annotation of tensor with given name.
-        The data layout is expressed as a list of strings with as many
-        elements as the number of dimensions in the tensor shape. Each
-        string annotates what is contained in that dimension. If there is no
-        data layout annotation, None will be returned.
-        Examples of data layout annotations:
-        ["N", "C"] is tensor[batch][channel]
-        ["N", "C", "H", "W"] is tensor[batch][channel][height][width]
-        ["N", "H", "W", "C"] is tensor[batch][height][width][channel]
-        """
-        graph = self._model_proto.graph
-        qnt_annotations = graph.quantization_annotation
-        ret = util.get_by_name(qnt_annotations, tensor_name, "tensor_name")
-        if ret is not None:
-            ret = util.get_by_name(
-                ret.quant_parameter_tensor_names, "tensor_layout", "key"
-            )
-            if ret is not None:
-                return eval(ret.value)
-        return None
-
-    def set_tensor_layout(self, tensor_name, data_layout):
-        """Sets the data layout annotation of tensor with given name. See
-        get_tensor_layout for examples."""
-        tensor_shape = self.get_tensor_shape(tensor_name)
-        assert type(data_layout) == list, "data_layout must be a list"
-        if tensor_shape is not None:
-            assert len(tensor_shape) == len(
-                data_layout
-            ), """Mismatch between number
-            of dimensions of tensor shape and data layout annotation."""
-        graph = self._model_proto.graph
-        qnt_annotations = graph.quantization_annotation
-        ret = util.get_by_name(qnt_annotations, tensor_name, "tensor_name")
-        if ret is not None:
-            ret_tl = util.get_by_name(
-                ret.quant_parameter_tensor_names, "tensor_layout", "key"
-            )
-            if ret_tl is not None:
-                ret_tl.value = str(data_layout)
-            else:
-                tl = onnx.StringStringEntryProto()
-                tl.key = "tensor_layout"
-                tl.value = str(data_layout)
-                ret.quant_parameter_tensor_names.append(tl)
-        else:
-            qa = onnx.TensorAnnotation()
-            dt = onnx.StringStringEntryProto()
-            dt.key = "tensor_layout"
-            dt.value = str(data_layout)
-            qa.tensor_name = tensor_name
-            qa.quant_parameter_tensor_names.append(dt)
-            qnt_annotations.append(qa)
-
-    def get_tensor_sparsity(self, tensor_name):
-        """Returns the sparsity of a given tensor as dictionary."""
-        graph = self._model_proto.graph
-        qnt_annotations = graph.quantization_annotation
-        ret = util.get_by_name(qnt_annotations, tensor_name, "tensor_name")
-        if ret is not None:
-            ret = util.get_by_name(
-                ret.quant_parameter_tensor_names, "tensor_sparsity", "key"
-            )
-            if ret is not None:
-                return eval(ret.value)
-        return None
-
-    def set_tensor_sparsity(self, tensor_name, sparsity_dict):
-        """Sets the sparsity annotation of a tensor with given name."""
-        graph = self._model_proto.graph
-        qnt_annotations = graph.quantization_annotation
-        ret = util.get_by_name(qnt_annotations, tensor_name, "tensor_name")
-        if ret is not None:
-            ret_ts = util.get_by_name(
-                ret.quant_parameter_tensor_names, "tensor_sparsity", "key"
-            )
-            if ret_ts is not None:
-                ret_ts.value = str(sparsity_dict)
-            else:
-                ts = onnx.StringStringEntryProto()
-                ts.key = "tensor_sparsity"
-                ts.value = str(sparsity_dict)
-                ret.quant_parameter_tensor_names.append(ts)
-        else:
-            qa = onnx.TensorAnnotation()
-            dt = onnx.StringStringEntryProto()
-            dt.key = "tensor_sparsity"
-            dt.value = str(sparsity_dict)
-            qa.tensor_name = tensor_name
-            qa.quant_parameter_tensor_names.append(dt)
-            qnt_annotations.append(qa)
diff --git a/src/finn/core/onnx_exec.py b/src/finn/core/onnx_exec.py
deleted file mode 100644
index 85b52c0f3..000000000
--- a/src/finn/core/onnx_exec.py
+++ /dev/null
@@ -1,266 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import copy
-
-import numpy as np
-import onnx.helper as helper
-import onnxruntime as rt
-
-import finn.core.execute_custom_node as ex_cu_node
-from finn.core.modelwrapper import ModelWrapper
-from finn.core.remote_exec import remote_exec
-from finn.core.rtlsim_exec import rtlsim_exec
-from finn.custom_op.registry import getCustomOp
-import finn.analysis.topology as ta
-from finn.util.basic import sanitize_quant_values, get_sanitize_quant_tensors
-
-
-def execute_node(node, context, graph, return_full_exec_context=False):
-    """Executes a single node by using onnxruntime, with custom function or
-    if dataflow partition by using remote execution or rtlsim.
-
-    Input/output provided via context."""
-
-    if node.op_type == "StreamingDataflowPartition":
-        sdp_node = getCustomOp(node)
-        model = ModelWrapper(sdp_node.get_nodeattr("model"))
-        inp_ctx = dict(filter(lambda x: x[0] in node.input, context.items()))
-        # input may have been renamed in partition
-        assert len(inp_ctx) == 1
-        old_iname = node.input[0]
-        new_iname = model.graph.input[0].name
-        if old_iname != new_iname:
-            inp_ctx[new_iname] = inp_ctx[old_iname]
-            del inp_ctx[old_iname]
-        ret = execute_onnx(model, inp_ctx, return_full_exec_context)
-        # if the model was in ip-stitched rtlsim mode, it may have gained an
-        # annotation for the number of elapsed cycles, so save it again
-        if model.get_metadata_prop("exec_mode") == "rtlsim":
-            model.save(sdp_node.get_nodeattr("model"))
-        # output may have been renamed in partition
-        assert len(model.graph.output) == 1
-        node_oname = node.output[0]
-        model_oname = model.graph.output[0].name
-        context[node_oname] = ret[model_oname]
-        # prefix and insert exec context entries
-        if return_full_exec_context:
-            for tname in ret.keys():
-                if tname != model_oname:
-                    context[node.name + "_" + tname] = ret[tname]
-    else:
-        if node.domain == "finn":
-            ex_cu_node.execute_custom_node(node, context, graph)
-        else:
-            # onnxruntime unfortunately does not implement run_node as defined
-            # by ONNX; it can only execute entire models. We therefore create a
-            # model that consists solely of the current node.
-            # note: ensure that the same ValueInfo does not appear both in
-            # graph.value_info as well as graph.output or graph.input
-            # nodes with multiple outputs that are a mix of value_info and
-            # input/outputs may get them reordered below
-            node_inputs = list(filter(lambda x: x.name in node.input, graph.input))
-            node_inputs += list(
-                filter(lambda x: x.name in node.input, graph.value_info)
-            )
-            node_outputs = list(filter(lambda x: x.name in node.output, graph.output))
-            node_outputs += list(
-                filter(lambda x: x.name in node.output, graph.value_info)
-            )
-            node_graph = helper.make_graph(
-                nodes=[node],
-                name="single-node-exec",
-                inputs=node_inputs,
-                outputs=node_outputs,
-            )
-            node_model = helper.make_model(node_graph)
-            input_dict = dict()
-            for inp in node.input:
-                input_dict[inp] = context[inp]
-
-            sess = rt.InferenceSession(node_model.SerializeToString())
-            output_list = sess.run(None, input_dict)
-
-            for output_ind in range(len(node.output)):
-                # get the name of the target buffer from node.output
-                outp = node.output[output_ind]
-
-                # retrieve the index of that name in node_outputs
-                for i in range(len(node_outputs)):
-                    if outp == node_outputs[i].name:
-                        list_ind = i
-
-                # use that index to index output_list
-                if output_list[list_ind].shape != context[outp].shape:
-                    raise Exception(
-                        """Output shapes disagree after node execution:
-                        found %s vs expected %s"""
-                        % (str(output_list[list_ind].shape), str(context[outp].shape))
-                    )
-                context[outp] = output_list[list_ind]
-
-
-def execute_onnx(
-    model, input_dict, return_full_exec_context=False, start_node=None, end_node=None
-):
-    """Executes given ONNX ModelWrapper with given named inputs.
-
-    If return_full_exec_context is False, a dict of named outputs is returned
-    as indicated by the model.graph.output.
-
-    If return_full_exec_context is True, the full set of tensors used by
-    the execution (including inputs, weights, activations and final outputs)
-    will be returned as a dict.
-
-    When start_node and end_node are set to None, the whole graph is executed.
-    If they are set to particular ONNX nodes, only the subgraph between (and
-    including) those nodes is executed.
-    """
-
-    if not model.check_all_tensor_shapes_specified():
-        raise Exception("Found unspecified tensor shapes, try infer_shapes")
-    ret = model.analysis(ta.nodes_topologically_sorted)
-    assert (
-        ret["nodes_topologically_sorted"] is True
-    ), """Nodes must be
-    topologically sorted."""
-
-    graph = model.graph
-    # first, we need to make sure that every variable required by the graph has
-    # some buffer associated with it. this includes graph inputs (which includes
-    # the input data as well as the trained parameters) and the graph ValueInfo
-    # (intermediate tensors between layers)
-    # this is provided by the execution_context, which is a dict of np.ndarray
-    execution_context = model.make_empty_exec_context()
-    # fill in any inputs provided to this function
-    for inp_name in input_dict.keys():
-        if inp_name in execution_context:
-            if execution_context[inp_name].shape == input_dict[inp_name].shape:
-                execution_context[inp_name] = input_dict[inp_name]
-            else:
-                raise Exception(
-                    "Shape mismatch for provided input %s: found %s expected %s "
-                    % (
-                        inp_name,
-                        str(execution_context[inp_name].shape),
-                        str(input_dict[inp_name].shape),
-                    )
-                )
-        # else:
-        # raise Exception("Provided input not found in graph context: %s" % inp_name)
-
-    # check if model has an execution mode set
-    # if None, execute model node by node using execute_node()
-    # if set to "remote_pynq" execute model on PYNQ board
-    # if set to "rtlsim" execute model using pyverilator
-    model_exec_mode = model.get_metadata_prop("exec_mode")
-    if (model_exec_mode is None) or (model_exec_mode == ""):
-        # execute the model node by node
-        # we can simply walk down the list since the ONNX spec guarantees that it is
-        # topologically sorted
-        if start_node is None:
-            start_node = model.graph.node[0]
-        if end_node is None:
-            end_node = model.graph.node[-1]
-        # select the nodes between specified start/end nodes
-        start_ind = model.get_node_index(start_node)
-        end_ind = model.get_node_index(end_node) + 1
-        assert end_ind >= start_ind, "Start/end nodes must define valid subgraph"
-        subgraph = graph.node[start_ind:end_ind]
-        for node in subgraph:
-            if get_sanitize_quant_tensors() != 0:
-                # round input values to match quantization annotation
-                execution_context = sanitize_quant_values(
-                    model, node.input, execution_context
-                )
-            execute_node(node, execution_context, graph, return_full_exec_context)
-            if get_sanitize_quant_tensors() != 0:
-                # round output values to quantization annotation
-                execution_context = sanitize_quant_values(
-                    model, node.output, execution_context
-                )
-    elif model_exec_mode == "remote_pynq":
-        # use remote exec metadata built into model to execute on a remote PYNQ
-        remote_exec(model, execution_context)
-    elif model_exec_mode == "rtlsim":
-        # use stitched IP for rtlsim
-        rtlsim_exec(model, execution_context)
-    else:
-        raise Exception(
-            """Metadata property "exec_mode" is set to an unknown value.
-        Can be left unset or has to be set to "remote_pynq" for remote execution
-        on PYNQ board or "rtlsim" for execution using pyverilator!"""
-        )
-
-    if return_full_exec_context:
-        return execution_context
-    else:
-        # provide outputs as dict
-        output_dict = dict()
-        for out_tensor in graph.output:
-            out_name = out_tensor.name
-            output_dict[out_name] = execution_context[out_name]
-        return output_dict
-
-
-def execute_onnx_and_make_model(model, input_dict):
-    """Executes given ONNX ModelWrapper with given named inputs and return a new
-    ModelWrapper where an initializer is provided for each tensor as taken from
-    the execution. This new model is useful for debugging, since it contains
-    all the intermediate activation values."""
-
-    # retrieve the full execution context
-    execution_context = execute_onnx(model, input_dict, True)
-    new_model = copy.deepcopy(model)
-    # create value_info entries and initializers for everything
-    for i in execution_context.keys():
-        new_model.set_initializer(i, execution_context[i])
-    for vi in new_model.graph.value_info:
-        new_model.graph.output.append(vi)
-    return new_model
-
-
-def compare_execution(
-    model_a,
-    model_b,
-    input_dict,
-    compare_fxn=lambda x, y: np.isclose(x, y, atol=1e-3).all(),
-):
-    """Executes two ONNX models and compare their outputs using given function.
-
-    compare_fxn should take in two tensors and return a Boolean"""
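-    # e.g. (illustrative) compare_execution(model_a, model_b, {"global_in": x})
-    # returns True when the first outputs agree within atol=1e-3 (the default
-    # compare_fxn)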
-    # compare values from first output tensors produced
-    res_a = list(execute_onnx(model_a, input_dict).items())[0][1]
-    res_b = list(execute_onnx(model_b, input_dict).items())[0][1]
-    return compare_fxn(res_a, res_b)
diff --git a/src/finn/core/remote_exec.py b/src/finn/core/remote_exec.py
deleted file mode 100644
index 2e139065e..000000000
--- a/src/finn/core/remote_exec.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-import subprocess
-import warnings
-import numpy as np
-
-
-def remote_exec(model, execution_context):
-    """Executes the given model remotely on the pynq board. The metadata properties
-    related to the pynq board have to be set. The execution context contains the
-    input values."""
-    # TODO fix for multi input-output
-    pynq_ip = model.get_metadata_prop("pynq_ip")
-    pynq_port = int(model.get_metadata_prop("pynq_port"))
-    pynq_username = model.get_metadata_prop("pynq_username")
-    pynq_password = model.get_metadata_prop("pynq_password")
-    pynq_target_dir = model.get_metadata_prop("pynq_target_dir")
-    deployment_dir = model.get_metadata_prop("pynq_deploy_dir")
-    platform = model.get_metadata_prop("platform")
-    assert platform in ["alveo", "zynq-iodma"]
-    bitfile = model.get_metadata_prop("bitfile")
-    bitfile = os.path.basename(bitfile)
-    if pynq_password == "":
-        if "zynq" in platform:
-            raise Exception("PYNQ board remote exec needs password for sudo")
-        else:
-            local_prefix = ""  # assume we are using an ssh key
-            warnings.warn("Empty password, make sure you've set up an ssh key")
-    else:
-        local_prefix = "sshpass -p %s " % pynq_password
-
-    if platform == "alveo":
-        # Alveo can run without sudo
-        remote_prefix = ""
-    elif "zynq" in platform:
-        # PYNQ Zynq boards need to execute with sudo
-        remote_prefix = "echo %s | sudo -S " % pynq_password
-
-    inp = execution_context[model.graph.input[0].name]
-    # make copy of array before saving it
-    inp = inp.copy()
-    batchsize = inp.shape[0]
-    np.save(os.path.join(deployment_dir, "input.npy"), inp)
-    # extracting last folder of absolute path (deployment_dir)
-    deployment_folder = os.path.basename(os.path.normpath(deployment_dir))
-    # copy input to PYNQ board
-    cmd = local_prefix + "scp -P{} -r {}/input.npy {}@{}:{}/{}".format(
-        pynq_port,
-        deployment_dir,
-        pynq_username,
-        pynq_ip,
-        pynq_target_dir,
-        deployment_folder,
-    )
-    bash_command = ["/bin/bash", "-c", cmd]
-    process_scp_in = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
-    process_scp_in.communicate()
-
-    # use platform attribute for correct remote execution
-    if platform == "alveo":
-        remote_cmd = "bash -ic 'bash alveo_run.sh execute %d' \"" % batchsize
-    else:
-        remote_cmd = (
-            "python3.6 driver.py --exec_mode=execute --batchsize={} "
-            "--bitfile={} --inputfile=input.npy --outputfile=output.npy "
-            '--platform={} "'
-        ).format(batchsize, bitfile, platform)
-    cmd = (
-        local_prefix + 'ssh {}@{} -p {} "cd {}/{}; ' + remote_prefix + remote_cmd
-    ).format(pynq_username, pynq_ip, pynq_port, pynq_target_dir, deployment_folder)
-    bash_command = ["/bin/bash", "-c", cmd]
-    process_exec_accel = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
-    process_exec_accel.communicate()
-    # remove stale output file from local dir, if any
-    try:
-        os.remove("{}/output.npy".format(deployment_dir))
-    except FileNotFoundError:
-        pass
-    # copy generated output to local
-    cmd = local_prefix + "scp -P{} {}@{}:{}/{}/output.npy {}".format(
-        pynq_port,
-        pynq_username,
-        pynq_ip,
-        pynq_target_dir,
-        deployment_folder,
-        deployment_dir,
-    )
-    bash_command = ["/bin/bash", "-c", cmd]
-    process_scp_out = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
-    process_scp_out.communicate()
-    outp = np.load("{}/output.npy".format(deployment_dir))
-    execution_context[model.graph.output[0].name] = outp
diff --git a/src/finn/core/rtlsim_exec.py b/src/finn/core/rtlsim_exec.py
deleted file mode 100644
index d83bcd3a7..000000000
--- a/src/finn/core/rtlsim_exec.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-
-from finn.custom_op.registry import getCustomOp
-from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy
-from finn.util.fpgadataflow import (
-    pyverilate_get_liveness_threshold_cycles,
-    pyverilate_stitched_ip,
-)
-
-try:
-    from pyverilator import PyVerilator
-except ModuleNotFoundError:
-    PyVerilator = None
-
-
-def rtlsim_exec(model, execution_context):
-    """Use PyVerilator to execute given model with stitched IP. The execution
-    context contains the input values."""
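-    # high-level flow (as implemented below): fold and pack the input into
-    # stream words, run the stitched-IP simulation until all expected output
-    # values are observed, then unpack and reshape the result into the context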
-
-    if PyVerilator is None:
-        raise ImportError("Installation of PyVerilator is required.")
-    # ensure stitched ip project already exists
-    assert os.path.isfile(
-        model.get_metadata_prop("wrapper_filename")
-    ), """The
-    file named by metadata property "wrapper_filename" doesn't exist."""
-    assert os.path.isdir(
-        model.get_metadata_prop("vivado_stitch_proj")
-    ), """The
-    directory named by metadata property "vivado_stitch_proj" doesn't exist."""
-    trace_file = model.get_metadata_prop("rtlsim_trace")
-    # extract input shape
-    # TODO extend for multiple inputs
-    i_name = model.graph.input[0].name
-    i_tensor = execution_context[i_name]
-    i_dt = model.get_tensor_datatype(i_name)
-    first_node = getCustomOp(model.find_consumer(i_name))
-    i_stream_w = first_node.get_instream_width()
-    # convert input into time multiplexed shape
-    i_folded_shape = first_node.get_folded_input_shape()
-    batchsize = i_tensor.shape[0]
-    # override batch size for input
-    i_folded_shape = list(i_folded_shape)
-    i_folded_shape[0] = batchsize
-    i_folded_shape = tuple(i_folded_shape)
-    # TODO any other layout transformations need to happen here!
-    i_tensor = i_tensor.reshape(i_folded_shape)
-    # extract output shape
-    o_name = model.graph.output[0].name
-    o_shape = model.get_tensor_shape(o_name)
-    o_dt = model.get_tensor_datatype(o_name)
-    last_node = getCustomOp(model.find_producer(o_name))
-    o_folded_shape = last_node.get_folded_output_shape()
-    # override batch size from actual input
-    o_shape = list(o_shape)
-    o_shape[0] = batchsize
-    o_shape = tuple(o_shape)
-    o_folded_shape = list(o_folded_shape)
-    o_folded_shape[0] = batchsize
-    o_folded_shape = tuple(o_folded_shape)
-    o_stream_w = last_node.get_outstream_width()
-    packedBits = o_stream_w
-    targetBits = o_dt.bitwidth()
-    # pack input
-    packed_input = npy_to_rtlsim_input(i_tensor, i_dt, i_stream_w)
-    num_out_values = last_node.get_number_output_values()
-    num_out_values *= batchsize
-    # prepare pyverilator model
-    rtlsim_so = model.get_metadata_prop("rtlsim_so")
-    if (rtlsim_so is None) or (not os.path.isfile(rtlsim_so)):
-        sim = pyverilate_stitched_ip(model)
-        model.set_metadata_prop("rtlsim_so", sim.lib._name)
-    else:
-        sim = PyVerilator(rtlsim_so, auto_eval=False)
-    ret = _run_rtlsim(sim, packed_input, num_out_values, trace_file)
-    packed_output = ret[0]
-    model.set_metadata_prop("cycles_rtlsim", str(ret[1]))
-    # unpack output and put into context
-    o_folded_tensor = rtlsim_output_to_npy(
-        packed_output, None, o_dt, o_folded_shape, packedBits, targetBits
-    )
-    execution_context[o_name] = o_folded_tensor.reshape(o_shape)
-
-
-# TODO move the rtlsim functions below into a common location such as utils
-def _reset_rtlsim(sim):
-    """Sets reset input in pyverilator to zero, toggles the clock and set it
-    back to one"""
-    sim.io.ap_rst_n = 0
-    _toggle_clk(sim)
-    _toggle_clk(sim)
-    sim.io.ap_rst_n = 1
-    _toggle_clk(sim)
-    _toggle_clk(sim)
-
-
-def _toggle_clk(sim):
-    """Toggles the clock input in pyverilator once."""
-    sim.io.ap_clk = 0
-    sim.eval()
-    sim.io.ap_clk = 1
-    sim.eval()
-
-
-def _run_rtlsim(sim, inp, num_out_values, trace_file=None, reset=True):
-    """Runs the pyverilator simulation by passing the input values to the simulation,
-    toggle the clock and observing the execution time. Argument num_out_values contains
-    the number of expected output values, so the simulation is closed after all
-    outputs are calculated. Function contains also an observation loop that can
-    abort the simulation if no output value is produced after a certain time
-    (liveness_threshold from function pyverilate_get_liveness_threshold_cycles()
-    from finn.util.fpgadataflow)"""
-    inputs = inp
-    outputs = []
-    sim.io.m_axis_0_tready = 1
-
-    # observe if output is completely calculated
-    # observation_count will contain the number of cycles the calculation ran
-    output_observed = False
-    observation_count = 0
-
-    # avoid infinite looping of simulation by aborting when there is no change in
-    # output values after LIVENESS_THRESHOLD cycles
-    no_change_count = 0
-    old_outputs = outputs
-    liveness_threshold = pyverilate_get_liveness_threshold_cycles()
-
-    if trace_file is not None:
-        sim.start_vcd_trace(trace_file)
-    if reset:
-        _reset_rtlsim(sim)
-
-    while not output_observed:
-        sim.io.s_axis_0_tvalid = 1 if len(inputs) > 0 else 0
-        sim.io.s_axis_0_tdata = inputs[0] if len(inputs) > 0 else 0
-        if sim.io.s_axis_0_tready == 1 and sim.io.s_axis_0_tvalid == 1:
-            inputs = inputs[1:]
-        if sim.io.m_axis_0_tvalid == 1 and sim.io.m_axis_0_tready == 1:
-            outputs = outputs + [sim.io.m_axis_0_tdata]
-        _toggle_clk(sim)
-
-        observation_count = observation_count + 1
-        no_change_count = no_change_count + 1
-
-        if len(outputs) == num_out_values:
-            cycles_rtlsim = observation_count
-            output_observed = True
-
-        if no_change_count == liveness_threshold:
-            if old_outputs == outputs:
-                if trace_file is not None:
-                    sim.flush_vcd_trace()
-                    sim.stop_vcd_trace()
-                raise Exception(
-                    "Error in simulation! Takes too long to produce output."
-                    "Consider setting the LIVENESS_THRESHOLD env.var. to a "
-                    "larger value."
-                )
-            else:
-                no_change_count = 0
-                old_outputs = outputs
-    if trace_file is not None:
-        sim.flush_vcd_trace()
-        sim.stop_vcd_trace()
-
-    return (outputs, cycles_rtlsim)
diff --git a/src/finn/core/throughput_test.py b/src/finn/core/throughput_test.py
deleted file mode 100644
index 1306edfa2..000000000
--- a/src/finn/core/throughput_test.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-import subprocess
-import numpy as np
-import warnings
-from finn.util.basic import gen_finn_dt_tensor
-from finn.core.rtlsim_exec import rtlsim_exec
-
-
-def throughput_test_remote(model, batchsize=1000):
-    """Runs the throughput test for the given model remotely on the pynq board.
-    The metadata properties related to the pynq board have to be set.
-    Returns a dictionary with results of the throughput test. Returns None
-    if the test fails."""
-
-    pynq_ip = model.get_metadata_prop("pynq_ip")
-    pynq_port = int(model.get_metadata_prop("pynq_port"))
-    pynq_username = model.get_metadata_prop("pynq_username")
-    pynq_password = model.get_metadata_prop("pynq_password")
-    pynq_target_dir = model.get_metadata_prop("pynq_target_dir")
-    deployment_dir = model.get_metadata_prop("pynq_deploy_dir")
-    # extracting last folder of absolute path (deployment_dir)
-    deployment_folder = os.path.basename(os.path.normpath(deployment_dir))
-    platform = model.get_metadata_prop("platform")
-    assert platform in ["alveo", "zynq-iodma"]
-    bitfile = model.get_metadata_prop("bitfile")
-    bitfile = os.path.basename(bitfile)
-    if pynq_password == "":
-        if "zynq" in platform:
-            raise Exception("PYNQ board remote exec needs password for sudo")
-        else:
-            local_prefix = ""  # assume we are using an ssh key
-            warnings.warn("Empty password, make sure you've set up an ssh key")
-    else:
-        local_prefix = "sshpass -p %s " % pynq_password
-
-    if platform == "alveo":
-        # Alveo can run without sudo but needs correct environment
-        remote_prefix = "conda activate finn-pynq-alveo; "
-    elif "zynq" in platform:
-        # PYNQ Zynq boards need to execute with sudo
-        remote_prefix = "echo %s | sudo -S " % pynq_password
-
-    # use platform attribute for correct remote execution
-    if platform == "alveo":
-        remote_cmd = "bash -ic 'bash alveo_run.sh throughput_test %d' \"" % batchsize
-    else:
-        remote_cmd = (
-            "python3.6 driver.py --exec_mode=throughput_test --batchsize={} "
-            "--bitfile={} --inputfile=input.npy --outputfile=output.npy "
-            '--platform={} "'
-        ).format(batchsize, bitfile, platform)
-    cmd = (
-        local_prefix + 'ssh {}@{} -p {} "cd {}/{}; ' + remote_prefix + remote_cmd
-    ).format(pynq_username, pynq_ip, pynq_port, pynq_target_dir, deployment_folder)
-    bash_command = ["/bin/bash", "-c", cmd]
-    process_throughput_test = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
-    process_throughput_test.communicate()
-
-    # remove any pre-existing metrics file
-    try:
-        os.remove("{}/nw_metrics.txt".format(deployment_dir))
-    except FileNotFoundError:
-        pass
-
-    cmd = local_prefix + "scp -P{} {}@{}:{}/{}/nw_metrics.txt {}".format(
-        pynq_port,
-        pynq_username,
-        pynq_ip,
-        pynq_target_dir,
-        deployment_folder,
-        deployment_dir,
-    )
-    bash_command = ["/bin/bash", "-c", cmd]
-    process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
-    process_compile.communicate()
-
-    try:
-        with open("{}/nw_metrics.txt".format(deployment_dir), "r") as file:
-            res = eval(file.read())
-        return res
-    except FileNotFoundError:
-        return None
-
-
-def throughput_test_rtlsim(model, batchsize=100):
-    """Runs a throughput test for the given IP-stitched model. When combined
-    with tracing, useful to determine bottlenecks and required FIFO sizes."""
-
-    assert (
-        model.get_metadata_prop("exec_mode") == "rtlsim"
-    ), """Top-level exec_mode
-    metadata_prop must be set to rtlsim"""
-
-    # create random input
-    iname = model.graph.input[0].name
-    ishape = model.get_tensor_shape(iname)
-    ishape_batch = list(ishape)  # copy so the original shape list is not mutated
-    ishape_batch[0] = batchsize
-    idt = model.get_tensor_datatype(iname)
-    dummy_input = gen_finn_dt_tensor(idt, ishape_batch)
-    # compute input/output sizes
-    oname = model.graph.output[0].name
-    oshape = model.get_tensor_shape(oname)
-    oshape_batch = list(oshape)  # copy so the original shape list is not mutated
-    oshape_batch[0] = batchsize
-    odt = model.get_tensor_datatype(oname)
-    i_bytes = (np.prod(ishape_batch) * idt.bitwidth()) / 8
-    o_bytes = (np.prod(oshape_batch) * odt.bitwidth()) / 8
-    # make empty exec context and insert input
-    ctx = model.make_empty_exec_context()
-    ctx[iname] = dummy_input
-    # remove liveness threshold, launch rtlsim
-    os.environ["LIVENESS_THRESHOLD"] = "-1"
-    rtlsim_exec(model, ctx)
-    # extract metrics
-    cycles = int(model.get_metadata_prop("cycles_rtlsim"))
-    clk_ns = float(model.get_metadata_prop("clk_ns"))
-    fclk_mhz = 1 / (clk_ns * 0.001)
-    runtime_s = (cycles * clk_ns) * (10 ** -9)
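-    # e.g. (illustrative) clk_ns=5.0 -> fclk_mhz=200.0, and cycles=1000
-    # -> runtime_s=5e-6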
-    res = dict()
-    res["cycles"] = cycles
-    res["runtime[ms]"] = runtime_s * 1000
-    res["throughput[images/s]"] = batchsize / runtime_s
-    res["DRAM_in_bandwidth[Mb/s]"] = i_bytes * 0.000001 / runtime_s
-    res["DRAM_out_bandwidth[Mb/s]"] = o_bytes * 0.000001 / runtime_s
-    res["fclk[mhz]"] = fclk_mhz
-    res["N"] = batchsize
-
-    return res
diff --git a/src/finn/custom_op/debugmarker.py b/src/finn/custom_op/debugmarker.py
deleted file mode 100644
index 6c02f0dc8..000000000
--- a/src/finn/custom_op/debugmarker.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from finn.custom_op import CustomOp
-from onnx import helper
-
-
-class DebugMarker(CustomOp):
-    def get_nodeattr_types(self):
-        return {"export_debug_name": ("s", True, "")}
-
-    def make_shape_compatible_op(self, model):
-        node = self.onnx_node
-        return helper.make_node("Identity", [node.input[0]], [node.output[0]])
-
-    def infer_node_datatype(self, model):
-        node = self.onnx_node
-        # data type stays the same
-        dtype = model.get_tensor_datatype(node.input[0])
-        model.set_tensor_datatype(node.output[0], dtype)
-        # create quantization annotation for debug marker
-        model.set_tensor_datatype(self.get_nodeattr("export_debug_name"), dtype)
-
-    def execute_node(self, context, graph):
-        node = self.onnx_node
-        inp_name = node.input[0]
-        out_name = node.output[0]
-        inp = context[inp_name]
-        context[out_name] = inp
-        # insert debug marker output as separate tensor
-        context[self.get_nodeattr("export_debug_name")] = inp
-
-    def verify_node(self):
-        info_messages = []
-        # verify that "domain" is set to "finn"
-        domain_value = self.onnx_node.domain
-        if domain_value == "finn":
-            info_messages.append("Attribute domain is set correctly")
-        else:
-            info_messages.append('Attribute domain should be set to "finn"')
-        return info_messages
diff --git a/src/finn/custom_op/im2col.py b/src/finn/custom_op/im2col.py
deleted file mode 100644
index 8ed004170..000000000
--- a/src/finn/custom_op/im2col.py
+++ /dev/null
@@ -1,203 +0,0 @@
-import numpy as np
-from onnx import TensorProto, helper
-
-from finn.custom_op import CustomOp
-import finn.util.basic as util
-from finn.core.datatype import DataType
-
-# adapted from A. Karpathy's CS231n im2col code
-# utilities to generate a patch matrix from a multichannel image
-# of shape (batches, channels, height, width)
-
-
-def compute_conv_output_dim(ifm_dim, k, stride, pad=0):
-    """Returns spatial output dimension size for convolution with given params."""
-    return int(((ifm_dim + 2 * pad - k) / stride) + 1)
-
-
-def get_im2col_indices_nchw(
-    x_shape, field_height, field_width, padding=0, stride_y=1, stride_x=1
-):
-    """Returns im2col indices."""
-    # First figure out what the size of the output should be
-    N, C, H, W = x_shape
-    out_height = compute_conv_output_dim(H, field_height, stride_y, padding)
-    out_width = compute_conv_output_dim(W, field_width, stride_x, padding)
-
-    i0 = np.repeat(np.arange(field_height), field_width)
-    i0 = np.tile(i0, C)
-    i1 = stride_y * np.repeat(np.arange(out_height), out_width)
-    j0 = np.tile(np.arange(field_width), field_height * C)
-    j1 = stride_x * np.tile(np.arange(out_width), out_height)
-    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
-    j = j0.reshape(-1, 1) + j1.reshape(1, -1)
-
-    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
-
-    return (k, i, j)
-
-
-def im2col_indices_nchw(
-    x, field_height, field_width, padding=0, stride_y=1, stride_x=1, pad_val=0
-):
-    """Performs im2col on x with given field height and width, as well as values
-    for padding and stride size.
-    Returns result of im2col."""
-    # Zero-pad the input
-    p = padding
-    x_padded = np.pad(
-        x, ((0, 0), (0, 0), (p, p), (p, p)), mode="constant", constant_values=pad_val
-    )
-
-    k, i, j = get_im2col_indices_nchw(
-        x.shape, field_height, field_width, padding, stride_y, stride_x
-    )
-
-    cols = x_padded[:, k, i, j]
-    C = x.shape[1]
-    cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
-    return cols
-
-
-# ONNX i/o tensor shape assumptions for Im2Col:
-# input 0 is the input vector, shape (1, ih, iw, ifm)
-# output 0 is the output vector, shape (1, oh, ow, k*k*ifm)
-# where:
-# * ih, iw are the height and width of the input image
-# * oh, ow are the height and width of the output (lowered) image
-# * ifm is the number of input channels
-# * k is the convolutional kernel size
-
-# note: for the innermost (dot product) dimension of k*k*ifm, we
-# assume an internal ordering (k, k, ifm)
-
-
-class Im2Col(CustomOp):
-    def get_nodeattr_types(self):
-        return {
-            "stride": ("i", True, 1),
-            "kernel_size": ("i", True, 1),
-            "input_shape": ("s", True, ""),
-            "pad_amount": ("i", False, 0),
-            "pad_value": ("i", False, 0),
-            # depthwise: if != 0, infer ConvolutionInputGenerator with depthwise == 1
-            "depthwise": ("i", False, 0),
-        }
-
-    def make_shape_compatible_op(self, model):
-        k = self.get_nodeattr("kernel_size")
-        stride = self.get_nodeattr("stride")
-        ishape = self.get_nodeattr("input_shape")
-        pad = self.get_nodeattr("pad_amount")
-
-        # convert the string representation of a tuple into a list of integers
-        ishape = ishape.strip("(").strip(")").split(",")
-        ishape = [int(i) for i in ishape]
-
-        # extract all necessary information and determine output dimensions
-        ifm_ch = ishape[-1]
-        assert len(ishape) == 4, "Unexpected input shape for Im2Col"
-        assert ishape[1] == ishape[2], "Im2Col for non-square images unsupported"
-        ifm_dim = ishape[1]
-        ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, pad)
-
-        # implement tensor with correct shape
-        values = np.random.randn(1, ofm_dim, ofm_dim, k * k * ifm_ch).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
-
-    def infer_node_datatype(self, model):
-        node = self.onnx_node
-        # data type stays the same
-        dtype = model.get_tensor_datatype(node.input[0])
-        model.set_tensor_datatype(node.output[0], dtype)
-
-    def execute_node(self, context, graph):
-        node = self.onnx_node
-        k = self.get_nodeattr("kernel_size")
-        stride = self.get_nodeattr("stride")
-        pad = self.get_nodeattr("pad_amount")
-        pad_val = self.get_nodeattr("pad_value")
-        iname = node.input[0]
-        x = context[iname]
-        qnt_annotations = graph.quantization_annotation
-        ret = util.get_by_name(qnt_annotations, iname, "tensor_name")
-        ret = util.get_by_name(ret.quant_parameter_tensor_names, "finn_datatype", "key")
-        idt = DataType[ret.value]
-        if pad != 0:
-            assert idt.allowed(pad_val), "Im2Col dtype must allow pad_val"
-        # check that input is NHWC
-        assert x.ndim == 4, "Unexpected number of input dims for Im2Col"
-        N, H, W, C = x.shape
-        assert H == W, "Unexpected input shape for Im2Col"
-        out_dim = compute_conv_output_dim(H, k, stride, pad)
-        # internally convert input to NCHW
-        x = x.transpose(0, 3, 1, 2)
-        # call NCHW im2col implementation
-        ret = im2col_indices_nchw(x, k, k, pad, stride, stride, pad_val=pad_val)
-        # result shape is (k*k*N, out_dim*out_dim), convert to NCHW
-        ret = ret.reshape(N, C, k, k, out_dim, out_dim)
-        # (N=0,C=1,kh=2,kw=3,H=4,W=5) -> (N=0,H=4,W=5,kh=2,kw=3,C=1)
-        ret = ret.transpose(0, 4, 5, 2, 3, 1)
-        ret = ret.reshape(N, out_dim, out_dim, k * k * C)
-
-        context[node.output[0]] = ret
-
-    def verify_node(self):
-        node = self.onnx_node
-
-        info_messages = []
-
-        # verify number of attributes
-        num_of_attr = 3
-        if len(node.attribute) == num_of_attr:
-            info_messages.append("The number of attributes is correct")
-        else:
-            info_messages.append(
-                """The number of attributes is incorrect,
-            {} should have {} attributes""".format(
-                    node.op_type, num_of_attr
-                )
-            )
-
-        # verify that "domain" is set to "finn"
-        domain_value = node.domain
-        if domain_value == "finn":
-            info_messages.append("Attribute domain is set correctly")
-        else:
-            info_messages.append('Attribute domain should be set to "finn"')
-
-        # verify that all necessary attributes exist
-        try:
-            self.get_nodeattr("stride")
-            self.get_nodeattr("kernel_size")
-            info_messages.append("All necessary attributes exist")
-        except Exception:
-            info_messages.append(
-                """The necessary attributes do not exist.
-                Im2Col needs the following attributes:
-                stride, kernel_size"""
-            )
-
-        # verify the number of inputs
-        if len(node.input) == 1:
-            info_messages.append("The number of inputs is correct")
-        else:
-            info_messages.append("{} needs 1 data input".format(node.op_type))
-
-        return info_messages
diff --git a/src/finn/custom_op/maxpoolnhwc.py b/src/finn/custom_op/maxpoolnhwc.py
deleted file mode 100644
index c623e4007..000000000
--- a/src/finn/custom_op/maxpoolnhwc.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from finn.custom_op import CustomOp
-import numpy as np
-from onnx import helper, TensorProto
-from finn.core.modelwrapper import ModelWrapper
-
-
-def compute_pool_output_dim(ifm_dim, k, stride, pad=0):
-    "Return spatial output dimension size for pooling with given params."
-    return int(((ifm_dim + 2 * pad - k) / stride) + 1)
-
-
-class MaxPoolNHWC(CustomOp):
-    # a MaxPool node, but using the NHWC data layout
-
-    def get_nodeattr_types(self):
-        # attributes mirror those of the standard ONNX MaxPool node
-        return {
-            "kernel_shape": ("ints", True, []),
-            "pads": ("ints", True, []),
-            "strides": ("ints", True, []),
-        }
-
-    def make_shape_compatible_op(self, model):
-        node = self.onnx_node
-        iname = node.input[0]
-        ishape = model.get_tensor_shape(iname)
-        kernel_shape = self.get_nodeattr("kernel_shape")
-        pads = self.get_nodeattr("pads")
-        strides = self.get_nodeattr("strides")
-        assert len(kernel_shape) == 2, "Non-2D MaxPoolNHWC not supported"
-        assert pads[0] == pads[2], "Uneven padding not supported"
-        assert pads[1] == pads[3], "Uneven padding not supported"
-        (n, hi, wi, c) = ishape
-        ho = compute_pool_output_dim(hi, kernel_shape[0], strides[0], pads[0])
-        wo = compute_pool_output_dim(wi, kernel_shape[1], strides[1], pads[1])
-        oshape = (n, ho, wo, c)
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
-
-    def infer_node_datatype(self, model):
-        node = self.onnx_node
-        # data type stays the same
-        dtype = model.get_tensor_datatype(node.input[0])
-        model.set_tensor_datatype(node.output[0], dtype)
-
-    def execute_node(self, context, graph):
-        node = self.onnx_node
-        inp_name = node.input[0]
-        out_name = node.output[0]
-        inp = context[inp_name]
-        dummy_out = context[out_name]
-        # convert i/o NHWC -> NCHW
-        inp = np.transpose(inp, (0, 3, 1, 2))
-        dummy_out = np.transpose(dummy_out, (0, 3, 1, 2))
-        # execute as regular MaxPool
-        node.domain = ""
-        node.op_type = "MaxPool"
-        inp_vi = helper.make_tensor_value_info(inp_name, TensorProto.FLOAT, inp.shape)
-        out_vi = helper.make_tensor_value_info(
-            out_name, TensorProto.FLOAT, dummy_out.shape
-        )
-        tmp_graph = helper.make_graph(
-            nodes=[node], name="tmp_graph", inputs=[inp_vi], outputs=[out_vi]
-        )
-        tmp_model = helper.make_model(tmp_graph, producer_name="finn")
-        tmp_model = ModelWrapper(tmp_model)
-        new_ctx = {inp_name: inp}
-        from finn.core.onnx_exec import execute_onnx
-
-        ret = execute_onnx(tmp_model, new_ctx)
-        # restore original node props
-        node.domain = "finn"
-        node.op_type = "MaxPoolNHWC"
-        outp = ret[out_name]
-        # convert output NCHW -> NHWC
-        outp = np.transpose(outp, (0, 2, 3, 1))
-        context[out_name] = outp
-
-    def verify_node(self):
-        info_messages = []
-        # verify that "domain" is set to "finn"
-        domain_value = self.onnx_node.domain
-        if domain_value == "finn":
-            info_messages.append("Attribute domain is set correctly")
-        else:
-            info_messages.append('Attribute domain should be set to "finn"')
-        return info_messages
diff --git a/src/finn/custom_op/multithreshold.py b/src/finn/custom_op/multithreshold.py
deleted file mode 100644
index bc0a454cd..000000000
--- a/src/finn/custom_op/multithreshold.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-import onnx.helper as helper
-
-from finn.core.datatype import DataType
-from finn.custom_op import CustomOp
-
-
-def multithreshold(v, thresholds, out_scale=None, out_bias=None):
-    """Given a set of threshold values t={t_0, t_1 ... t_n} the successive
-    thresholding maps any real number x to an integer in the interval [0, n],
-    where the returned integer is the number of thresholds x is greater than
-    or equal to.
-
-    The output tensor will be scaled by out_scale and biased by out_bias."""
-    # the inputs are expected to be in the shape (N,C,H,W) or (N, C)
-    # the MultiThreshold node supports a data_layout attribute that can be set
-    # to 'NHWC' to support (N,H,W,C) data layout mode for in-out as well
-    # N : Batch size
-    # C : Number of channels
-    # H : Height of the input images
-    # W : Width of the input images
-    #
-    # the thresholds are expected to be in the shape (C, B)
-    # C : Number of channels (must be the same value as C in input tensor
-    #     or 1 if all channels use the same threshold value)
-    # B : Desired activation steps, i.e. B = 2^n - 1 for n-bit activation
-    #     (e.g. B=15 for 4-bit)
-    # the output tensor will be scaled by out_scale and biased by out_bias
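-    # worked example (illustrative): with per-channel thresholds t = [1, 2, 3],
-    # x = 0.5 -> 0, x = 1.0 -> 1, x = 2.5 -> 2, x = 3.0 -> 3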
-    # assert threshold shape
-    is_global_threshold = thresholds.shape[0] == 1
-    assert (
-        v.shape[1] == thresholds.shape[0]
-    ) or is_global_threshold, """Threshold
-    shape incorrect"""
-    # save the required shape sizes for the loops (N, C and B)
-    num_batch = v.shape[0]
-    num_channel = v.shape[1]
-    num_act = thresholds.shape[1]
-    # reshape inputs to enable channel-wise reading
-    vr = v.reshape((v.shape[0], v.shape[1], -1))
-    # initiate output tensor
-    ret = np.zeros_like(vr)
-    # iterate over thresholds channel-wise
-    for t in range(num_channel):
-        channel_thresh = thresholds[0] if is_global_threshold else thresholds[t]
-        # iterate over batches
-        for b in range(num_batch):
-            # iterate over the different thresholds for one channel
-            for a in range(num_act):
-                ret[b][t] += (vr[b][t] >= channel_thresh[a]).astype(int)
-
-    if out_scale is None:
-        out_scale = 1.0
-    if out_bias is None:
-        out_bias = 0.0
-    return out_scale * ret.reshape(v.shape) + out_bias
-
-
-class MultiThreshold(CustomOp):
-    """Class that corresponds to a multithresholding node."""
-
-    def get_nodeattr_types(self):
-        return {
-            "out_dtype": ("s", True, ""),
-            "out_scale": ("f", False, 1.0),
-            "out_bias": ("f", False, 0.0),
-            "data_layout": ("s", False, "NCHW"),
-        }
-
-    def make_shape_compatible_op(self, model):
-        node = self.onnx_node
-        return helper.make_node("Relu", [node.input[0]], [node.output[0]])
-
-    def infer_node_datatype(self, model):
-        node = self.onnx_node
-        odt = self.get_nodeattr("out_dtype")
-        model.set_tensor_datatype(node.output[0], DataType[odt])
-
-    def execute_node(self, context, graph):
-        node = self.onnx_node
-        # save inputs
-        v = context[node.input[0]]
-        thresholds = context[node.input[1]]
-        # retrieve attributes if output scaling is used
-        out_scale = self.get_nodeattr("out_scale")
-        out_bias = self.get_nodeattr("out_bias")
-        # transpose input if NHWC data layout is chosen
-        data_layout = self.get_nodeattr("data_layout")
-        if data_layout == "NHWC":
-            if v.ndim == 4:
-                # NHWC -> NCHW
-                v = np.transpose(v, (0, 3, 1, 2))
-            elif v.ndim == 2:
-                # no HW dimension means NHWC and NCHW layouts are equivalent
-                pass
-            else:
-                raise Exception(
-                    "Unknown data_layout and input ndim"
-                    " combination for MultiThreshold."
-                )
-        # calculate output
-        output = multithreshold(v, thresholds, out_scale, out_bias)
-        # setting context according to output
-        if data_layout == "NHWC":
-            if output.ndim == 4:
-                # NCHW -> NHWC
-                output = np.transpose(output, (0, 2, 3, 1))
-            elif output.ndim == 2:
-                # no HW dimension means NHWC and NCHW layouts are equivalent
-                pass
-            else:
-                raise Exception(
-                    "Unknown data_layout and output ndim"
-                    " combination for MultiThreshold."
-                )
-        context[node.output[0]] = output
-
-    def verify_node(self):
-        info_messages = []
-
-        # verify that "domain" is set to "finn"
-        domain_value = self.onnx_node.domain
-        if domain_value == "finn":
-            info_messages.append("Attribute domain is set correctly")
-        else:
-            info_messages.append('Attribute domain should be set to "finn"')
-
-        # verify that all necessary attributes exist
-        try:
-            self.get_nodeattr("out_dtype")
-            info_messages.append("All necessary attributes exist")
-        except Exception:
-            info_messages.append(
-                """The necessary attributes do not exist.
-                MultiThreshold needs the following attribute:
-                out_dtype (out_scale and out_bias are optional)"""
-            )
-
-        # verify the number of inputs
-        if len(self.onnx_node.input) == 2:
-            info_messages.append("The number of inputs is correct")
-        else:
-            info_messages.append(
-                """MultiThreshold needs 2 inputs
-                    (data input and threshold values)"""
-            )
-
-        return info_messages
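
For reference, a minimal NumPy sketch of the semantics implemented by the
removed multithreshold helper: per channel, each output counts how many of
that channel's thresholds the input value meets or exceeds (values below are
illustrative only):

    import numpy as np

    # toy input: batch=1, channels=2, one element per channel
    v = np.array([[[-1.0], [0.5]]])
    # per-channel thresholds: channel 0 -> {0.0, 1.0}, channel 1 -> {0.4, 0.6}
    thresholds = np.array([[0.0, 1.0], [0.4, 0.6]])
    # broadcast-compare against every threshold, then count the hits
    out = (v[:, :, :, None] >= thresholds[None, :, None, :]).sum(axis=-1)
    print(out)  # [[[0] [1]]] -- 0.5 clears 0.4 but not 0.6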
diff --git a/src/finn/custom_op/quantavgpool2d.py b/src/finn/custom_op/quantavgpool2d.py
deleted file mode 100644
index 28d010692..000000000
--- a/src/finn/custom_op/quantavgpool2d.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import numpy as np
-from onnx import TensorProto, helper
-import onnxruntime as rt
-
-from finn.custom_op import CustomOp
-from finn.core.datatype import DataType
-from finn.custom_op.maxpoolnhwc import compute_pool_output_dim
-
-
-class QuantAvgPool2d(CustomOp):
-    """Class that corresponds to the quantized average pooling
-    layer from brevitas"""
-
-    def get_nodeattr_types(self):
-        return {
-            "stride": ("i", True, 1),
-            "kernel": ("i", True, 1),
-            "ibits": ("i", True, 1),
-            "obits": ("i", True, 1),
-            # determines if values are signed (set to "1") or unsigned ("0")
-            "signed": ("i", True, 0),
-            # data layout attribute can be set to "NCHW" or "NHWC"
-            "data_layout": ("s", False, "NCHW"),
-        }
-
-    def make_shape_compatible_op(self, model):
-        node = self.onnx_node
-        k = self.get_nodeattr("kernel")
-        s = self.get_nodeattr("stride")
-        data_layout = self.get_nodeattr("data_layout")
-        if data_layout == "NCHW":
-            return helper.make_node(
-                "AveragePool",
-                inputs=[node.input[0]],
-                outputs=[node.output[0]],
-                kernel_shape=[k, k],
-                strides=[s, s],
-            )
-        elif data_layout == "NHWC":
-            iname = node.input[0]
-            ishape = model.get_tensor_shape(iname)
-            (n, hi, wi, c) = ishape
-            ho = compute_pool_output_dim(hi, k, s)
-            wo = compute_pool_output_dim(wi, k, s)
-            oshape = (n, ho, wo, c)
-            # create a dummy tensor with the correct output shape
-            values = np.random.randn(*oshape).astype(np.float32)
-            return helper.make_node(
-                "Constant",
-                inputs=[],
-                outputs=[node.output[0]],
-                value=helper.make_tensor(
-                    name="const_tensor",
-                    data_type=TensorProto.FLOAT,
-                    dims=values.shape,
-                    vals=values.flatten().astype(float),
-                ),
-            )
-
-        else:
-            raise Exception(
-                """data_layout for QuantAvgPool2d is set to an invalid value.
-                    It has to be "NCHW" or "NHWC"."""
-            )
-
-    def infer_node_datatype(self, model):
-        node = self.onnx_node
-        bw = self.get_nodeattr("obits")
-        if bw in [2, 4, 8, 16, 32]:
-            if self.get_nodeattr("signed") == 0:
-                dtype = DataType["UINT%d" % bw]
-            else:
-                dtype = DataType["INT%d" % bw]
-        else:
-            raise Exception("Unsupported output datatype for QuantAvgPool2d")
-        model.set_tensor_datatype(node.output[0], dtype)
-
-    def get_accum_size(self):
-        ibits = self.get_nodeattr("ibits")
-        k = self.get_nodeattr("kernel")
-        max_value = 2 ** ibits - 1
-        max_value = max_value * k * k
-        max_bit_width = int(max_value).bit_length()
-        return max_bit_width
-
-    def get_shifts(self):
-        shift_bits = self.get_accum_size() - self.get_nodeattr("obits")
-        shift_bits = shift_bits if shift_bits >= 0 else 0
-        return shift_bits
-
-    def execute_node(self, context, graph):
-        # create a standard average pooling node to help calculate the result
-        node = self.onnx_node
-        k = self.get_nodeattr("kernel")
-        s = self.get_nodeattr("stride")
-        inp_values = context[node.input[0]]
-        oshape = context[node.output[0]].shape
-        if self.get_nodeattr("data_layout") == "NHWC":
-            inp_values = inp_values.transpose(0, 3, 1, 2)
-            oshape = (context[node.output[0]]).transpose(0, 3, 1, 2).shape
-        ishape = inp_values.shape
-        inp = helper.make_tensor_value_info(node.input[0], TensorProto.FLOAT, ishape)
-        outp = helper.make_tensor_value_info(node.output[0], TensorProto.FLOAT, oshape)
-        node_avgpool = helper.make_node(
-            "AveragePool",
-            inputs=[node.input[0]],
-            outputs=[node.output[0]],
-            kernel_shape=[k, k],
-            strides=[s, s],
-        )
-        graph_avgpool = helper.make_graph(
-            nodes=[node_avgpool],
-            name="single-avgpool-exec",
-            inputs=[inp],
-            outputs=[outp],
-        )
-        model_avgpool = helper.make_model(graph_avgpool)
-        idict = {node.input[0]: inp_values}
-        sess = rt.InferenceSession(model_avgpool.SerializeToString())
-        result_temp = sess.run(None, idict)
-        # remove scaling introduced by average
-        result_temp = result_temp[0] * (k * k)
-        result = np.right_shift(result_temp.astype(int), self.get_shifts())
-        if self.get_nodeattr("data_layout") == "NHWC":
-            result = result.transpose(0, 2, 3, 1)
-        context[node.output[0]] = result.astype(np.float32)
-
-    def verify_node(self):
-        info_messages = []
-        # verify that "domain" is set to "finn"
-        domain_value = self.onnx_node.domain
-        if domain_value == "finn":
-            info_messages.append("Attribute domain is set correctly")
-        else:
-            info_messages.append('Attribute domain should be set to "finn"')
-        return info_messages
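
A short worked example of the accumulator sizing and shift amount computed by
get_accum_size and get_shifts above (the parameter values are illustrative):

    ibits, k, obits = 8, 2, 4
    max_value = (2 ** ibits - 1) * k * k      # 1020: largest possible pooling sum
    accum_bits = int(max_value).bit_length()  # 10 bits hold the accumulator
    shift = max(accum_bits - obits, 0)        # 6: right-shift down to obits
    print(accum_bits, shift)                  # -> 10 6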
diff --git a/src/finn/custom_op/registry.py b/src/finn/custom_op/registry.py
deleted file mode 100644
index ecf2a711f..000000000
--- a/src/finn/custom_op/registry.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# make sure new CustomOp subclasses are imported here so that they get
-# registered and plug in correctly into the infrastructure
-from finn.custom_op.fpgadataflow.convolutioninputgenerator import (
-    ConvolutionInputGenerator,
-)
-from finn.custom_op.fpgadataflow.downsampler import DownSampler
-from finn.custom_op.fpgadataflow.streamingfclayer_batch import StreamingFCLayer_Batch
-from finn.custom_op.fpgadataflow.streamingmaxpool_batch import StreamingMaxPool_Batch
-from finn.custom_op.fpgadataflow.streamingfifo import StreamingFIFO
-from finn.custom_op.im2col import Im2Col
-from finn.custom_op.fpgadataflow.tlastmarker import TLastMarker
-from finn.custom_op.multithreshold import MultiThreshold
-from finn.custom_op.streamingdataflowpartition import StreamingDataflowPartition
-from finn.custom_op.xnorpopcount import XnorPopcountMatMul
-from finn.custom_op.maxpoolnhwc import MaxPoolNHWC
-from finn.custom_op.fpgadataflow.streamingdatawidthconverter_batch import (
-    StreamingDataWidthConverter_Batch,
-)
-from finn.custom_op.fpgadataflow.globalaccpool_batch import GlobalAccPool_Batch
-from finn.custom_op.fpgadataflow.pool_batch import Pool_Batch
-from finn.custom_op.fpgadataflow.fmpadding_batch import FMPadding_Batch
-from finn.custom_op.fpgadataflow.thresholding_batch import Thresholding_Batch
-from finn.custom_op.fpgadataflow.addstreams_batch import AddStreams_Batch
-from finn.custom_op.fpgadataflow.labelselect_batch import LabelSelect_Batch
-from finn.custom_op.quantavgpool2d import QuantAvgPool2d
-from finn.custom_op.fpgadataflow.duplicatestreams_batch import DuplicateStreams_Batch
-from finn.custom_op.fpgadataflow.vector_vector_activate_batch import (
-    Vector_Vector_Activate_Batch,
-)
-from finn.custom_op.fpgadataflow.channelwise_op_batch import ChannelwiseOp_Batch
-from finn.custom_op.fpgadataflow.iodma import IODMA
-from finn.custom_op.debugmarker import DebugMarker
-
-# create a mapping of all known CustomOp names and classes
-custom_op = {}
-
-custom_op["MultiThreshold"] = MultiThreshold
-custom_op["DownSampler"] = DownSampler
-custom_op["XnorPopcountMatMul"] = XnorPopcountMatMul
-custom_op["Im2Col"] = Im2Col
-custom_op["StreamingMaxPool_Batch"] = StreamingMaxPool_Batch
-custom_op["StreamingFCLayer_Batch"] = StreamingFCLayer_Batch
-custom_op["ConvolutionInputGenerator"] = ConvolutionInputGenerator
-custom_op["TLastMarker"] = TLastMarker
-custom_op["StreamingDataflowPartition"] = StreamingDataflowPartition
-custom_op["MaxPoolNHWC"] = MaxPoolNHWC
-custom_op["StreamingDataWidthConverter_Batch"] = StreamingDataWidthConverter_Batch
-custom_op["StreamingFIFO"] = StreamingFIFO
-custom_op["GlobalAccPool_Batch"] = GlobalAccPool_Batch
-custom_op["Pool_Batch"] = Pool_Batch
-custom_op["FMPadding_Batch"] = FMPadding_Batch
-custom_op["Thresholding_Batch"] = Thresholding_Batch
-custom_op["AddStreams_Batch"] = AddStreams_Batch
-custom_op["LabelSelect_Batch"] = LabelSelect_Batch
-custom_op["QuantAvgPool2d"] = QuantAvgPool2d
-custom_op["DuplicateStreams_Batch"] = DuplicateStreams_Batch
-custom_op["Vector_Vector_Activate_Batch"] = Vector_Vector_Activate_Batch
-custom_op["ChannelwiseOp_Batch"] = ChannelwiseOp_Batch
-custom_op["IODMA"] = IODMA
-custom_op["DebugMarker"] = DebugMarker
-
-
-def getCustomOp(node):
-    "Return a FINN CustomOp instance for the given ONNX node, if it exists."
-    op_type = node.op_type
-    try:
-        # lookup op_type in registry of CustomOps
-        inst = custom_op[op_type](node)
-        return inst
-    except KeyError:
-        # exception if op_type is not supported
-        raise Exception("Custom op_type %s is currently not supported." % op_type)
diff --git a/src/finn/custom_op/streamingdataflowpartition.py b/src/finn/custom_op/streamingdataflowpartition.py
deleted file mode 100644
index 31cd38fea..000000000
--- a/src/finn/custom_op/streamingdataflowpartition.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from finn.custom_op import CustomOp
-
-
-class StreamingDataflowPartition(CustomOp):
-    """Class that corresponds to the meta/container node StreamingDataflowPartition
-    which is a placeholder for a group of fpgadataflow nodes that have been separated
-    out into a FINN-ONNX model of its own. Note that it does not produce any HLS or
-    bitfile by itself."""
-
-    def get_nodeattr_types(self):
-        return {
-            "model": ("s", True, ""),
-            "res_estimate": ("s", False, ""),
-            "res_hls": ("s", False, ""),
-            "res_synth": ("s", False, ""),
-        }
-
-    def make_shape_compatible_op(self, model):
-        pass
-
-    def infer_node_datatype(self, model):
-        pass
-
-    def execute_node(self, context, graph):
-        # TODO add RPC execution with synthesized bitfile?
-        # whole-design rtlsim with PyVerilator may also be an alternative
-        pass
-
-    def verify_node(self):
-        info_messages = []
-
-        # verify number of attributes
-        num_of_attr = 1
-        if len(self.onnx_node.attribute) == num_of_attr:
-            info_messages.append("The number of attributes is correct")
-        else:
-            info_messages.append(
-                """The number of attributes is incorrect,
-            {} should have {} attributes""".format(
-                    self.onnx_node.op_type, num_of_attr
-                )
-            )
-
-        # verify that "domain" is set to "finn"
-        domain_value = self.onnx_node.domain
-        if domain_value == "finn":
-            info_messages.append("Attribute domain is set correctly")
-        else:
-            info_messages.append('Attribute domain should be set to "finn"')
-
-        # verify that all necessary attributes exist
-        try:
-            self.get_nodeattr("model")
-            info_messages.append("All necessary attributes exist")
-        except Exception:
-            info_messages.append(
-                """The necessary attributes do not exist.
-                StreamingDataflowPartition needs the following attribute(s):
-                model"""
-            )
-
-        # verify the number of inputs
-        if len(self.onnx_node.input) >= 1:
-            info_messages.append("The number of inputs is correct")
-        else:
-            info_messages.append("StreamingDataflowPartition needs 1 data input")
-
-        return info_messages
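
For context, the "model" attribute stores the path to the separately saved
FINN-ONNX file for the partition. A hedged access sketch, assuming the removed
registry and a ModelWrapper that accepts a filename (sdp_node is hypothetical):

    from finn.core.modelwrapper import ModelWrapper
    from finn.custom_op.registry import getCustomOp

    def load_partition(sdp_node):
        # sdp_node: an ONNX NodeProto with op_type StreamingDataflowPartition
        op = getCustomOp(sdp_node)
        # the "model" attribute holds the path to the child model file
        return ModelWrapper(op.get_nodeattr("model"))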
diff --git a/src/finn/custom_op/xnorpopcount.py b/src/finn/custom_op/xnorpopcount.py
deleted file mode 100644
index 199b7a5d9..000000000
--- a/src/finn/custom_op/xnorpopcount.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-import onnx.helper as helper
-
-from finn.core.datatype import DataType
-from finn.custom_op import CustomOp
-
-
-def xnorpopcountmatmul(inp0, inp1):
-    """Simulates XNOR-popcount matrix multiplication as a regular bipolar
-    matrix multiplication followed by some post processing."""
-    # extract the operand shapes
-    # (M, K0) = inp0.shape
-    # (K1, N) = inp1.shape
-    K0 = inp0.shape[-1]
-    K1 = inp1.shape[0]
-    # make sure shapes are compatible with matmul
-    assert K0 == K1, "Matrix shapes are not compatible with matmul."
-    K = K0
-    # convert binary inputs to bipolar
-    inp0_bipolar = 2.0 * inp0 - 1.0
-    inp1_bipolar = 2.0 * inp1 - 1.0
-    # call regular numpy matrix multiplication
-    out = np.matmul(inp0_bipolar, inp1_bipolar)
-    # XNOR-popcount does not produce the regular dot product result --
-    # it returns the number of +1s after XNOR. let P be the number of +1s
-    # and N be the number of -1s. XNOR-popcount returns P, whereas the
-    # regular dot product result from numpy is P-N, so we need to apply
-    # some correction.
-    # out = P-N
-    # K = P+N
-    # out + K = 2P, so P = (out + K)/2
-    return (out + K) * 0.5
-
-
-class XnorPopcountMatMul(CustomOp):
-    """Class that corresponds to a XNOR-popcount matrix
-    multiplication node."""
-
-    def get_nodeattr_types(self):
-        return {}
-
-    def make_shape_compatible_op(self, model):
-        node = self.onnx_node
-        return helper.make_node(
-            "MatMul", [node.input[0], node.input[1]], [node.output[0]]
-        )
-
-    def infer_node_datatype(self, model):
-        node = self.onnx_node
-        # ensure inputs are binary
-        assert (
-            model.get_tensor_datatype(node.input[0]) == DataType["BINARY"]
-        ), """FINN
-        DataType of first input is not set to BINARY as it should be."""
-        assert (
-            model.get_tensor_datatype(node.input[1]) == DataType["BINARY"]
-        ), """FINN
-        DataTypes of second input is not set to BINARY as it should be."""
-        # XNOR-popcount produces unsigned integers, assume uint32
-        model.set_tensor_datatype(node.output[0], DataType["UINT32"])
-
-    def execute_node(self, context, graph):
-        node = self.onnx_node
-        # save inputs
-        inp0 = context[node.input[0]]
-        inp1 = context[node.input[1]]
-        # calculate output
-        output = xnorpopcountmatmul(inp0, inp1)
-        # set context according to output name
-        context[node.output[0]] = output
-
-    def verify_node(self):
-        info_messages = []
-
-        # verify number of attributes
-        num_of_attr = 0
-        if len(self.onnx_node.attribute) == num_of_attr:
-            info_messages.append("The number of attributes is correct")
-        else:
-            info_messages.append(
-                """The number of attributes is incorrect,
-            {} should have {} attributes""".format(
-                    self.onnx_node.op_type, num_of_attr
-                )
-            )
-
-        # verify that "domain" is set to "finn"
-        domain_value = self.onnx_node.domain
-        if domain_value == "finn":
-            info_messages.append("Attribute domain is set correctly")
-        else:
-            info_messages.append('Attribute domain should be set to "finn"')
-
-        # verify that all necessary attributes exist
-        info_messages.append("XnorPopcountMatMul should not have any attributes")
-
-        # verify the number of inputs
-        if len(self.onnx_node.input) == 2:
-            info_messages.append("The number of inputs is correct")
-        else:
-            info_messages.append("XnorPopcountMatMul needs 2 data inputs")
-
-        return info_messages
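
A quick numeric check of the correction derived in xnorpopcountmatmul above,
on an illustrative pair of binary vectors:

    import numpy as np

    a = np.array([1.0, 0.0, 1.0, 1.0])       # binary {0, 1} operands
    b = np.array([1.0, 1.0, 0.0, 1.0])
    K = a.shape[0]
    out = np.dot(2 * a - 1, 2 * b - 1)        # bipolar dot product = P - N
    popcount_xnor = np.sum(a == b)            # P: number of matching positions
    assert (out + K) * 0.5 == popcount_xnor   # (0 + 4) / 2 == 2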
diff --git a/src/finn/data/onnx/mnist-conv/model.onnx b/src/finn/data/onnx/mnist-conv/model.onnx
deleted file mode 100644
index fc1a3f733c6e6243dd23dacb125b7a372de55a50..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 26454
zcmce-XH*r@vMx%F3P=zMf)bRVAR-8?t^yS>5+s?$04fGV5X?$Wf+%7_5d(^VqN1We
zS7AU5Ac{Go7%*eb%3J%KbNAUN+&9Mi^G5$zqpNGqRb4f^W`%F6$ZIPM@eLYjsMz1m
z$<Dz{d1{2+^vH#CtYqCPnq<|csde(78a;Jh_`>jL2UkZmqkp{%jSi2QH8m>SA@tvV
zwDtbihlX8(BIbq1^mpk0PkuAyfPWv^U&c*FtxQ%;X?SE*>_SI1wSUgwq}_GM{K&ZZ
zb7L1q%#ZBv66zPbP|6b8&#?1<p_?iE<JLw`t<#+F=*aN7|1OG$qKD!RS!Ek7HRYIv
z(Gg+cF&?rWvj3nMsHseiT{u59YHFC7THqibuh3wBf3JWYvhp^a)D&k%&yS7jCS{bf
z(N<Fpi<mq0AOHS;2LGG1-fI6B!Tv>fXKhXYxv?>h{r*#GrGF=w$qxz(`?q-psQq^y
zZOwr2QNfP=|7Fm>6U-C>!so{R+o-j&YX6-}TibvB{J9a4Go79PbEN7&X=chkQy2f^
z?tkb=&O`3M>F7TwUDOn$>WT4?`S<G|OnU#KHYH!bpg^x6smlIMq-t5U|3v~d^?#bu
zWuUhHzu3&d;a}9E@n0YO|8C6@YX3LU{`Cc1w6*?WJ_qR%{^v4O{twE3T6Cb=|KQTr
z`iBJ_90vT$xc{L1i&59hs{Ic>ZJqzHpo8<js7viXiT`)I%6rKFH@p4^<^P*q7peVk
zBB+`DrwIRissHm~_xle=VEq5a^FRGTzyInFCae9wj#vBN-Hi5sx|x64Nag={9y2AM
zsSAB#=l<J*^-}wvtpCN$4fuC+_x~?u`-i!ulz*CATlF7qb7=I`s982!{Z+J76r`7$
zmaJL4zhY?(Oh57+0vgM~*WeeVx?iN!ozEzLqBdz;o~8Qv`s{vQ10KxxrVV{N;iMcr
zEExWU@>GIx*M?cVy!JLGj){T?LALDrfOyhfPkv={3RXO>g;zgp#IPQ7arwSBKK#^N
z5<(-z+tza6*SM7(%hn6uU)Brlrt7gsH+yuLxS5vKtQGj|Ejp1^OuvT2!V&!L>g>>+
z7k2sv7nk3MXECSXROcM}9DY@_if@7Tv8Ci>S^{U@II_clKAanP3%pkK=Z#7GsNmCE
z?DXuLP;G6_5uu}SaeEU*5BS5+YxkkqSto4TaTVuA{f0mG*=)$~_=7%k%kMJrfVMoF
zwRwXWaRx7~p2x;Fui)E)D#0wS5c;pPt!-Yvl>V-92c1_+G`pk=zS}L&R_Ct^`gPi3
zcEv!f?5s*7mpv3`&Q(O`5I0h2nE+OQD}>q0&(yq`KaxgPo3Y!=hj7Evj{E<rz~RgL
z^QahWv?om-)#kv5U+dA1Ar`P?ttY;<w1b<cdh@cut(dx_0FG!s;>3~>P&RJ?A8@-s
zU$<Q5lfRj(GM1oEnqut^(=P6v%r)F?TW(OlaW1&-*9+2^Wk!Kz&&YL*QOzE8-x~eJ
zSE>K6t-=>xPxL09>N^g@)8-h^J??_V?e`(##4t`6BQNaUod82pqJ?;2Jvp@Ah1GAD
zvdq&Zf?PovZyu#YrBBRQW38V1mDL+*iL!$GUO7wL=i<W;#~*?x>(cp4r*G^~WeKio
z*Ts-=ThQy_WcF~~3tN_^qknE5Z&;O%C*Sz-xtzbWZpj(Rq}p(t-K>tqTb_v<ujg=3
zTeWa=ObWbOt;b%ynxWU~PJFE)9G>^sOF^ApN;>uga_NmWm=f#4Zf+-Vde9vHRrgo&
z=+rpA+TKhj*H**McbPaQRhOp}SHQ-G91^;z<Ee}9;r{S*HFYv3bXE5dH1;11pLDv?
zZ!8AgrTbz0PgPXtnn3+d9Fqi$+zx*))QPtTZ=zMp7o(cK51!s2$7XX+^5j!VygtYd
zKfleRUME_rGzvB8i){j4I<^U2t(`Dx!4;0m8pwZ#{$Mo)cXt!h{`BHTFXDSMA=PXh
zcMr&f{DWC+KVTCt$k~my!HraX>Nu8%cj8@#7gO@SN>Ho)L*~oAa{Re(G;c#b9c+?y
z@3GMmH8oVBU;iLc+v_phPjlt=t|2_4(GJ(d24iH)btrmng~2C-==ZYC_;K17vRv)N
z(MsCf?uj+#M|$wj%}c4itAL#?v(PT>1btiZk(9k!ggt%Au)a=*_q48OwRMU7%H<r@
zztD02ZoYvh+9QUYk7QBF4ZS~Diyys=Y4D~>N=dbJPnc_qGjGeH)|&HV<vRiEqtftM
z(L_j}Ke#ro-j7?yRp9ZuPCTY17romj;NpuHpt4^aznpB2CC;<B?}jYFqRvhD9_EPh
zD@TFPhV2lxRziIzpBL&Y14s*g^0B&9iu7%Qp<x+#zPmZetvwGdHfGe)yp`HY?w**|
zst6U8g}ioNC1j7uWBc<v$zaq<yzsWS`xNgyjt@MHD(!c9xK2FT<ywQ0<7xV=r_JL-
z^kC9EC0=IiD%f1HrE$kEi*lJxT$?f!E{}XEQTeG!m%4qUEw7EidG2ryduW6m!_r~V
zkh?<eRyRJG<HH}<ufUjI+F1R{S5#A%#l1)K#5)xr7JPgGJr0+05h(EvQ(e9!I|V!X
z<e{p1JeFGYM~$0TAkH$0_s2Y=eU8zT;iLt(oO@HF`Z2mOK1bZYQWLVTUn6t7HL!8|
zS{yzq9g<hs@XGWwXjpE==2p$v+&>;Nh9%P2@#}bSngo??|KP0lO?b;-HN%IWHGU)h
z@)PkOzN$G51Mg?EPqi10{<4@?xQC(j7Z;55E$71c3ql{wuXOobDNZ>S!)0@S3H_T(
zh1K2W(w8wKMSPXOBe(45r#t@Q&i73?B>jXy`r+JtyqqL7a5o!Od=`f1IpW846VU2(
zI{q2?2nN4j%Qk0=ak_Rke$YO}`#1Ng?c{Wa$31rB^jXvK<8~!?YwW=JUnI3NXXeq~
zo^llI+}-`)=PWFK>&hlq`w4*_YtUoZQ23PPiH)UQc-gg+AitvvU*A528sa-+s@zMk
zD(X@*`;)gYSYMvcwvEB@trwu>!~2>m<~7u|2D$D|Ex21w#fO)s3U^vI2}SOgU~%Rg
z$nK~Bt7FNmQ8*g<Cd)$Ny?l^8G?BxjfR{CA;N=JNcymiErnZbny+oB-m61EC>E2k3
z-D8LQhN-%T`;21skgA%{mqWRy%wNi!kqHNP{UEc*P)O*R&pIzOt9NX-$4_=%?%R)K
zvYzh(Q2nIM+k8)nrrwp};IjGrwqG{)4QzrdwkoXkL5<Iv8e)jGIu`hqk={jZbQ;hC
z7a|_ffghVBANuWsThk&)Crwf6zim+e@;kCvtcXR^-h!3>eu#0@<YD$}Y9fy0(|N~y
z(5}?Oq#GN^<f#>J8SO=rzrO^djn>>WtAW0bbYR^Z=fOWR1pHoW!_0wcuK7x>q<nN2
z1Sds^MI#47r?NWIh)N-khnof7dmB0)J%i&@9nfiLDh0oh2fr0*aJu~}IkmV^&8sR}
z*!3W|e8{4k*Z1MOkj||0EuJp*?uWj0bI7MsgEcbN@WcGbnpc^9xYXkkI&QfI=B{7G
zBMm#a_U+A@DVOrant>#$z7OKl)80Y7mq7mFcaR+V0HdE(;x^SmtoTfsEe1?N`3vjl
z((^c~kDM)TdVU_(20x&<!+(U(wsW+pu2Cqz6vsO|KN7ZTs!96!eTL@LdXNd;jr052
zaL}FJX!(03hwf7Vvt6g5X=Ng88hrsT%x>fmm%;2idI?`@V7&e6D4z^3m)z^-LOPlA
z;i!8m-<3p@if1KXc^8OXCuHK5eG^eh<~YCC(H3)j8rh+8is1S5wxG4S3(lMB$D2#m
zpj^*;RT?H<JigWpt4ER8d&6_#e(^i_l%FdOoHdjD+ji4vqfK<B_jRg1X9o(e58(v4
zN;YyxhGSir<h9;`#rg@jYVvx#7<htvz23pQoR7lN15Mn~G93DhO~GT0ZWQlRPG7Xo
zqTZ``{O<Er@X+z(K2vRlU(Qxsx%x6F_Sp<=dASfcNR_L{dUA>9M#-Cxfr8S|G_j$u
zzr=Q>o@iM68d64O!TxL4DEsSM7+SDcI5_38FfIIpSb6UV#Eh}SJN9?!Mao;UE$GZ{
zMt)*I<PBK<cs+?NcJTU033zL+7W({1LVMR%>M+#i?I}m$zVci=X>3W2yoEFl>}1#a
zO0l?6hYQm3DAdM`SC8+>im%d0bJQ{XuKidt`idb$r{!>(;uhiPs1jk>`7F?i_r-p;
zXJJCw2O7HgE4-MOEf%fwgRG(!2pwlf`fs0$=ciA{5rK-Rsy&wKJUSrt^dT7jcLbQ!
zcw?Uh#T*O)?D0E>)y`$J{=OwJ^Yl>c+haE*Me1|sLSMG;*g=Y#2gIv^rtY3<@~q`m
zO=x<X$}Z*Ml273@x$ivCJYGWPx3&ukZ3nPW$A&(2bL3D^#+64S#X065=tFE4+&phb
zr>(nS@SH@5GWOzn&lfbu+grS`<{oJ0<l};!9TZp7m10ykqRjAA?z(RR6{wce-Ink0
zF<VAd&fLV`ZHhtuTpcD}Is(1NrNWgfUGdvk8N6_AIt{rMk9Nivst;^;;m(___{_=o
zq#$`q4FUIPSsxoTxO)s_#aCeI?#+gpM*K4<94Z~vQDf*Iv7lvy7+<*$jG|JYGJhc*
zI@*oD%PC;1`cll>Zp$-nRzlcRZMgHMlI;D3n(E=IylLrNR{CuUMni1*>kDUm=6H%K
zo41JNft@hFqlo9;n2d%$OYlJQCq8HK49D**;Ik8qc~G_+Ui*EFo+w#kZf_eHq%6Vm
zCW)l>_f{UBV~EdlGby*;hL8R1#BL51P_WmM>{sZMx2G$2wzPuHxqqRmxGzO6Pozmt
z)L2|{0=Cc6!QDHD<LWiv#a^ZoIC~@&YF7=S(aDuiKhzR@x`)ujWK(G@?TM-7YB(yv
zLmcIu0A0f$i@khz3T7j=QQuL$;dYZMnZ@N$w)ZG%|I(H7&-=ppXNyF=H4mV;PXmOt
zsB_3iODdIAQ}=NTF#mHhzrNx`z3krzUFCz|;FFaStFj)ra=~Q?4G&_aypuFMB@yRI
zjCgzADSqA40MG5a3U9}TvgBcS^?8kV^g_29jC!fKj~@&;ZITJfRTNN$=2qBxBo?<o
z8!r)*B$+oFxLczlw^cTZo}St8VUi7bElUNr?+NtnxI3hm`-t1Ry`|ENk^JXGHrE7(
zin)~=Sogste&15VJZL}O_Vp#Uu%%SP6rR3v3qBw62%VQMgz#ipG1mVK%LYv0ac%Lq
z@__>$F7MAfXP9uitt&pXIgY6_9ch<6quh};{!{OctAsIP)m&{3^Vg~E9i+hHVja-v
z%^6&-+#N5EGUs`p^g&l|jTkq*GhI}6WP50V9-Fs-i8M~SCTC5?O_{KoP59vA-dGS2
zfvcao!}al>MdLr$0foVQ^{X0soIgP(jj6amT*o;tE!|(tR%bt-WVn;{Rmi`aja|k}
zXNwmWIDbPd_iPB~@bU<;r_M_>)t@astRK#%L$bK*GAC4hxgJg){7e;trMZoN3yhrN
z$c^JF39j0s+UFSBn(`7h+gsxn#iQIq=uDbX@9FOZRi5?5ob}z(FuPw1e6%=D+vYg1
z)9T4&zeWO=hZe&rzjDa)E`o&YMJ%_OQAuo~-~~OnG}9lFa!28Zx~1Hrp@zZ*O^H)b
zhZvUm9bUYX<;<RgdEI^+++FxXu&pz}^KxU!Yv3(m%d1&zv_+NwByYn$bC~oTfV0*&
z;_rlMyl7-UtUvovw0C(7OFx{T-}O^#BILW_#?gVGU~G@$)Qg1|y|ehMFq3~T*5$-o
zJ~Zc45=IUjh(7&p@aCw~g2w57Y*Q##o7DIlChXkCv%Yl$9_)pRavyQ@(T8;Z>p4m5
z2&8F~esH|CC8W(g00UFx-AlcsG4?A1T$F7=o&zTF*mFzq(_l3|8hlflce%K~ag^hA
zwrTu){4a>Lnj!h@Wm?;KF%f*5-wRzd_T$IQ4`Q!jEpXzZG%s`X<7w9xV|+vl<S*UB
zXM?)%td1nQRWk~szpKE+zk|d%mq{4hO%dEo8^G?drqDj;4!>29NB1#@aoU<(?zAui
zf9wyzYcGfJ1T4YOu&tc=+q$O6LWa)-$Kd+PWf;@l2NiXn(~+SG?CkV{nmzXM;6dYg
zkz+o+7=0F(L_~AIt83_6@sVb}w1lY>q9A?DJuJskZauJBd=s&SSDKw5`MT>g#cs63
zw@QY0yfCJ7%bI!EOcu^K4rHzTBJ4Pv$fHI+frmpb!JFmsh;bt%9&z#*BzKKIde1?H
zw}q_tQV(Sug5b^^SB&btjn?QU2~}20_{H+A=-&4(YbCG7b!Nt}*E>O|n6ne7C6tR=
z*M@=8#SqbTQZ`&WRwv1M<;E8;9|uYD8$n0GfE+@9LGs0w(ECLT9GzKBDph@P(X>50
z;*J)#HYeee;tojqHG#gUKa`A+yc4=as)(%(5}csmCn&VWQ0Uz@iHf%+Iu{3mdx<hF
z_3O&v{cNdb;yGclg(cmbyPmWoR#NQKi=^~^JFV&HfQbPSpt$D<gw9gHfZ?7sF6Xb%
z=q!7Q$F3Zjdi)IZj_ZM5udjoil@7{{9?6PvKKO0#N<O@1o!~O-9vDykjTVU+Jlpd-
zwW!PSj^Ht{?_CCe^14qN<3`}b#liUcn+N{c@|vvtw(+JR?X*I-g2(tOy00AmfeoS}
z`DTa-?$JxacEJ@7*H!TRq*8jKzmJA3@WVUq_V}=T0Ka?fz^M_L_~gbO`ucSp9BY5h
zxt~|V){iS_hk6G79<4#}zy)p_z7U4pnMj7?R-;|luOPD|gfe!AL9ff}-PXjOryPsp
z(9|yuTZ(P?VPtQ1_Hse}{ar+tn{8sB8hH+HF@^)G^5T1i8)z{ukmnte=bQPexKm!K
zW<&KF?ldTmKf^D&!`gg&=vUC>BC((0C*i%m5?QpA!>cn~Gw5nIR*hhq{-76x-$^FP
z`wW<zb%h@*J7L>kAG)IZgqsJbVCGn#ngwNDYA@YAiqWmc{5Wqt%-dCtvnI!jdp>Oy
zR!w_M{`H!y?zIn07w*I#?{?wwPP#Q!YRceTzmZlfoyV)G1ZZFhDx7Gib-z0E#l`QT
z$D@yAaBm4-jJ6fFf9}PpSq(H!zL{bVcOkRuBYE{iK)K7dZev~y5I+SXJ<^x1VWVLF
zt6?whUHBN(9~ZGo@>FX2n1}sF`d}$-;=Ir6>Df$Ae8k`QviEwt@<tQqS?1uT-Fjq^
zvj>i7*z<#izce;7lT*#l;mIjSIAvx7S=cq>-p9SsB{2%4w(X<U+Vi<7KMw7cgP`MK
zI*$9;K*|U7dCzMzp4>+OchgU>&{P_ipU~v}JA?4+FM(gqFvD{;9nf^;j#%yQ1Jik*
zP-;C7_TNk*uSKiGv3<JXr|CE8-XdA<Wn{?~na_mXR(9;UwI_1XR~Yxmil^3Ap>cdV
z=XUABvfgJvQknpJ-`?TKcTByjGNJ39>tH$Y80wF=7JQ5M(9{urr1r{(#p-h5>RbWW
zhV|vYD=UTamPd4T{$?8MtW#Um;}{;%`UVST1>k@~^0Z~MGA^)+=Cj63d)F#(X}5>)
zCgCIZSnomV{g-j%r#H0an}jW{ZE_d=$MA;bow#S$1N7eR3jR3R2d6B13C3@JgUQQd
z{C$QeX2k&?^-Bd!Egv*aDyCf_p1fP}03AABPM!TLYx?yI;i|z0!Q)VXB<f>V2)tzo
zF<-vZ?7uhQV$YL8^MuDVWonePW-y(W^zg0mm%BjgZD&I1=u%4lu#R#RO4#n_SoCP?
zOr}4F;A(Y6OwGLmx09o}i|s>5om0q9&-6vJbK7a-AvyQWmLFhdlmv(DwCC@m#-P`W
z&1_zH8b>S*5)aPKgAlt6u0QOHS0@ajojv}-h(BqBoC(7xq|*E!;h=R~g^KzD#awwx
zzQMIM>n{uzUcW9Ae-DmF)wD<C`&nL`elMCYd`YjVt{Q`;U$2r*L^&;U4JE_5{n7Pl
zDxSXDgI~RzAT}&g<=4Nm1X<(9FgE`Ov=n#eM<$=)m4XEr{TPq4_BwN+Wd*o=TE$93
z=1{w~7uY$z5bp%&fMnz(`aIbj;zxU9$E8E`c-R@ppTCDzPLRdM87(w_=tdrQVl9pM
z^_VWt-is>2BRJZ>4;g9$HvB$9HD5-Paaj$xoZCQy=G><@H^*VG^^;L@vm9#U7IFO3
zmBNG=J?w}|!0u2@je88SY>+cSi7LuA$#JieVLTwd7ze*6ZrFE%zu)MMN;M<+<Hrg7
zb7CMKiBsUrPF@sY^$8v;X^UCsGoZD2KF@iVD!Cm~!H?4lxOt^4YvimEcZb{3Z0jZr
z4szn)xHR-{aFY5qL)bRWo)(Rn$GxJX;qXOMeylZ27$1Ec-dq^K&(BVU`qEy^(TBk*
z^9n{B+Qg~nSFxFi9zKXrz<W<`a{QW5_&m2er>D-q*pN~P^6AB2-K%Jj;b6G&;xEvm
zBjUb?RrFZ8#|&+}NI$g&w3fz#lI)ed?~Jq-`$3+}7OLQx!;2`8w?g*|4s6)fRa)cO
z#-r-)!^ZFqiK|})Oia$<L4n$QWrTcf+3guPAV3y(8?44uu&2k9-D~ScdxOmad;A-o
z%=enS+|7G8vi+GeTo|tFu2YxD%^#iI_wSA8lUw)FZF@Ov&s##f+9GkY(ksEh`xK3v
z{F<`&s6w|xL-6j4Y$~5MoKpR==-1F7a!MY+(+!Y=jgC>#Gc&A@)Q3AtB~Y@?8CsXz
z5+~4F65}6$zNwP1M7JMTmssHEi@V6oJp>jVjKKQ2+c>c4J~*{afd_6^QSZWNp=TJd
zdqNXfefuQrj0@wUZFlL|fn08D>@R6Puth8heJac~XcL7YPUvd8p*k(RFF*U}2GN6L
zd3IAhEqi;Hwk=A8h3ke>3aPMiksQYlsS|Dvsu28A3gPTz8%_!d$LO&hxMIk6TB08d
z&5E+T{&yrV{<?t%1}3^ysV@<~4IarwRfBkZtUBJ_I~L2TkBVIiGsLiuYWOne5|mwe
zfmiqRX2bEbIbdRrkdZu|pH`fs?^k`%b5{!1EtrTApLB5G!-JAjvwp&;HGfI`oG4V>
zl;hDtIXoN@&ILou!Ew$&+I-fEHumf!j#z)6a@t>#{CP88+dm9F%HB}M=YE_RvPE=D
zN)&x%e1)#I=Y^#|^ij3S26I=8Wt|gILd^nio;=Bz3r^VK1H~;6SNn+)9UEw7vL5IU
zDMVpa9TliFQ=0a3JUr2xPsT6AS#}CstA1a2J}n6cri|pZ2jqE<jx0Hr-=Pp0V;VS8
z9$e?FB8RSf$aty>PK_&pwL$wJ=hk0g*eZLWVwtpGwDJw7cM-71jKjj2U%Tm->S|%l
zFekDawH0>Pc1Eq*`8aCQZgG8U1}wO-hH{ts!k>Fa;N<v|#5ENZlaNTSuJlC@`&N2-
zYXKO_yvDRSI^0}mgcoddv3|@p3bV{-nU!Nn>@pXx<b|`3=R;C28APW>c1GX+3A}hy
zcid)BOz$o@<HaotaC))}ce^nIW)!&L%wvnGyiN;F9xWq%xgMfH)ops*tBG_SH-UP1
zFKk)R3l*F4V9xt=emO7$GJ=QUB=i1wO1YF@EE~r^)DH<w<(Aa1yac*OkEAx|z7)D?
z0-g7Z!l&}txN?{QH0+5%*@7tH*0wA3FyR1XrH!VvYgdGiXB*jF?+|KRZ>HT&7hqsX
zcWfFoho4^ttlu^bp6a%WfqPHV=W(s{EV+~pth2_&(O!7wy(N!6v5tddrq{&%ZXvA~
zX_#?JnQFV&(nDJfPPSSH&7D&4-hzHuoSVo;+-A|hrVU^o8cP$BMY?fo2Jf&k;p>`P
zs%`eAW17A!3crg`X_*3#?`s9NS;l;QMmuJYjOV@c_K?`Q8^`-@g6s8rS#L}xjZgh8
zNoa}}m5ieCmeE`6Q@@6zZg=Bl&IMFAdI2u92_(1mGVZHKD%K7&>Lxf=^%D*A>QJ-1
zCy(~IClp4+bLd+YG#xur7_u=!+H*UNUuyQjJM^W#mN^`mXMkgk6!_?cE0A=klC)cQ
z(-gDEk}s2V`H=9Dp2!@AYem0F_0>6aN*gXYT{V(sXiVXn3&yy`Ko#dLdjreX$kS=V
zTlDpK2u`(nBbM%*$<O=M!+=v$x%}=g(2dgIiOT(8Xvsxde<%f>D83g)+&KvIHAnEy
zob&MD(*touR5xy1cUhcrcOi~jHc9ldX`~|+)s%Q7Nr?QEjYCJ>BAa=2v?8@HB-#5>
z>6vltZ2AEfgpLEnrTT2{b5GQEt%YCrE>o|JdQst;516@{i2Y^nK$oH-FzsQ$sU_>7
z)3kliH=n8aC&P2K9L~4xA<kRY3sVf-z)MB0My^XXU5Y}M?Aj=K`8$x`RLla8=e@9c
zR!@GcaEStCv*73cGLlhV30j#ctTj-=_r{y>gQ{uhIcGEaFY1Cey&ed?H3zX@$PIeg
z;~9)`_QJm{-yk!5r9>8O$-cilFFAQx@*?3c8MscA#%nQjUsQwsJ+sg<?Lp03*$WgZ
z`%9c0a~T|0WW$Il<znBaK(4j2z}N25KHB9DSoY~PZTL2WP2S8AL-tm~$L}UQMjAiq
zjyOY$%Xae6Z9Q?=qfQ*I+zKlkhP$Q4IbxL6den2b#7T+ntmj=$5z;<c?WIc)ck>uk
z9!e7yho+EfR425SB=CzHi|~0$5MI!9q{-8hvFU&l8a-9T=NY@X%aBH~x8Dlx{UU-p
z`)ovYX&p4Te_oAm!g}a`qzD@|a-nv0*P07`?$KLm9GKc3Tm5Tsrt}-VM=n1zc>J(h
zQgl4VHv0LT5_}X*Zl+Od-Y$ON7{SMEr&0E!PdIwSPW(jelIyu=d5BvRjp}TGy)OI}
zOrDm*>~F`xV8vze&VlpPRGNaFt_{a1{8i)r>o<C)N#oIuCxA~*(BKkV>~TMmm6fLO
z;*t^kyvGmttgsq}_teFf@l)A!8d7skiFj#ev0F-J7n~Fu&(;bngv;U~%ndorzx$O5
z^&bi_*f9!Zt{}J^ohHq}{?c*tLh1gng*La{Bh%!~RJhBKR;eaIuKNqveYF$YEwsVs
zV-=w)@;-!m_om5R%xYSV3Q&J~AFiLfj-P+2#J7D+QSPrBo;ak3?Ymvr<M9&K{+mIU
z-|C^Fo(#2ioZ<yyHO?+Q3h^8BNWU(JG{*mxK;Au;7MrL~_Id1^R>&a>UX!+IEWR+6
z)_PuwuzYe6efyG0-!~WWpX{;ttU!gI9*g3MX#@Gj*d>r~(4N+hpNu5~19@nx500I-
z2NYL@W2BjG?R*(8vJQ)8@3Q@vq@|6eYMb%U-wc-d;|I(8#){>S)wpVAys&uE457E5
zn){-&ci4@Rxnt-&=>0bpGxLi;ZqH(lmH!5<Mkl~J?I%u*8Hm64E~2;!yIS9dRQ{aW
znZqM;@a!mc^xY$Ea@lF~(SmB;)@&z)Akw?)XLQJ7E~ebgA(I{PxLEEJbU5dsT*(er
z4SWuBoD^!iC?3b(*S&c0%>ld<J;~&c37$AngN?f`(EC((I#)6bYb#WE%!p7D{W^*1
z`$zMlXQSDBg#+)LqXM$WqG+XFVNLzW!5r)S35>c{(+`bPV0Bju_nJ+^ZX;WThjugQ
z%zQH(_CSY+hFRgRy~eDlWy_~`*uu@JBF+7K22?KG5<5K_KqlG!`Ch_0+@0{92NZ7P
z;d4?^Bl9R7SU8?b&$eQKS`+EDUqIikoofx17DCEzW1d{Bg2nb9u~<(6^3H=<(yH!W
z-dMq5EaR?hn$G)Tc2S4TS@ylP4Y}P0lU81YmXfYw&Xq7YD~&7f&DDaf$)V(Fx2r}q
zTbbWIxk8Vh{1rYpe;1wg4p7`QBTBh?ooYSnNwWDex!3MNmy8TfzC02_w%-Bw?xm8C
z4=wp%|0XC1pF~GyjN!khK7+M#Dt7Cv&g0(83Awwfarl`$cD`eX)^>jcy`j}G?Z+F=
z|2mnEI~Uh@ch^IuwG;6Ej{vOOuEk29dZO7S4YBt1d6GT73l7R_vcuiwuy1Y#tnkYe
z8)sgG(NDr?;ksqQ!!|Y4Fpy!#vkEjSXEn%n4}|9nj+3*VHyh8ifKI(-ab`f7SpCQt
zy3Eetq$*oB4O77x?}wt{jR#`T+$mzevTj&>;W1grXYi3CJFI)Uoo7TT^5%ELA@Qvl
z_xZF+RKGMDR+?*~!I@ihbgVw#Z|uVoodODw=4cw%J!=*WapnyZhY5KpbI8rOmA<UG
zDmt!T57{r|F=6t3dVAcEN;gUCe;;Oxoi(46#Yx8Xe}Z{ou?F6M@{kTi1mZ1LXVqgx
zqW!pu@EBgh9Mu?ld@`5MsEuHg>SP@6;zCI}3aII!O!GS3fEjOZQf%icF1mh&eP^q<
zkL%vb(_BB|!Rom<K(e3D>RiVBw>|l0&vUf9ekKh&wVw?9b7@SwDUNvJij!<@`EThd
zYMcIz8vXNyZ<)RDXnQ7^zMT)DKK*Fp>n`Xi+lxcGAuptbVBI4FU5-2v4Gt@DcCaHZ
z4>V+<R1piy?N~X#5H73aa=zzJcyWYie?ljGe9w>dTxUpjrh8!J^p}F)LPN3t=PF^E
zxAgmJbK#05?Rb84FYf5q1anMxh>5U{rY82G{rx^l#GJ7t>++TqhRCwPl2{nIPn!?4
z-=OWCt)biNc3Ab|3&nU0z`aS1Z1yz?&s+Dw4Xy>^uOFUl;H%HQRrK-9%xJ#0@d4y}
zkA*>B5_tK;3%D%iQ;m%0HwskH;IALgk&MDnaP#TMkM2%@&Q=-Vn}19g_@%IFpr<@3
zC@rK3iRrN2(g?JZ*VC_xdE(bY7s+H%B(*PHL(`wFA`8z)bfiHY4PV}Y#p-tKf5DTZ
zM;7wbt?A@DA_F(%A@9j=z`QQC(!Tp#Shr#_PT7=#gQlbd58NyH^i!_3WUekX4O7G=
zlh)wffeP?Qqn54he1&6Hd8{9thSgt&@NG#F?7ZHBGm}qB`$iw=z?V$?nX>~9ExJR=
z8)n0g+)JW!h$00_eb=`=_SCGE0b4#42qV@ipwV_E)>pm)?G_jDZP%&nH>8Aj-%e)T
zM_0uhdk>Dtw}6sl1^0v@3bbg0GW@ulMoXLt6Wb4gY{6unaJ-lo4)({)92=;+-i?fh
zCSl^f6qXO!j8UvC?Q2)y;v0<|KIe>RnQe^k6OXXsx=Yy0t*U1GuSC}U@=J70{!Xv2
zYw=OtXo%WUK^{3(;B-S*u<v|Bl9h2<BHmXK@0&UE+8NKW>W>MxT^PeNr8<gr?}g6?
zr%>>dO1E#`z#o4<C-`=W#wuKZ**$%@w@IbM(YG3l=Rb$4iEp|0wq?*exRzI4Q*i(J
zNFEpIexyEQHMp=T2t$2VbAnwa?&~w16FCZX&gjwhwjVI~zhI_q^24O)Kou256~%v`
z1H;swc+<r2YC1UZR*l1QdDc^Ify=4;VOV4zPI{fmKKHhY>36bWTeBG$zVi?rA`(b>
znIZog_MF@eav&pZxLd~YQt%3MWt$@($!pCdNkVBn8VvEqZ-JrYG*F%&7~f&<oJ`KT
znan-E?SZ$}M<GYXR1n%{v$;_cEv~5$_6!|Kd(-=(g69JK<y0sdc&oAJSbM_3X(YE5
zG5D4%{+OuCU$=b~+y-d!{E`<i&-@ltbPE9ain%cJf(f@csPdb{o#4OW9YklV@m7Tn
z`e+=)*;l&o@2X|A_Q(N>{WXYZznRJnXC`sCw8K>NHW?0_PUhA3u9NDrQZ9U~%C?2F
zw78;8csu7fs1!dWnfLB2YZuP9M_+@Pe+QF+?j2FkJtX>ee=ST^_JmcV8>;cxb?_c_
zl=$})$}%c|{xfe2^1qV!dD%KP+ZoIU*X9Z)4>CbG(TRS3-pX(H%kr!y8!>t5d(isU
z7p=x*kip8S@K?nV75b#oywMtbNp3rTQ@lxPO9;&Jw&2YfOCe`SIN$j;nT1V<DQR(M
z*pc`H?%dbrBh^|w<=AbWF5iXd;y9SqyI$B>oFtlT`AWeFy|~b<o<fzhfpRAajlMb<
zx_qZFf7gEQNDO0#*Ot6_l`XbJwc&Tu4CDhHQdvUz5_RL&nyuu2D+$jWx(ciOQXn_+
z7Dp^B<jcuAq+I3>!&6>?tLAtLnWxTwCZ+RuyYbv!`$xQ&KN&7>w`2PWd0bd*O-?z>
zxpQa(1iTw6-8nB%?uty-pJc$*gAS7Y23bjZ`XpGo)E!QJu;9B#Ss1r21y)~QCiNtX
zA<8X482RHASs%Sk7jDnS_5q{un$ZceUbcca)aan!)gUnNs1rjkHA4FL2;#9@DPYtb
z64s7}&jC{OqwXp&b~53uPc(Vq_C11OM5|z7eud&EzZS0R%5z@yES_JG!M;%{*wI=D
zzpno#gD1MIt>M6p5nEvJqX}4dce7|X7RA8qo_zgd57PQNTin-ElT@yBLb+%0IHb2D
zq^e)wN7;oGzTN<b_6)?i{#u-M&Wn@s9Vq7f2^czTGHR;srxsm5-d?4|NtumgH}rA!
zhu$Kmp4-mT?XU5dg9=n#tHTfGmr~)oWIAD%&Y>?BQ1I$za(Sx@$7grK(`g1Y*sPXV
zuT`???j9jMat>av)e)}^vu7*Og|!<=5XA26*)2qD`lC#jlgnwp`*dEcpTMWAfS-(<
zL2X;kK-1Xk<an<yZ#D19aOa(yyZ3p?$MtnJ%^D#%??EEyPPE2hyRvb{ia8YSTmkF7
z^%1Jo(R`S(*w8A6cP_6LThj_@%uhwy?`g!j2i4KQ`>U{dS}rY;&WSm@Dc}5@aC7S|
ziK&bR5A=Tl`W7+L4oe0d^_OGqCG}#>)5&;bL>yL@N3iO`PNMmfVSMgsCAl7V<}EKq
za%}W=PCe2}^|L>dVx0}x8VbU*v?@xOXvwK7EqTeQTDswR8D4aoi{@HoxIe>;BfOu9
zH{Kax&2?qa_~ws`$K8i*E^?Thd0X)P0qi<)BByB^v9-!j9wb~Lo4e_B?~^M(Xqmxf
z4_8uD_!xFwP(e#?EAhqmpD5JUfd3w7ge%uS!^AfcSY!7}?67bGk5N5wZeI~(+Gb(d
zo(l2Xa8Go5y%9Fd7zX#Z^q_Us9wZYnn}bz!sjS=@M*Y}MZHH5M`MGMU(s5vk)<V8f
zenX0I?GUZsB-W$`kD?B#ejmk5#*h_0)coF-d|oF|PS0#=ZdVr+7cN12x6b&QoJIAC
z+T1YZDO^6c0W2;ap%s0HgHg6SeNgq_-_vdQiI%@KTRcG7r|$_i=7%ZkGxDr~A0+dO
z)!Fn_9FtBqXBpq266-P5kiDIX3!cN>w)2qYZANB2<?$gNflUiDacPzxhQ$ZrX9aV(
zf4>_m?5m}i$M<R0w4pq2ZzlUk_28>|cg0aB>S5z}4RLC~BJt|Sfo!Z9MRVnMQTmUm
zoFdhKlBZnl!u{Jh=4KbZtsEvy8>T|ZYnRfm=m>WF(uaqJzo&uYtLeQ`3heh@z>Urs
z;{GY^!pvhK*eAxG&5rKCVkxTdhCvLgJhbC1w@`jGXe6rt>Vv+0mxJB}dpa4B!MTlT
zurqc8UTrjht>bKQ`sF=5CNr7GAJ3!hI>R{5XdSGZSs)CH-9VZ1rqbGxw?*%G54Q1O
z*nV@6_;SC%@Z!6erLdQ-kNgN#I_mI6iuee3?I5i|CvZdVR&3j5jvJS}ht=}^(R$%z
z^geb$T)QHOu9Ovt{vCJVXWSdoc;5_mT@wkmzN5~fw4(bukH#ABfHK9Mg5B~va5AU_
z)OAd7YnM%2Q0>Y7<r~QK>=4jD;EO}APGjHTTn@N!fmb*vu<f`kvb9UZYdPm2c;kG!
zTiA(TCMEEzyJ>junJc{dz7otI3`DI(j_B-r3{H+E;XD=4k9X3jwF*flYd?)2lT3c;
zGW^wSKY5&(B`nwI0#7$t@!j~PEHNiOcf*B0)o5VKsjC3BjkH|flxx>q;={T{Fw_4X
zJp4TrI<*Gk+LbpUMsssbg3Tg{)%Za?cu@@`?mkAU(yX+oz>=@}cBfLup`?=@L<@gQ
zyI74!gyp;Cc))OPJ}@kkCyzP>rDta0?NMhy{ZcRLxjTm4=V)?-n+1ITFaclKuK~N?
zTd`$MCT%odM4q#!i+ROiIKAX6T#}Eciw=dfx;aNe|A-4z^4U;JgN>c1acF-_jvlv~
zo5R*|RntTUwN2EtcNoh&Tg?u$QaPj!neWX&i{t~sh&MrQCSC)Dtfo{5{Gm#^ae=J6
zI)D`x1o7awUE=vizMzqrFDccPqHJ0h;Eqp8BriV!rf~*7`+5j}gpI~|be(&4Ue4+r
zH%K>aAf4WEU#Rr2fJKerth91A7Yw-}QPIy6zqstjo{~T~vb&kGZqB8@1{S>jg^9S<
zJ%q(K6|jHEMGCyE&d-MXlcAqEPo1$BtUk^M!-F%)LW(ff5M`wMWOponqa_9N7mG<H
z)_gXmgtu-EXKRfOLgDlX7G>AK=By5A82=acZ}*|#?(8<}<T+?~oPsmI_66g&HRR-+
zjvhf?f`f7*{Ft->=iClqt9yH-=r$w1UXv+gDBlvg`>$uk$)@~1;4R@BYi`yXDn*FZ
z(96*WIsV-~I`dMGx712A)!Za1JA4bQlNF&n&VugFwxyGA^_UhRi2Xddm-ac@g7?JQ
zN)0jjwuJ9$DMH%aDc}@46QTpFAkAh2Rd)1(@5<|hQ)i{1{Ws+>OKT(e_-%q?X~VI-
ziwbTw-b)LTL^94kASSA$fc2DgA@am1-cfm;PwgHL6Cdm2!T3b+>N$6E?(4z*Z<|o>
zx;ly&u!tZ2c}yC+JILskH{W&~%O_-yQ}?Q8)SfhgpB#Hh(+w9=O5ckRaA;`FP50AK
zI3t^rqq4c+!#yZ_X@lx2rc$)nC5q6SNrz+mvP?=VTsb4p#wHQGTsTXM=?XP}ZYJCL
z)wI|DBAwkQ%ioG~=yIAWt-I^b2li<2e480u*Q1Q07oHK8Ra@ZqeLeYvPBhQbap!Xn
zOQ7!YZ;A4v!CY~_E4k!$;nLR<>8TKXxX`keC$6yOm0y8_*3IFvHO_R-I*1KSR#8LU
z9dY;9DmwCJt*G#FJSQmIvGXfmN?mAy)0f_)0#V>`k;SyJ@T_p><xcGWW}i5(L7&D5
ziDL2nHIVS~3VJtt;@N5=uJ~R=iYo_W*}X)(v#&~$>Y7MBcAOM<(MzGF*8`Hz+(A`s
zt&*U)FXE_pe^isb25q6sKzMzdnsf`G@5^+zfmc1zHDEq$?;FDVFSODyUn3lA+>K7R
zO`?%WGssQS7uTvd;N!mQNiEBSqrJ|-%L9kVu*eX+D!<dF-2?dYWCLop{Z8sWDOA%A
z;{9mkNoO4~H9Z1{&wCC^@?|jh%2&GaNs0*BJs#!Ur@-alB>eHrg?9BQpyl!(L`#)3
zn2GNpza~M@$s5Qc&DM%bG;YC5|2D~?5wWyqYi><IgC-wqNT(Y6p&aJtNnNUD2q9VH
z$TaU9gjT!Z#n{#8d_x}_>|5xz>4lmp?vaABUb`T(zb|cf)Z?vU8q3A_@#*k+Y^LFf
zN>wUoe#D4Vw@u~NfQ_7dXFat=hR_iIwGe4&i>~dxFs{;-4u)8>;M9{PTMyB#jhf<;
zQz_(gtDWNH^H_4G3l~dr=&0{TYTDuf;dN_a-ANSotUm8{(mn&%Pq>aXZ%VLx+&E}|
zSxxCHs%gRNA_{5VPTOx7v5m_b`k5_7wi&GzCeO(bP9)u+hS`QJb3l!6>rNFlY+J;;
z-@e27za#klgd1Y2;#J|#r3JL3V}|6a?<pF8Ly?n<7I5*S`*fyr2CELup&t_-3Q8s$
zadnTys5J8h6xN@H<$ifJ4e{CFD0~#UeNm#Yab<L~YY!UPLl<Va$C3Y^gD_}iHXS@^
z2wltCpt;Y68kvG1lI@a4<+r+1m|Cv5_0l9BmYK$}m*?~2shz29@p+izBf*#5EO4;n
zGK?SW2w(L_VE;QCVd#c%Tz@~7JnpR)Pr28V>b5gv)#a?jxj=?ryVk?SSr+J+c$)qe
zr?LGLf3At0Dwud1!-v*dvQh5I7yIQ>llwio-=;#!rrqHCgA+7t?S5MDP=+to8?s^K
zQ^~>bV7Pm;RM5ib@FYNr4C`Y6*L>3;^W;Op*kPKW*yjnI{xg^Ro|eFq^nLK_?hL*s
zbCtH3w~*Y7^HiGnOWd<(5Sl%7f>W#WMZa`4cJ#34<qEDC9P$8qHzkTPQ`C7@_fU?#
z`;mf8r7}o!4b$dWax7x1xo*Y#S7lSspBdm6zYnV0Wck(a0w{JXMk7^oEGV_4ldUpL
zuBll2c{lcsX$N`Ugjv_GqSv)Viomnrd-@9HXys9lgX;v1Uq7hyOBS5_Rm7QHo=K}9
z<9J7cBMnRl;4lYYK7V#5kDtAl*WLnB`f1D_kDU1Z$V!s$*vmKH-G)}fjd<>w2%d8H
z$<L{sqKuzG&<YFeUp55guVsq4+SOF4q#>kuA0^w_)A0JOUHJY}XAU~J2sUlA5OQbF
zfFJVVc%ZO1R1Z(CF?urGO=)QkYR7yQHjPrJ!}9~U#$ggKZ@S8ha0HLqIEm%emDuw0
zboPx*;PAi<de<(`Zs(r~-qPw$r^*s>>B@uT+tUWS%BbUMlVFh6h52lgBdhlujV@A@
z^wFm#d~9?AudQ^3oBr0Aw6PF%lHG(ogW{lQp%MGaJraJ~0R_f25I9zW-m#zL^k4>!
z^u9=AmOK|XHP%z$y4N)-&Ms2)P7?X1JrskEWwQHH7tWQQ7WDnHOvsU+%GI^&#l_*>
zd2)gcPpFmWkJ*{5plL;+VP^Po`ZJjGxd;X%4(CH1$GH5II*y(kiid)~35VCai9sJ`
zahxPU95%QwwoUDh+0Wv^a6k{~{Qg3CfhHVZa!njkeO45=x{CWhTf*eB4vF3MJMgU4
zhIJks<auXzkxH+0vY&IRM$6oWCqH7YO%7#W<)5HfmJ2&N-cj7F6#nS-l(w~&p^M@f
zc<92QIlrPh?ScSJ(GR64xGZY!k_)rWm_pvtBymt>50c$%!q!C{a6CT*n}(>06|O15
z^v}v@W|{$8rKtXkT3(>{zzkc)*y6YzA4oY?i_;cnQ)A+9$XR-cRQ#1`k#R1C3?EH>
zLnrdc6+L<S`F4;Tw{|VL*A?GxpDP-jD*$~tCDHuGX-auhCbs_i4Ym^%F+OBF+?Q0s
zq|%8XS1<yMC4sb~%mOqv$AMr{1XF59;ey`+cG{glfeRLkkB(o2CB}YO@#-`UmbKu+
zedcn>($#e2=2q%@XDdAX-XESs$5FcVBu+Q%&Sg#`VPMl#VZfEnd}*!>4x3T~wD<tI
z#B8C(IV|Wt&Bbpk^6>7B3uNA-fqX`K;klK|#XW6V@WI`ge~k_ow*T%Xp1j+{;l|bM
zD0e{!*xOF~*AC~%?TdMn>?R(Ra)zS+Z028fA$+x8GOf#~C8s(?Uf*pynHsMYO{S@E
z(3eY)x?A8RH>qA+G|=8!iNltZ3hy$92>Z{Z3s;x_rm<-bY<c}IG(sTX8PvOGs812C
z3QrWLdxYY*eIt0t6kD!0Pa*{;Id+<;j8~lv*stw2ot7eb8_w3j&%uvzP4pYO@qP$j
zTxQJyeH1z2Of*@U2XODJTiErHCa-J>5WePH;?D4y@Z5hA3#QWC(a)RY^-^HMq6Trr
zrVu{$U72HDgYiwtW?Wu&k>}6d>E^iH0)IDj!^PVDIBrcipFQ-G)<~gwX1~fQ=S3_p
z=o`dcp^duw6p~f)FJ8Vhg<r&-goDEtk(EwARuWFrrGN$K@&2>W`bCA8_HCygF^MoJ
z+Z$hcZxGhZeolYppP<x}3u|U~vY{xkRVeY#pwH6NK5AVz!0`ulu5Y*#1`pm%dB=R<
z*U`<iYlkjuKAQpW#T~-kisv=cy7#Pk*R3lSY&=1KKV*sH9}cd0zM&V?#HpZyhZjFP
z*&V|3w?gqMQ#!KRA8R&-QtK~e4!dv1-+D<APN)N+Cd&AzeJ02**XQQq0GgoPC^#pa
z29u&AkkBr0-o9dvT+~mB4qJsCKcB$HX+No3VmcRHnoeiE4`6^yKhgc78oS!pit^!R
zyva}nd6V>8?b;O%%U0tZzXLVz{#H`vJzG{faUIpP_JhNVBQ-fM`tkm+PW);>A`g%D
z<OA9hdC#TqLPqZbUZ<$fU-WwO>pfNC`6@F$xa$xPyO<{UHYVb%=9@T0&WY6@^x|j@
z18Jkv7u-+x1?XDL#lkM}_@zTI>$;0@v(AAUcH9s?KNG>`ikj$>S0f%$dPI$DhQRKI
zL$If31(kfhMsKf1bByH@A^zr8eEe1!%<62p$EW+SAW$2-^fbeDI#=kD$xJK^*#-gr
zU7+hU9WlLrDVnGD2ZJZd7!>)AzK-_*Dw5$&8x{HSmSo&=S`YU>3=`h=b>%5;zi9T;
zN$9m!5qCUa!S&a+lkvgZu&()#^mL#R9;tNU8*bZRQ-dYF&fAHRBNp%ksgIWQEfp<K
z%40`lhh&&$7#SVtMLt9Gga=2qLgjmX%AW1bg&tXK?GgnGcI=Xz?0k-{gbkOTpe~?p
zZPUO}Lyry*O5!vNZ5(%XHCUePL?gdXly>!fc>1#bD4*E#|7q;X<GJYCe#n}A3sK3E
zq{x!xH|N+YTPh_&DD5hRR+a1ti9{jUDtjqZ{N}nyX<tPtY2Rp*6pDJMXM6AadGF`>
zynoDR{+R1rb7s!WnK{?>{a#;|wVr}K-y)d<d!*sMN(R|?eIy+wx`KA;H;|^<YmD>$
zd(6Ec8*==dDYJ6TTUcf#OlJ!8Ft2@!Nb3$$%x;gu9r8EGEZsNYYqkm%yrNNRxHA<P
zeUR?m#xeeUCS!&V;bNd6<obpZ5*73vYDaXFO6f2P_bceOv)b(IvbiWE^$e5+quCi)
z36?60(CYP}y21??L2T1LygwrnH_9Kti^(onIBEx;`VfPqLJ#0W@F@J)CW>o@PU7;m
zYK*CT#LgA51o}gq)=Z8g<&NFt`+kPz2D#A|_c7?XR0<o53FIVlj5y*<%qLi4r}P%E
zPYFby_!r=^HUanVE`_$1sd%hCiHrYFLEEa;;Ql-cuP>}bA+b{2bU+mCz0~mVeIC3x
zV1YThdF*E?BoQHpnS$4zXf&PAOm`Ir6%Hf)b!#$&aT%<6%EPJfyGGJHYZ8A-vNRgZ
zy98e^s8Dh~mrM$X0;8+LK!~_v!H;m_x?KZ1=gZLQWO*X4l8Q<9^T?8=BBZoEld(-7
z1R1@9FzJ{+Z7(#VYZcU}n}<1;7-f)(P&vACn-;5c+8ak_I)Ye<Ej0@hs(Vqs1?x4H
z&~Tv`JU-q=rpD%DYM~m+3e4kOK01;@)G{2sCla!2y>N@mWU}vN9Z|9~z&TxS`207S
zc-J=s{d{b3ox%=?*bz$>2jya}$wXWdvJ5BgZh$4L&w<0>JZimA%=+_<W>Pawnk{;|
zgzoTaqLMo!Ny%4h5;S`wS{16$__!Dn7DK24H?Le1+{5bO12{iRiLU>eM%N5HA=(9x
zSysQC9SDwxh^`=NIjBYjU)rF1QZifpu8s8BC*TUxqp;evjaVI;3a27tQRAi+-aq$*
zut_?Yveb+m<<Kx=H7}4DEf;pm;B(mK8U|;RvzgwQ3qUp?qDmiG=&wo34pJ(9e;)d0
z&%~r{v#GE~J4xYU-d4N>Ov&M)*r{xQ_`AS1W#WQ@t+;rX1`eEDh@Kgv8I>w~QhrCB
z)%i9O^Ky*w@kKYluU*V-nNjS=A10V(e-_rBx(0CY2rH-`4aFCI`6p%V(NX9#?~>4b
zlE1-*W)?P3$;l$LR%;CnNll`GveRhE=R@%GXE7Vq*-V~oZs%ulRTi#GrqKi$Wyld)
zPA4{6(YI%{Fi|y*ytlgt$}tpYC#FM*w;>$KD<i_kk78|96FHe6jEg=^CPmj}@p{=J
zX!slti3P?Wq0O+B>{ED|QN%v`b*OHU+*Q_1FcU78=TViWW_I(aDRj5iB)Z$!pX_vA
zM-yV^5;al;_mlGZN!MCIgj?_PVuW#=$yG*90m-SHGq5Ak53khpfR1Guc`;lOr2{ij
zbbT-gsJl~3X#qBH?r54MC(bO?Na3E*cF;ZK0E~YuD>LOCasO&VMpT~yzS0hSBb<++
zonGj5H=Z4Sp%oVkmf|p}9^5zV8Lm9EAA9sF@hoBS%Ce(4-M1NZMtmmUOkMCmU@gRt
zTg7&z9D|M}a_~v#Jh%iehhh3BiH?{oI%&4R9sg03s_4>1=64~_q!K!XUGT2!RaW>*
z9K14fC83gcNwfJFdX{I8W18>6V7@Xv^!+ON>ekAPv)O<<L#o-nb;+3EsfN4Pji;@{
zYl*JPVzM>U3)QZhkUeYssm~J`x=6`}r|;DUtjloP6Jg58zuQQSbV@OJ$ysnasf44A
z{IP6L22;T0J)$%C_@THJ9o;`did`A&H|q=eP;Ekw*^MRV)GcuYdy%);f{T48^^nsc
zcj=h*Pk^>J6Uk4S$gYy33RwrK<*BpGQ5ki7-IK@9v=^qWPhQiveQBg*r!bO8ar9oj
zgjZtkPS<`@VxB78CSv<j*<-QIP&02AB)Xr3*nt5qvpk(DjBH^CZfCGgXE>zHsB$QF
z|IBJJ4k$Nn2wX4M;-_0(Q0Q|9RK*eYPE@7CtkhA<V<q<KDC1`hRrC$JPP(_JGoqR6
zY0K0OlC((@j&GO`p8NK|uptS$t4xnZZJL3Og`Y`yM-^NNw<aHhIW$~$AB1=1GcAYo
zaMf5LQk}35kBCQOz^!A9hs-<J);tf)oaW=ryE9?0`ED@c#UVdUn=F$t$2y@45ZQSV
z%&*44WPv%P^3YLIkuQs3bR<fU2gKps2RL!Q5t8E)u=w^Tc89JJ_4;xe-hBB8kAsyl
zaO4iQ?jh1bmj|GZ>2T_XFDvYDp7Gl&P3;$-CcJH){1=&r(do=AeC{EDq0767TT%}*
zXyy&eC6n>!T1ObrsRs4j`_TR*9U_l<U_*f_I^D2l@{L8YgfD|ndSvjY<p&~nvY+ie
zX@NV(+<{+((d4u-rN&NkP+@2ZCOxRe#X4qqTP}=FdwUVQKlrfUd*;LEm`EVD>Gb8d
zQLs`?i(1*}Gwo_AWSrp<x_;OlqWo|_l}J{i#oV#;J$$Ni(1xl`P9e|EULrHsmqYTc
zRHp4fBN3Ne3b3V)w15~nyxN3}a<zxB5q|W;&TTB6u7I;mG8u>6{iJ1%F&Vi!hMyE1
z!9P7<jjaiXi1uuMoT~f_vV%=%hnfW{L<&Rt>7#hg$qOxv^-)wbj%Sy;g~aORk;kGV
z!98yU8>QR<tCv0^s(q@I%lzUolMB!uz86*rJ3^a=Ach!RWr{C8W_-i@*oq=gY_Du%
z6t9WX=@;IUHZI=kcuR$J=<KFuwztW9_0hB>p_B&d9%7#C-vhqQr@`|Ox8^Q)fhUka
zTsefwckd$p%a@T%f}1HgE^Q(EI@jRNFPGqq#z8oICWPczT2S4&MeL#Q`DEGf91<at
z4J~#*$Sog+L^Zx5hmPve=bL8nzs)-V4#|eN__ZZAo|}oyN1w5IYWE=AP!qeh=0U`;
zg^+8J!srKDz_;4ltiif%khJzVQyv~i7HSG%qTyjef+kSaFN(zb#A^IJSB_?tucD2U
z3hAU2YrJZE8FybgSX(IT3QiN=!S23(aC$XF1YYIvmWYglj2Dxk)GCFX+hoGlFzbk5
zPZOIyx{BBueq@9$*Awv{$6)`Ai%i8yA6O)o3cn0P!7gbtt~8XwidT{NJgo&{0&0nv
z!Bc+gkTote4nW(Vd>DKh#7~uS#TS!?<52rKfSU2VE>#WY+HfZ#G&+{rgbyRP@)F>N
z{%Q!=YDdi%x-q$C;@I}Z6?dmUgH8`ElvZ?w{Ky^n&}2Mz&=7LTVj47A<+387lh8J-
zmUr?)7A(;*z@-t@FeIvv+g?6{pb4X3tepWJ|7!uw8nc!xSSAhjMa?ko{zJ0t2bVV&
zodyTa+T&s0Zjkw6j{_#c_|9Gjcdbmqk()Z9=8Q7G>9ROnjy7NgeRXilYjN;B?8JCP
z2omwkH{_u2LD-`s3*{G|vp0uJ(s%h${HhaQq3g~pp2kQQ;>$>a(bj129=ry}NCE8I
zaUG^+*0JCBh>#Yy9PrADB!U$Hb=K>!LCzHChV;XInI2;5f73$7WD?%YlSe1J({R_b
z9<&2$LAN0j+729n#I6f$claB2^W<FS_8NN{JK2riSejuKzxfkXObMnY9s%@GKo!|}
zI}%fE3!&U^BZdq~(^CIHS{`bK!#$(9>P|)|&^QBxYlori=p2ywu^TE(2B5iUGptyb
zNkqKMz(_(HcQF^qT-$rhpxy^|DDgE(G_7D<LV{`2E-4ymV}uJ`qiJ`iDm6c&f#;Qq
z`ODkxL3+Ia>2w!DkDO>`(ZF@+m@%IGcypW_TM>z0gR+>z>g)MknI-Hz6*b6Z&yZq`
zZ?K_15pE`DL$~llUW4~9a?4Es=BFoP%jarxV^}@W-an7jHo4J*&-YQkR6Vk-XcBr|
zs9`=eO2N~Gxg_228{_z;8@8$o<H@fwMEzhmiRl?eSNOERgZ-hX`AdY6N!viS5GUF;
zcLP(<I2S&D6J>UmTwtQdYT*0kmH6y|F@AJ81sSbH5P9PUqjlW{C&}jG1HU-jX)zzC
z9uUBt-y7Kd`^I1cUl|{sy#tlux5-*VEz<l;kD8<ez&Kw`IJWHtX<N3AJTtgT*UBl8
zu~D}f`FSFA@x?pT=Vdot(f^g-uk?gD=XH@es@cv~?G(i9^Bp{g!t*5c+ZNK;atcl=
zykZusK7nliqeNiHhjHcN^|uAK)15{2XQp<R!^b`^uvr@p)fROmzDtF<m@<hZt<uJG
z>kQbb<{r><d<w1D>Po^DZo+Jhnd}HtCo=xkXgt1j39;C4g(!{{#YnAtOoVYUvvn|o
zY%?At&p+zI{E36)&e0S`%dr7I$QY4VE7i!{84scD%z9S5sEhfLak$QS-w5Qw{@Cat
zL#2cE!D*{$bne0h)Hvr0fBl3i;{8yDI4dSGUI$k)mb3N9y5bNJ&GKby1g=BxL+%64
z^MTUqo}_}S!W5M@0KJ-mxTQ}3XFPpOHq_LTw<Fhsb`BROa%qL5!d`Uv%?0#%X%f*=
zjl#?Cwecly4--FRNV?8+5Sd%L*cY@DrMDij+V_qRYZs+LLF6qM;CtaU=hbA&*(J=m
z9?!bcdm@;f=Zl$hA2NAswZL(`2Nov$fL|iFnMbaph~LR>VqNtW=5ZCnnzvtqOG9($
z?Dw|Rz+NBr-n1qcmM!9tfmx6zUI;^L9z#lwFo&Bt#CmU=k1r(z@Vc!#I1q6TPY{h4
z2W{bd{AjW#UkCSoutmk?q39shK<053aMr~d(UNy+^k!{5;Ta|~I_tx6#NIvR=7SQV
zd|ru56syo>wT2kwcMbw?x$+J_vLO>TsE{?=PD7Ql2yxjy1vC18f#I94gsfIz3r+#g
zy?7oTFHRvpy)*d!+sgs9rjar0y%@`GJ+ewJ7h*4%5%1?Yq$Mtr|6r*(V-c{1^897V
zv=Q<+<Ioj;)aK_zs6dPQn^(b0K^I(S>jEKuO?9ugYJln6u~4!!g*o6npEq098jEhk
zP>+CbytGB39L|M1Z-Emz;ND5J#Z>XQKpHl12)gd%`>e#=<>+oFjUoE;uzHa+CN3=m
z6?;3Zc_NPYMkrzasul3KKpFcMO(VC$gGhN<IXfXQnh6i1wApeY8E=!oED3l7B2A-s
znGX7>Bl3ccSegcKUI{HuxnNJKCbQh>7dyA5931Rc!F;U&M)sL9`YsG(y#}U`{2y0g
zSjlcOF;o(39P&VG<5n`fyNB^kKT4eLE~2G#e(}b9HAQieQEc{9A67yp2o8t&V`;V-
zB##;a6~Sh>+a{hD{4o;R4bPKtk{duVjzgq(34wd!UUvIve`fMJJMzNE9HcVDF-OJ_
z?3+d5Y_J|I_UvX}rEa3p%VuFva16h@Rf8@$JQYh7Z1{2$H?n(|&!CnW(qyM*98A^m
zAa^?>IQHBaDl+yZSh;aH6E418T9yF2zXae%uY*u8_nk5CnT^h$mf)E~@8NUp69}Ga
zPE(a<P=U@2ViK~P=6c7#n-#a2O2uR3<JuT{eU}71A0^0oW;!w=TW%3a%k`9JeF+|R
z#Gy?(hl2>+PR1@g4+h_DaKYgZaBlf1Y`nmIlLF2_dfI+Q`_X0gXVp!ZR^fpWa`hz0
zZVY&@a7P<`M{XTziYEHqAo4+nJ}ooHew`)gxjc}3#C`Chy@jVF<VI6dWv$oEok@4a
zi{kA4`{{&_yQybM9-UqNku>~hA!3kEwpzu~Eq*$*rpgiWpYTCz{0d+;|AYx^<M2|G
z3`!b^k-J;NKqy6%sh=1^;{#mqq~~sOWwQ`@G<cL2NvYA^l_{*~#+g+4g$XLI8cRn$
z`v53oh~*RfdE!go!d-m<(zQkmzjk<F)?s0$N+N@uf0)ApT~J2Ns%0GVd4RO7X&|HP
zN0ChhY0#^XjpsBQh&{*3FI4Y^$)D%o9<v)@Jg1bbOGw9ErpNH<3rSp+beLQWSxFv+
zO(*NW>0`Z@0oE2r<0iXmW<k|il5yZ7i4?6R5<bIG>0|=0Ls=2@cP21A4MS8?5yBCk
zn!Hs)La6<E7ygu3g@YlpVD$nAaxC8wLYu{?YG4uBwIvPi-Uc$MV*y!k^A73!Foj9_
z@(GfkyD?^;#Hsd%_k>)q!mqZO&^4$)&jd~*oiD23o|+DJ%g8}y-BaGUN;mW~tbp*v
z=h(3k^|12J9uU@(!4TPdpy&LNv}je3MvqWtgQ6IoiO~b`Gvf5J)B~nOXCf23e}IHI
z4iKT^2{1Ts26?%pi?rX&Aj%a^#KWo-EO&N+NADcAu5SQvel^&?m_YmV1khou2ECqo
z8*HDZ;>&I)v^uOv{q}tVoB89oyiPFkX2|1>)?#8e=^9VIbBNi;v4LLHIbhZjCD^!E
z6Af&g@m}c>v=<6R1*z{$|C3yF)-Oe+#iud$X*xM^a}F&WZ-5)P|3U8yzPO}&KW!az
zosRzA4+)AUr1xYIRWqrjD(aC~(*BUB=|5%5gPriX`Ye=ijKXR9m&n5tYpB72G%~zy
zh%C(iLQL#6>E;oAOwEyMc=&KKy&gV}ro{;$`^X*jmb$`ErxqeTMHcVRR-!N1e5lYx
zSaf%Yo%)u;uV#7Cc9~6Nb5{XToH+wpbKkSQa-$(V_9~kr_#PhHX%T!N2*>+E@x&1+
z=vBT1x=-VYqjn;@Ytl`!yXPM8bhtS(Mh*w~d$7MSkIwL#2+7C2F|bY#<X#L%x#Uf-
zM{znCG3_)tbK@bYjSZr249DYn<u#bAHX0Y_9D>F6nfywD0J59OgvSeSu&EA{sJLnw
zG1@bgT7B~&u|oaKa%lys?sAPddF-O2^2Ldng9NkGBbg3Y^rL<<SunjvoX)&bK>F8s
z(&m|qDe1oqKdmC6mp_h;l(fXN>5j;1Gw8YgDcF6;W~9E!(NFgJbVafPIPbp$PA;K9
z!3>%OftRpp7_h3+h%-F!(~Lw^)Ud)EB26&SM4$L>PPTLvuZ7x+r=js!1$ilWnmtG_
z!jM%r&!NeXnC_Fsy9wrazta|7xr$L~q5VWDmSc;)IRuX#_Ttn9O^|qLD}8RnqtkC@
zvR}tVpu>$I*1f))JZsOu2*EU#NwkG9RXv=xEgl@=(#g&^3jL-}V4r#v?^Drn@*>=T
zNNCzn!}^)HSWX2k+^(?uFMHxW?jA1;^e37%FJRs~Q`#S&1y0YVlj_Gl*fsGgT)RW4
zfDpkeW+iZ^>>+#Y)+2cN<0c%SUPSkjCTf3(g2CP^#OX^f^I+U1E9Z@|R6%z+xi%^Y
z>w*vSbbc9Q?l&iD9GFP5R>fM4aNkM3I1hnQz5;4js^I-V235Gqa;?j((QmIYiZ0&8
zdwWJ2lT7?zm@l7cyyy>mcbhU>dA~rfUI!ADz3C*UD`Zi38Mz@7!L4!b!-F9!E5%p)
zsn@D{a_biZax=;qzlK7NC0<X(CwdU$r3R=SSwu=cy(Kq}sMFx~^)xNbnyyKar1s<k
z7=I8z_fNh!(s?|FMFgV4EqmNVF2g=0eMoyf7q+Q<WLJn>g|PTqkgj*eOBuVMO5!~n
zJXb`XwW$%^)~%TObqrRw$brnKUFfLV%1$&CBhj~%P_A+e3F;`o>9w+S>kU7eyrZ6d
zveg;YM1?VMlM!9fIfXtce#nlj*#O1CYILo)8NKYX0u=Nda82TC(0Fs3jP;Br7<`6N
z@o<3N+}-fRS{p3{Gbs&iAoq8<b9jfjC^aGhienn6Wn3!RKIbfSpREC9uHsMoj9j{9
zloqIR4C&O8YS48kCXLQ!bjWTWn>7@RiGI3ZIYpX!zm5R8#C0fd%;jP<;$UV)31n)_
zV@9w1!4!?RK*wP_nWm|(Wb1_vXgXtsWsEaO9qA?f=V55YRRb-$6OEQ4f|w)sl-%cX
zBJIO@==kb8=*fSF_LufpmUjq?gZQv%{u<B<6=q}%dLi{zG4^k=C0_#~Nqx8*H$REv
z8=F#SX8MTT-P@$`_y*K_ei=U42mnpUrj7}PWPJ2SESfbHPwA$zy;~w_s>Kc~t+{)#
zC#npNWn|E@<PavQ>;bgubMud7EL*003bQV@Vqd5vj2ZKdy%MN{<|9(@gwRO*wm*#2
z#lPo01TQp}T>x(<i<12AIYh<lG<ek=f;+q9QI@M*IN5s~v8)k7qiu)TdmhygYa&c0
z7ruhD=*7(5sRX^$1M%DFd^B2JfD0yEf%J}P%(HowaQeYcoM?WReOa%GF7o4<-mC~F
z-P)K)m9A!IH{4~C(`Bf-lra`8kYVMQy@YM}mb7te!)PAI*xnll?y9dq_VF!vmL-qn
zC%?g$tW_v$wHfnss$fZ(IGUbr<gHH5L3MX;wx(5>^cI|B!*ni@zQ=CZKn#i5BzYPu
z&5{6nYb>3mOsjY4)5Rb{)oV4GjiowR8&F3(qvud7#q%VbS<085qKpD{-*~!Qo;Gs%
z2GYDNiLCydgPU{1vFBkB(>fqZUw?9<H~l-%UM-ot4~m6T!LoR%zX2x8-+>~hM!w*l
zS9Oi==c9FFBW&9@A6NFwC5P2zxxXcUM#Z)h?EY98UB6a@qlO6lgB+44qTDIRb{q&n
zEjz*bK8qL_Yc59{5=Yi&FZhCvn$8@BaGG@|SDs__<sayAYY7Qj8)dB`AZ>k5^EsCK
zYM}&Q+S(;;r1gTg8R$KrUf*A-L9&BaP|F+sczbRydV8m1xsMj)xB1iYUcYG6yEZEM
za0c<SxCM$_3BMYh6n<l`h;>!sO=`1I4c|;FLcJT~aMsN;bmUc}cJt)w^R_qPw2mp5
z8?}lm`jyhwuZ>tW7>Fl|cY$3>5c}&%Doqk?WM7NiBWv3u=@_e8`tG@r_2jTJIvCMP
z8L=k1zPOlnwr!!}`rSA`B^!KxD&VrZDY*XRb}SUwT3=;sUO)S?c)imrjr#T3uTZzq
z1#>G(aYEfGT#=xJ3D!}dV{rhN8gmr<k3|UP1u$@72{$OcIMn2c`wLfte%W}M{YZz-
zOu3A{9MDcQbdVe=n?z2<uA(7_&yi!>kV+RVqH80~SrTVS3o46fTvHXDqV|#TryevU
z<T#bA^g-G?0CNP@`4jb4po5qWUiUjq%wD-sR+R%{%dOyoVKVjcXE%YGb~Ei?xRzZo
z;R|*zldbP6dk^;`<>=|kJ=A*pWOB-quu>hPnYqH-=@h-O)&X6Ut+%ql^c@$#g3eOh
za@ig`#`=)Kep#Ag$D^{dCF@<!^6F0=RIV4Qy@29#B&?IoPtqKNNEBIn5_P|O;)HdJ
z>51cd);}-Q(d4k<6c;^(*vd_0uS_=GQXWDpO}pu*V@s*S(aU71m5jCWejhAx@4}aF
z7vTG}Iy!yP0QKPC##IgND6oA8CIz?91=*QYAh-)}XJw<;qBCI1(a1WyyU55;Bh-<p
z#^8!k*dKF>#@kY=I4PB+Efc4VJ^|ei)vTj|DE2q;iHH7LTG>-U&k0VT!Ho^{V~qp-
z?we1?8Od3DmVbt@6OJ^M6{SP<_T<h3F*>mEEt9-M8V9}l;N^5@e4IY4KA0~LE&+C^
z_WB{lUrj~TcXnv^2ZadzsegNQ1UOdJ*fMTXs~PhcHy`Jd5~&0>-5?fRUW;IjT@iCc
ztRMc+ZmPZ~$Hx*7``yk)KBO29etQfj*2tpe6*u%)XO7RZ7ozcSaTH$Th`I}Q<5JTY
zJl&TK^+&y!#Ya;4oeNq}RLL9*=a2r=%WR=PNlZ{y)JjMoLeTF2Q6^ts_wVKrf)aNB
zO;<yoa=rasSzfQV&K#z={o)00NhIrPzB0Rl1u&AELf3=@z=;{oAaSysG%fRoKM~MO
zy~qAjUqfD5LQzaWURFX-OyD1zI=5H%*8YQd1*g36FDtQsSP64`Vb0J0-u<ca@)F!#
zl$Yd={L@OnTj!6e3Fd!r^!t>*&HnC4^N(g0fAI9Lo_QPnzk~)PB*er-<%e^Q1-*6u
zRKwro-%kG9=JzJyJ>kELnEcV@=s&qMm6w)~<SzN2mn#u4?(d2Ze-sM%cbh=H|D;zS
zBO%2(6PFMY;~oI6V}VBhNm=9%m-2hqexLtu!xlK<|7wW*@7Vnx)4z*}{7baqKYBIz
zD?N|Du$JcfAssm7KPqVaTa({o`}ZdLe<76d=YIdm_TMEm{-fVXf2mmU7v_I^7y=Fd
gyUv9CKL_ad%kq1W0=51^IO5;O+itkTFj=wx0j{Ws*Z=?k

diff --git a/src/finn/data/onnx/mnist-conv/test_data_set_0/input_0.pb b/src/finn/data/onnx/mnist-conv/test_data_set_0/input_0.pb
deleted file mode 100644
index f0072d51a480af615e92312608f75993be9f1136..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 3149
zcmeH|xr<am5QpDp6Nqdi1|l0Mg3%zJf!eMIDhi7zsL|pPh}U}G`Y<xwNW^GDG4c;E
zm`IGo$UqDRLjz+Y@%!d=Iqf*JtZO7#@SCo#uBu;Gb@!XrL~Ek8s<moz;$U}WwJ51~
zR#i#^*IGT#+q(w#S#*7sXNA47EdDnYvAxv9nRhDe+CZJ>30*8j`uQVoj3z4dvKII8
zBCpkHDU3}_^vPMNml@06TKiIJoXLBgV>HKm(3-BQCydV-H|?&f4PY61ol5q<(^ru5
zA-D_ffotG?#v{g!@O3j^Cy$ft=})Cc>^!*+lheGHfL!YG%Y7Rh0Q<lhe3kM{8_VK<
zPm;&{SMqp=G3s=5O<V=DU{B^u3)I)Y11X;|V+G<~JSP53&S_28!B=da;~MY@1h2Jz
zW8N9VWzNV8%Wo2UyiJaG;4}CF<SJ!<-bd8xTEv!qRT|9~_5SqL4=C-t_g)&@k*Dxm
zckqR8KVZyHa3u2RA2G8-z4>|X@CmWr;CcY>6ft6)lw4IBcqR2bpCLYK3y&^q_U+Ad
z!{_;WhI81oj}o~8!|TKLPHK8s^7{6-!f^{&qwmGNjgmxsC59#a+SAB?h&o<_m!N}>
zcuVe^a0vNkfHFCPvrbKIeFuK5r-yA0U*D{K@?LHx$98ZLcs_nxc7h$?G>Grb5%hWD
z%rQ>ta*f;{!8YQeKfYu4UEV)!ep|H9<x$;lgYR!tZGHZd3&VGOjXPiDkoPP&3gn5m
zt)F+p(VG44s9WoLFz&vzbD!OlamI*q;2`j<l%tG$nEMTN-9D->2liwaqxPtGBl@s(
zQ)77rmuGhaANS|IHueOt)>GgZu&2?-%jo_mg>=Z_Iol7{%(IZxsjAlE6Zzzp(;ixP
z^l>lx2Y4=Iu6g@IY!@iUu^YD<jFPVYS&Z>pu@2v9&<0z;8g=Xy>r?paCqg<|w@1os
z-a|7R{2HtD*BMiu564IDUedde>>qg|PLHq^-hDN=>NHsK4TpG-LPBq6Th!BEPvN!j
N`x|)u*Z!>z`~bgf>s9~&

diff --git a/src/finn/data/onnx/mnist-conv/test_data_set_0/output_0.pb b/src/finn/data/onnx/mnist-conv/test_data_set_0/output_0.pb
deleted file mode 100644
index a6f4cdf92..000000000
--- a/src/finn/data/onnx/mnist-conv/test_data_set_0/output_0.pb
+++ /dev/null
@@ -1,2 +0,0 @@
-
-J(ãêsDU®ÄŒtÍEÚ'DWQeÄYôÐÄQôÄ3vÂNKBÄñ³Ä
\ No newline at end of file
diff --git a/src/finn/transformation/__init__.py b/src/finn/transformation/__init__.py
deleted file mode 100644
index e9f5fe15f..000000000
--- a/src/finn/transformation/__init__.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""
-Guide to writing FINN transformations
--------------------------------------
-
-* Your transformation must inherit the Transformation abstract base class.
-* Your transformation's apply function should take in a ModelWrapper, and return
-  a tuple with (transformed_model: ModelWrapper, model_was_changed: Bool)
-* The transformations are meant to be applied using the .transform function
-  in ModelWrapper. This makes a deep copy of the input model by default, so
-  you don't have to.
-* model_was_changed indicates whether your transformation made any changes to
-  the model. If you know your transformation needs to be called only once and
-  repeated calls have no further effect, you can return False even if the model
-  was changed.
-* You MUST return model_was_changed=False at some point when your transformation
-  is called multiple times, otherwise apply_repeated() will loop infinitely.
-* If you cannot guarantee that the transformation will reach a fixed point,
-  you must declare this, return model_was_changed = False and let the user
-  manually re-apply the transform.
-"""
-
-from abc import ABC, abstractmethod
-from finn.util.basic import get_num_default_workers
-import multiprocessing as mp
-
-
-class Transformation(ABC):
-    """Transformation class all transformations are based on. Contains only
-    abstract method apply() every transformation has to fill."""
-
-    def __init__(self):
-        super().__init__()
-
-    @abstractmethod
-    def apply(self, model):
-        pass
-
-
-class NodeLocalTransformation(Transformation):
-    """
-    Parent class for transformations that can be executed locally on a single
-    node by accessing and modifying the attributes of only that node.
-    This class can then automatically parallelize the transformation.
-    Transformations subclassing NodeLocalTransformation must implement the
-    abstract method applyNodeLocal().
-
-    To control the degree of parallelization, specify the num_workers argument
-    in the constructor, using one of the following values:
-    * None: use NUM_DEFAULT_WORKERS environment variable
-    * 0: use all available CPU cores
-    * (any other int>0): set number of parallel workers
-    """
-
-    def __init__(self, num_workers=None):
-        super().__init__()
-        if num_workers is None:
-            self._num_workers = get_num_default_workers()
-        else:
-            self._num_workers = num_workers
-        assert self._num_workers >= 0, "Number of workers must be nonnegative."
-        if self._num_workers == 0:
-            self._num_workers = mp.cpu_count()
-
-    @abstractmethod
-    def applyNodeLocal(self, node):
-        pass
-
-    def apply(self, model):
-        # Remove old nodes from the current model
-        old_nodes = []
-        for i in range(len(model.graph.node)):
-            old_nodes.append(model.graph.node.pop())
-
-        # Execute transformation in parallel
-        with mp.Pool(self._num_workers) as p:
-            new_nodes_and_bool = p.map(self.applyNodeLocal, old_nodes, chunksize=1)
-
-        # extract nodes and check if the transformation needs to run again
-        # Note: .pop() had initially reversed the node order
-        run_again = False
-        for node, run in reversed(new_nodes_and_bool):
-            # Reattach new nodes to old model
-            model.graph.node.append(node)
-            if run is True:
-                run_again = True
-
-        return (model, run_again)
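
For reference, a minimal sketch of the contract described in the module
docstring above. The transformation below is illustrative only; the op choice
and the find_consumers helper usage are assumptions, not part of the removed
module:

    from finn.transformation import Transformation

    class RemoveIdentityNodes(Transformation):
        """Example: remove no-op Identity nodes, rewiring their consumers."""

        def apply(self, model):
            graph_modified = False
            for n in model.graph.node:
                if n.op_type == "Identity":
                    for c in model.find_consumers(n.output[0]) or []:
                        for idx, inp in enumerate(c.input):
                            if inp == n.output[0]:
                                # rewire consumer to the Identity input
                                c.input[idx] = n.input[0]
                    model.graph.node.remove(n)
                    # report the change; the node list was mutated, so stop
                    # here and let repeated application do another clean pass
                    graph_modified = True
                    break
            return (model, graph_modified)

Applied as model = model.transform(RemoveIdentityNodes()), which deep-copies
the model first; once no Identity nodes remain, apply returns False and the
repeated application terminates.
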
diff --git a/src/finn/transformation/batchnorm_to_affine.py b/src/finn/transformation/batchnorm_to_affine.py
deleted file mode 100644
index 401c59164..000000000
--- a/src/finn/transformation/batchnorm_to_affine.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-from onnx import TensorProto
-from onnx import helper as oh
-
-from finn.transformation import Transformation
-from finn.transformation.infer_shapes import InferShapes
-
-
-class BatchNormToAffine(Transformation):
-    """Replaces any test-time BatchNorm layers with Mul-Add layers."""
-
-    def apply(self, model):
-        graph = model.graph
-        node_ind = 0
-        graph_modified = False
-        for n in graph.node:
-            node_ind += 1
-            if n.op_type == "BatchNormalization":
-                graph_modified = True
-                bn_input = n.input[0]
-                bn_output = n.output[0]
-                # extract batchnorm parameters as numpy arrays
-                scale = model.get_initializer(n.input[1])
-                bias = model.get_initializer(n.input[2])
-                mean = model.get_initializer(n.input[3])
-                variance = model.get_initializer(n.input[4])
-                epsilon = 1e-5
-                # find A and B to compute batchnorm as affine transform A*x+B
-                # TODO is a division by moving avg factor needed for variance?
-                A = scale / np.sqrt(epsilon + variance)
-                B = bias - (A * mean)
-                # see if we have surrounding Unsqueeze/Squeeze nodes we can remove
-                producer = model.find_producer(bn_input)
-                if producer is not None:
-                    if producer.op_type == "Unsqueeze":
-                        bn_input = producer.input[0]
-                consumer = model.find_consumer(bn_output)
-                if consumer is not None:
-                    if consumer.op_type == "Squeeze":
-                        bn_output = consumer.output[0]
-                data_shape = model.get_tensor_shape(bn_input)
-                assert A.ndim == B.ndim, "Unexpected mul/add dims in BatchNormToAffine"
-                assert (
-                    len(data_shape) >= A.ndim
-                ), "Unexpected number of dims found in BatchNormToAffine"
-                # reshape the mul/add constants to match the data shape/dims
-                # by adding (1,) dimensions to the right
-                n_spatial_dims = len(data_shape) - 2
-                target_shape = (1, -1) + tuple(1 for i in range(n_spatial_dims))
-                A = A.reshape(target_shape)
-                B = B.reshape(target_shape)
-                # create value_info and initializers for Mul and Add constants
-                mul_const = oh.make_tensor_value_info(
-                    model.make_new_valueinfo_name(), TensorProto.FLOAT, A.shape
-                )
-                graph.value_info.append(mul_const)
-                model.set_initializer(mul_const.name, A)
-                mul_output = oh.make_tensor_value_info(
-                    model.make_new_valueinfo_name(), TensorProto.FLOAT, data_shape
-                )
-                graph.value_info.append(mul_output)
-                add_const = oh.make_tensor_value_info(
-                    model.make_new_valueinfo_name(), TensorProto.FLOAT, B.shape
-                )
-                graph.value_info.append(add_const)
-                model.set_initializer(add_const.name, B)
-                # create Mul and Add nodes to replace the batchnorm
-                mul_node = oh.make_node(
-                    "Mul", [bn_input, mul_const.name], [mul_output.name]
-                )
-                add_node = oh.make_node(
-                    "Add", [mul_output.name, add_const.name], [bn_output]
-                )
-                # insert where the batchnorm is to preserve topological ordering
-                graph.node.insert(node_ind, mul_node)
-                graph.node.insert(node_ind + 1, add_node)
-                # remove old nodes
-                graph.node.remove(n)
-                if consumer is not None:
-                    if consumer.op_type == "Squeeze":
-                        graph.node.remove(consumer)
-                if producer is not None:
-                    if producer.op_type == "Unsqueeze":
-                        graph.node.remove(producer)
-        model = model.transform(InferShapes())
-        return (model, graph_modified)
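
The A and B derivation above can be sanity-checked numerically; the parameter
values below are made up for illustration:

    import numpy as np

    scale, bias = np.array([1.5]), np.array([0.3])
    mean, variance, epsilon = np.array([0.2]), np.array([4.0]), 1e-5
    x = np.linspace(-1.0, 1.0, 5)

    # test-time batchnorm output
    bn_out = scale * (x - mean) / np.sqrt(variance + epsilon) + bias
    # equivalent affine transform
    A = scale / np.sqrt(epsilon + variance)
    B = bias - (A * mean)
    assert np.allclose(bn_out, A * x + B)
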
diff --git a/src/finn/transformation/bipolar_to_xnor.py b/src/finn/transformation/bipolar_to_xnor.py
deleted file mode 100644
index 80f2a7335..000000000
--- a/src/finn/transformation/bipolar_to_xnor.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-import warnings
-from onnx import TensorProto
-from onnx import helper as oh
-
-from finn.core.datatype import DataType
-from finn.transformation import Transformation
-from finn.transformation.infer_shapes import InferShapes
-from finn.transformation.infer_datatypes import InferDataTypes
-from finn.util.basic import get_by_name
-from finn.custom_op.registry import getCustomOp
-
-
-class ConvertBipolarMatMulToXnorPopcount(Transformation):
-    """Convert MatMul nodes with all-bipolar inputs to XnorPopcountMatMul
-    and associated result correction."""
-
-    def apply(self, model):
-        graph = model.graph
-        node_ind = 0
-        graph_modified = False
-        for n in graph.node:
-            node_ind += 1
-            if n.op_type == "MatMul":
-                mm_input = n.input[0]
-                mm_weight = n.input[1]
-                mm_output = n.output[0]
-                i_bp = model.get_tensor_datatype(mm_input) == DataType.BIPOLAR
-                w_bp = model.get_tensor_datatype(mm_weight) == DataType.BIPOLAR
-                if i_bp and w_bp:
-                    # find producing threshold node and adjust output to binary
-                    def find_prod_mt(x):
-                        is_mt = x.op_type == "MultiThreshold"
-                        is_bp = False
-                        if is_mt:
-                            dt = get_by_name(x.attribute, "out_dtype").s
-                            is_bp = dt.decode("utf-8") == "BIPOLAR"
-                        return is_mt and is_bp
-
-                    mt_chain = model.find_upstream(mm_input, find_prod_mt)
-                    if len(mt_chain) == 0:
-                        if mm_input == graph.input[0].name:
-                            # change input datatype to BINARY
-                            model.set_tensor_datatype(mm_input, DataType.BINARY)
-                            graph_modified = True
-                            warnings.warn(
-                                """IMPORTANT: Changing graph input DataType
-                            to BINARY instead of BIPOLAR. Ensure this is respected
-                            when checking for correctness.
-                            """
-                            )
-                        else:
-                            raise Exception(
-                                """Could not find upstream bipolar
-                                   MultiThreshold, and the MatMul is not the
-                                   first node on graph input. Unable to convert
-                                   input tensor from BIPOLAR to BINARY."""
-                            )
-                    else:
-                        graph_modified = True
-                        mt = mt_chain[-1]
-                        mt_inst = getCustomOp(mt)
-                        # ensure old scale/bias were correct for BIPOLAR
-                        scale_ok = mt_inst.get_nodeattr("out_scale") == 2.0
-                        bias_ok = mt_inst.get_nodeattr("out_bias") == -1.0
-                        assert (
-                            scale_ok and bias_ok
-                        ), """Unexpected scale/bias
-                        attributes for BIPOLAR MultiThreshold node."""
-                        # start conversion, set MT output to binary
-                        # (this is what XnorPopcountMatMul expects)
-                        mt_inst.set_nodeattr("out_dtype", "BINARY")
-                        mt_inst.set_nodeattr("out_scale", 1.0)
-                        mt_inst.set_nodeattr("out_bias", 0.0)
-                        model.set_tensor_datatype(mm_input, DataType.BINARY)
-                    # change node type and domain
-                    n.op_type = "XnorPopcountMatMul"
-                    n.domain = "finn"
-                    # convert weights into binary (-1,+1) -> (0,1)
-                    Wbin = (model.get_initializer(mm_weight) + 1) / 2
-                    # extract vector length (common matrix dim)
-                    K = Wbin.shape[0]
-                    model.set_initializer(mm_weight, Wbin)
-                    model.set_tensor_datatype(mm_weight, DataType.BINARY)
-                    # make new output node with correct shape
-                    mm_out_shape = model.get_tensor_shape(mm_output)
-                    xnorpcout = oh.make_tensor_value_info(
-                        model.make_new_valueinfo_name(), TensorProto.FLOAT, mm_out_shape
-                    )
-                    n.output[0] = xnorpcout.name
-                    model.set_tensor_datatype(xnorpcout.name, DataType.UINT32)
-                    # add mul-add nodes to produce the correct dot product:
-                    # XnorPopcountMatMul yields P (number of matching bits),
-                    # while the bipolar result is P-N; with K = P+N this is 2*P-K
-                    A = np.asarray([2.0], dtype=np.float32)
-                    B = np.asarray([-K], dtype=np.float32)
-                    # create value_info and initializers for Mul and Add constants
-                    mul_const = oh.make_tensor_value_info(
-                        model.make_new_valueinfo_name(), TensorProto.FLOAT, A.shape
-                    )
-                    graph.value_info.append(mul_const)
-                    model.set_initializer(mul_const.name, A)
-                    mul_output = oh.make_tensor_value_info(
-                        model.make_new_valueinfo_name(), TensorProto.FLOAT, mm_out_shape
-                    )
-                    graph.value_info.append(mul_output)
-                    add_const = oh.make_tensor_value_info(
-                        model.make_new_valueinfo_name(), TensorProto.FLOAT, B.shape
-                    )
-                    graph.value_info.append(add_const)
-                    model.set_initializer(add_const.name, B)
-                    # create Mul and Add nodes applying the result correction
-                    mul_node = oh.make_node(
-                        "Mul", [xnorpcout.name, mul_const.name], [mul_output.name]
-                    )
-                    add_node = oh.make_node(
-                        "Add", [mul_output.name, add_const.name], [mm_output]
-                    )
-                    # insert right after the MatMul to preserve topological ordering
-                    graph.node.insert(node_ind, mul_node)
-                    graph.node.insert(node_ind + 1, add_node)
-        if graph_modified:
-            model = model.transform(InferShapes())
-            model = model.transform(InferDataTypes())
-        return (model, graph_modified)
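
The result correction above can likewise be verified with a quick numpy sketch
(operand shapes and the random seed are made up for illustration):

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.choice([-1.0, 1.0], size=(1, 8))   # bipolar activations
    W = rng.choice([-1.0, 1.0], size=(8, 4))   # bipolar weights
    K = W.shape[0]                             # common matrix dim, K = P + N

    # binary encoding used by XnorPopcountMatMul: -1 -> 0, +1 -> 1
    xb, Wb = (x + 1) / 2, (W + 1) / 2
    # xnor-popcount: P = number of bit positions where the operands agree
    P = (xb[:, :, None] == Wb[None, :, :]).sum(axis=1)
    # bipolar dot product is P - N = 2*P - K
    assert np.allclose(x @ W, 2.0 * P - K)
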
diff --git a/src/finn/transformation/change_datalayout.py b/src/finn/transformation/change_datalayout.py
deleted file mode 100644
index d5b393a25..000000000
--- a/src/finn/transformation/change_datalayout.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from onnx import helper, TensorProto
-
-from finn.transformation import Transformation
-from finn.transformation.infer_shapes import InferShapes
-from finn.util.basic import get_by_name
-
-
-class ChangeDataLayoutQuantAvgPool2d(Transformation):
-    """Replace QuantAvgPool2d with datalayout (N,C,H,W) with Transpose nodes
-    and QuantAvgPool2dNHWC with datalayout (N,H,W,C)"""
-
-    def apply(self, model):
-        graph = model.graph
-        node_ind = 0
-        graph_modified = False
-        for n in graph.node:
-            node_ind += 1
-            if n.op_type == "QuantAvgPool2d" and (
-                get_by_name(n.attribute, "data_layout") is None
-                or get_by_name(n.attribute, "data_layout").s.decode("UTF-8") == "NCHW"
-            ):
-                graph_modified = True
-                node_input = n.input[0]
-                node_output = n.output[0]
-                s = get_by_name(n.attribute, "stride").i
-                k = get_by_name(n.attribute, "kernel").i
-                ibits = get_by_name(n.attribute, "ibits").i
-                obits = get_by_name(n.attribute, "obits").i
-                signed = get_by_name(n.attribute, "signed").i
-                batchsize = model.get_tensor_shape(n.input[0])[0]  # assume NCHW
-                channels = model.get_tensor_shape(n.input[0])[1]  # assume NCHW
-                idim = model.get_tensor_shape(n.input[0])[-1]  # assume NCHW, square input
-                odim = model.get_tensor_shape(n.output[0])[-1]  # assume NCHW, square output
-
-                # create new nodes
-                # NCHW -> NHWC
-                # create new intermediate values
-                inp_trans_out = helper.make_tensor_value_info(
-                    model.make_new_valueinfo_name(),
-                    TensorProto.FLOAT,
-                    (batchsize, idim, idim, channels),  # NHWC
-                )
-                graph.value_info.append(inp_trans_out)
-                inp_trans_out = inp_trans_out.name
-                quantavg_out = helper.make_tensor_value_info(
-                    model.make_new_valueinfo_name(),
-                    TensorProto.FLOAT,
-                    (batchsize, odim, odim, channels),
-                )
-                graph.value_info.append(quantavg_out)
-                quantavg_out = quantavg_out.name
-                inp_trans_node = helper.make_node(
-                    "Transpose", [node_input], [inp_trans_out], perm=[0, 2, 3, 1]
-                )
-                quantavg_node = helper.make_node(
-                    "QuantAvgPool2d",
-                    [inp_trans_out],
-                    [quantavg_out],
-                    domain="finn",
-                    stride=s,
-                    kernel=k,
-                    ibits=ibits,
-                    obits=obits,
-                    signed=signed,
-                    data_layout="NHWC",
-                )
-                # NHWC -> NCHW
-                out_trans_node = helper.make_node(
-                    "Transpose", [quantavg_out], [node_output], perm=[0, 3, 1, 2]
-                )
-                # insert nodes
-                graph.node.insert(node_ind, inp_trans_node)
-                graph.node.insert(node_ind + 1, quantavg_node)
-                graph.node.insert(node_ind + 2, out_trans_node)
-                # remove old nodes
-                graph.node.remove(n)
-
-                # set shapes
-                model.set_tensor_shape(inp_trans_out, (batchsize, idim, idim, channels))
-                model.set_tensor_shape(quantavg_out, (batchsize, odim, odim, channels))
-        model = model.transform(InferShapes())
-        return (model, graph_modified)
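
The two perm attributes used above are inverses of each other; a quick numpy
check (shapes made up for illustration):

    import numpy as np

    x_nchw = np.arange(2 * 3 * 4 * 4).reshape(2, 3, 4, 4)  # NCHW
    # perm=[0, 2, 3, 1]: NCHW -> NHWC
    x_nhwc = x_nchw.transpose(0, 2, 3, 1)
    assert x_nhwc.shape == (2, 4, 4, 3)
    # perm=[0, 3, 1, 2]: NHWC -> NCHW, undoing the first Transpose
    assert np.array_equal(x_nhwc.transpose(0, 3, 1, 2), x_nchw)
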
diff --git a/src/finn/transformation/double_to_single_float.py b/src/finn/transformation/double_to_single_float.py
deleted file mode 100644
index 4f7eb1cc8..000000000
--- a/src/finn/transformation/double_to_single_float.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from finn.transformation import Transformation
-import numpy as np
-
-
-class DoubleToSingleFloat(Transformation):
-    """Convert any float64 initializers to float32."""
-
-    def apply(self, model):
-        graph_modified = False
-        init_names = [x.name for x in model.graph.initializer]
-        for nm in init_names:
-            init = model.get_initializer(nm)
-            if init.dtype == np.float64:
-                init_f32 = init.astype(np.float32)
-                model.set_initializer(nm, init_f32)
-                graph_modified = True
-        return (model, graph_modified)
diff --git a/src/finn/transformation/fold_constants.py b/src/finn/transformation/fold_constants.py
deleted file mode 100644
index a73035e57..000000000
--- a/src/finn/transformation/fold_constants.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import finn.core.onnx_exec as oxe
-from finn.transformation import Transformation
-from finn.transformation.infer_shapes import InferShapes
-
-
-class FoldConstants(Transformation):
-    """Replace the output of a node with const-only inputs with a precomputed
-    result."""
-
-    def apply(self, model):
-        graph = model.graph
-        node_ind = 0
-        graph_modified = False
-        execution_context = model.make_empty_exec_context()
-        for n in graph.node:
-            node_ind += 1
-            node_inp_inits = list(map(lambda x: model.get_initializer(x), n.input))
-            node_inp_dyn = list(filter(lambda x: x is None, node_inp_inits))
-            node_out = n.output[0]
-            is_all_constant_inputs = len(node_inp_dyn) == 0
-            ishape = model.get_tensor_shape(n.input[0])
-            is_const_shape = (n.op_type == "Shape") and (ishape is not None)
-            if is_all_constant_inputs or is_const_shape:
-                # this node has no dynamic inputs, only constant ones -- so we can
-                # do constant folding.
-                oxe.execute_node(n, execution_context, graph)
-                # use the execution result as an initializer
-                model.set_initializer(node_out, execution_context[node_out])
-                # remove old node
-                graph.node.remove(n)
-                graph_modified = True
-        if graph_modified:
-            model = model.transform(InferShapes())
-        return (model, graph_modified)
diff --git a/src/finn/transformation/fpgadataflow/__init__.py b/src/finn/transformation/fpgadataflow/__init__.py
deleted file mode 100644
index 83c8e8bed..000000000
--- a/src/finn/transformation/fpgadataflow/__init__.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/finn/transformation/general.py b/src/finn/transformation/general.py
deleted file mode 100644
index 02f95b14e..000000000
--- a/src/finn/transformation/general.py
+++ /dev/null
@@ -1,257 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import finn.util.basic as util
-from finn.transformation import Transformation
-from toposort import toposort_flatten
-
-
-class RemoveUnusedTensors(Transformation):
-    """Remove any unused tensors in the graph by removing any initializers,
-    ValueInfo and tensor annotations associated with it. Unused tensors do not
-    appear as any input/output for any graph nodes.
-    """
-
-    def apply(self, model):
-        graph_modified = False
-        onnx_graph = model.model.graph
-        # build a set of tensors that we actually use in the graph nodes
-        used_tensors = set()
-        for node in model.graph.node:
-            for i in node.input:
-                used_tensors.add(i)
-            for o in node.output:
-                used_tensors.add(o)
-        # remove initializers, value_info and annotations that are not in the
-        # used set of tensors, as determined by the graph node i/o
-        for init in onnx_graph.initializer:
-            if init.name not in used_tensors:
-                onnx_graph.initializer.remove(init)
-                graph_modified = True
-        for vi in onnx_graph.value_info:
-            if vi.name not in used_tensors:
-                onnx_graph.value_info.remove(vi)
-                graph_modified = True
-        for qa in onnx_graph.quantization_annotation:
-            if qa.tensor_name not in used_tensors:
-                onnx_graph.quantization_annotation.remove(qa)
-                graph_modified = True
-
-        return (model, graph_modified)
-
-
-class RemoveStaticGraphInputs(Transformation):
-    "Remove any top-level graph inputs that have initializers."
-
-    def apply(self, model):
-        graph_modified = False
-        for i in model.graph.input:
-            if model.get_initializer(i.name) is not None:
-                # move ValueInfo to internal (value_info) container
-                model.graph.value_info.append(i)
-                model.graph.input.remove(i)
-                graph_modified = True
-
-        return (model, graph_modified)
-
-
-class GiveUniqueNodeNames(Transformation):
-    """Give unique names to each node in the graph using enumeration, starting
-    with given prefix (if specified in the constructor)."""
-
-    def __init__(self, prefix=""):
-        super().__init__()
-        self.prefix = prefix
-
-    def apply(self, model):
-        optype_count = {}
-        for n in model.graph.node:
-            if n.op_type not in optype_count.keys():
-                optype_count[n.op_type] = 0
-            n.name = "%s%s_%d" % (self.prefix, n.op_type, optype_count[n.op_type])
-            optype_count[n.op_type] += 1
-        # return model_was_changed = False as single iteration is always enough
-        return (model, False)
-
-
-class GiveRandomTensorNames(Transformation):
-    """Give random tensor names to all tensors."""
-
-    def apply(self, model):
-        names = model.get_all_tensor_names()
-        for name in names:
-            model.rename_tensor(name, util.random_string())
-        # return model_was_changed = False as single iteration is always enough
-        return (model, False)
-
-
-class GiveReadableTensorNames(Transformation):
-    """Give more human-readable names to all internal tensors. You should
-    apply GiveUniqueNodeNames prior to this transform to avoid empty node names,
-    as the readable names are based on the node names."""
-
-    def apply(self, model):
-        # to ensure we can use rename_tensor safely (without renaming existing
-        # tensors) we start by giving random names to all tensors
-        model = model.transform(GiveRandomTensorNames())
-        graph = model.graph
-        for n in graph.node:
-            assert n.name != "", "Found empty node name"
-            out_num = 0
-            for o in n.output:
-                model.rename_tensor(o, "%s_out%d" % (n.name, out_num))
-                out_num += 1
-            init_in_num = 0
-            for i in n.input:
-                if model.get_initializer(i) is not None:
-                    model.rename_tensor(i, "%s_param%d" % (n.name, init_in_num))
-                    init_in_num += 1
-        # give special names to the main model input and output
-        model.rename_tensor(model.graph.input[0].name, "global_in")
-        model.rename_tensor(model.graph.output[0].name, "global_out")
-        # return model_was_changed = False as single iteration is always enough
-        return (model, False)
-
-
-class GiveUniqueParameterTensors(Transformation):
-    """Make every parameter tensor unique. The aim is to avoid affecting
-    other nodes apart from the one the system is currently operating on."""
-
-    def apply(self, model):
-        graph = model.graph
-        graph_modified = False
-        seen_parameters = []
-        for n in graph.node:
-            # copy inputs since they may be modified
-            node_inputs_list = [x for x in n.input]
-            for input_idx, node_input in enumerate(node_inputs_list):
-                # check if it's a parameter
-                input_init = model.get_initializer(node_input)
-                if input_init is None:
-                    # dynamic input
-                    continue
-
-                # check if repeated
-                if node_input not in seen_parameters:
-                    # first occurrence
-                    seen_parameters += [node_input]
-                    continue
-
-                new_param_name = model.make_new_valueinfo_name()
-
-                model.set_initializer(new_param_name, input_init)
-                model.set_tensor_datatype(
-                    new_param_name, model.get_tensor_datatype(node_input)
-                )
-
-                # point node input to new tensor
-                n.input[input_idx] = new_param_name
-                graph_modified = True
-
-        return (model, graph_modified)
-
-
-class SortGraph(Transformation):
-    """ Returns the model with its node list sorted topologically.
-    Any ONNX graph to be executed must have a topologically sorted node list,
-    as dictated by the ONNX standard.
-    """
-
-    # Notes on SortGraph performance:
-    # benchmark in tests/transformation/test_sort_graph.py
-    # The algorithm doesn't move initializers, so its performance should only
-    # depend on the number of nodes.
-    #
-    # Relative order of magnitude for the time per step:
-    # - Gather graph structure:      base
-    # - Sort nodes:                  0.1 of base
-    # - Remove and insert in order:  0.001 of base
-    #
-    # Note:
-    # Removing the nodes and reinserting them in sorted order is probably
-    # faster than copying initializers around, and more robust in general.
-
-    def apply(self, model):
-        if len(model.graph.node) == 1:
-            # single-node graph, nothing to sort
-            return (model, False)
-        # Gather graph structure
-        graph_dependencies = {}
-        # keep a copy of the node list; it is also needed to remove the nodes later
-        node_list = [n for n in model.graph.node]
-        for node_idx, n in enumerate(node_list):
-            node_pred = model.find_direct_predecessors(n)
-            if node_pred is None:
-                # this also eliminates any disconnected nodes left floating in the graph
-                continue
-
-            node_dependencies = [node_list.index(pred) for pred in node_pred]
-            graph_dependencies[node_idx] = set(node_dependencies)
-
-        # Sort nodes
-        sorted_node_indexes = toposort_flatten(graph_dependencies)
-
-        # Remove nodes and insert them in sorted order. The nodes cannot be
-        # removed earlier, since model.find_direct_predecessors() needs them.
-        for n in node_list:
-            model.graph.node.remove(n)
-
-        for new_idx, sorted_idx in enumerate(sorted_node_indexes):
-            model.graph.node.insert(new_idx, node_list[sorted_idx])
-
-        return (model, False)
-
-
-class ConvertSubToAdd(Transformation):
-    """Convert subtract-a-constant nodes to add-a-constant nodes."""
-
-    def apply(self, model):
-        graph = model.graph
-        for n in graph.node:
-            if n.op_type == "Sub":
-                A = model.get_initializer(n.input[1])
-                if A is not None:
-                    n.op_type = "Add"
-                    model.set_initializer(n.input[1], -A)
-        # return model_was_changed = False as single iteration is always enough
-        return (model, False)
-
-
-class ConvertDivToMul(Transformation):
-    """Convert divide by constant nodes to multiply by constant nodes."""
-
-    def apply(self, model):
-        graph = model.graph
-        for n in graph.node:
-            if n.op_type == "Div":
-                A = model.get_initializer(n.input[1])
-                if A is not None:
-                    n.op_type = "Mul"
-                    model.set_initializer(n.input[1], 1.0 / A)
-        # return model_was_changed = False as single iteration is always enough
-        return (model, False)
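
SortGraph delegates the ordering itself to the toposort package;
graph_dependencies maps each node index to the set of node indices it depends
on. A small standalone example of what toposort_flatten does with such a dict:

    from toposort import toposort_flatten

    # node 3 depends on 1 and 2, which both depend on 0
    graph_dependencies = {1: {0}, 2: {0}, 3: {1, 2}}
    print(toposort_flatten(graph_dependencies))  # [0, 1, 2, 3]
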
diff --git a/src/finn/transformation/infer_data_layouts.py b/src/finn/transformation/infer_data_layouts.py
deleted file mode 100644
index d07162fa0..000000000
--- a/src/finn/transformation/infer_data_layouts.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import finn.custom_op.registry as registry
-import finn.core.data_layout as DataLayout
-from finn.transformation import Transformation
-import warnings
-from finn.util.basic import get_by_name
-
-
-def _dims_to_layout(model, node, ndims):
-    if ndims == 2:
-        return DataLayout.NC
-    else:
-        if node.domain == "finn":
-            if node.op_type == "MultiThreshold" or node.op_type == "QuantAvgPool2d":
-                mt_inst = registry.getCustomOp(node)
-                layout = mt_inst.get_nodeattr("data_layout")
-                if layout == "NHWC" and ndims == 4:
-                    return DataLayout.NHWC
-                elif layout == "NCHW" and ndims == 4:
-                    return DataLayout.NCHW
-                else:
-                    return DataLayout.UNKNOWN
-            else:
-                if ndims == 4:
-                    return DataLayout.NHWC
-                else:
-                    return DataLayout.UNKNOWN
-        else:
-            # propagate input layout to output
-            # TODO this won't work for concat, squeeze/unsqueeze/reshape...
-            return model.get_tensor_layout(node.input[0])
-
-
-def _infer_node_data_layout(model, node):
-    """Infer output data layout annotation(s) for a particular node.
-    Returns True if any changes were made."""
-    old_layouts = list(map(lambda x: model.get_tensor_layout(x), node.output))
-    if node.domain == "finn":
-        # try to guess based on number of output dims
-        for o in node.output:
-            ndims = len(model.get_tensor_shape(o))
-            new_layout = _dims_to_layout(model, node, ndims)
-            model.set_tensor_layout(o, new_layout)
-    else:
-        if node.op_type == "Transpose":
-            # grab input annotation and switch it around using perm
-            perm = get_by_name(node.attribute, "perm").ints
-            inp_layout = model.get_tensor_layout(node.input[0])
-            out_layout = [inp_layout[i] for i in perm]
-            model.set_tensor_layout(node.output[0], out_layout)
-        elif node.op_type == "Unsqueeze":
-            inp_layout = model.get_tensor_layout(node.input[0])
-            # add dummy dimension at the output
-            out_layout = inp_layout + ["x"]
-            model.set_tensor_layout(node.output[0], out_layout)
-        elif node.op_type == "Squeeze":
-            inp_layout = model.get_tensor_layout(node.input[0])
-            assert inp_layout[-1] == "x"
-            # remove dummy dimension
-            out_layout = inp_layout[:-1]
-            model.set_tensor_layout(node.output[0], out_layout)
-        else:
-            # try to guess based on number of output dims
-            for o in node.output:
-                ndims = len(model.get_tensor_shape(o))
-                model.set_tensor_layout(o, _dims_to_layout(model, node, ndims))
-    # compare old and new output layouts to see if anything changed
-    new_layouts = list(map(lambda x: model.get_tensor_layout(x), node.output))
-    graph_modified = new_layouts != old_layouts
-    return graph_modified
-
-
-class InferDataLayouts(Transformation):
-    """Try to infer data layout annotations info for all input/intermediate/output
-    tensors based on inputs and node type."""
-
-    def apply(self, model):
-        graph = model.graph
-        graph_modified = False
-        # first, make sure that the global input has an annotation
-        # this is really hard to do in general, so we do some bad guesswork
-        inp_name = graph.input[0].name
-        if model.get_tensor_layout(inp_name) is None:
-            inp_shape = model.get_tensor_shape(inp_name)
-            if len(inp_shape) == 4:
-                warnings.warn("Assuming 4D input is NCHW")
-                model.set_tensor_layout(inp_name, DataLayout.NCHW)
-                graph_modified = True
-            elif len(inp_shape) == 2:
-                graph_modified = True
-                warnings.warn("Assuming 2D input is NC")
-                model.set_tensor_layout(inp_name, DataLayout.NC)
-            else:
-                raise Exception(
-                    """Unknown number of dims for input, don't know
-                how to annotate"""
-                )
-        for node in graph.node:
-            graph_modified |= _infer_node_data_layout(model, node)
-        return (model, graph_modified)
diff --git a/src/finn/transformation/infer_datatypes.py b/src/finn/transformation/infer_datatypes.py
deleted file mode 100644
index 39b7a787b..000000000
--- a/src/finn/transformation/infer_datatypes.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import finn.custom_op.registry as registry
-from finn.core.datatype import DataType
-from finn.transformation import Transformation
-
-
-def _infer_node_datatype(model, node):
-    """Infer output datatype(s) for a particular node. Returns True if any
-    changes were made."""
-    dt_identity_optypes = ["Reshape", "Transpose"]
-    idtypes = list(map(lambda x: model.get_tensor_datatype(x), node.input))
-    odtypes = list(map(lambda x: model.get_tensor_datatype(x), node.output))
-    op_type = node.op_type
-    if node.domain == "finn":
-        # handle DataType inference for CustomOp
-        try:
-            # lookup op_type in registry of CustomOps
-            inst = registry.custom_op[op_type](node)
-            inst.infer_node_datatype(model)
-        except KeyError:
-            # exception if op_type is not supported
-            raise Exception("Custom op_type %s is currently not supported." % op_type)
-    else:
-        if node.op_type == "Sign":
-            # always produces bipolar outputs
-            model.set_tensor_datatype(node.output[0], DataType.BIPOLAR)
-        elif node.op_type == "MatMul":
-            if len(list(filter(lambda x: x == DataType.FLOAT32, idtypes))) != 0:
-                # node has at least one float input, output is also float
-                model.set_tensor_datatype(node.output[0], DataType.FLOAT32)
-            else:
-                # TODO compute minimum / maximum result to minimize bitwidth
-                # use (u)int32 accumulators for now
-                has_signed_inp = len(list(filter(lambda x: x.signed(), idtypes))) != 0
-                if has_signed_inp:
-                    odtype = DataType.INT32
-                else:
-                    odtype = DataType.UINT32
-                model.set_tensor_datatype(node.output[0], odtype)
-        elif node.op_type in dt_identity_optypes:
-            # set output dtype = input dtype
-            idtype = model.get_tensor_datatype(node.input[0])
-            model.set_tensor_datatype(node.output[0], idtype)
-        else:
-            # unknown, assume node produces float32 outputs
-            for o in node.output:
-                # check if output datatype is already set to a value != FLOAT32
-                odtype = model.get_tensor_datatype(o)
-                if odtype is not None and odtype != DataType.FLOAT32:
-                    # don't change data type
-                    model.set_tensor_datatype(o, odtype)
-                else:
-                    model.set_tensor_datatype(o, DataType.FLOAT32)
-    # compare old and new output dtypes to see if anything changed
-    new_odtypes = list(map(lambda x: model.get_tensor_datatype(x), node.output))
-    graph_modified = new_odtypes != odtypes
-    return graph_modified
-
-
-class InferDataTypes(Transformation):
-    """Infer FINN DataType info for all intermediate/output tensors based on
-    inputs and node type."""
-
-    def apply(self, model):
-        graph = model.graph
-        graph_modified = False
-        for node in graph.node:
-            graph_modified |= _infer_node_datatype(model, node)
-        return (model, graph_modified)
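
The TODO above (choosing a minimal accumulator bitwidth for MatMul instead of
defaulting to 32 bits) could be approached by bounding the worst-case dot
product from the operand ranges. A rough sketch, where the bounds and the
common dimension K are plain numbers rather than FINN API objects:

    import math

    def min_accum_bits(i_min, i_max, w_min, w_max, K):
        """Worst-case bitwidth for a length-K dot product of bounded operands."""
        extremes = [i_min * w_min, i_min * w_max, i_max * w_min, i_max * w_max]
        lo, hi = K * min(extremes), K * max(extremes)
        if lo >= 0:
            # unsigned accumulator covering [0, hi]
            return math.ceil(math.log2(hi + 1))
        # two's complement accumulator covering [lo, hi]
        return 1 + math.ceil(math.log2(max(abs(lo), hi + 1)))

    # e.g. UINT2 inputs (0..3) times INT4 weights (-8..7), K=64:
    print(min_accum_bits(0, 3, -8, 7, 64))  # 12
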
diff --git a/src/finn/transformation/infer_shapes.py b/src/finn/transformation/infer_shapes.py
deleted file mode 100644
index 361ef7f6a..000000000
--- a/src/finn/transformation/infer_shapes.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import onnx.shape_inference as si
-
-import finn.custom_op.registry as registry
-from finn.core.modelwrapper import ModelWrapper
-from finn.transformation import Transformation
-
-
-def _make_shape_compatible_op(node, model):
-    """Return a shape-compatible non-FINN op for a given FINN op. Used for
-    shape inference with custom ops."""
-    assert node.domain == "finn", 'Node domain is not set to "finn".'
-    op_type = node.op_type
-    try:
-        # lookup op_type in registry of CustomOps
-        inst = registry.custom_op[op_type](node)
-        return inst.make_shape_compatible_op(model)
-    except KeyError:
-        # exception if op_type is not supported
-        raise Exception("Custom op_type %s is currently not supported." % op_type)
-
-
-def _hide_finn_ops(model):
-    """Replace any FINN ops by shape-compatible ones, and return a dict that
-    can be used to map the string representations of the new (shape-compatible)
-    ops back to the old ops."""
-    hidden_ops = {}
-    node_ind = 0
-    for node in model.graph.node:
-        node_ind += 1
-        if node.domain == "finn":
-            new_node = _make_shape_compatible_op(node, model)
-            hidden_ops[str(new_node)] = node
-            model.graph.node.insert(node_ind, new_node)
-            model.graph.node.remove(node)
-    return hidden_ops
-
-
-def _restore_finn_ops(model, hidden_ops):
-    """Replace any shape-compatible ops with the FINN ops that originally
-    generated them."""
-    node_ind = 0
-    for node in model.graph.node:
-        node_ind += 1
-        try:
-            old_node = hidden_ops[str(node)]
-            model.graph.node.insert(node_ind, old_node)
-            model.graph.node.remove(node)
-        except KeyError:
-            pass
-
-
-class InferShapes(Transformation):
-    """Ensure every tensor in the model has a specified shape (ValueInfo)."""
-
-    def apply(self, model):
-        # temporarily swap FINN custom ops for shape-compatible standard ops
-        hidden_ops = _hide_finn_ops(model)
-        # call regular ONNX shape inference
-        model = ModelWrapper(si.infer_shapes(model.model))
-        # bring back hidden ops
-        _restore_finn_ops(model, hidden_ops)
-        return (model, False)
diff --git a/src/finn/transformation/insert_topk.py b/src/finn/transformation/insert_topk.py
deleted file mode 100644
index 213d2cedf..000000000
--- a/src/finn/transformation/insert_topk.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-
-from onnx import TensorProto
-from onnx import helper as oh
-
-from finn.transformation import Transformation
-from finn.core.datatype import DataType
-
-
-class InsertTopK(Transformation):
-    """Add TopK node at the network output and replace the graph output with
-    the TopK indices."""
-
-    def __init__(self, k=5, axis=-1, largest=1, sorted=1):
-        super().__init__()
-        self.k = k
-        self.axis = axis
-        self.largest = largest
-        self.sorted = sorted
-
-    def apply(self, model):
-        # get name of output tensor
-        graph_out_name = model.graph.output[0].name
-        # find final node
-        final_node = model.find_producer(graph_out_name)
-        # if a top-select op is already present, do nothing
-        if final_node.op_type == "TopK":
-            return (model, False)
-        else:
-            out_shape = model.get_tensor_shape(graph_out_name)
-            out_dtype = model.get_tensor_datatype(graph_out_name)
-            # adjust shape
-            out_shape[self.axis] = self.k
-            # make new buffer
-            k_tensor = np.array([self.k]).astype(np.int64)
-            k_value = oh.make_tensor_value_info(
-                model.make_new_valueinfo_name(), TensorProto.INT64, [1]
-            )
-            topk_values = oh.make_tensor_value_info(
-                model.make_new_valueinfo_name(), TensorProto.FLOAT, out_shape
-            )
-            topk_indices = oh.make_tensor_value_info(
-                model.make_new_valueinfo_name(), TensorProto.INT64, out_shape
-            )
-            model.graph.value_info.append(k_value)
-            model.set_tensor_datatype(k_value.name, out_dtype)  # TODO set to int64
-            model.graph.value_info.append(topk_values)
-            model.set_tensor_datatype(topk_values.name, out_dtype)
-            # create and append topk node
-            model.set_initializer(k_value.name, k_tensor)
-            topk_node = oh.make_node(
-                "TopK",
-                inputs=[graph_out_name, k_value.name],
-                outputs=[topk_values.name, topk_indices.name],
-                axis=self.axis,
-                largest=self.largest,
-                sorted=self.sorted,
-            )
-            model.graph.node.append(topk_node)
-            # replace the existing output definition with topk indices
-            model.graph.output.insert(0, topk_indices)
-            model.graph.output.pop(1)
-            # set quantization annotation for indices
-            # the minimal output dtype for TopK indices depends on the number
-            # of classes; assume UINT32 is large enough for now (FINN currently
-            # has no DataType.INT64)
-            model.set_tensor_datatype(topk_indices.name, DataType.UINT32)
-            return (model, True)
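-
-
-# Minimal usage sketch (illustrative; "classifier.onnx" is a placeholder
-# path): append a top-1 selection node to a trained classifier.
-if __name__ == "__main__":
-    from finn.core.modelwrapper import ModelWrapper
-
-    model = ModelWrapper("classifier.onnx")
-    model = model.transform(InsertTopK(k=1))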
diff --git a/src/finn/transformation/lower_convs_to_matmul.py b/src/finn/transformation/lower_convs_to_matmul.py
deleted file mode 100644
index e5a1f778d..000000000
--- a/src/finn/transformation/lower_convs_to_matmul.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-from onnx import TensorProto
-from onnx import helper
-
-from finn.transformation import Transformation
-from finn.transformation.infer_shapes import InferShapes
-from finn.util.basic import get_by_name
-
-
-class LowerConvsToMatMul(Transformation):
-    """Replace Conv layers with pairs of Im2Col-MatMul layers, plus Transpose
-    layers to keep the original data layout."""
-
-    def apply(self, model):
-        graph = model.graph
-        node_ind = 0
-        graph_modified = False
-        for n in graph.node:
-            node_ind += 1
-            if n.op_type == "Conv":
-                graph_modified = True
-                cnv_input = n.input[0]
-                cnv_output = n.output[0]
-                idt = model.get_tensor_datatype(cnv_input)
-                odt = model.get_tensor_datatype(cnv_output)
-                # extract conv parameters; only the last entry of each
-                # attribute is used, i.e. square kernels, symmetric padding
-                # and equal strides are assumed
-                k = get_by_name(n.attribute, "kernel_shape").ints[-1]
-                pad = get_by_name(n.attribute, "pads").ints[-1]
-                stride = get_by_name(n.attribute, "strides").ints[-1]
-                group = get_by_name(n.attribute, "group").i
-                weight_name = n.input[1]
-                W_conv = model.get_initializer(weight_name)
-                ifm_ch = model.get_tensor_shape(n.input[0])[1]  # assume NCHW
-                ofm_ch = model.get_tensor_shape(n.output[0])[1]  # assume NCHW
-                ifm_dim = model.get_tensor_shape(n.input[0])[-1]  # assume NCHW
-                ofm_dim = model.get_tensor_shape(n.output[0])[-1]  # assume NCHW
-
-                # if this is a depthwise conv, create a sparse weight matrix
-                # and set the "dw" flag, which is later stored as an attribute
-                # of the Im2Col node
-                dw = False
-                if group == ifm_ch and ofm_ch == ifm_ch:
-                    W_sparse = np.zeros((ofm_ch, ifm_ch, k, k))
-                    for ch in range(ifm_ch):
-                        W_sparse[ch][ch] = W_conv[ch][0]
-                    W_conv = W_sparse.astype(np.float32)
-                    # record the sparsity of the weight matrix via the
-                    # sparsity annotation of the weight tensor
-                    sparsity = {"dw": {"kernel_shape": k}}
-                    model.set_tensor_sparsity(weight_name, sparsity)
-                    # flag the Im2Col node below as belonging to a
-                    # depthwise convolution
-                    dw = True
-
-                # reuse conv weights for new matmul weights
-                # conv weights are [OFM][IFM][k][k]
-                # first convert to [OFM][k][k][IFM] (to remain compatible with
-                # finn-hlslib and how it does im2col/sliding window)
-                W_matmul = W_conv.transpose(0, 2, 3, 1)
-                # reshape into [OFM][k*k*IFM] matrix
-                W_matmul = W_matmul.reshape(ofm_ch, ifm_ch * k * k)
-                # transpose to get ONNX-compatible [k*k*IFM][OFM] matrix
-                W_matmul = W_matmul.T
-                model.set_initializer(weight_name, W_matmul)
-
-                # create new intermediate values
-                inp_trans_out = helper.make_tensor_value_info(
-                    model.make_new_valueinfo_name(),
-                    TensorProto.FLOAT,
-                    (1, ifm_dim, ifm_dim, ifm_ch),  # NHWC
-                )
-                graph.value_info.append(inp_trans_out)
-                inp_trans_out = inp_trans_out.name
-                model.set_tensor_datatype(inp_trans_out, idt)
-
-                need_im2col = True
-                if k == 1 and pad == 0 and stride == 1:
-                    need_im2col = False
-
-                if need_im2col:
-                    im2col_out = helper.make_tensor_value_info(
-                        model.make_new_valueinfo_name(),
-                        TensorProto.FLOAT,
-                        (1, ofm_dim, ofm_dim, ifm_ch * k * k),
-                    )
-                    graph.value_info.append(im2col_out)
-                    im2col_out = im2col_out.name
-                    model.set_tensor_datatype(im2col_out, idt)
-
-                matmul_out = helper.make_tensor_value_info(
-                    model.make_new_valueinfo_name(),
-                    TensorProto.FLOAT,
-                    (1, ofm_dim, ofm_dim, ofm_ch),
-                )
-                graph.value_info.append(matmul_out)
-                matmul_out = matmul_out.name
-                model.set_tensor_datatype(matmul_out, odt)
-
-                # create new nodes
-                # NCHW -> NHWC
-                inp_trans_node = helper.make_node(
-                    "Transpose", [cnv_input], [inp_trans_out], perm=[0, 2, 3, 1]
-                )
-                # lower input tensor
-                matmul_input = inp_trans_out
-                if need_im2col:
-                    matmul_input = im2col_out
-                    im2col_node = helper.make_node(
-                        "Im2Col",
-                        [inp_trans_out],
-                        [im2col_out],
-                        domain="finn",
-                        stride=stride,
-                        kernel_size=k,
-                        pad_amount=pad,
-                        input_shape="(1,{},{},{})".format(ifm_dim, ifm_dim, ifm_ch),
-                        depthwise=dw,
-                    )
-
-                # do matmul
-                matmul_node = helper.make_node(
-                    "MatMul", [matmul_input, weight_name], [matmul_out]
-                )
-                # NHWC -> NCHW
-                out_trans_node = helper.make_node(
-                    "Transpose", [matmul_out], [cnv_output], perm=[0, 3, 1, 2]
-                )
-                # insert nodes where the conv is to preserve topological ordering
-                graph.node.insert(node_ind, inp_trans_node)
-                if need_im2col:
-                    graph.node.insert(node_ind + 1, im2col_node)
-                    graph.node.insert(node_ind + 2, matmul_node)
-                    graph.node.insert(node_ind + 3, out_trans_node)
-                else:
-                    graph.node.insert(node_ind + 1, matmul_node)
-                    graph.node.insert(node_ind + 2, out_trans_node)
-                # remove old nodes
-                graph.node.remove(n)
-        model = model.transform(InferShapes())
-        return (model, graph_modified)
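-
-
-# Pure-NumPy sanity check for the weight lowering above: conv weights of
-# shape [OFM][IFM][k][k] become an ONNX-compatible [k*k*IFM][OFM] MatMul
-# matrix (shapes below are chosen for illustration only).
-if __name__ == "__main__":
-    ofm_ch, ifm_ch, k = 4, 3, 2
-    W_conv = np.random.rand(ofm_ch, ifm_ch, k, k).astype(np.float32)
-    W_matmul = W_conv.transpose(0, 2, 3, 1).reshape(ofm_ch, ifm_ch * k * k).T
-    assert W_matmul.shape == (k * k * ifm_ch, ofm_ch)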
diff --git a/src/finn/transformation/merge_onnx_models.py b/src/finn/transformation/merge_onnx_models.py
deleted file mode 100644
index 2a8491083..000000000
--- a/src/finn/transformation/merge_onnx_models.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import copy
-import warnings
-from onnx import helper
-
-from finn.transformation import Transformation
-from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.infer_shapes import InferShapes
-from finn.transformation.infer_datatypes import InferDataTypes
-from finn.transformation.infer_data_layouts import InferDataLayouts
-from finn.transformation.general import (
-    GiveReadableTensorNames,
-    GiveRandomTensorNames,
-    GiveUniqueNodeNames,
-    GiveUniqueParameterTensors,
-)
-
-
-class MergeONNXModels(Transformation):
-    """Merges two models. The model passed in the transformation will be inserted before
-    the model the transformation is applied on, the resulting model is returned.
-    This transformation will try to connect graph.output[0] of the pre model and
-    graph.input[0] of the post model.
-    If more than one input or output exists, a warning is raised."""
-
-    def __init__(self, pre_model):
-        super().__init__()
-        # use deep copy of model that should be inserted in the beginning of
-        # the other model to ensure that it stays unchanged
-        self.pre_model = copy.deepcopy(pre_model)
-
-    def apply(self, model):
-        graph_modified = False
-        pre_model = self.pre_model
-        post_model = copy.deepcopy(model)
-        # to avoid mix-ups, start by giving all tensors random names
-        pre_model = pre_model.transform(GiveRandomTensorNames())
-        post_model = post_model.transform(GiveRandomTensorNames())
-
-        # check for dynamic outputs of pre model
-        dyn_outp = []
-        for outp in pre_model.graph.output:
-            init_val = pre_model.get_initializer(outp.name)
-            if init_val is None:
-                dyn_outp.append(outp)
-
-        if len(dyn_outp) != 1:
-            warnings.warn(
-                "The pre model has more than one dynamic output! The transformation "
-                "tries to connect the first dynamic output to the first dynamic input "
-                "of the post model."
-            )
-
-        # check for dynamic inputs of post model
-        dyn_inp = []
-        for inp in post_model.graph.input:
-            init_val = post_model.get_initializer(inp.name)
-            if init_val is None:
-                dyn_inp.append(inp)
-
-        if len(dyn_inp) != 1:
-            warnings.warn(
-                "The post model has more than one dynamic input! The transformation "
-                "tries to connect the first dynamic input to the first dynamic output "
-                "of the pre model."
-            )
-
-        # erase all node names to avoid conflict
-        for n in pre_model.graph.node:
-            n.name = ""
-        for n in post_model.graph.node:
-            n.name = ""
-
-        # check if models can be merged
-        output_model_a = dyn_outp[0].name
-        input_model_b = dyn_inp[0].name
-        output_a_shape = pre_model.get_tensor_shape(output_model_a)
-        input_b_shape = post_model.get_tensor_shape(input_model_b)
-        assert (
-            output_a_shape == input_b_shape
-        ), "Models can't be merged! Shapes don't match."
-
-        # connect output of one model to input of the other
-        for n in pre_model.graph.node:
-            if output_model_a == n.output[0]:
-                n.output[0] = input_model_b
-
-        # extract information for new model
-
-        # nodes
-        node_pre = [node for node in pre_model.graph.node]
-        node_post = [node for node in post_model.graph.node]
-        node_new = node_pre + node_post
-
-        # in and output
-        inp = pre_model.graph.input[0]
-        outp = post_model.graph.output[0]
-
-        vi_pre = [x for x in pre_model.graph.value_info]
-        out_pre = [x for x in pre_model.graph.output]
-        qa_pre = [x for x in pre_model.graph.quantization_annotation]
-        init_pre = [x for x in pre_model.graph.initializer]
-
-        vi_post = [x for x in post_model.graph.value_info]
-        qa_post = [x for x in post_model.graph.quantization_annotation]
-        init_post = [x for x in post_model.graph.initializer]
-
-        vi_new = vi_pre + vi_post + out_pre
-        qa_new = qa_pre + qa_post
-        init_new = init_pre + init_post
-
-        # create new graph and model
-        new_graph = helper.make_graph(
-            nodes=node_new,
-            name="fuse-graph",
-            inputs=[inp],
-            outputs=[outp],
-            value_info=vi_new,
-        )
-
-        new_model = helper.make_model(new_graph, producer_name="fuse_model")
-        new_model = ModelWrapper(new_model)
-
-        for i in init_new:
-            new_model.graph.initializer.append(i)
-        for qa in qa_new:
-            new_model.graph.quantization_annotation.append(qa)
-
-        # tidy-up new model
-        model = new_model
-        model = model.transform(InferShapes())
-        model = model.transform(InferDataTypes())
-        model = model.transform(InferDataLayouts())
-        model = model.transform(GiveUniqueNodeNames())
-        model = model.transform(GiveUniqueParameterTensors())
-        model = model.transform(GiveReadableTensorNames())
-
-        return (model, graph_modified)
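-
-
-# Minimal usage sketch (illustrative; both paths are placeholders): prepend
-# a preprocessing model to a network.
-if __name__ == "__main__":
-    pre = ModelWrapper("preproc.onnx")
-    model = ModelWrapper("network.onnx")
-    model = model.transform(MergeONNXModels(pre))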
diff --git a/src/finn/util/__init__.py b/src/finn/util/__init__.py
deleted file mode 100644
index 83c8e8bed..000000000
--- a/src/finn/util/__init__.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/finn/util/basic.py b/src/finn/util/basic.py
deleted file mode 100644
index cc759bebb..000000000
--- a/src/finn/util/basic.py
+++ /dev/null
@@ -1,442 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-import random
-import string
-import subprocess
-import tempfile
-import warnings
-
-import numpy as np
-
-from finn.core.datatype import DataType
-
-# mapping from PYNQ board names to FPGA part names
-pynq_part_map = dict()
-pynq_part_map["Ultra96"] = "xczu3eg-sbva484-1-e"
-pynq_part_map["Pynq-Z1"] = "xc7z020clg400-1"
-pynq_part_map["Pynq-Z2"] = "xc7z020clg400-1"
-pynq_part_map["ZCU102"] = "xczu9eg-ffvb1156-2-e"
-pynq_part_map["ZCU104"] = "xczu7ev-ffvc1156-2-e"
-
-# native AXI HP port width (in bits) for PYNQ boards
-pynq_native_port_width = dict()
-pynq_native_port_width["Pynq-Z1"] = 64
-pynq_native_port_width["Pynq-Z2"] = 64
-pynq_native_port_width["Ultra96"] = 128
-pynq_native_port_width["ZCU102"] = 128
-pynq_native_port_width["ZCU104"] = 128
-
-# Alveo device and platform mappings
-alveo_part_map = dict()
-alveo_part_map["U50"] = "xcu50-fsvh2104-2L-e"
-alveo_part_map["U200"] = "xcu200-fsgd2104-2-e"
-alveo_part_map["U250"] = "xcu250-figd2104-2L-e"
-alveo_part_map["U280"] = "xcu280-fsvh2892-2L-e"
-
-alveo_default_platform = dict()
-alveo_default_platform["U50"] = "xilinx_u50_gen3x16_xdma_201920_3"
-alveo_default_platform["U200"] = "xilinx_u200_xdma_201830_2"
-alveo_default_platform["U250"] = "xilinx_u250_xdma_201830_2"
-alveo_default_platform["U280"] = "xilinx_u280_xdma_201920_3"
-
-
-def get_rtlsim_trace_depth():
-    """Return the trace depth for rtlsim via PyVerilator. Controllable
-    via the RTLSIM_TRACE_DEPTH environment variable. If the env.var. is
-    undefined, the default value of 1 is returned. A trace depth of 1
-    will only show top-level signals and yield smaller .vcd files.
-
-    The following depth values are of interest for whole-network stitched IP
-    rtlsim:
-    - level 1 shows top-level input/output streams
-    - level 2 shows per-layer input/output streams
-    - level 3 shows full per-layer I/O, including FIFO count signals
-    """
-
-    try:
-        return int(os.environ["RTLSIM_TRACE_DEPTH"])
-    except KeyError:
-        return 1
-
-
-def get_remote_vivado():
-    """Return the address of the remote Vivado synthesis server as set by the,
-    REMOTE_VIVADO environment variable, otherwise return None"""
-
-    try:
-        return os.environ["REMOTE_VIVADO"]
-    except KeyError:
-        return None
-
-
-def get_num_default_workers():
-    """Return the number of workers for parallel transformations. Controllable
-    via the NUM_DEFAULT_WORKERS environment variable. If the env.var. is
-    undefined, the default value of 1 is returned.
-    """
-
-    try:
-        return int(os.environ["NUM_DEFAULT_WORKERS"])
-    except KeyError:
-        return 1
-
-
-def get_finn_root():
-    "Return the root directory that FINN is cloned into."
-
-    try:
-        return os.environ["FINN_ROOT"]
-    except KeyError:
-        raise Exception(
-            """Environment variable FINN_ROOT must be set
-        correctly. Please ensure you have launched the Docker container correctly.
-        """
-        )
-
-
-def get_execution_error_thresh():
-    "Return the max error that is allowed for rounding in FINN execution."
-    try:
-        return float(os.environ["ERROR_THRESH"])
-    except KeyError:
-        return 1e-2
-
-
-def get_sanitize_quant_tensors():
-    """Return whether tensors with quantization annotations should be sanitized.
-    Enabled by default, disabling will yield faster ONNX execution but may give
-    incorrect results. Use with caution."""
-    try:
-        return int(os.environ["SANITIZE_QUANT_TENSORS"])
-    except KeyError:
-        # enabled by default
-        return 1
-
-
-def make_build_dir(prefix=""):
-    """Creates a temporary folder with given prefix to be used as a build dir.
-    Use this function instead of tempfile.mkdtemp to ensure any generated files
-    will survive on the host after the FINN Docker container exits."""
-    try:
-        inst_prefix = os.environ["FINN_INST_NAME"] + "/"
-        return tempfile.mkdtemp(prefix=inst_prefix + prefix)
-    except KeyError:
-        raise Exception(
-            """Environment variable FINN_INST_NAME must be set
-        correctly. Please ensure you have launched the Docker container correctly.
-        """
-        )
-
-
-def get_by_name(container, name, name_field="name"):
-    """Return item from container by .name field if it exists, None otherwise.
-    Will throw an Exception if multiple items are found, since this violates the
-    ONNX standard."""
-    names = [getattr(x, name_field) for x in container]
-
-    inds = [i for i, e in enumerate(names) if e == name]
-    if len(inds) > 1:
-        raise Exception("Found multiple get_by_name matches, undefined behavior")
-    elif len(inds) == 0:
-        return None
-    else:
-        ind = inds[0]
-        return container[ind]
-
-
-def remove_by_name(container, name, name_field="name"):
-    """Remove item from container by .name field if it exists."""
-    item = get_by_name(container, name, name_field)
-    if item is not None:
-        container.remove(item)
-
-
-def random_string(stringLength=6):
-    """Randomly generate a string of letters and digits."""
-    lettersAndDigits = string.ascii_letters + string.digits
-    return "".join(random.choice(lettersAndDigits) for i in range(stringLength))
-
-
-def interleave_matrix_outer_dim_from_partitions(matrix, n_partitions):
-    """Interleave the outermost dimension of a matrix from given
-    partitions (n_partitions)."""
-    if type(matrix) != np.ndarray or matrix.dtype != np.float32:
-        # try to convert to a float numpy array (container dtype is float)
-        matrix = np.asarray(matrix, dtype=np.float32)
-    shp = matrix.shape
-    ndim = matrix.ndim
-    # ensure # partitions evenly divide the outermost dimension
-    assert (
-        shp[0] % n_partitions == 0
-    ), """The outermost dimension is not divisable
-    by the number of partitions."""
-    # only tested for matrices
-    assert (
-        ndim == 2
-    ), """The dimension of the matrix is not 2. Currently this function
-    only works for matrices."""
-    # interleave rows between PEs using reshape + transpose
-    matrix_r = matrix.reshape(-1, n_partitions, shp[1]).transpose((1, 0, 2))
-    matrix_r = matrix_r.reshape(n_partitions, -1, shp[1])
-    return matrix_r
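-
-
-# Worked example: interleaving a (4, 2) matrix across 2 partitions places
-# rows [0, 2] in partition 0 and rows [1, 3] in partition 1.
-if __name__ == "__main__":
-    m = np.arange(8, dtype=np.float32).reshape(4, 2)
-    r = interleave_matrix_outer_dim_from_partitions(m, 2)
-    assert r.shape == (2, 2, 2)
-    assert (r[0] == m[[0, 2]]).all() and (r[1] == m[[1, 3]]).all()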
-
-
-def roundup_to_integer_multiple(x, factor):
-    """Round up integer x to the nearest integer multiple of integer factor.
-    Returns x if factor is set to -1. Both x and factor must otherwise be
-    positive."""
-    # ensure integers
-    assert int(x) == x, "The input x is not an integer."
-    assert int(factor) == factor, "The input factor is not an integer."
-    # use -1 to indicate no padding needed
-    if factor == -1:
-        return x
-    # ensure positive values
-    assert factor > 0 and x > 0, "Factor and x are <= 0."
-    if x < factor:
-        return factor
-    else:
-        if x % factor == 0:
-            return x
-        else:
-            return x + (factor - (x % factor))
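-
-
-# Worked examples for the rounding rules above:
-if __name__ == "__main__":
-    assert roundup_to_integer_multiple(10, 4) == 12
-    assert roundup_to_integer_multiple(8, 4) == 8
-    assert roundup_to_integer_multiple(3, 8) == 8
-    assert roundup_to_integer_multiple(10, -1) == 10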
-
-
-def pad_tensor_to_multiple_of(ndarray, pad_to_dims, val=0, distr_pad=False):
-    """Pad each dimension of given NumPy ndarray using val, so that each
-    dimension is a multiple of the respective value in pad_to_dims. -1 means
-    do not pad that particular dimension. If distr_pad is False, all padding
-    will be inserted after the existing values; otherwise it will be split
-    evenly between before and after the existing values, with one extra value
-    inserted after if the padding amount is not divisible by two."""
-    if type(ndarray) != np.ndarray or ndarray.dtype != np.float32:
-        # try to convert to a float numpy array (container dtype is float)
-        ndarray = np.asarray(ndarray, dtype=np.float32)
-    assert ndarray.ndim == len(
-        pad_to_dims
-    ), """The dimensions of the input
-    array don't match the length of the pad_to_dims value."""
-    # compute the desired shape
-    desired = zip(list(ndarray.shape), list(pad_to_dims))
-    desired = map(lambda x: roundup_to_integer_multiple(x[0], x[1]), desired)
-    desired = np.asarray(list(desired), dtype=np.int32)
-    current = np.asarray(ndarray.shape, dtype=np.int32)
-    pad_amt = desired - current
-    # add padding to get to the desired shape
-    if distr_pad:
-        pad_before = (pad_amt // 2).astype(np.int32)
-        pad_after = pad_amt - pad_before
-        pad_amt = list(zip(pad_before, pad_after))
-    else:
-        # all padding is added after the existing values
-        pad_amt = list(map(lambda x: (0, x), pad_amt))
-    ret = np.pad(ndarray, pad_amt, mode="constant", constant_values=val)
-    assert (
-        np.asarray(ret.shape, dtype=np.int32) == desired
-    ).all(), """The
-    calculated output array doesn't match the desired/expected one."""
-    return ret
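-
-
-# Worked example: pad only the second dimension of a (2, 3) array up to a
-# multiple of 4, appending zeros after the existing values.
-if __name__ == "__main__":
-    a = np.ones((2, 3), dtype=np.float32)
-    p = pad_tensor_to_multiple_of(a, [-1, 4])
-    assert p.shape == (2, 4) and (p[:, 3] == 0).all()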
-
-
-def calculate_matvec_accumulator_range(matrix, vec_dt):
-    """Calculate the minimum and maximum possible result (accumulator) values
-    for a dot product x * A, given matrix A of dims (MW, MH), and vector (1, MW)
-    with datatype vec_dt. Returns (acc_min, acc_max).
-    """
-    min_weight = matrix.min()
-    max_weight = matrix.max()
-    receptive_field_elems = matrix.shape[0]
-    min_input = vec_dt.min()
-    max_input = vec_dt.max()
-    # calculate minimum and maximum values of accumulator
-    # assume inputs span the whole range of the input datatype
-    acc_min = receptive_field_elems * min(
-        min_weight * max_input,
-        min_weight * min_input,
-        max_weight * max_input,
-        max_weight * min_input,
-    )
-    acc_max = receptive_field_elems * max(
-        min_weight * max_input,
-        min_weight * min_input,
-        max_weight * max_input,
-        max_weight * min_input,
-    )
-    return (acc_min, acc_max)
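-
-
-# Worked example: a 2x2 all-ones matrix with INT2 inputs (range [-2, 1])
-# yields accumulator bounds of (-4, 2) per output element.
-if __name__ == "__main__":
-    A = np.ones((2, 2), dtype=np.float32)
-    assert calculate_matvec_accumulator_range(A, DataType.INT2) == (-4.0, 2.0)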
-
-
-def gen_finn_dt_tensor(finn_dt, tensor_shape):
-    """Generates random tensor in given shape and with given FINN DataType."""
-    if type(tensor_shape) == list:
-        tensor_shape = tuple(tensor_shape)
-    if finn_dt == DataType.BIPOLAR:
-        tensor_values = np.random.randint(2, size=tensor_shape)
-        tensor_values = 2 * tensor_values - 1
-    elif finn_dt == DataType.BINARY:
-        tensor_values = np.random.randint(2, size=tensor_shape)
-    elif "INT" in finn_dt.name or finn_dt == DataType.TERNARY:
-        tensor_values = np.random.randint(
-            finn_dt.min(), high=finn_dt.max() + 1, size=tensor_shape
-        )
-    else:
-        raise ValueError(
-            "Datatype {} is not supported, no tensor could be generated".format(finn_dt)
-        )
-    # always use float type as container
-    return tensor_values.astype(np.float32)
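-
-
-# Worked example: draw a random INT4 tensor; values live in [-8, 7] but are
-# carried in a float32 container, as everywhere else in FINN.
-if __name__ == "__main__":
-    t = gen_finn_dt_tensor(DataType.INT4, (2, 3))
-    assert t.shape == (2, 3) and t.dtype == np.float32
-    assert (t >= DataType.INT4.min()).all() and (t <= DataType.INT4.max()).all()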
-
-
-def calculate_signed_dot_prod_range(dt_a, dt_b, len):
-    """Returns the (min,max) values a dot product between two signed vectors of
-    types dt_a and dt_b of len elements can take."""
-    assert (
-        dt_a.signed() and dt_b.signed()
-    ), """The input values are not both
-    signed vectors."""
-    # enumerate all extreme-value combinations instead of tracking a running
-    # min/max against arbitrary sentinels, which would break for very wide types
-    prods = [
-        a_val * b_val * len
-        for a_val in [dt_a.min(), dt_a.max()]
-        for b_val in [dt_b.min(), dt_b.max()]
-    ]
-    return (min(prods), max(prods))
-
-
-def sanitize_quant_values(model, node_tensors, execution_context, check_values=False):
-    """ Sanitize given list of tensors in execution_context by rounding values
-    that are supposed to be integers (as indicated by their quantization
-    annotation). Will raise an assertion if the amount of rounding is too large.
-    Returns the sanitized execution context.
-
-    If check_values is specified, an extra DataType.allowed() check will be
-    performed on any rounded tensors.
-
-    Background:
-    FINN uses floating point tensors as a carrier data type to represent
-    integers. Floating point arithmetic can introduce rounding errors, e.g.
-    (int_num * float_scale) / float_scale is not always equal to int_num.
-    We use this function to ensure that the values that are supposed to be
-    integers are indeed integers.
-    """
-
-    for tensor in node_tensors:
-        dtype = model.get_tensor_datatype(tensor)
-        # floats don't need sanitization; skip to the next tensor
-        # to avoid unnecessary runtime overhead
-        if dtype == DataType.FLOAT32:
-            continue
-        current_values = execution_context[tensor]
-        updated_values = current_values
-        has_to_be_rounded = False
-        # TODO: vectorize with numpy
-        for value in np.nditer(current_values):
-            if not dtype.allowed(value):
-                has_to_be_rounded = True
-                break
-        if has_to_be_rounded:
-            updated_values = np.round(current_values)
-            warnings.warn(
-                "The values of tensor {} can't be represented "
-                "with the set FINN datatype ({}), they will be rounded to match the "
-                "FINN datatype.".format(tensor, dtype)
-            )
-        # check if rounded values are not too far from original values
-        max_error = max(np.abs(current_values - updated_values).flatten())
-        if max_error <= get_execution_error_thresh():
-            if check_values is True:
-                # check again if values can now be represented with set finn datatype
-                # TODO: vectorize with numpy
-                for value in np.nditer(updated_values):
-                    if not dtype.allowed(value):
-                        raise Exception(
-                            """Values can't be represented with set
-                                finn datatype ({}) for input {}""".format(
-                                dtype, tensor
-                            )
-                        )
-            execution_context[tensor] = updated_values
-        else:
-            raise Exception(
-                """Rounding error is too high to match set FINN
-            datatype ({}) for input {}""".format(
-                    dtype, tensor
-                )
-            )
-    return execution_context
-
-
-class CppBuilder:
-    """Builds the g++ compiler command to produces the executable of the c++ code
-    in code_gen_dir which is passed to the function build() of this class."""
-
-    def __init__(self):
-        self.include_paths = []
-        self.cpp_files = []
-        self.executable_path = ""
-        self.code_gen_dir = ""
-        self.compile_components = []
-        self.compile_script = ""
-
-    def append_includes(self, library_path):
-        """Adds given library path to include_paths list."""
-        self.include_paths.append(library_path)
-
-    def append_sources(self, cpp_file):
-        """Adds given c++ file to cpp_files list."""
-        self.cpp_files.append(cpp_file)
-
-    def set_executable_path(self, path):
-        """Sets member variable "executable_path" to given path."""
-        self.executable_path = path
-
-    def build(self, code_gen_dir):
-        """Builds the g++ compiler command according to entries in include_paths
-        and cpp_files lists. Saves it in bash script in given folder and
-        executes it."""
-        # assemble the g++ command from collected sources and include paths
-        self.code_gen_dir = code_gen_dir
-        self.compile_components.append("g++ -o " + str(self.executable_path))
-        for cpp_file in self.cpp_files:
-            self.compile_components.append(cpp_file)
-        for lib in self.include_paths:
-            self.compile_components.append(lib)
-        bash_compile = ""
-        for component in self.compile_components:
-            bash_compile += str(component) + " "
-        self.compile_script = str(self.code_gen_dir) + "/compile.sh"
-        with open(self.compile_script, "w") as f:
-            f.write("#!/bin/bash \n")
-            f.write(bash_compile + "\n")
-        bash_command = ["bash", self.compile_script]
-        process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
-        process_compile.communicate()
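-
-
-# Minimal usage sketch (illustrative; assumes g++ is on the PATH and the
-# paths below are placeholders):
-#
-#   builder = CppBuilder()
-#   builder.append_sources("/tmp/codegen/top.cpp")
-#   builder.append_includes("-I/tmp/codegen/include")
-#   builder.set_executable_path("/tmp/codegen/node_model")
-#   builder.build("/tmp/codegen")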
diff --git a/src/finn/util/create.py b/src/finn/util/create.py
deleted file mode 100644
index 853cdd0d4..000000000
--- a/src/finn/util/create.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-from finn.core.modelwrapper import ModelWrapper
-from onnx import TensorProto, helper
-from finn.core.datatype import DataType
-from finn.util.basic import calculate_signed_dot_prod_range, gen_finn_dt_tensor
-
-
-def hls_random_mlp_maker(layer_spec):
-    """Create an MLP of given specification using HLSCustomOp instances.
-    Generate random weights/thresholds of appropriate size."""
-    ret = []
-    for l in layer_spec:
-        idt = l["idt"]
-        wdt = l["wdt"]
-        mw = l["mw"]
-        mh = l["mh"]
-        act = l["act"]
-        l["W"] = gen_finn_dt_tensor(wdt, (mw, mh))
-        if act is None:
-            # no activation, produce accumulators
-            T = None
-            tdt = None
-            if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
-                odt = DataType.UINT32
-            else:
-                odt = DataType.INT32
-        else:
-            odt = act
-            (acc_min, acc_max) = calculate_signed_dot_prod_range(idt, wdt, mw)
-            n_steps = act.get_num_possible_values() - 1
-            T = np.random.randint(acc_min, acc_max - 1, (mh, n_steps)).astype(
-                np.float32
-            )
-            # provide non-decreasing thresholds
-            T = np.sort(T, axis=1)
-            # generate thresholds for activation
-            if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
-                tdt = DataType.UINT32
-                # bias thresholds to be positive
-                T = np.ceil((T + mw) / 2)
-                assert (T >= 0).all()
-            else:
-                tdt = DataType.INT32
-        l["T"] = T
-        l["tdt"] = tdt
-        l["odt"] = odt
-        ret.append(l)
-
-    return hls_mlp_maker(ret)
-
-
-def hls_mlp_maker(layer_spec):
-    """Create an MLP of given specification using HLSCustomOp instances."""
-
-    current_in_name = ""
-    current_out_name = ""
-    i = 0
-
-    graph = helper.make_graph(nodes=[], name="mlp", inputs=[], outputs=[])
-
-    model = helper.make_model(graph, producer_name="finn")
-    model = ModelWrapper(model)
-
-    for l in layer_spec:
-        current_W_name = "W_%d" % i
-        current_T_name = "T_%d" % i
-        current_in_name = "act_%d" % i
-        current_out_name = "act_%d" % (i + 1)
-
-        W = l["W"]
-        (mw, mh) = W.shape
-        T = l["T"]
-        pe = l["pe"]
-        simd = l["simd"]
-        wdt = l["wdt"]
-        idt = l["idt"]
-        tdt = l["tdt"]
-        odt = l["odt"]
-
-        if i == 0:
-            global_in = helper.make_tensor_value_info(
-                current_in_name, TensorProto.FLOAT, [1, mw]
-            )
-            model.graph.input.append(global_in)
-
-        if i == len(layer_spec) - 1:
-            global_out = helper.make_tensor_value_info(
-                current_out_name, TensorProto.FLOAT, [1, mh]
-            )
-            model.graph.output.append(global_out)
-
-        # there are two ways to implement bipolar weights and inputs for
-        # StreamingFC:
-        # - specify their datatypes as such
-        # - specify their datatypes as BINARY and use binaryXnorMode
-        if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
-            # we'll internally convert weights/inputs to binary and specify the
-            # datatypes as such, and also set the binaryXnorMode attribute to 1
-            export_wdt = DataType.BINARY
-            export_idt = DataType.BINARY
-            binary_xnor_mode = 1
-        else:
-            export_wdt = wdt
-            export_idt = idt
-            binary_xnor_mode = 0
-
-        if T is not None:
-            no_act = 0
-            node_inp_list = [current_in_name, current_W_name, current_T_name]
-            if odt == DataType.BIPOLAR:
-                actval = 0
-            else:
-                actval = odt.min()
-        else:
-            # no thresholds
-            node_inp_list = [current_in_name, current_W_name]
-            actval = 0
-            no_act = 1
-        FCLayer_node = helper.make_node(
-            "StreamingFCLayer_Batch",
-            node_inp_list,
-            [current_out_name],
-            domain="finn",
-            backend="fpgadataflow",
-            resType="ap_resource_lut()",
-            MW=mw,
-            MH=mh,
-            SIMD=simd,
-            PE=pe,
-            inputDataType=export_idt.name,
-            weightDataType=export_wdt.name,
-            outputDataType=odt.name,
-            ActVal=actval,
-            binaryXnorMode=binary_xnor_mode,
-            noActivation=no_act,
-        )
-
-        model.graph.node.append(FCLayer_node)
-        model.set_tensor_datatype(current_in_name, idt)
-        model.set_tensor_datatype(current_out_name, odt)
-        model.set_tensor_datatype(current_W_name, wdt)
-        if binary_xnor_mode:
-            # convert bipolar to binary
-            model.set_initializer(current_W_name, (W + 1) / 2)
-        else:
-            model.set_initializer(current_W_name, W)
-        if T is not None:
-            model.set_tensor_datatype(current_T_name, tdt)
-            model.set_initializer(current_T_name, T)
-        i += 1
-
-    return model
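-
-
-# Minimal usage sketch: a single-layer specification as consumed by
-# hls_random_mlp_maker (all values chosen for illustration only).
-if __name__ == "__main__":
-    layer_spec = [
-        {
-            "mw": 8,
-            "mh": 8,
-            "simd": 4,
-            "pe": 4,
-            "idt": DataType.INT2,
-            "wdt": DataType.INT2,
-            "act": DataType.INT2,
-        }
-    ]
-    model = hls_random_mlp_maker(layer_spec)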
diff --git a/src/finn/util/data_packing.py b/src/finn/util/data_packing.py
deleted file mode 100644
index a087fd2ff..000000000
--- a/src/finn/util/data_packing.py
+++ /dev/null
@@ -1,397 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import binascii
-import os
-import sys
-
-import numpy as np
-from bitstring import BitArray
-
-from finn.core.datatype import DataType
-from finn.util.basic import roundup_to_integer_multiple
-
-
-def array2hexstring(array, dtype, pad_to_nbits, prefix="0x", reverse=False):
-    """
-    Pack given one-dimensional NumPy array with FINN DataType dtype into a hex
-    string.
-    Any BIPOLAR values will be converted to a single bit with a 0 representing
-    -1.
-    pad_to_nbits is used to prepend leading zeros to ensure packed strings of
-    fixed width. The minimum value for pad_to_nbits is 4, since a single hex
-    digit is four bits. reverse can be used to reverse the array prior to
-    packing.
-
-    Examples:
-
-    array2hexstring([1, 1, 1, 0], DataType.BINARY, 4) = "0xe"
-
-    array2hexstring([1, 1, 1, 0], DataType.BINARY, 8) = "0x0e"
-
-    array2hexstring([1, 1, 0, 1], DataType.BINARY, 4, reverse=True) = "0xb"
-
-    array2hexstring([1, 1, 1, 0], DataType.BINARY, 8, reverse=True) = "0x07"
-    """
-    if pad_to_nbits < 4:
-        pad_to_nbits = 4
-    # ensure input is a numpy array with float values
-    if type(array) != np.ndarray or array.dtype != np.float32:
-        # try to convert to a float numpy array (container dtype is float)
-        array = np.asarray(array, dtype=np.float32)
-    # ensure one-dimensional array to pack
-    assert array.ndim == 1, "The given array is not one-dimensional."
-    if dtype == DataType.BIPOLAR:
-        # convert bipolar values to binary
-        array = (array + 1) / 2
-        dtype = DataType.BINARY
-    # reverse prior to packing, if desired
-    if reverse:
-        array = np.flip(array, -1)
-    lineval = BitArray(length=0)
-    bw = dtype.bitwidth()
-    for val in array:
-        # ensure that this value is permitted by chosen dtype
-        assert dtype.allowed(val), "This value is not permitted by chosen dtype."
-        if dtype.is_integer():
-            if dtype.signed():
-                lineval.append(BitArray(int=int(val), length=bw))
-            else:
-                lineval.append(BitArray(uint=int(val), length=bw))
-        else:
-            lineval.append(BitArray(float=val, length=bw))
-    if pad_to_nbits >= lineval.len:
-        # extend to the desired output width (a minimum of 4 bits)
-        lineval.prepend(BitArray(length=pad_to_nbits - lineval.len))
-    else:
-        raise Exception("Number of bits is greater than pad_to_nbits")
-    # represent as hex
-    return prefix + lineval.hex
-
-
-def hexstring2npbytearray(hexstring, remove_prefix="0x"):
-    """Convert a hex string into a NumPy array of dtype uint8.
-
-    Example:
-
-    hexstring2npbytearray("0f01") = array([15,  1], dtype=uint8)
-    """
-    # remove prefix if found
-    if hexstring.startswith(remove_prefix):
-        lrp = len(remove_prefix)
-        hexstring = hexstring[lrp:]
-    # use Python's built-in bytearray
-    return np.asarray(bytearray.fromhex(hexstring), dtype=np.uint8)
-
-
-def npbytearray2hexstring(npbytearray, prefix="0x"):
-    """Convert a NumPy array of uint8 dtype into a hex string.
-
-    Example:
-
-    npbytearray2hexstring(array([15,  1], dtype=uint8)) = "0x0f01"
-    """
-    return prefix + binascii.hexlify(bytearray(npbytearray)).decode("utf-8")
-
-
-def pack_innermost_dim_as_hex_string(
-    ndarray, dtype, pad_to_nbits, reverse_inner=False, prefix="0x"
-):
-    """Pack the innermost dimension of the given numpy ndarray into hex
-    strings using array2hexstring.
-
-    Examples:
-
-    A = [[1, 1, 1, 0], [0, 1, 1, 0]]
-
-    eA = ["0e", "06"]
-
-    pack_innermost_dim_as_hex_string(A, DataType.BINARY, 8) == eA
-
-    B = [[[3, 3], [3, 3]], [[1, 3], [3, 1]]]
-
-    eB = [[ "0f", "0f"], ["07", "0d"]]
-
-    pack_innermost_dim_as_hex_string(B, DataType.UINT2, 8) == eB
-    """
-
-    if type(ndarray) != np.ndarray or ndarray.dtype != np.float32:
-        # try to convert to a float numpy array (container dtype is float)
-        ndarray = np.asarray(ndarray, dtype=np.float32)
-
-    def fun(x):
-        return array2hexstring(
-            x, dtype, pad_to_nbits, reverse=reverse_inner, prefix=prefix
-        )
-
-    return np.apply_along_axis(fun, ndarray.ndim - 1, ndarray)
-
-
-def unpack_innermost_dim_from_hex_string(
-    ndarray, dtype, out_shape, packedBits, reverse_inner=False
-):
-    """Convert a NumPy array of hex strings into a FINN NumPy array by unpacking
-    the hex strings into the specified data type. out_shape can be specified
-    such that any padding in the packing dimension is removed. If reverse_inner
-    is set, the innermost unpacked dimension will be reversed."""
-
-    if type(ndarray) != np.ndarray:
-        raise Exception(
-            """unpack_innermost_dim_from_hex_string needs ndarray
-        as input"""
-        )
-    if ndarray.dtype.kind not in {"U", "S"}:
-        raise Exception(
-            """unpack_innermost_dim_from_hex_string needs ndarray of
-        hex strings as input"""
-        )
-    # convert ndarray into flattened list
-    data = ndarray.flatten().tolist()
-    targetBits = dtype.bitwidth()
-    # calculate outer and inner dim shapes
-    outer_dim_elems = 1
-    for dim in range(len(out_shape) - 1):
-        outer_dim_elems = outer_dim_elems * out_shape[dim]
-    inner_dim_elems = out_shape[-1]
-
-    array = []
-    for outer_elem in range(outer_dim_elems):
-        ar_list = []
-        ar_elem = data[0]
-        data.pop(0)
-        ar_elem = ar_elem.split("x")
-        ar_elem_bin = bin(int(ar_elem[1], 16))[2:].zfill(packedBits)
-        ar_elem_bin = [int(x) for x in ar_elem_bin]
-
-        ar_elem_bin.reverse()
-        for i in range(inner_dim_elems):
-            upper_limit = (i + 1) * targetBits
-            lower_limit = i * targetBits
-            elem = ar_elem_bin[lower_limit:upper_limit]
-            elem.reverse()
-            elem_str = "".join(map(str, elem))
-            ar_list.append(int(elem_str, 2))
-        # reverse inner dimension back to "normal" positions
-        if reverse_inner is False:
-            ar_list.reverse()
-
-        # interpret output values correctly
-
-        # interpret values as bipolar
-        if dtype == DataType.BIPOLAR:
-            ar_list = [2 * x - 1 for x in ar_list]
-        # interpret values as signed values
-        elif dtype.name.startswith("INT"):
-            mask = 2 ** (dtype.bitwidth() - 1)
-            ar_list = [-(x & mask) + (x & ~mask) for x in ar_list]
-
-        array.append(ar_list)
-    array = np.asarray(array, dtype=np.float32).reshape(out_shape)
-    return array
-
-
-def numpy_to_hls_code(
-    ndarray, dtype, hls_var_name, pack_innermost_dim=True, no_decl=False
-):
-    """Return C++ code representation of a numpy ndarray with FINN DataType
-    dtype, using hls_var_name as the resulting C++ variable name. If
-    pack_innermost_dim is specified, the innermost dimension of the ndarray
-    will be packed into a hex string using array2hexstring. If no_decl is
-    set to True, no variable name and type will be generated as part of the
-    emitted string.
-    """
-    hls_dtype = dtype.get_hls_datatype_str()
-    if type(ndarray) != np.ndarray or ndarray.dtype != np.float32:
-        # try to convert to a float numpy array (container dtype is float)
-        ndarray = np.asarray(ndarray, dtype=np.float32)
-    if pack_innermost_dim:
-        idimlen = ndarray.shape[-1]
-        idimbits = idimlen * dtype.bitwidth()
-        idimbits = roundup_to_integer_multiple(idimbits, 4)
-        ndarray = pack_innermost_dim_as_hex_string(ndarray, dtype, idimbits)
-        hls_dtype = "ap_uint<%d>" % idimbits
-    ndims = ndarray.ndim
-    # add type string and variable name
-    # e.g. "const ap_uint<64>" "weightMem0"
-    ret = "%s %s" % (hls_dtype, hls_var_name)
-    # add dimensions
-    for d in range(ndims):
-        ret += "[%d]" % ndarray.shape[d]
-    orig_printops = np.get_printoptions()
-    np.set_printoptions(threshold=sys.maxsize)
-
-    # define a function to convert a single element into a C++ init string
-    # a single element can be a hex string if we are using packing
-    def elem2str(x):
-        if isinstance(x, (str, np.str_)):
-            return '%s("%s", 16)' % (hls_dtype, x)
-        elif type(x) == np.float32:
-            if dtype == DataType.FLOAT32:
-                return str(x)
-            else:
-                return str(int(x))
-        else:
-            raise Exception("Unsupported type for numpy_to_hls_code")
-
-    strarr = np.array2string(ndarray, separator=", ", formatter={"all": elem2str})
-    np.set_printoptions(**orig_printops)
-    strarr = strarr.replace("[", "{").replace("]", "}")
-    if no_decl:
-        ret = strarr + ";"
-    else:
-        ret = ret + " = \n" + strarr + ";"
-    return ret
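-
-
-# Worked example: emit a C++ initializer for a (2, 4) BINARY matrix, with
-# each row packed into one ap_uint<4> hex literal.
-if __name__ == "__main__":
-    A = np.asarray([[1, 1, 1, 0], [0, 1, 1, 0]], dtype=np.float32)
-    print(numpy_to_hls_code(A, DataType.BINARY, "weights"))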
-
-
-def npy_to_rtlsim_input(input_file, input_dtype, pad_to_nbits, reverse_inner=True):
-    """Convert the multidimensional NumPy array of integers (stored as floats)
-    from input_file into a flattened sequence of Python arbitrary-precision
-    integers, packing the innermost dimension. See
-    finn.util.data_packing.pack_innermost_dim_as_hex_string() for more info
-    on how the packing works. If reverse_inner is set, the innermost dimension
-    will be reversed prior to packing."""
-    pad_to_nbits = roundup_to_integer_multiple(pad_to_nbits, 4)
-    if issubclass(type(input_file), np.ndarray):
-        inp = input_file
-    elif os.path.isfile(input_file):
-        inp = np.load(input_file)
-    else:
-        raise Exception("input_file must be ndarray or filename for .npy")
-    packed_data = pack_innermost_dim_as_hex_string(
-        inp, input_dtype, pad_to_nbits, reverse_inner=reverse_inner
-    )
-    packed_data = packed_data.flatten()
-    packed_data = [int(x[2:], 16) for x in packed_data]
-    return packed_data
-
-
-def rtlsim_output_to_npy(
-    output, path, dtype, shape, packedBits, targetBits, reverse_inner=True
-):
-    """Convert a flattened sequence of Python arbitrary-precision integers
-    output into a NumPy array, saved as npy file at path. Each arbitrary-precision
-    integer is assumed to be a packed array of targetBits-bit elements, which
-    will be unpacked as the innermost dimension of the NumPy array. If path is
-    not None it will also be saved as a npy file."""
-
-    # TODO should have its own testbench?
-    output = np.asarray([hex(int(x)) for x in output])
-    out_array = unpack_innermost_dim_from_hex_string(
-        output, dtype, shape, packedBits=packedBits, reverse_inner=reverse_inner
-    )
-    # make copy before saving the array
-    out_array = out_array.copy()
-    if path is not None:
-        np.save(path, out_array)
-    return out_array
-
-
-def finnpy_to_packed_bytearray(
-    ndarray, dtype, reverse_inner=False, reverse_endian=False
-):
-    """Given a numpy ndarray with FINN DataType dtype, pack the innermost
-    dimension and return the packed representation as an ndarray of uint8.
-    The packed innermost dimension will be padded to the nearest multiple
-    of 8 bits. The returned ndarray has the same number of dimensions as the
-    input.
-    """
-
-    if (not issubclass(type(ndarray), np.ndarray)) or ndarray.dtype != np.float32:
-        # try to convert to a float numpy array (container dtype is float)
-        ndarray = np.asarray(ndarray, dtype=np.float32)
-    # pack innermost dim to hex strings padded to 8 bits
-    bits = dtype.bitwidth() * ndarray.shape[-1]
-    bits_padded = roundup_to_integer_multiple(bits, 8)
-    packed_hexstring = pack_innermost_dim_as_hex_string(
-        ndarray, dtype, bits_padded, reverse_inner=reverse_inner
-    )
-
-    def fn(x):
-        return np.asarray(list(map(hexstring2npbytearray, x)))
-
-    if packed_hexstring.ndim == 0:
-        # scalar, call hexstring2npbytearray directly
-        ret = hexstring2npbytearray(np.asscalar(packed_hexstring))
-    else:
-        # convert ndarray of hex strings to byte array
-        ret = np.apply_along_axis(fn, packed_hexstring.ndim - 1, packed_hexstring)
-    if reverse_endian:
-        # reverse the endianness of packing dimension
-        ret = np.flip(ret, axis=-1)
-    return ret
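-
-# Packing sketch (illustrative): eight BINARY elements fit exactly into one
-# byte, so a (1, 8) binary tensor packs to a (1, 1) uint8 array:
-#
-#     a = np.asarray([[1, 0, 1, 1, 0, 0, 1, 0]], dtype=np.float32)
-#     packed = finnpy_to_packed_bytearray(a, DataType.BINARY)
-#     # packed.shape == (1, 1) and packed.dtype == np.uint8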
-
-
-def packed_bytearray_to_finnpy(
-    packed_bytearray,
-    dtype,
-    output_shape=None,
-    reverse_inner=False,
-    reverse_endian=False,
-):
-    """Given a packed numpy uint8 ndarray, unpack it into a FINN array of
-    given DataType.
-
-    output_shape can be specified to remove padding from the
-    packed dimension, or set to None to be inferred from the input."""
-
-    if (
-        not issubclass(type(packed_bytearray), np.ndarray)
-    ) or packed_bytearray.dtype != np.uint8:
-        raise Exception("packed_bytearray_to_finnpy needs NumPy uint8 arrays")
-    if packed_bytearray.ndim == 0:
-        raise Exception("packed_bytearray_to_finnpy expects at least 1D ndarray")
-    packed_dim = packed_bytearray.ndim - 1
-    packed_bits = packed_bytearray.shape[packed_dim] * 8
-    target_bits = dtype.bitwidth()
-    if output_shape is None:
-        # determine output shape from input shape
-        assert (
-            packed_bits % target_bits == 0
-        ), "packed_bits is not divisible by target_bits."
-        n_target_elems = packed_bits // target_bits
-        output_shape = packed_bytearray.shape[:-1] + (n_target_elems,)
-    # if reverse_endian and target_bits > 8:
-    #     # reverse the endianness of each element
-    #     orig_shape = packed_bytearray.shape
-    #     assert target_bits % 8 == 0, "target_bits are not a multiple of 8."
-    #     target_bytes = target_bits // 8
-    #     new_shape = orig_shape[:-1] + (-1, target_bytes)
-    #     packed_bytearray = np.flip(packed_bytearray.reshape(new_shape), axis=-1)
-    #     packed_bytearray = packed_bytearray.reshape(orig_shape)
-    if reverse_endian:
-        packed_bytearray = np.flip(packed_bytearray, axis=-1)
-    # convert innermost dim of byte array to hex strings
-    packed_hexstring = np.apply_along_axis(
-        npbytearray2hexstring, packed_dim, packed_bytearray
-    )
-    ret = unpack_innermost_dim_from_hex_string(
-        packed_hexstring, dtype, output_shape, packed_bits, reverse_inner
-    )
-
-    return ret
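-
-# Round-trip sketch (illustrative): packed_bytearray_to_finnpy inverts
-# finnpy_to_packed_bytearray when dtype and output_shape match:
-#
-#     a = np.asarray([[1, 0, 1, 1, 0, 0, 1, 0]], dtype=np.float32)
-#     packed = finnpy_to_packed_bytearray(a, DataType.BINARY)
-#     unpacked = packed_bytearray_to_finnpy(packed, DataType.BINARY, (1, 8))
-#     # (unpacked == a).all() should hold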
diff --git a/src/finn/util/fpgadataflow.py b/src/finn/util/fpgadataflow.py
deleted file mode 100644
index 3fe747a84..000000000
--- a/src/finn/util/fpgadataflow.py
+++ /dev/null
@@ -1,218 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-import subprocess
-
-try:
-    from pyverilator import PyVerilator
-except ModuleNotFoundError:
-    PyVerilator = None
-from finn.util.basic import get_by_name, make_build_dir, get_rtlsim_trace_depth
-
-
-class IPGenBuilder:
-    """Builds the bash script to generate IP blocks using Vivado HLS."""
-
-    def __init__(self):
-        self.tcl_script = ""
-        self.ipgen_path = ""
-        self.code_gen_dir = ""
-        self.ipgen_script = ""
-
-    def append_tcl(self, tcl_script):
-        """Sets member variable "tcl_script" to given tcl script."""
-        self.tcl_script = tcl_script
-
-    def set_ipgen_path(self, path):
-        """Sets member variable ipgen_path to given path."""
-        self.ipgen_path = path
-
-    def build(self, code_gen_dir):
-        """Builds the bash script with given parameters and saves it in given folder.
-        To guarantee the generation in the correct folder the bash script contains a
-        cd command."""
-        self.code_gen_dir = code_gen_dir
-        self.ipgen_script = str(self.code_gen_dir) + "/ipgen.sh"
-        working_dir = os.environ["PWD"]
-        f = open(self.ipgen_script, "w")
-        f.write("#!/bin/bash \n")
-        f.write("cd {}\n".format(code_gen_dir))
-        f.write("vivado_hls {}\n".format(self.tcl_script))
-        f.write("cd {}\n".format(working_dir))
-        f.close()
-        bash_command = ["bash", self.ipgen_script]
-        process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
-        process_compile.communicate()
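-
-# Usage sketch (paths hypothetical): set the tcl script first, then call
-# build() with the code generation directory:
-#
-#     builder = IPGenBuilder()
-#     builder.append_tcl("/path/to/hls_syn.tcl")
-#     builder.set_ipgen_path("/path/to/ip_output")
-#     builder.build("/path/to/codegen_dir")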
-
-
-def pyverilate_stitched_ip(model):
-    "Given a model with stitched IP, return a PyVerilator sim object."
-    if PyVerilator is None:
-        raise ImportError("Installation of PyVerilator is required.")
-
-    vivado_stitch_proj_dir = model.get_metadata_prop("vivado_stitch_proj")
-    with open(vivado_stitch_proj_dir + "/all_verilog_srcs.txt", "r") as f:
-        all_verilog_srcs = f.read().split()
-
-    def file_to_dir(x):
-        return os.path.dirname(os.path.realpath(x))
-
-    def file_to_basename(x):
-        return os.path.basename(os.path.realpath(x))
-
-    all_verilog_dirs = list(map(file_to_dir, all_verilog_srcs))
-    all_verilog_files = list(
-        set(
-            filter(
-                lambda x: x.endswith(".v"),
-                list(map(file_to_basename, all_verilog_srcs)),
-            )
-        )
-    )
-    top_module_name = model.get_metadata_prop("wrapper_filename")
-    # use suffix removal rather than str.strip(".v"), which strips characters
-    # and would mangle names that begin or end with "v" or "."
-    top_module_name = file_to_basename(top_module_name)
-    if top_module_name.endswith(".v"):
-        top_module_name = top_module_name[: -len(".v")]
-    build_dir = make_build_dir("pyverilator_ipstitched_")
-    sim = PyVerilator.build(
-        all_verilog_files,
-        verilog_path=all_verilog_dirs,
-        build_dir=build_dir,
-        trace_depth=get_rtlsim_trace_depth(),
-        top_module_name=top_module_name,
-        auto_eval=False,
-    )
-    return sim
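-
-# Usage sketch (illustrative): requires a model whose "vivado_stitch_proj"
-# metadata property points at a completed IP stitching project:
-#
-#     sim = pyverilate_stitched_ip(model)
-#     # sim can then be driven, e.g. via rtlsim_multi_io below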
-
-
-def pyverilate_get_liveness_threshold_cycles():
-    """Return the number of no-output cycles rtlsim will wait before assuming
-    the simulation is not finishing and throwing an exception."""
-
-    return int(os.getenv("LIVENESS_THRESHOLD", 10000))
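-
-# For long-latency designs the 10000-cycle default may be too low; it can be
-# raised via the environment (shell usage, illustrative):
-#
-#     export LIVENESS_THRESHOLD=100000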
-
-
-def is_fpgadataflow_node(node):
-    """Returns True if given node is fpgadataflow node. Otherwise False."""
-    is_node = False
-    if node is not None:
-        if node.domain == "finn":
-            n_backend = get_by_name(node.attribute, "backend")
-            if n_backend is not None:
-                backend_value = n_backend.s.decode("UTF-8")
-                if backend_value == "fpgadataflow":
-                    is_node = True
-
-    return is_node
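-
-# Example (illustrative): a node qualifies if it is in the "finn" domain and
-# carries a backend="fpgadataflow" attribute:
-#
-#     from onnx import helper
-#     node = helper.make_node(
-#         "StreamingFCLayer_Batch", ["in0"], ["out0"],
-#         domain="finn", backend="fpgadataflow",
-#     )
-#     assert is_fpgadataflow_node(node)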
-
-
-def rtlsim_multi_io(sim, io_dict, num_out_values, trace_file=""):
-    """Runs the pyverilator simulation by passing the input values to the simulation,
-    toggle the clock and observing the execution time. Function contains also an
-    observation loop that can abort the simulation if no output value is produced
-    after a set number of cycles. Can handle multiple i/o streams. See function
-    implementation for details on how the top-level signals should be named.
-
-    sim: the PyVerilator object for simulation
-    io_dict: a dict of dicts in the following format:
-            {"inputs" : {"in0" : <input_data>, "in1" : <input_data>},
-             "outputs" : {"out0" : [], "out1" : []} }
-            <input_data> is a list of Python arbitrary-precision ints indicating
-            what data to push into the simulation, and the output lists are
-            similarly filled when the simulation is complete
-    num_out_values: total number of output values to be read from the
-                    simulation before it is considered finished.
-
-    returns: number of clock cycles elapsed for completion
-
-    """
-
-    if trace_file != "":
-        sim.start_vcd_trace(trace_file)
-
-    for outp in io_dict["outputs"]:
-        sim.io[outp + "_V_V_TREADY"] = 1
-
-    # observe if output is completely calculated
-    # total_cycle_count will contain the number of cycles the calculation ran
-    output_done = False
-    total_cycle_count = 0
-    output_count = 0
-    old_output_count = 0
-
-    # avoid infinite looping of the simulation by aborting when there is no
-    # change in output values for liveness_threshold cycles (see
-    # pyverilate_get_liveness_threshold_cycles, default 10000)
-    no_change_count = 0
-    liveness_threshold = pyverilate_get_liveness_threshold_cycles()
-
-    while not (output_done):
-        for inp in io_dict["inputs"]:
-            inputs = io_dict["inputs"][inp]
-            sim.io[inp + "_V_V_TVALID"] = 1 if len(inputs) > 0 else 0
-            sim.io[inp + "_V_V_TDATA"] = inputs[0] if len(inputs) > 0 else 0
-            if sim.io[inp + "_V_V_TREADY"] == 1 and sim.io[inp + "_V_V_TVALID"] == 1:
-                inputs = inputs[1:]
-            io_dict["inputs"][inp] = inputs
-
-        for outp in io_dict["outputs"]:
-            outputs = io_dict["outputs"][outp]
-            if sim.io[outp + "_V_V_TVALID"] == 1 and sim.io[outp + "_V_V_TREADY"] == 1:
-                outputs = outputs + [sim.io[outp + "_V_V_TDATA"]]
-                output_count += 1
-            io_dict["outputs"][outp] = outputs
-
-        sim.io.ap_clk = 1
-        sim.io.ap_clk = 0
-
-        total_cycle_count = total_cycle_count + 1
-
-        if output_count == old_output_count:
-            no_change_count = no_change_count + 1
-        else:
-            no_change_count = 0
-            old_output_count = output_count
-
-        # check if all expected output words received
-        if output_count == num_out_values:
-            output_done = True
-
-        # end sim on timeout
-        if no_change_count == liveness_threshold:
-            if trace_file != "":
-                sim.flush_vcd_trace()
-                sim.stop_vcd_trace()
-            raise Exception(
-                "Error in simulation! Takes too long to produce output. "
-                "Consider setting the LIVENESS_THRESHOLD env.var. to a "
-                "larger value."
-            )
-
-    if trace_file != "":
-        sim.flush_vcd_trace()
-        sim.stop_vcd_trace()
-
-    return total_cycle_count
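-
-# Usage sketch (signal names and data illustrative): inputs are consumed from
-# the front of each list, outputs accumulate until num_out_values words have
-# been observed:
-#
-#     io_dict = {
-#         "inputs": {"in0": [0x1, 0x2, 0x3, 0x4]},
-#         "outputs": {"out0": []},
-#     }
-#     cycles = rtlsim_multi_io(sim, io_dict, num_out_values=4)
-#     # io_dict["outputs"]["out0"] now holds the four output words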
diff --git a/src/finn/util/onnx.py b/src/finn/util/onnx.py
deleted file mode 100644
index 4d7cdd126..000000000
--- a/src/finn/util/onnx.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-import onnx
-import finn.core.data_layout as DataLayout
-
-
-def valueinfo_to_tensor(vi):
-    """Creates an all-zeroes numpy tensor from a ValueInfoProto."""
-
-    dims = [x.dim_value for x in vi.type.tensor_type.shape.dim]
-    return np.zeros(
-        dims, dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[vi.type.tensor_type.elem_type]
-    )
-
-
-def nchw_to_nhwc(t, model, idx, reverse=False):
-    """Converts between NCHW <-> NHWC layouts for tensor t by inserting a transpose. 
-    If reverse=False, t is assumed NCHW and we insert transpose to convert NCHW -> NHWC
-    If reverse=True, t is assumed NHWC and we insert transpose to convert NHWC -> NCHW.
-    """
-    graph = model.graph
-    # create new NHWC tensor
-    t_shape = model.get_tensor_shape(t)
-    bs = t_shape[0]
-    ch = t_shape[1]
-    height = t_shape[2]
-    width = t_shape[3]
-    t_trans = onnx.helper.make_tensor_value_info(
-        model.make_new_valueinfo_name(),
-        onnx.TensorProto.FLOAT,
-        (bs, height, width, ch),  # NHWC
-    )
-    graph.value_info.append(t_trans)
-    dt = model.get_tensor_datatype(t)
-    t_trans = t_trans.name
-    model.set_tensor_datatype(t_trans, dt)
-    model.set_tensor_layout(t_trans, DataLayout.NHWC)
-    # NCHW <-> NHWC transpose
-    if reverse:
-        t_trans_node = onnx.helper.make_node(
-            "Transpose", [t_trans], [t], perm=[0, 3, 1, 2]
-        )
-    else:
-        t_trans_node = onnx.helper.make_node(
-            "Transpose", [t], [t_trans], perm=[0, 2, 3, 1]
-        )
-    graph.node.insert(idx, t_trans_node)
-    return t_trans
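-
-# Usage sketch (names illustrative): to feed an NCHW tensor "inp" into an
-# NHWC consumer at node position idx, insert the forward transpose and
-# rewire the consumer to the returned tensor name:
-#
-#     t_nhwc = nchw_to_nhwc("inp", model, idx, reverse=False)
-#     consumer_node.input[0] = t_nhwc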
diff --git a/tests/analysis/test_is_linear.py b/tests/analysis/test_is_linear.py
deleted file mode 100644
index 6afe9bb9c..000000000
--- a/tests/analysis/test_is_linear.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import onnx.helper as oh
-from onnx import TensorProto
-
-import finn.analysis.topology as ta
-from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.infer_shapes import InferShapes
-
-
-def test_is_linear_linear():
-    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
-    add_param = oh.make_tensor_value_info("add_param", TensorProto.FLOAT, [2])
-    mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [2])
-    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
-    modelproto = oh.make_model(
-        oh.make_graph(
-            name="test",
-            inputs=[top_in],
-            outputs=[top_out],
-            value_info=[add_param, mul_param],
-            nodes=[
-                oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
-                oh.make_node("Mul", ["middle", "mul_param"], ["top_out"]),
-            ],
-        )
-    )
-    model = ModelWrapper(modelproto)
-    model = model.transform(InferShapes())
-    ret = model.analysis(ta.is_linear)
-    assert ret["is_linear"] is True
-
-
-def test_is_linear_forked_node_output():
-    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
-    add_param = oh.make_tensor_value_info("add_param", TensorProto.FLOAT, [2])
-    mul0_param = oh.make_tensor_value_info("mul0_param", TensorProto.FLOAT, [2])
-    mul1_param = oh.make_tensor_value_info("mul1_param", TensorProto.FLOAT, [2])
-    mul0_res = oh.make_tensor_value_info("mul0_res", TensorProto.FLOAT, [2])
-    mul1_res = oh.make_tensor_value_info("mul1_res", TensorProto.FLOAT, [2])
-    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
-    modelproto = oh.make_model(
-        oh.make_graph(
-            name="test",
-            inputs=[top_in],
-            outputs=[top_out],
-            value_info=[add_param, mul0_param, mul1_param, mul0_res, mul1_res],
-            nodes=[
-                oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
-                oh.make_node("Mul", ["middle", "mul0_param"], ["mul0_res"]),
-                oh.make_node("Mul", ["middle", "mul1_param"], ["mul1_res"]),
-                oh.make_node("Add", ["mul0_res", "mul1_res"], ["top_out"]),
-            ],
-        )
-    )
-    model = ModelWrapper(modelproto)
-    model = model.transform(InferShapes())
-    ret = model.analysis(ta.is_linear)
-    assert ret["is_linear"] is False
diff --git a/tests/analysis/test_topology_checks.py b/tests/analysis/test_topology_checks.py
deleted file mode 100644
index 7f7f800da..000000000
--- a/tests/analysis/test_topology_checks.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-from pkgutil import get_data
-
-import onnx.helper as oh
-from onnx import TensorProto
-import brevitas.onnx as bo
-from finn.util.test import get_test_model_trained
-import finn.analysis.topology as ta
-from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.infer_shapes import InferShapes
-
-
-def test_all_tensors_f32():
-    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
-    add_param = oh.make_tensor_value_info("add_param", TensorProto.FLOAT, [2])
-    mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [2])
-    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
-    modelproto = oh.make_model(
-        oh.make_graph(
-            name="test",
-            inputs=[top_in],
-            outputs=[top_out],
-            value_info=[add_param, mul_param],
-            nodes=[
-                oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
-                oh.make_node("Mul", ["middle", "mul_param"], ["top_out"]),
-            ],
-        )
-    )
-    model = ModelWrapper(modelproto)
-    model = model.transform(InferShapes())
-    ret = model.analysis(ta.all_tensors_f32)
-    assert ret["all_tensors_f32"] is True
-
-    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
-    add_param = oh.make_tensor_value_info("add_param", TensorProto.INT8, [2])
-    mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [2])
-    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
-    modelproto = oh.make_model(
-        oh.make_graph(
-            name="test",
-            inputs=[top_in],
-            outputs=[top_out],
-            value_info=[add_param, mul_param],
-            nodes=[
-                oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
-                oh.make_node("Mul", ["middle", "mul_param"], ["top_out"]),
-            ],
-        )
-    )
-    model = ModelWrapper(modelproto)
-    model = model.transform(InferShapes())
-    ret = model.analysis(ta.all_tensors_f32)
-    assert ret["all_tensors_f32"] is False
-
-
-def test_node_inputs_in_expected_order():
-    raw_m = get_data("finn", "data/onnx/mnist-conv/model.onnx")
-    model = ModelWrapper(raw_m)
-    model = model.transform(InferShapes())
-    ret = model.analysis(ta.node_inputs_in_expected_order)
-    # this model has an (unnecessary) dynamic reshape for its weight tensor
-    # and so it fails the check
-    assert ret["node_inputs_in_expected_order"] is False
-
-
-def test_nodes_topologically_sorted():
-    # test analysis pass (nodes_topologically_sorted) with different models
-
-    # test with data/onnx/finn-hls-model/tfc_w1_a1_after_conv_to_hls.onnx
-    raw_m = get_data(
-        "finn", "data/onnx/finn-hls-model/tfc_w1_a1_after_conv_to_hls.onnx"
-    )
-    model = ModelWrapper(raw_m)
-    ret = model.analysis(ta.nodes_topologically_sorted)
-    assert ret["nodes_topologically_sorted"] is True
-
-    # remove first node and add it at the end
-    graph = model.graph
-    first_node = graph.node[0]
-    graph.node.remove(first_node)
-    graph.node.append(first_node)
-    ret = model.analysis(ta.nodes_topologically_sorted)
-    assert ret["nodes_topologically_sorted"] is False
-
-    # test with data/onnx/mnist-conv/model.onnx
-    raw_m = get_data("finn", "data/onnx/mnist-conv/model.onnx")
-    model = ModelWrapper(raw_m)
-    ret = model.analysis(ta.nodes_topologically_sorted)
-    assert ret["nodes_topologically_sorted"] is True
-
-    # remove first node and add it at the end
-    graph = model.graph
-    first_node = graph.node[0]
-    graph.node.remove(first_node)
-    graph.node.append(first_node)
-    ret = model.analysis(ta.nodes_topologically_sorted)
-    assert ret["nodes_topologically_sorted"] is False
-
-    # test with manually created small network
-    Neg_node = oh.make_node("Neg", inputs=["in1"], outputs=["neg1"])
-    Round_node = oh.make_node("Round", inputs=["neg1"], outputs=["round1"])
-
-    Ceil_node = oh.make_node("Ceil", inputs=["neg1"], outputs=["ceil1"])
-    Add_node = oh.make_node("Add", inputs=["round1", "ceil1"], outputs=["out1"])
-
-    in1 = oh.make_tensor_value_info("in1", TensorProto.FLOAT, [4, 4])
-    out1 = oh.make_tensor_value_info("out1", TensorProto.FLOAT, [4, 4])
-
-    graph = oh.make_graph(
-        nodes=[Neg_node, Round_node, Ceil_node, Add_node],
-        name="simple_graph",
-        inputs=[in1],
-        outputs=[out1],
-        value_info=[
-            oh.make_tensor_value_info("neg1", TensorProto.FLOAT, [4, 4]),
-            oh.make_tensor_value_info("round1", TensorProto.FLOAT, [4, 4]),
-            oh.make_tensor_value_info("ceil1", TensorProto.FLOAT, [4, 4]),
-        ],
-    )
-
-    onnx_model = oh.make_model(graph, producer_name="simple-model")
-    model = ModelWrapper(onnx_model)
-
-    ret = model.analysis(ta.nodes_topologically_sorted)
-    assert ret["nodes_topologically_sorted"] is True
-
-    # create same graph but with "wrong" node order
-    graph = oh.make_graph(
-        nodes=[Round_node, Ceil_node, Neg_node, Add_node],
-        name="simple_graph",
-        inputs=[in1],
-        outputs=[out1],
-        value_info=[
-            oh.make_tensor_value_info("neg1", TensorProto.FLOAT, [4, 4]),
-            oh.make_tensor_value_info("round1", TensorProto.FLOAT, [4, 4]),
-            oh.make_tensor_value_info("ceil1", TensorProto.FLOAT, [4, 4]),
-        ],
-    )
-
-    onnx_model = oh.make_model(graph, producer_name="simple-model")
-    model = ModelWrapper(onnx_model)
-
-    ret = model.analysis(ta.nodes_topologically_sorted)
-    assert ret["nodes_topologically_sorted"] is False
-
-    # test with data/onnx/finn-hls-model/finn-hls-onnx-model.onnx
-    raw_m = get_data("finn", "data/onnx/finn-hls-model/finn-hls-onnx-model.onnx")
-    model = ModelWrapper(raw_m)
-    ret = model.analysis(ta.nodes_topologically_sorted)
-    assert ret["nodes_topologically_sorted"] is True
-
-    # remove first node and add it at the end
-    graph = model.graph
-    first_node = graph.node[0]
-    graph.node.remove(first_node)
-    graph.node.append(first_node)
-    ret = model.analysis(ta.nodes_topologically_sorted)
-    assert ret["nodes_topologically_sorted"] is False
-
-    # test with cnv_w1a1
-    build_dir = "/tmp/" + os.environ["FINN_INST_NAME"]
-    cnv = get_test_model_trained("CNV", 1, 1)
-    bo.export_finn_onnx(
-        cnv, (1, 3, 32, 32), build_dir + "/end2end_cnv_w1a1_export.onnx"
-    )
-    model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_export.onnx")
-    ret = model.analysis(ta.nodes_topologically_sorted)
-    assert ret["nodes_topologically_sorted"] is True
-
-    # remove first node and add it at the end
-    graph = model.graph
-    first_node = graph.node[0]
-    graph.node.remove(first_node)
-    graph.node.append(first_node)
-    ret = model.analysis(ta.nodes_topologically_sorted)
-    assert ret["nodes_topologically_sorted"] is False
diff --git a/tests/core/test_basic_onnx_exec.py b/tests/core/test_basic_onnx_exec.py
deleted file mode 100644
index ddb2cbfc4..000000000
--- a/tests/core/test_basic_onnx_exec.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from pkgutil import get_data
-
-import numpy as np
-import onnx
-import onnx.numpy_helper as np_helper
-
-import finn.core.onnx_exec as oxe
-from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.infer_shapes import InferShapes
-from finn.core.datatype import DataType
-from finn.util.basic import gen_finn_dt_tensor
-
-
-def test_mnist_onnx_download_extract_run():
-    # load the onnx model
-    raw_m = get_data("finn", "data/onnx/mnist-conv/model.onnx")
-    model = ModelWrapper(raw_m)
-    model = model.transform(InferShapes())
-    # load one of the test vectors
-    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
-    raw_o = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/output_0.pb")
-    input_tensor = onnx.load_tensor_from_string(raw_i)
-    output_tensor = onnx.load_tensor_from_string(raw_o)
-    # run using FINN-based execution (full graph)
-    input_dict = {"Input3": np_helper.to_array(input_tensor)}
-    output_dict = oxe.execute_onnx(model, input_dict, return_full_exec_context=True)
-    assert np.isclose(
-        np_helper.to_array(output_tensor), output_dict["Plus214_Output_0"], atol=1e-3
-    ).all()
-    # test subgraph execution
-    start_node = model.graph.node[1]
-    end_node = model.graph.node[3]
-    subgraph_i_dict = {start_node.input[0]: output_dict[start_node.input[0]]}
-    subgraph_o_dict = oxe.execute_onnx(
-        model,
-        subgraph_i_dict,
-        return_full_exec_context=True,
-        start_node=start_node,
-        end_node=end_node,
-    )
-    assert np.isclose(
-        subgraph_o_dict[end_node.output[0]], output_dict[end_node.output[0]], atol=1e-3
-    ).all()
-
-
-def test_onnx_exec_internal_rounding():
-    inp0 = onnx.helper.make_tensor_value_info("inp0", onnx.TensorProto.FLOAT, [2, 2])
-    inp1 = onnx.helper.make_tensor_value_info("inp1", onnx.TensorProto.FLOAT, [1])
-    outp = onnx.helper.make_tensor_value_info("outp", onnx.TensorProto.FLOAT, [2, 2])
-    mul_node = onnx.helper.make_node("Mul", inputs=["inp0", "inp1"], outputs=["outp"])
-    graph = onnx.helper.make_graph(
-        nodes=[mul_node], name="mul_graph", inputs=[inp0, inp1], outputs=[outp]
-    )
-
-    model = onnx.helper.make_model(graph, producer_name="mul-model")
-    model = ModelWrapper(model)
-    idt = DataType.INT2
-    model.set_tensor_datatype("inp0", idt)
-    model.set_tensor_datatype("inp1", idt)
-    model.transform(InferShapes())
-
-    mul_value = np.asarray([-1], dtype=np.float32)
-    inp_int = gen_finn_dt_tensor(idt, [2, 2])
-    scale = np.random.uniform(low=0, high=1, size=(2, 2)).astype(np.float32)
-    inp_rounded = (inp_int * scale) / (scale + 1e-7)
-    input_dict = {"inp0": inp_rounded, "inp1": mul_value}
-    output_dict = oxe.execute_onnx(model, input_dict)
-    produced = output_dict["outp"]
-    expected = np.multiply(inp_int, mul_value)
-    assert (produced == expected).all()
diff --git a/tests/core/test_custom_onnx_exec.py b/tests/core/test_custom_onnx_exec.py
deleted file mode 100644
index 086681dde..000000000
--- a/tests/core/test_custom_onnx_exec.py
+++ /dev/null
@@ -1,277 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-from onnx import TensorProto, helper
-
-import finn.core.execute_custom_node as ex_cu_node
-
-
-def test_execute_custom_node_multithreshold():
-    inputs = np.ndarray(
-        shape=(6, 3, 2, 2),
-        buffer=np.array(
-            [
-                4.8,
-                3.2,
-                1.2,
-                4.9,
-                7.8,
-                2.4,
-                3.1,
-                4.7,
-                6.2,
-                5.1,
-                4.9,
-                2.2,
-                6.2,
-                0.0,
-                0.8,
-                4.7,
-                0.2,
-                5.6,
-                8.9,
-                9.2,
-                9.1,
-                4.0,
-                3.3,
-                4.9,
-                2.3,
-                1.7,
-                1.3,
-                2.2,
-                4.6,
-                3.4,
-                3.7,
-                9.8,
-                4.7,
-                4.9,
-                2.8,
-                2.7,
-                8.3,
-                6.7,
-                4.2,
-                7.1,
-                2.8,
-                3.1,
-                0.8,
-                0.6,
-                4.4,
-                2.7,
-                6.3,
-                6.1,
-                1.4,
-                5.3,
-                2.3,
-                1.9,
-                4.7,
-                8.1,
-                9.3,
-                3.7,
-                2.7,
-                5.1,
-                4.2,
-                1.8,
-                4.1,
-                7.3,
-                7.1,
-                0.4,
-                0.2,
-                1.3,
-                4.3,
-                8.9,
-                1.4,
-                1.6,
-                8.3,
-                9.4,
-            ]
-        ),
-    )
-
-    threshold_values = np.ndarray(
-        shape=(3, 7),
-        buffer=np.array(
-            [
-                0.8,
-                1.4,
-                1.7,
-                3.5,
-                5.2,
-                6.8,
-                8.2,
-                0.2,
-                2.2,
-                3.5,
-                4.5,
-                6.6,
-                8.6,
-                9.2,
-                1.3,
-                4.1,
-                4.5,
-                6.5,
-                7.8,
-                8.1,
-                8.9,
-            ]
-        ),
-    )
-
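-    # MultiThreshold semantics (illustrative): each output element counts how
-    # many thresholds of its channel the input value crosses. E.g. input 4.8
-    # in channel 0 with thresholds (0.8, 1.4, 1.7, 3.5, 5.2, 6.8, 8.2)
-    # crosses four of them, so the expected output below is 4.0.
-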
-    v = helper.make_tensor_value_info("v", TensorProto.FLOAT, [6, 3, 2, 2])
-    thresholds = helper.make_tensor_value_info("thresholds", TensorProto.FLOAT, [3, 7])
-    out = helper.make_tensor_value_info("out", TensorProto.FLOAT, [6, 3, 2, 2])
-
-    node_def = helper.make_node(
-        "MultiThreshold", ["v", "thresholds"], ["out"], domain="finn"
-    )
-
-    graph_def = helper.make_graph([node_def], "test_model", [v, thresholds], [out])
-
-    execution_context = {}
-    execution_context["v"] = inputs
-    execution_context["thresholds"] = threshold_values
-
-    ex_cu_node.execute_custom_node(node_def, execution_context, graph_def)
-
-    outputs = np.ndarray(
-        shape=(6, 3, 2, 2),
-        buffer=np.array(
-            [
-                4.0,
-                3.0,
-                1.0,
-                4.0,
-                5.0,
-                2.0,
-                2.0,
-                4.0,
-                3.0,
-                3.0,
-                3.0,
-                1.0,
-                5.0,
-                0.0,
-                1.0,
-                4.0,
-                1.0,
-                4.0,
-                6.0,
-                7.0,
-                7.0,
-                1.0,
-                1.0,
-                3.0,
-                3.0,
-                3.0,
-                1.0,
-                3.0,
-                4.0,
-                2.0,
-                3.0,
-                7.0,
-                3.0,
-                3.0,
-                1.0,
-                1.0,
-                7.0,
-                5.0,
-                4.0,
-                6.0,
-                2.0,
-                2.0,
-                1.0,
-                1.0,
-                2.0,
-                1.0,
-                3.0,
-                3.0,
-                2.0,
-                5.0,
-                3.0,
-                3.0,
-                4.0,
-                5.0,
-                7.0,
-                3.0,
-                1.0,
-                3.0,
-                2.0,
-                1.0,
-                4.0,
-                6.0,
-                6.0,
-                0.0,
-                1.0,
-                1.0,
-                3.0,
-                6.0,
-                1.0,
-                1.0,
-                6.0,
-                7.0,
-            ]
-        ),
-    )
-
-    assert (execution_context["out"] == outputs).all()
-
-    # test the optional output scaling features on MultiThreshold
-    node_def = helper.make_node(
-        "MultiThreshold",
-        ["v", "thresholds"],
-        ["out"],
-        domain="finn",
-        out_scale=2.0,
-        out_bias=-1.0,
-    )
-
-    graph_def = helper.make_graph([node_def], "test_model", [v, thresholds], [out])
-    ex_cu_node.execute_custom_node(node_def, execution_context, graph_def)
-    outputs_scaled = 2.0 * outputs - 1.0
-    assert (execution_context["out"] == outputs_scaled).all()
-
-    # test the optional data layout option for MultiThreshold
-    node_def = helper.make_node(
-        "MultiThreshold",
-        ["v", "thresholds"],
-        ["out"],
-        domain="finn",
-        data_layout="NHWC",
-    )
-
-    v_nhwc = helper.make_tensor_value_info("v", TensorProto.FLOAT, [6, 2, 2, 3])
-    out_nhwc = helper.make_tensor_value_info("out", TensorProto.FLOAT, [6, 2, 2, 3])
-    inputs_nhwc = np.transpose(inputs, (0, 2, 3, 1))  # NCHW -> NHWC
-    outputs_nhwc = np.transpose(outputs, (0, 2, 3, 1))  # NCHW -> NHWC
-    execution_context["v"] = inputs_nhwc
-
-    graph_def = helper.make_graph(
-        [node_def], "test_model", [v_nhwc, thresholds], [out_nhwc]
-    )
-    ex_cu_node.execute_custom_node(node_def, execution_context, graph_def)
-    assert (execution_context["out"] == outputs_nhwc).all()
diff --git a/tests/core/test_datatypes.py b/tests/core/test_datatypes.py
deleted file mode 100644
index f1d34923c..000000000
--- a/tests/core/test_datatypes.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from finn.core.datatype import DataType
-
-
-def test_datatypes():
-    assert DataType.BIPOLAR.allowed(-1)
-    assert DataType.BIPOLAR.allowed(0) is False
-    assert DataType.BINARY.allowed(-1) is False
-    assert DataType.BINARY.allowed(1)
-    assert DataType.TERNARY.allowed(2) is False
-    assert DataType.TERNARY.allowed(-1)
-    assert DataType.UINT2.allowed(2)
-    assert DataType.UINT2.allowed(10) is False
-    assert DataType.UINT3.allowed(5)
-    assert DataType.UINT3.allowed(-7) is False
-    assert DataType.UINT4.allowed(15)
-    assert DataType.UINT4.allowed(150) is False
-    assert DataType.UINT8.allowed(150)
-    assert DataType.UINT8.allowed(777) is False
-    assert DataType.UINT16.allowed(14500)
-    assert DataType.UINT16.allowed(-1) is False
-    assert DataType.UINT32.allowed(2 ** 10)
-    assert DataType.UINT32.allowed(-1) is False
-    assert DataType.INT2.allowed(-1)
-    assert DataType.INT2.allowed(-10) is False
-    assert DataType.INT3.allowed(5) is False
-    assert DataType.INT3.allowed(-2)
-    assert DataType.INT4.allowed(15) is False
-    assert DataType.INT4.allowed(-5)
-    assert DataType.INT8.allowed(150) is False
-    assert DataType.INT8.allowed(-127)
-    assert DataType.INT16.allowed(-1.04) is False
-    assert DataType.INT16.allowed(-7777)
-    assert DataType.INT32.allowed(7.77) is False
-    assert DataType.INT32.allowed(-5)
-    assert DataType.INT32.allowed(5)
-    assert DataType.BINARY.signed() is False
-    assert DataType.FLOAT32.signed()
-    assert DataType.BIPOLAR.signed()
-    assert DataType.TERNARY.signed()
-
-
-def test_smallest_possible():
-    assert DataType.get_smallest_possible(1) == DataType.BINARY
-    assert DataType.get_smallest_possible(1.1) == DataType.FLOAT32
-    assert DataType.get_smallest_possible(-1) == DataType.BIPOLAR
-    assert DataType.get_smallest_possible(-3) == DataType.INT3
-    assert DataType.get_smallest_possible(-3.2) == DataType.FLOAT32
diff --git a/tests/core/test_mixed_onnx_exec.py b/tests/core/test_mixed_onnx_exec.py
deleted file mode 100644
index d8754105e..000000000
--- a/tests/core/test_mixed_onnx_exec.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-from onnx import TensorProto, helper
-
-import finn.core.onnx_exec as oxe
-from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.infer_shapes import InferShapes
-
-
-def test_execute_mixed_model():
-
-    out0 = helper.make_tensor_value_info("out0", TensorProto.FLOAT, [6, 3, 2, 2])
-
-    graph_def = helper.make_graph(
-        nodes=[
-            helper.make_node(
-                "MultiThreshold", ["v", "thresholds"], ["out0"], domain="finn"
-            ),
-            helper.make_node("Relu", ["out0"], ["out1"]),
-        ],
-        name="test-model",
-        inputs=[
-            helper.make_tensor_value_info("v", TensorProto.FLOAT, [6, 3, 2, 2]),
-            helper.make_tensor_value_info("thresholds", TensorProto.FLOAT, [3, 7]),
-        ],
-        outputs=[
-            helper.make_tensor_value_info("out1", TensorProto.FLOAT, [6, 3, 2, 2])
-        ],
-        value_info=[out0],
-    )
-    model_def = helper.make_model(graph_def, producer_name="onnx-example")
-
-    model = ModelWrapper(model_def)
-    model = model.transform(InferShapes())
-
-    inputs = np.asarray(
-        [
-            4.8,
-            3.2,
-            1.2,
-            4.9,
-            7.8,
-            2.4,
-            3.1,
-            4.7,
-            6.2,
-            5.1,
-            4.9,
-            2.2,
-            6.2,
-            0.0,
-            0.8,
-            4.7,
-            0.2,
-            5.6,
-            8.9,
-            9.2,
-            9.1,
-            4.0,
-            3.3,
-            4.9,
-            2.3,
-            1.7,
-            1.3,
-            2.2,
-            4.6,
-            3.4,
-            3.7,
-            9.8,
-            4.7,
-            4.9,
-            2.8,
-            2.7,
-            8.3,
-            6.7,
-            4.2,
-            7.1,
-            2.8,
-            3.1,
-            0.8,
-            0.6,
-            4.4,
-            2.7,
-            6.3,
-            6.1,
-            1.4,
-            5.3,
-            2.3,
-            1.9,
-            4.7,
-            8.1,
-            9.3,
-            3.7,
-            2.7,
-            5.1,
-            4.2,
-            1.8,
-            4.1,
-            7.3,
-            7.1,
-            0.4,
-            0.2,
-            1.3,
-            4.3,
-            8.9,
-            1.4,
-            1.6,
-            8.3,
-            9.4,
-        ],
-        dtype=np.float32,
-    ).reshape(6, 3, 2, 2)
-
-    threshold_values = np.asarray(
-        [
-            0.8,
-            1.4,
-            1.7,
-            3.5,
-            5.2,
-            6.8,
-            8.2,
-            0.2,
-            2.2,
-            3.5,
-            4.5,
-            6.6,
-            8.6,
-            9.2,
-            1.3,
-            4.1,
-            4.5,
-            6.5,
-            7.8,
-            8.1,
-            8.9,
-        ],
-        dtype=np.float32,
-    ).reshape(3, 7)
-
-    input_dict = {}
-    input_dict["v"] = inputs
-    input_dict["thresholds"] = threshold_values
-
-    output_dict = oxe.execute_onnx(model, input_dict)
-
-    outputs = np.asarray(
-        [
-            4.0,
-            3.0,
-            1.0,
-            4.0,
-            5.0,
-            2.0,
-            2.0,
-            4.0,
-            3.0,
-            3.0,
-            3.0,
-            1.0,
-            5.0,
-            0.0,
-            1.0,
-            4.0,
-            1.0,
-            4.0,
-            6.0,
-            7.0,
-            7.0,
-            1.0,
-            1.0,
-            3.0,
-            3.0,
-            3.0,
-            1.0,
-            3.0,
-            4.0,
-            2.0,
-            3.0,
-            7.0,
-            3.0,
-            3.0,
-            1.0,
-            1.0,
-            7.0,
-            5.0,
-            4.0,
-            6.0,
-            2.0,
-            2.0,
-            1.0,
-            1.0,
-            2.0,
-            1.0,
-            3.0,
-            3.0,
-            2.0,
-            5.0,
-            3.0,
-            3.0,
-            4.0,
-            5.0,
-            7.0,
-            3.0,
-            1.0,
-            3.0,
-            2.0,
-            1.0,
-            4.0,
-            6.0,
-            6.0,
-            0.0,
-            1.0,
-            1.0,
-            3.0,
-            6.0,
-            1.0,
-            1.0,
-            6.0,
-            7.0,
-        ],
-        dtype=np.float32,
-    ).reshape(6, 3, 2, 2)
-
-    assert (output_dict["out1"] == outputs).all()
diff --git a/tests/core/test_modelwrapper.py b/tests/core/test_modelwrapper.py
deleted file mode 100644
index 0fb7ae42f..000000000
--- a/tests/core/test_modelwrapper.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-import onnx
-from collections import Counter
-import brevitas.onnx as bo
-import numpy as np
-import finn.core.data_layout as DataLayout
-
-from finn.core.modelwrapper import ModelWrapper
-from finn.util.test import get_test_model_trained
-
-export_onnx_path = "test_modelwrapper.onnx"
-
-
-def test_modelwrapper():
-    lfc = get_test_model_trained("LFC", 1, 1)
-    bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path)
-    model = ModelWrapper(export_onnx_path)
-    assert model.check_all_tensor_shapes_specified() is False
-    inp_name = model.graph.input[0].name
-    inp_shape = model.get_tensor_shape(inp_name)
-    assert inp_shape == [1, 1, 28, 28]
-    # find first matmul node
-    l0_mat_tensor_name = ""
-    l0_inp_tensor_name = ""
-    for node in model.graph.node:
-        if node.op_type == "MatMul":
-            l0_inp_tensor_name = node.input[0]
-            l0_mat_tensor_name = node.input[1]
-            break
-    assert l0_mat_tensor_name != ""
-    l0_weights = model.get_initializer(l0_mat_tensor_name)
-    assert l0_weights.shape == (784, 1024)
-    l0_weights_hist = Counter(l0_weights.flatten())
-    assert (l0_weights_hist[1.0] + l0_weights_hist[-1.0]) == 784 * 1024
-    l0_weights_rand = np.random.randn(784, 1024)
-    model.set_initializer(l0_mat_tensor_name, l0_weights_rand)
-    assert (model.get_initializer(l0_mat_tensor_name) == l0_weights_rand).all()
-    assert l0_inp_tensor_name != ""
-    inp_cons = model.find_consumer(l0_inp_tensor_name)
-    assert inp_cons.op_type == "MatMul"
-    out_prod = model.find_producer(l0_inp_tensor_name)
-    assert out_prod.op_type == "MultiThreshold"
-    inp_layout = model.get_tensor_layout(inp_name)
-    assert inp_layout is None
-    inp_layout = DataLayout.NCHW
-    model.set_tensor_layout(inp_name, inp_layout)
-    assert model.get_tensor_layout(inp_name) == inp_layout
-    inp_sparsity = model.get_tensor_sparsity(inp_name)
-    assert inp_sparsity is None
-    inp_sparsity = {"dw": {"kernel_shape": 3}}
-    model.set_tensor_sparsity(inp_name, inp_sparsity)
-    assert model.get_tensor_sparsity(inp_name) == inp_sparsity
-    os.remove(export_onnx_path)
-
-
-def test_modelwrapper_graph_order():
-    # create small network with properties to be tested
-    Neg_node = onnx.helper.make_node("Neg", inputs=["in1"], outputs=["neg1"])
-    Round_node = onnx.helper.make_node("Round", inputs=["neg1"], outputs=["round1"])
-
-    Ceil_node = onnx.helper.make_node("Ceil", inputs=["neg1"], outputs=["ceil1"])
-    Add_node = onnx.helper.make_node(
-        "Add", inputs=["round1", "ceil1"], outputs=["out1"]
-    )
-
-    in1 = onnx.helper.make_tensor_value_info("in1", onnx.TensorProto.FLOAT, [4, 4])
-    out1 = onnx.helper.make_tensor_value_info("out1", onnx.TensorProto.FLOAT, [4, 4])
-
-    graph = onnx.helper.make_graph(
-        nodes=[Neg_node, Round_node, Ceil_node, Add_node],
-        name="simple_graph",
-        inputs=[in1],
-        outputs=[out1],
-        value_info=[
-            onnx.helper.make_tensor_value_info("neg1", onnx.TensorProto.FLOAT, [4, 4]),
-            onnx.helper.make_tensor_value_info(
-                "round1", onnx.TensorProto.FLOAT, [4, 4]
-            ),
-            onnx.helper.make_tensor_value_info("ceil1", onnx.TensorProto.FLOAT, [4, 4]),
-        ],
-    )
-
-    onnx_model = onnx.helper.make_model(graph, producer_name="simple-model")
-    model = ModelWrapper(onnx_model)
-
-    # test graph order functions
-    assert model.find_consumers("in1") == [Neg_node]
-    assert model.find_consumers("neg1") == [Round_node, Ceil_node]
-    assert model.find_consumers("round1") == [Add_node]
-    assert model.find_consumers("ceil1") == [Add_node]
-    assert model.find_consumers("out1") is None
-
-    assert model.find_direct_successors(Neg_node) == [Round_node, Ceil_node]
-    assert model.find_direct_successors(Round_node) == [Add_node]
-    assert model.find_direct_successors(Ceil_node) == [Add_node]
-    assert model.find_direct_successors(Add_node) is None
-
-    assert model.find_direct_predecessors(Neg_node) is None
-    assert model.find_direct_predecessors(Round_node) == [Neg_node]
-    assert model.find_direct_predecessors(Ceil_node) == [Neg_node]
-    assert model.find_direct_predecessors(Add_node) == [Round_node, Ceil_node]
-
-    assert model.get_node_index(Neg_node) == 0
-    assert model.get_node_index(Round_node) == 1
-    assert model.get_node_index(Ceil_node) == 2
-    assert model.get_node_index(Add_node) == 3
-
-
-def test_modelwrapper_detect_forks_n_joins():
-    # create small network with properties to be tested
-    Neg_node = onnx.helper.make_node("Neg", inputs=["in1"], outputs=["neg1"])
-    Round_node = onnx.helper.make_node("Round", inputs=["neg1"], outputs=["round1"])
-
-    Ceil_node = onnx.helper.make_node("Ceil", inputs=["neg1"], outputs=["ceil1"])
-    Add_node = onnx.helper.make_node(
-        "Add", inputs=["round1", "ceil1"], outputs=["out1"]
-    )
-
-    in1 = onnx.helper.make_tensor_value_info("in1", onnx.TensorProto.FLOAT, [4, 4])
-    out1 = onnx.helper.make_tensor_value_info("out1", onnx.TensorProto.FLOAT, [4, 4])
-
-    graph = onnx.helper.make_graph(
-        nodes=[Neg_node, Round_node, Ceil_node, Add_node],
-        name="simple_graph",
-        inputs=[in1],
-        outputs=[out1],
-        value_info=[
-            onnx.helper.make_tensor_value_info("neg1", onnx.TensorProto.FLOAT, [4, 4]),
-            onnx.helper.make_tensor_value_info(
-                "round1", onnx.TensorProto.FLOAT, [4, 4]
-            ),
-            onnx.helper.make_tensor_value_info("ceil1", onnx.TensorProto.FLOAT, [4, 4]),
-        ],
-    )
-
-    onnx_model = onnx.helper.make_model(graph, producer_name="simple-model")
-    model = ModelWrapper(onnx_model)
-
-    # test
-    assert model.is_fork_node(Neg_node)
-    assert not model.is_fork_node(Round_node)
-    assert not model.is_fork_node(Ceil_node)
-    assert not model.is_fork_node(Add_node)
-
-    assert not model.is_join_node(Neg_node)
-    assert not model.is_join_node(Round_node)
-    assert not model.is_join_node(Ceil_node)
-    assert model.is_join_node(Add_node)
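-
-
-# In ModelWrapper terms, a fork node is one whose output is consumed by more
-# than one node (Neg feeds both Round and Ceil), while a join node has more
-# than one dynamic (non-initializer) input (Add consumes both branches).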
diff --git a/tests/custom_op/test_im2col.py b/tests/custom_op/test_im2col.py
deleted file mode 100644
index 0b148145b..000000000
--- a/tests/custom_op/test_im2col.py
+++ /dev/null
@@ -1,320 +0,0 @@
-import numpy as np
-from onnx import TensorProto, helper
-
-import finn.core.onnx_exec as oxe
-from finn.core.datatype import DataType
-from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.infer_datatypes import InferDataTypes
-from finn.transformation.infer_shapes import InferShapes
-from finn.custom_op.im2col import compute_conv_output_dim
-
-
-def check_two_dict_for_equality(dict1, dict2):
-    for key in dict1:
-        assert key in dict2, "Key: {} is not in both dictionaries".format(key)
-        assert (
-            dict1[key] == dict2[key]
-        ), "Values for key {} are not the same in both dictionaries".format(key)
-
-    return True
-
-
-def execution_im2col(x, idt, k, stride, ifm_ch, ifm_dim, pad_amt=0, pad_val=0):
-    ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, pad_amt)
-
-    # set up onnx model
-    inp = helper.make_tensor_value_info(
-        "inp", TensorProto.FLOAT, [1, ifm_dim, ifm_dim, ifm_ch]
-    )
-    outp = helper.make_tensor_value_info(
-        "outp", TensorProto.FLOAT, [1, ofm_dim, ofm_dim, k * k * ifm_ch]
-    )
-
-    Im2Col_node = helper.make_node(
-        "Im2Col",
-        ["inp"],
-        ["outp"],
-        domain="finn",
-        stride=stride,
-        kernel_size=k,
-        pad_amount=pad_amt,
-        pad_value=pad_val,
-        input_shape="(1,{},{},{})".format(ifm_dim, ifm_dim, ifm_ch),
-    )
-
-    graph = helper.make_graph(
-        nodes=[Im2Col_node], name="im2col_graph", inputs=[inp], outputs=[outp]
-    )
-
-    model = helper.make_model(graph, producer_name="im2col-model")
-    model = ModelWrapper(model)
-
-    model.set_tensor_datatype("inp", idt)
-
-    # test shape inference
-    model = model.transform(InferShapes())
-    assert model.get_tensor_shape("outp") == [1, ofm_dim, ofm_dim, k * k * ifm_ch]
-
-    # test datatype inference
-    assert model.get_tensor_datatype("outp") is DataType.FLOAT32
-    model = model.transform(InferDataTypes())
-    assert model.get_tensor_datatype("outp") is idt
-
-    # prepare input data
-    input_dict = {"inp": x}
-
-    # execute model
-    y_produced = oxe.execute_onnx(model, input_dict)["outp"]
-
-    return y_produced
-
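-# Note: the expected dimensions in the tests below follow the standard
-# convolution output-size formula, which compute_conv_output_dim is assumed
-# to implement:
-#   ofm_dim = (ifm_dim + 2 * pad_amt - k) // stride + 1
-# e.g. (4 + 0 - 2) // 1 + 1 = 3 (3x3 patch grid without padding) and
-# (4 + 2 - 2) // 1 + 1 = 5 (5x5 grid with pad_amt=1), matching the expected
-# tensors in test_im2col.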
-
-def test_im2col():
-    # bipolar inputs with the following im2col parameters
-    idt = DataType.BIPOLAR
-    k = 2
-    stride = 1
-    ifm_ch = 1
-    ifm_dim = 4
-    pad_amt = 0
-    pad_val = 0
-    ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, pad_amt)
-
-    x = np.asarray(
-        [
-            -1.0,
-            -1.0,
-            1.0,
-            1.0,
-            1.0,
-            -1.0,
-            1.0,
-            -1.0,
-            -1.0,
-            1.0,
-            -1.0,
-            -1.0,
-            1.0,
-            1.0,
-            1.0,
-            1.0,
-        ],
-        dtype=np.float32,
-    ).reshape(1, ifm_dim, ifm_dim, ifm_ch)
-
-    expected = np.asarray(
-        [
-            -1.0,
-            -1.0,
-            1.0,
-            -1.0,
-            -1.0,
-            1.0,
-            -1.0,
-            1.0,
-            1.0,
-            1.0,
-            1.0,
-            -1.0,
-            1.0,
-            -1.0,
-            -1.0,
-            1.0,
-            -1.0,
-            1.0,
-            1.0,
-            -1.0,
-            1.0,
-            -1.0,
-            -1.0,
-            -1.0,
-            -1.0,
-            1.0,
-            1.0,
-            1.0,
-            1.0,
-            -1.0,
-            1.0,
-            1.0,
-            -1.0,
-            -1.0,
-            1.0,
-            1.0,
-        ],
-        dtype=np.float32,
-    ).reshape(1, ofm_dim, ofm_dim, k * k * ifm_ch)
-
-    produced = execution_im2col(x, idt, k, stride, ifm_ch, ifm_dim, pad_amt, pad_val)
-    assert (produced == expected).all()
-
-    idt = DataType.INT8
-    k = 2
-    stride = 1
-    ifm_ch = 2
-    ifm_dim = 4
-    pad_amt = 0
-    pad_val = 0
-    ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, pad_amt)
-
-    x = np.asarray(
-        [
-            [
-                [[1, -1], [2, -2], [3, -3], [4, -4]],
-                [[5, -5], [6, -6], [7, -7], [8, -8]],
-                [[9, -9], [10, -10], [11, -11], [12, -12]],
-                [[13, -13], [14, -14], [15, -15], [16, -16]],
-            ]
-        ],
-        dtype=np.float32,
-    )
-
-    expected = np.asarray(
-        [
-            [
-                [
-                    [1.0, -1.0, 2.0, -2.0, 5.0, -5.0, 6.0, -6.0],
-                    [2.0, -2.0, 3.0, -3.0, 6.0, -6.0, 7.0, -7.0],
-                    [3.0, -3.0, 4.0, -4.0, 7.0, -7.0, 8.0, -8.0],
-                ],
-                [
-                    [5.0, -5.0, 6.0, -6.0, 9.0, -9.0, 10.0, -10.0],
-                    [6.0, -6.0, 7.0, -7.0, 10.0, -10.0, 11.0, -11.0],
-                    [7.0, -7.0, 8.0, -8.0, 11.0, -11.0, 12.0, -12.0],
-                ],
-                [
-                    [9.0, -9.0, 10.0, -10.0, 13.0, -13.0, 14.0, -14.0],
-                    [10.0, -10.0, 11.0, -11.0, 14.0, -14.0, 15.0, -15.0],
-                    [11.0, -11.0, 12.0, -12.0, 15.0, -15.0, 16.0, -16.0],
-                ],
-            ]
-        ],
-        dtype=np.float32,
-    )
-
-    produced = execution_im2col(x, idt, k, stride, ifm_ch, ifm_dim, pad_amt, pad_val)
-    assert (produced == expected).all()
-
-    idt = DataType.INT8
-    k = 2
-    stride = 1
-    ifm_ch = 2
-    ifm_dim = 4
-    pad_amt = 1
-    pad_val = 0
-    ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, pad_amt)
-
-    x = np.asarray(
-        [
-            [
-                [[1, -1], [2, -2], [3, -3], [4, -4]],
-                [[5, -5], [6, -6], [7, -7], [8, -8]],
-                [[9, -9], [10, -10], [11, -11], [12, -12]],
-                [[13, -13], [14, -14], [15, -15], [16, -16]],
-            ]
-        ],
-        dtype=np.float32,
-    )
-
-    expected = np.asarray(
-        [
-            [
-                [
-                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0],
-                    [0.0, 0.0, 0.0, 0.0, 1.0, -1.0, 2.0, -2.0],
-                    [0.0, 0.0, 0.0, 0.0, 2.0, -2.0, 3.0, -3.0],
-                    [0.0, 0.0, 0.0, 0.0, 3.0, -3.0, 4.0, -4.0],
-                    [0.0, 0.0, 0.0, 0.0, 4.0, -4.0, 0.0, 0.0],
-                ],
-                [
-                    [0.0, 0.0, 1.0, -1.0, 0.0, 0.0, 5.0, -5.0],
-                    [1.0, -1.0, 2.0, -2.0, 5.0, -5.0, 6.0, -6.0],
-                    [2.0, -2.0, 3.0, -3.0, 6.0, -6.0, 7.0, -7.0],
-                    [3.0, -3.0, 4.0, -4.0, 7.0, -7.0, 8.0, -8.0],
-                    [4.0, -4.0, 0.0, 0.0, 8.0, -8.0, 0.0, 0.0],
-                ],
-                [
-                    [0.0, 0.0, 5.0, -5.0, 0.0, 0.0, 9.0, -9.0],
-                    [5.0, -5.0, 6.0, -6.0, 9.0, -9.0, 10.0, -10.0],
-                    [6.0, -6.0, 7.0, -7.0, 10.0, -10.0, 11.0, -11.0],
-                    [7.0, -7.0, 8.0, -8.0, 11.0, -11.0, 12.0, -12.0],
-                    [8.0, -8.0, 0.0, 0.0, 12.0, -12.0, 0.0, 0.0],
-                ],
-                [
-                    [0.0, 0.0, 9.0, -9.0, 0.0, 0.0, 13.0, -13.0],
-                    [9.0, -9.0, 10.0, -10.0, 13.0, -13.0, 14.0, -14.0],
-                    [10.0, -10.0, 11.0, -11.0, 14.0, -14.0, 15.0, -15.0],
-                    [11.0, -11.0, 12.0, -12.0, 15.0, -15.0, 16.0, -16.0],
-                    [12.0, -12.0, 0.0, 0.0, 16.0, -16.0, 0.0, 0.0],
-                ],
-                [
-                    [0.0, 0.0, 13.0, -13.0, 0.0, 0.0, 0.0, 0.0],
-                    [13.0, -13.0, 14.0, -14.0, 0.0, 0.0, 0.0, 0.0],
-                    [14.0, -14.0, 15.0, -15.0, 0.0, 0.0, 0.0, 0.0],
-                    [15.0, -15.0, 16.0, -16.0, 0.0, 0.0, 0.0, 0.0],
-                    [16.0, -16.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
-                ],
-            ]
-        ],
-        dtype=np.float32,
-    )
-
-    produced = execution_im2col(x, idt, k, stride, ifm_ch, ifm_dim, pad_amt, pad_val)
-    assert (produced == expected).all()
-
-
-def test_im2col_infer_shapes():
-    idt = DataType.BIPOLAR
-    k = 2
-    stride = 1
-    ifm_ch = 1
-    ifm_dim = 4
-    ofm_dim = int(((ifm_dim - k) / stride) + 1)
-
-    # set up onnx model
-    inp = helper.make_tensor_value_info(
-        "inp", TensorProto.FLOAT, [1, ifm_dim, ifm_dim, ifm_ch]
-    )
-    outp = helper.make_tensor_value_info(
-        "outp", TensorProto.FLOAT, [1, ofm_dim, ofm_dim, k * k * ifm_ch]
-    )
-
-    abs_node = helper.make_node("Abs", inputs=["inp"], outputs=["abs"])
-
-    Im2Col_node = helper.make_node(
-        "Im2Col",
-        ["abs"],
-        ["im2col"],
-        domain="finn",
-        stride=stride,
-        kernel_size=k,
-        input_shape="(1,{},{},{})".format(ifm_dim, ifm_dim, ifm_ch),
-    )
-
-    abs1_node = helper.make_node("Abs", inputs=["im2col"], outputs=["outp"])
-
-    graph = helper.make_graph(
-        nodes=[abs_node, Im2Col_node, abs1_node],
-        name="shape_graph",
-        inputs=[inp],
-        outputs=[outp],
-        value_info=[
-            helper.make_tensor_value_info(
-                "abs", TensorProto.FLOAT, [1, ifm_dim, ifm_dim, ifm_ch]
-            ),
-            helper.make_tensor_value_info(
-                "im2col", TensorProto.FLOAT, [1, ofm_dim, ofm_dim, k * k * ifm_ch]
-            ),
-        ],
-    )
-
-    model = helper.make_model(graph, producer_name="shape-model")
-    model = ModelWrapper(model)
-
-    model.set_tensor_datatype("inp", idt)
-
-    # test shape inference
-    model = model.transform(InferShapes())
-    assert model.get_tensor_shape("im2col") == [1, ofm_dim, ofm_dim, k * k * ifm_ch]
diff --git a/tests/custom_op/test_multithreshold.py b/tests/custom_op/test_multithreshold.py
deleted file mode 100644
index 7e6ad4fe0..000000000
--- a/tests/custom_op/test_multithreshold.py
+++ /dev/null
@@ -1,322 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-import time
-from finn.custom_op.multithreshold import multithreshold
-
-
-def compare(x, y):
-    """Comparison helper function for multithresholding.
-
-    Gets two values and returns 1.0 if x >= y, otherwise 0.0."""
-    if x >= y:
-        return 1.0
-    else:
-        return 0.0
-
-
-# naive implementation of thresholding for performance comparison
-def multithreshold_elementwise(v, thresholds, out_scale=None, out_bias=None):
-    """Given a set of threshold values t={t_0, t_1 ... t_n} the successive
-    thresholding maps any real number x to an integer in the interval [0, n],
-    where the returned integer is the number of thresholds x is greater than
-    or equal to.
-
-    The output tensor will be scaled by out_scale and biased by out_bias."""
-    # the inputs are expected to be in the shape (N, C, H, W) or (N, C)
-    # the MultiThreshold node supports a data_layout attribute that can be set
-    # to 'NHWC' to support (N, H, W, C) data layout mode for in-out as well
-    # N : Batch size
-    # C : Number of channels
-    # H : Height of the input images
-    # W : Width of the input images
-    #
-    # the thresholds are expected to be in the shape (C, B)
-    # C : Number of channels (must be the same value as C in the input tensor,
-    #     or 1 if all channels use the same threshold value)
-    # B : Desired activation steps, i.e. an n-bit activation needs
-    #     B = 2^n - 1 thresholds (e.g. B = 7 for a 3-bit activation)
-    # assert threshold shape
-    is_global_threshold = thresholds.shape[0] == 1
-    assert (
-        v.shape[1] == thresholds.shape[0]
-    ) or is_global_threshold, "Threshold shape incorrect"
-    # save the required shape sizes for the loops (N, C and B)
-    num_batch = v.shape[0]
-    num_channel = v.shape[1]
-    num_act = thresholds.shape[1]
-    # reshape inputs to enable channel-wise reading
-    vr = v.reshape((v.shape[0], v.shape[1], -1))
-    # save the new shape size of the images
-    num_img_elem = vr.shape[2]
-    # initiate output tensor
-    ret = np.zeros_like(vr)
-    # iterate over thresholds channel-wise
-    for t in range(num_channel):
-        channel_thresh = thresholds[0] if is_global_threshold else thresholds[t]
-        # iterate over batches
-        for b in range(num_batch):
-            # iterate over image elements on which the thresholds will be applied
-            for elem in range(num_img_elem):
-                # iterate over the different thresholds for one channel
-                for a in range(num_act):
-                    # apply successive thresholding to every element
-                    ret[b][t][elem] += compare(vr[b][t][elem], channel_thresh[a])
-    if out_scale is None:
-        out_scale = 1.0
-    if out_bias is None:
-        out_bias = 0.0
-    return out_scale * ret.reshape(v.shape) + out_bias
-
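-# Hand-worked example of the semantics above: with the first channel's
-# thresholds t = (0.8, 1.4, 1.7, 3.5, 5.2, 6.8, 8.2) used in the test below,
-# the input value 4.8 is >= four of them (0.8, 1.4, 1.7, 3.5) but < 5.2,
-# so it maps to 4.0 -- the first entry of the expected outputs.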
-
-def test_multithreshold():
-
-    inputs = np.ndarray(
-        shape=(6, 3, 2, 2),
-        buffer=np.array(
-            [
-                4.8,
-                3.2,
-                1.2,
-                4.9,
-                7.8,
-                2.4,
-                3.1,
-                4.7,
-                6.2,
-                5.1,
-                4.9,
-                2.2,
-                6.2,
-                0.0,
-                0.8,
-                4.7,
-                0.2,
-                5.6,
-                8.9,
-                9.2,
-                9.1,
-                4.0,
-                3.3,
-                4.9,
-                2.3,
-                1.7,
-                1.3,
-                2.2,
-                4.6,
-                3.4,
-                3.7,
-                9.8,
-                4.7,
-                4.9,
-                2.8,
-                2.7,
-                8.3,
-                6.7,
-                4.2,
-                7.1,
-                2.8,
-                3.1,
-                0.8,
-                0.6,
-                4.4,
-                2.7,
-                6.3,
-                6.1,
-                1.4,
-                5.3,
-                2.3,
-                1.9,
-                4.7,
-                8.1,
-                9.3,
-                3.7,
-                2.7,
-                5.1,
-                4.2,
-                1.8,
-                4.1,
-                7.3,
-                7.1,
-                0.4,
-                0.2,
-                1.3,
-                4.3,
-                8.9,
-                1.4,
-                1.6,
-                8.3,
-                9.4,
-            ]
-        ),
-    )
-
-    thresholds = np.ndarray(
-        shape=(3, 7),
-        buffer=np.array(
-            [
-                0.8,
-                1.4,
-                1.7,
-                3.5,
-                5.2,
-                6.8,
-                8.2,
-                0.2,
-                2.2,
-                3.5,
-                4.5,
-                6.6,
-                8.6,
-                9.2,
-                1.3,
-                4.1,
-                4.5,
-                6.5,
-                7.8,
-                8.1,
-                8.9,
-            ]
-        ),
-    )
-
-    outputs = np.ndarray(
-        shape=(6, 3, 2, 2),
-        buffer=np.array(
-            [
-                4.0,
-                3.0,
-                1.0,
-                4.0,
-                5.0,
-                2.0,
-                2.0,
-                4.0,
-                3.0,
-                3.0,
-                3.0,
-                1.0,
-                5.0,
-                0.0,
-                1.0,
-                4.0,
-                1.0,
-                4.0,
-                6.0,
-                7.0,
-                7.0,
-                1.0,
-                1.0,
-                3.0,
-                3.0,
-                3.0,
-                1.0,
-                3.0,
-                4.0,
-                2.0,
-                3.0,
-                7.0,
-                3.0,
-                3.0,
-                1.0,
-                1.0,
-                7.0,
-                5.0,
-                4.0,
-                6.0,
-                2.0,
-                2.0,
-                1.0,
-                1.0,
-                2.0,
-                1.0,
-                3.0,
-                3.0,
-                2.0,
-                5.0,
-                3.0,
-                3.0,
-                4.0,
-                5.0,
-                7.0,
-                3.0,
-                1.0,
-                3.0,
-                2.0,
-                1.0,
-                4.0,
-                6.0,
-                6.0,
-                0.0,
-                1.0,
-                1.0,
-                3.0,
-                6.0,
-                1.0,
-                1.0,
-                6.0,
-                7.0,
-            ]
-        ),
-    )
-
-    results = multithreshold(inputs, thresholds)
-    assert (results == outputs).all()
-
-    results_scaled = multithreshold(inputs, thresholds, 2.0, -1.0)
-    outputs_scaled = 2.0 * outputs - 1.0
-    assert (results_scaled == outputs_scaled).all()
-
-    # performance and random test
-    np.random.seed(0)
-    inputs = np.random.random((1, 256, 64, 64))
-    thresholds = (np.array([[1, 2, 3, 4, 5, 6]]) - 0.5) / 6
-
-    before = time.time()
-    vec_results = multithreshold(inputs, thresholds)
-    after = time.time()
-    vector_runtime = after - before
-
-    before = time.time()
-    nonvec_results = multithreshold_elementwise(inputs, thresholds)
-    after = time.time()
-    non_vector_runtime = after - before
-
-    assert (vec_results == nonvec_results).all()
-
-    return vector_runtime, non_vector_runtime
-
-
-if __name__ == "__main__":
-    vector_runtime, non_vector_runtime = test_multithreshold()
-
-    print("Runtime non-vectorized: ", non_vector_runtime, "s")
-    print("Runtime vectorized: ", vector_runtime, "s")
-    print("Speed-up: ", non_vector_runtime / vector_runtime)
diff --git a/tests/custom_op/test_xnorpopcountmatmul.py b/tests/custom_op/test_xnorpopcountmatmul.py
deleted file mode 100644
index 745b782d4..000000000
--- a/tests/custom_op/test_xnorpopcountmatmul.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-from pkgutil import get_data
-
-import brevitas.onnx as bo
-import numpy as np
-import onnx
-import onnx.helper as helper
-import onnx.numpy_helper as nph
-from onnx import TensorProto
-
-import finn.core.onnx_exec as oxe
-from finn.core.datatype import DataType
-from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount
-from finn.transformation.fold_constants import FoldConstants
-from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames
-from finn.transformation.infer_datatypes import InferDataTypes
-from finn.transformation.infer_shapes import InferShapes
-from finn.transformation.streamline.sign_to_thres import ConvertSignToThres
-from finn.util.test import get_test_model_trained
-
-export_onnx_path = "test_xnorpopcountmatmul.onnx"
-
-
-def test_xnorpopcountmatmul():
-    M = 1
-    K = 3
-    N = 3
-    x = helper.make_tensor_value_info("x", TensorProto.FLOAT, [M, K])
-    W = helper.make_tensor_value_info("W", TensorProto.FLOAT, [K, N])
-    out = helper.make_tensor_value_info("out", TensorProto.FLOAT, ["x", "y"])
-    node_def = helper.make_node(
-        "XnorPopcountMatMul", ["x", "W"], ["out"], domain="finn"
-    )
-    modelproto = helper.make_model(
-        helper.make_graph([node_def], "test_model", [x], [out], value_info=[W])
-    )
-    model = ModelWrapper(modelproto)
-    model.set_tensor_datatype("x", DataType.BINARY)
-    model.set_tensor_datatype("W", DataType.BINARY)
-    W_data = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)
-    model.set_initializer("W", W_data)
-    # test shape inference
-    model = model.transform(InferShapes())
-    assert model.get_tensor_shape("out") == [M, N]
-    # test datatype inference
-    assert model.get_tensor_datatype("out") is DataType.FLOAT32
-    model = model.transform(InferDataTypes())
-    assert model.get_tensor_datatype("out") is DataType.UINT32
-    # test execution
-    x_data = np.asarray([[1, 0, 0]], dtype=np.float32)
-    inp_dict = {"x": x_data}
-    out_dict = oxe.execute_onnx(model, inp_dict)
-    Wb = 2 * W_data - 1
-    xb = 2 * x_data - 1
-    rb = np.matmul(xb, Wb)
-    assert (2 * out_dict["out"] - K == rb).all()
-
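-# The final assertion uses the binary/bipolar equivalence that motivates
-# XnorPopcountMatMul: for length-K {0,1}-vectors, the popcount of matching
-# positions p (the xnor of paired bits) relates to the {-1,+1} dot product r by
-#   r = p - (K - p) = 2 * p - K
-# since every match contributes +1 and every mismatch -1.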
-
-def test_convert_bipolar_matmul_to_xnorpopcountmatmul():
-    lfc = get_test_model_trained("LFC", 1, 1)
-    bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path)
-    model = ModelWrapper(export_onnx_path)
-    model = model.transform(InferShapes())
-    model = model.transform(FoldConstants())
-    model = model.transform(GiveUniqueNodeNames())
-    model = model.transform(GiveReadableTensorNames())
-    model = model.transform(ConvertSignToThres())
-    # load one of the test vectors
-    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
-    input_tensor = onnx.load_tensor_from_string(raw_i)
-    # run using FINN-based execution
-    input_dict = {"global_in": nph.to_array(input_tensor)}
-    expected_ctx = oxe.execute_onnx(model, input_dict, True)
-    expected = expected_ctx[model.graph.output[0].name]
-    model = model.transform(ConvertBipolarMatMulToXnorPopcount())
-    produced_ctx = oxe.execute_onnx(model, input_dict, True)
-    produced = produced_ctx[model.graph.output[0].name]
-    assert np.isclose(expected, produced, atol=1e-3).all()
-    os.remove(export_onnx_path)
diff --git a/tests/transformation/test_change_datalayout.py b/tests/transformation/test_change_datalayout.py
deleted file mode 100644
index 66459d574..000000000
--- a/tests/transformation/test_change_datalayout.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import pytest
-from onnx import helper, TensorProto
-
-from finn.custom_op.maxpoolnhwc import compute_pool_output_dim
-from finn.core.modelwrapper import ModelWrapper
-from finn.core.datatype import DataType
-import finn.core.data_layout as DataLayout
-from finn.transformation.change_datalayout import ChangeDataLayoutQuantAvgPool2d
-from finn.transformation.infer_shapes import InferShapes
-from finn.transformation.infer_datatypes import InferDataTypes
-from finn.transformation.infer_data_layouts import InferDataLayouts
-from finn.transformation.general import GiveUniqueNodeNames, GiveReadableTensorNames
-from finn.util.basic import gen_finn_dt_tensor
-from finn.util.basic import get_by_name
-import finn.core.onnx_exec as oxe
-
-# stride
-@pytest.mark.parametrize("s", [1, 2])
-# kernel
-@pytest.mark.parametrize("k", [3, 4])
-# ibits
-@pytest.mark.parametrize("ibits", [4, 8])
-# obits
-@pytest.mark.parametrize("obits", [2, 4])
-# signed
-@pytest.mark.parametrize("signed", [False, True])
-# channels
-@pytest.mark.parametrize("c", [2, 3])
-# input dimension
-@pytest.mark.parametrize("idim", [6, 7])
-def test_change_datalayout_quantavgpool(s, k, ibits, obits, signed, c, idim):
-    n = 1
-    odim = compute_pool_output_dim(idim, k, s)
-    # determine input FINN datatype
-    prefix = "INT" if signed else "UINT"
-    dt_name = prefix + str(ibits)
-    dtype = DataType[dt_name]
-
-    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [n, c, idim, idim])
-    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [n, c, odim, odim])
-
-    node = helper.make_node(
-        "QuantAvgPool2d",
-        ["inp"],
-        ["outp"],
-        domain="finn",
-        stride=s,
-        kernel=k,
-        ibits=ibits,
-        obits=obits,
-        signed=signed,
-        data_layout="NCHW",
-    )
-    graph = helper.make_graph(
-        nodes=[node], name="single-quantavgpool", inputs=[inp], outputs=[outp]
-    )
-
-    model = helper.make_model(graph)
-    model = ModelWrapper(model)
-    model = model.transform(InferShapes())
-    model = model.transform(InferDataTypes())
-    model = model.transform(InferDataLayouts())
-    model = model.transform(GiveUniqueNodeNames())
-    model = model.transform(GiveReadableTensorNames())
-    model_transformed = model.transform(ChangeDataLayoutQuantAvgPool2d())
-    model_transformed = model_transformed.transform(InferShapes())
-    model_transformed = model_transformed.transform(InferDataTypes())
-    model_transformed = model_transformed.transform(InferDataLayouts())
-    model_transformed = model_transformed.transform(GiveUniqueNodeNames())
-    model_transformed = model_transformed.transform(GiveReadableTensorNames())
-    inp_values = gen_finn_dt_tensor(dtype, [n, c, idim, idim])
-    idict = {"inp": inp_values}
-    assert oxe.compare_execution(model, model_transformed, idict)
-    assert len(model.graph.node) + 2 == len(model_transformed.graph.node)
-    assert model_transformed.graph.node[-1].op_type == "Transpose"
-    assert model_transformed.graph.node[0].op_type == "Transpose"
-    # check if QuantAvgPool2d node has datalayout set correctly
-    node = model_transformed.graph.node[1]
-    d_layout = get_by_name(node.attribute, "data_layout").s.decode("UTF-8")
-    assert d_layout == "NHWC"
-    assert model_transformed.get_tensor_layout(node.input[0]) == DataLayout.NHWC
-    assert model_transformed.get_tensor_layout(node.output[0]) == DataLayout.NHWC
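-
-
-# The assertions above reflect the expected rewrite: ChangeDataLayoutQuantAvgPool2d
-# converts QuantAvgPool2d to the NHWC data layout and wraps it in a pair of
-# Transpose nodes (NCHW->NHWC before, NHWC->NCHW after), which is why the
-# transformed graph has exactly two extra nodes with Transpose at both ends.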
diff --git a/tests/transformation/test_general_transformation.py b/tests/transformation/test_general_transformation.py
deleted file mode 100644
index 153af378e..000000000
--- a/tests/transformation/test_general_transformation.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from pkgutil import get_data
-
-from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.general import GiveUniqueNodeNames
-
-import numpy as np
-import onnx
-import finn.core.onnx_exec as oxe
-from finn.transformation.infer_shapes import InferShapes
-from finn.transformation.general import GiveUniqueParameterTensors
-
-
-def test_give_unique_node_names():
-    raw_m = get_data("finn", "data/onnx/mnist-conv/model.onnx")
-    model = ModelWrapper(raw_m)
-    model = model.transform(GiveUniqueNodeNames())
-    assert model.graph.node[0].name == "Reshape_0"
-    assert model.graph.node[1].name == "Conv_0"
-    assert model.graph.node[11].name == "Add_2"
-
-
-def test_give_unique_parameter_tensors():
-
-    # Create model
-    input_shape = [4, 4]
-    in1 = onnx.helper.make_tensor_value_info("in1", onnx.TensorProto.FLOAT, input_shape)
-    out1 = onnx.helper.make_tensor_value_info(
-        "out1", onnx.TensorProto.FLOAT, input_shape
-    )
-
-    graph_nodes = []
-    graph_nodes += [
-        onnx.helper.make_node("Add", inputs=["in1", "param1"], outputs=["t1"])
-    ]
-
-    graph_nodes += [
-        onnx.helper.make_node("Sum", inputs=["t1", "param1", "param1"], outputs=["t2"])
-    ]
-
-    graph_nodes += [
-        onnx.helper.make_node("Sum", inputs=["t2", "param2", "param1"], outputs=["t3"])
-    ]
-
-    graph_nodes += [
-        onnx.helper.make_node("Add", inputs=["t3", "param1"], outputs=["out1"])
-    ]
-
-    onnx_graph = onnx.helper.make_graph(
-        nodes=graph_nodes, name="simple_graph", inputs=[in1], outputs=[out1],
-    )
-
-    onnx_model = onnx.helper.make_model(onnx_graph, producer_name="simple-model")
-    model = ModelWrapper(onnx_model)
-
-    # Set param values
-    np.random.seed(0)
-    param1 = np.random.rand(*input_shape).astype(np.float32)
-    param2 = np.random.rand(*input_shape).astype(np.float32)
-    model.set_initializer("param1", param1)
-    model.set_initializer("param2", param2)
-    model = model.transform(InferShapes())
-
-    # Apply transformation
-    new_model = model.transform(GiveUniqueParameterTensors())
-    new_model = new_model.transform(InferShapes())
-
-    # Test 1: does the transform preserve the model's behavior?
-    input_tensor = np.random.rand(*input_shape).astype(np.float32)
-    input_dict = {"in1": input_tensor}
-
-    # run original
-    expected_context = oxe.execute_onnx(model, input_dict)
-    expected_output = expected_context[model.graph.output[0].name]
-
-    # run modified
-    produced_context = oxe.execute_onnx(new_model, input_dict)
-    produced_output = produced_context[new_model.graph.output[0].name]
-
-    assert np.isclose(
-        expected_output, produced_output, atol=1e-8
-    ).all(), "GiveUniqueParameterTensors() transform breaks the model"
-
-    # Test 2: are all parameter tensors now unique?
-    param_set = set()
-    param_cnt = 0
-    for n in new_model.graph.node:
-        for i in range(1, len(n.input)):
-            param_set |= {n.input[i]}
-            param_cnt += 1
-
-    assert len(param_set) == param_cnt, "There are still reused parameter tensors"
diff --git a/tests/transformation/test_infer_shapes.py b/tests/transformation/test_infer_shapes.py
deleted file mode 100644
index a6ebe540b..000000000
--- a/tests/transformation/test_infer_shapes.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from pkgutil import get_data
-
-import numpy as np
-from onnx import TensorProto, helper
-
-import finn.util.basic as util
-from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.infer_shapes import InferShapes
-
-
-def test_infer_shapes():
-    # load the onnx model
-    raw_m = get_data("finn", "data/onnx/mnist-conv/model.onnx")
-    model = ModelWrapper(raw_m)
-    graph = model.graph
-
-    # multi-thresholding node to be inserted between the first Relu and MaxPool node
-
-    # get Relu node to use data
-    Relu_node = graph.node[3]
-    assert Relu_node.op_type == "Relu", "The wrong model was chosen for the check"
-
-    # create thresholds tensor as constant
-    mt_thresh0 = helper.make_tensor_value_info("mt_thresh0", TensorProto.FLOAT, [8, 7])
-
-    # random numbers for the thresholds
-    # thresholds for one channel have to be sorted to guarantee the correct behavior
-    mt_thresh0_values = np.empty([8, 7], dtype=np.float32)
-    for i in range(len(mt_thresh0_values)):
-        mt_thresh0_values[i] = np.sort(np.random.random_sample(7) * 10)
-
-    model.set_initializer(mt_thresh0.name, mt_thresh0_values)
-
-    # add multi-thresholding node and change Relu node
-    mt_node = helper.make_node(
-        "MultiThreshold", ["mt_v0", "mt_thresh0"], [Relu_node.output[0]], domain="finn"
-    )
-    Relu_node.output[0] = "mt_v0"
-
-    # explicitly remove any present shape from ReLU and MultiThreshold outputs
-    util.remove_by_name(model.graph.value_info, Relu_node.output[0])
-    util.remove_by_name(model.graph.value_info, mt_node.output[0])
-    graph.node.insert(4, mt_node)
-
-    # first check routine
-    # check if at least one shape is not specified
-    assert not (
-        model.check_all_tensor_shapes_specified()
-    ), "All tensor shapes are already specified before shape inference"
-
-    # perform shape inference on mixed model
-    model = model.transform(InferShapes())
-
-    # second check routine
-    # now all shapes should be specified and mt_node output shape is (1,8,28,28)
-    assert (
-        model.check_all_tensor_shapes_specified()
-    ), "There are still tensor shapes that are not specified"
-    assert model.get_tensor_shape(mt_node.output[0]) == [
-        1, 8, 28, 28
-    ], "output of multi-thresholding node has wrong shape"
diff --git a/tests/transformation/test_merge_onnx_models.py b/tests/transformation/test_merge_onnx_models.py
deleted file mode 100644
index db7c990ba..000000000
--- a/tests/transformation/test_merge_onnx_models.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from pkgutil import get_data
-
-import numpy as np
-import onnx
-import onnx.numpy_helper as np_helper
-from onnx import TensorProto, helper
-
-from finn.core.modelwrapper import ModelWrapper
-from finn.core.datatype import DataType
-from finn.transformation.infer_shapes import InferShapes
-from finn.transformation.infer_datatypes import InferDataTypes
-from finn.transformation.infer_data_layouts import InferDataLayouts
-from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames
-from finn.transformation.merge_onnx_models import MergeONNXModels
-import finn.core.onnx_exec as oxe
-
-
-def test_merge_onnx_models():
-    # load pre model
-    raw_m = get_data("finn", "data/onnx/mnist-conv/model.onnx")
-    model1 = ModelWrapper(raw_m)
-    # the input for model1 comes from a uint8 vector so we set the finn datatype
-    # of the input tensor to DataType.UINT8 to verify that the datatypes are correctly
-    # preserved in the transformed model
-    model1.set_tensor_datatype(model1.graph.input[0].name, DataType.UINT8)
-    model1 = model1.transform(InferShapes())
-    model1 = model1.transform(GiveUniqueNodeNames())
-    model1 = model1.transform(GiveReadableTensorNames())
-
-    # set up post model
-    shape = [1, 10]
-    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, shape)
-    a0 = helper.make_tensor_value_info("a0", TensorProto.FLOAT, [])
-    a1 = helper.make_tensor_value_info("a1", TensorProto.FLOAT, [])
-    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, shape)
-
-    mul_node = helper.make_node("Mul", ["inp", "a0"], ["mul_out"])
-    div_node = helper.make_node("Div", ["mul_out", "a1"], ["outp"])
-
-    graph = helper.make_graph(
-        nodes=[mul_node, div_node],
-        name="model2-graph",
-        inputs=[inp],
-        outputs=[outp],
-        value_info=[a0, a1],
-    )
-
-    model2 = helper.make_model(graph, producer_name="model2")
-    model2 = ModelWrapper(model2)
-    # initialize model2
-    a0_value = np.random.uniform(low=0, high=1, size=(1)).astype(np.float32)
-    model2.set_initializer("a0", a0_value)
-    a1_value = np.random.uniform(low=0.1, high=1, size=(1)).astype(np.float32)
-    model2.set_initializer("a1", a1_value)
-    # set a dummy sparsity annotation to check if it gets correctly transferred
-    # to the merged model
-    sparsity = {"dw": {"kernel_shape": 0}}
-    model2.set_tensor_sparsity("a1", sparsity)
-    model2 = model2.transform(InferShapes())
-    model2 = model2.transform(InferDataTypes())
-    model2 = model2.transform(InferDataLayouts())
-    model2 = model2.transform(GiveUniqueNodeNames())
-    model2 = model2.transform(GiveReadableTensorNames())
-
-    # simulate the models before the merging and pass the output of model1 to model2
-    # load one of the test vectors
-    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
-    inp_values = onnx.load_tensor_from_string(raw_i)
-    inp_values = np_helper.to_array(inp_values)
-    idict = {model1.graph.input[0].name: inp_values}
-    odict = oxe.execute_onnx(model1, idict)
-    temp = odict[model1.graph.output[0].name]
-
-    idict = {model2.graph.input[0].name: temp}
-    odict = oxe.execute_onnx(model2, idict)
-    outp = odict[model2.graph.output[0].name]
-    # merge models
-    model_transformed = model2.transform(MergeONNXModels(model1))
-
-    idict = {model_transformed.graph.input[0].name: inp_values}
-    odict = oxe.execute_onnx(model_transformed, idict)
-    outp_transformed = odict[model_transformed.graph.output[0].name]
-
-    assert (outp == outp_transformed).all()
-    assert len(model_transformed.graph.node) == len(model1.graph.node) + len(
-        model2.graph.node
-    )
-    # the sparsity annotation of the Div node's second input was set to a
-    # dummy value before merging; find the Div node in the merged model and
-    # check that the annotation was preserved
-    for n in model_transformed.graph.node:
-        if n.op_type == "Div":
-            tensor_name = n.input[1]
-            set_sparsity = model_transformed.get_tensor_sparsity(tensor_name)
-            assert sparsity == set_sparsity
-
-    # check if finn datatype of graph.input[0] is still set to UINT8
-    assert model_transformed.get_tensor_datatype("global_in") == DataType.UINT8
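-
-
-# MergeONNXModels(model1) prepends model1 to the model it is applied to, so
-# the merged graph must reproduce the chained execution of model1 followed by
-# model2 and contain the sum of both node counts, as asserted above.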
diff --git a/tests/transformation/test_renaming.py b/tests/transformation/test_renaming.py
deleted file mode 100644
index db8b8410e..000000000
--- a/tests/transformation/test_renaming.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from pkgutil import get_data
-
-import numpy as np
-import onnx
-import onnx.numpy_helper as np_helper
-
-import finn.core.onnx_exec as oxe
-from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames
-from finn.transformation.infer_shapes import InferShapes
-
-
-def test_renaming():
-    # load the onnx model
-    raw_m = get_data("finn", "data/onnx/mnist-conv/model.onnx")
-    model = ModelWrapper(raw_m)
-    model = model.transform(InferShapes())
-    model = model.transform(GiveUniqueNodeNames())
-    model = model.transform(GiveReadableTensorNames())
-    # do some basic checks
-    assert model.graph.input[0].name == "global_in"
-    assert model.graph.output[0].name == "global_out"
-    assert model.graph.node[1].op_type == "Conv"
-    assert model.graph.node[1].name == "Conv_0"
-    assert model.graph.node[1].input[1] == "Conv_0_param0"
-    assert model.graph.node[6].op_type == "Add"
-    assert model.graph.node[6].name == "Add_1"
-    assert model.graph.node[6].input[1] == "Add_1_param0"
-    # ensure running renaming twice still yields the same names
-    model = model.transform(GiveUniqueNodeNames())
-    model = model.transform(GiveReadableTensorNames())
-    assert model.graph.node[1].op_type == "Conv"
-    assert model.graph.node[1].name == "Conv_0"
-    assert model.graph.node[1].input[1] == "Conv_0_param0"
-    assert model.graph.node[6].op_type == "Add"
-    assert model.graph.node[6].name == "Add_1"
-    assert model.graph.node[6].input[1] == "Add_1_param0"
-    # run renamed model to make sure we did not mess up the topology
-    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
-    raw_o = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/output_0.pb")
-    input_tensor = onnx.load_tensor_from_string(raw_i)
-    output_tensor = onnx.load_tensor_from_string(raw_o)
-    input_dict = {"global_in": np_helper.to_array(input_tensor)}
-    output_dict = oxe.execute_onnx(model, input_dict)
-    assert np.isclose(
-        np_helper.to_array(output_tensor), output_dict["global_out"], atol=1e-3
-    ).all()
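-
-
-# Naming conventions exercised above: GiveUniqueNodeNames names each node
-# <op_type>_<n> with a per-op-type counter, and GiveReadableTensorNames renames
-# the graph I/O to global_in/global_out and each node's initializer inputs to
-# <node_name>_param<k>, so applying both transforms again is a no-op.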
diff --git a/tests/transformation/test_sort_graph.py b/tests/transformation/test_sort_graph.py
deleted file mode 100644
index 05842504c..000000000
--- a/tests/transformation/test_sort_graph.py
+++ /dev/null
@@ -1,150 +0,0 @@
-from onnx import TensorProto, helper
-import numpy as np
-
-from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.general import SortGraph
-from finn.transformation.infer_shapes import InferShapes
-import pytest
-import finn.analysis.topology as ta
-
-
-def make_randomly_sorted_linear_model(num_of_nodes, seed=None):
-    if seed is not None:
-        np.random.seed(seed)
-
-    ch = 2
-    ifmdim = 16
-    input_shape = (1, ch, ifmdim, ifmdim)
-
-    top_in = helper.make_tensor_value_info("t0", TensorProto.FLOAT, input_shape)
-    top_out = helper.make_tensor_value_info(
-        "t" + str(num_of_nodes), TensorProto.FLOAT, input_shape
-    )
-
-    value_info = []
-    nodes = []
-    for i in range(num_of_nodes):
-        nodes += [
-            helper.make_node("Add", ["t" + str(i), "p" + str(i)], ["t" + str(i + 1)])
-        ]
-        value_info += [
-            helper.make_tensor_value_info("p" + str(i), TensorProto.FLOAT, input_shape)
-        ]
-
-    nodes = np.random.permutation(nodes)
-
-    modelproto = helper.make_model(
-        helper.make_graph(
-            name="test",
-            inputs=[top_in],
-            outputs=[top_out],
-            value_info=value_info,
-            nodes=nodes,
-        )
-    )
-    model = ModelWrapper(modelproto)
-    model = model.transform(InferShapes())
-
-    for i in range(num_of_nodes):
-        model.set_initializer(
-            "p" + str(i), np.random.rand(*input_shape).astype(np.float32)
-        )
-
-    return model
-
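-# For example, num_of_nodes=3 produces the linear chain
-#   t0 -[Add p0]-> t1 -[Add p1]-> t2 -[Add p2]-> t3
-# with the node list randomly permuted, so the model only becomes executable
-# again after a topological sort such as SortGraph.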
-
-@pytest.mark.parametrize("num_of_nodes", [64])
-def test_sort_linear_graph(num_of_nodes):
-    model = make_randomly_sorted_linear_model(num_of_nodes, seed=0)
-    new_model = model.transform(SortGraph())
-
-    # Test
-    ret = new_model.analysis(ta.nodes_topologically_sorted)
-    assert ret["nodes_topologically_sorted"], "Nodes are not topologically sorted."
-
-
-def test_sort_nonlinear_graph():
-    ch = 2
-    ifmdim = 16
-    input_shape = (1, ch, ifmdim, ifmdim)
-
-    top_in = helper.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape)
-    top_out = helper.make_tensor_value_info("top_out", TensorProto.FLOAT, input_shape)
-
-    num_of_params = 8
-    value_info = []
-    for i in range(num_of_params):
-        value_info += [
-            helper.make_tensor_value_info("p" + str(i), TensorProto.FLOAT, input_shape)
-        ]
-
-    modelproto = helper.make_model(
-        helper.make_graph(
-            name="test",
-            inputs=[top_in],
-            outputs=[top_out],
-            value_info=value_info,
-            nodes=[
-                # Not sorted nodes
-                helper.make_node("Mul", ["fork1", "p2"], ["t3"]),
-                helper.make_node("Add", ["t4", "p3"], ["t5"]),
-                helper.make_node("Add", ["t2", "t3"], ["t4"]),
-                helper.make_node("Add", ["t6", "t7"], ["t8"]),
-                helper.make_node("Add", ["fork3", "fork3"], ["top_out"]),
-                helper.make_node("Mul", ["t5", "p4"], ["fork2"]),
-                helper.make_node("Add", ["top_in", "p0"], ["fork1"]),
-                helper.make_node("Mul", ["fork1", "p1"], ["t2"]),
-                helper.make_node("Add", ["fork2", "p5"], ["t6"]),
-                helper.make_node("Add", ["fork2", "p6"], ["t7"]),
-                helper.make_node("Mul", ["t8", "p7"], ["fork3"]),
-            ],
-        )
-    )
-    model = ModelWrapper(modelproto)
-    model = model.transform(InferShapes())
-
-    np.random.seed(0)
-    for i in range(num_of_params):
-        model.set_initializer(
-            "p" + str(i), np.random.rand(*input_shape).astype(np.float32)
-        )
-
-    new_model = model.transform(SortGraph())
-
-    # Test
-    ret = new_model.analysis(ta.nodes_topologically_sorted)
-    assert ret["nodes_topologically_sorted"], "Nodes are not topologically sorted."
-
-
-if __name__ == "__main__":
-    import time
-
-    sizes = [10, 50, 100, 500, 1000]
-    times = []
-    reps = 10
-
-    print("SortGraph performance test:")
-    print("Test sizes", sizes)
-    print("Repetitions per size:", reps)
-    for sz in sizes:
-        acc_time = 0
-        print(" Testing size ", sz)
-        for i in range(reps):
-            # sorting an already-sorted model should take about the same time;
-            # generating a fresh model per repetition keeps the test general
-            model = make_randomly_sorted_linear_model(sz)  # new model as seed is None
-            bef = time.time()
-            new_model = model.transform(SortGraph(), make_deepcopy=False)
-            acc_time += time.time() - bef
-
-        times += [acc_time / reps]
-
-    # print csv
-    print("\nnum_of_nodes,  seconds")
-    for sz, tm in zip(sizes, times):
-        print("{:12d}, {:6.4e}".format(sz, tm))
-
-    # plot
-    # import matplotlib.pyplot as plt
-    # plt.plot(sizes,times,"--o")
-    # plt.grid(True)
diff --git a/tests/util/test_create.py b/tests/util/test_create.py
deleted file mode 100644
index 4e2369785..000000000
--- a/tests/util/test_create.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import pytest
-import finn.util.create as create
-from finn.core.datatype import DataType
-
-
-@pytest.mark.parametrize("bitwidth", [DataType.BIPOLAR, DataType.INT2, DataType.INT4])
-def test_hls_random_mlp_maker(bitwidth):
-    w = bitwidth
-    a = bitwidth
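-    # layer_spec keys follow FINN's HLS layer conventions: mw/mh are the
-    # matrix width/height (input/output features), simd/pe the input/output
-    # parallelism, and idt/wdt/act the input, weight and activation datatypes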
-    layer_spec = [
-        {
-            "mw": 185,
-            "mh": 100,
-            "simd": 185,
-            "pe": 100,
-            "idt": DataType.BIPOLAR,
-            "wdt": w,
-            "act": a,
-        },
-        {"mw": 100, "mh": 100, "simd": 100, "pe": 100, "idt": a, "wdt": w, "act": a},
-        {"mw": 100, "mh": 100, "simd": 100, "pe": 100, "idt": a, "wdt": w, "act": a},
-        {"mw": 100, "mh": 100, "simd": 100, "pe": 100, "idt": a, "wdt": w, "act": a},
-        {
-            "mw": 100,
-            "mh": 1,
-            "simd": 100,
-            "pe": 1,
-            "idt": a,
-            "wdt": w,
-            "act": DataType.BIPOLAR,
-        },
-    ]
-
-    ret = create.hls_random_mlp_maker(layer_spec)
-    assert len(ret.graph.node) == 5
-    # ret.save("mlp-%s.onnx" % str(bitwidth))
diff --git a/tests/util/test_gen_finn_dt_tensor.py b/tests/util/test_gen_finn_dt_tensor.py
deleted file mode 100644
index f9944e7f5..000000000
--- a/tests/util/test_gen_finn_dt_tensor.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import finn.util.basic as util
-from finn.core.datatype import DataType
-
-
-def test_finn_tensor_generator():
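-    # gen_finn_dt_tensor(dt, shape) should return a tensor of the given shape
-    # whose elements are all valid values of the given FINN DataType
-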
-    # bipolar
-    shape_bp = [2, 2]
-    dt_bp = DataType.BIPOLAR
-    tensor_bp = util.gen_finn_dt_tensor(dt_bp, shape_bp)
-    # test shape
-    for i in range(len(shape_bp)):
-        assert (
-            shape_bp[i] == tensor_bp.shape[i]
-        ), """Shape of generated tensor
-            does not match the desired shape"""
-    # check that every element is valid for the requested FINN datatype
-    for value in tensor_bp.flatten():
-        assert dt_bp.allowed(
-            value
-        ), """Data type of generated tensor
-            does not match the desired data type"""
-
-    # binary
-    shape_b = [4, 2, 3]
-    dt_b = DataType.BINARY
-    tensor_b = util.gen_finn_dt_tensor(dt_b, shape_b)
-    # test shape
-    for i in range(len(shape_b)):
-        assert (
-            shape_b[i] == tensor_b.shape[i]
-        ), """Shape of generated tensor
-            does not match the desired shape"""
-    # check that every element is valid for the requested FINN datatype
-    for value in tensor_b.flatten():
-        assert dt_b.allowed(
-            value
-        ), """Data type of generated tensor
-            does not match the desired data type"""
-
-    # ternary
-    shape_t = [7, 1, 3, 1]
-    dt_t = DataType.TERNARY
-    tensor_t = util.gen_finn_dt_tensor(dt_t, shape_t)
-    # test shape
-    for i in range(len(shape_t)):
-        assert (
-            shape_t[i] == tensor_t.shape[i]
-        ), """Shape of generated tensor
-            does not match the desired shape"""
-    # check that every element is valid for the requested FINN datatype
-    for value in tensor_t.flatten():
-        assert dt_t.allowed(
-            value
-        ), """Data type of generated tensor
-            does not match the desired data type"""
-
-    # int2
-    shape_int2 = [7, 4]
-    dt_int2 = DataType.INT2
-    tensor_int2 = util.gen_finn_dt_tensor(dt_int2, shape_int2)
-    # test shape
-    for i in range(len(shape_int2)):
-        assert (
-            shape_int2[i] == tensor_int2.shape[i]
-        ), """Shape of generated tensor
-            does not match the desired shape"""
-    # check that every element is a valid INT2 value
-    for value in tensor_int2.flatten():
-        assert value in [
-            -2,
-            -1,
-            0,
-            1,
-        ], """Data type of generated tensor
-            does not match the desired data type"""
diff --git a/tests/util/test_padding.py b/tests/util/test_padding.py
deleted file mode 100644
index 4e49acf12..000000000
--- a/tests/util/test_padding.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-
-from finn.util.basic import pad_tensor_to_multiple_of
-
-
-def test_pad_tensor_to_multiple_of():
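-    # pad_tensor_to_multiple_of pads each dimension up to the next multiple of
-    # the corresponding pad_to entry, filling with val; with distr_pad=True the
-    # padding is split between the start and end of each dimension
-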
-    A = np.eye(3)
-    B = pad_tensor_to_multiple_of(A, [2, 2], val=-1)
-    assert B.shape == (4, 4)
-    assert (B[:3, :3] == A).all()
-    assert (B[3, :] == -1).all()
-    assert (B[:, 3] == -1).all()
-    B = pad_tensor_to_multiple_of(A, [5, 5], val=-1, distr_pad=True)
-    assert B.shape == (5, 5)
-    assert (B[1:4, 1:4] == A).all()
-    assert (B[0, :] == -1).all()
-    assert (B[:, 0] == -1).all()
-    assert (B[4, :] == -1).all()
-    assert (B[:, 4] == -1).all()
-    # using -1 in pad_to parameter should give an unpadded dimension
-    B = pad_tensor_to_multiple_of(A, [-1, 5], val=-1, distr_pad=True)
-    assert B.shape == (3, 5)
-    assert (B[:, 1:4] == A).all()
-    assert (B[:, 0] == -1).all()
-    assert (B[:, 4] == -1).all()
-    # if odd number of padding pixels required, 1 more should go after existing
-    B = pad_tensor_to_multiple_of(A, [6, 6], val=-1, distr_pad=True)
-    assert B.shape == (6, 6)
-    assert (B[1:4, 1:4] == A).all()
diff --git a/tests/util/test_rtlsim2npy.py b/tests/util/test_rtlsim2npy.py
deleted file mode 100644
index 87ea5c2c5..000000000
--- a/tests/util/test_rtlsim2npy.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-
-from finn.core.datatype import DataType
-from finn.util.data_packing import unpack_innermost_dim_from_hex_string
-
-
-def test_unpack_innermost_dim_from_hex_string():
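-    # each hex string packs the innermost dimension of the output, one
-    # dtype-sized field per element, most significant field first; e.g. for
-    # BINARY, "0x0e" = 0b1110 unpacks to [1, 1, 1, 0], and reverse_inner=True
-    # flips that order
-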
-    # BINARY
-    A = np.asarray(["0x0e", "0x06"])
-    dtype = DataType.BINARY
-    shape = (1, 2, 4)
-    eA = [[1, 1, 1, 0], [0, 1, 1, 0]]
-    A_unpacked = unpack_innermost_dim_from_hex_string(A, dtype, shape, 8)
-    assert (A_unpacked == eA).all()
-
-    A = np.asarray(["0x0e", "0x06"])
-    eA_flipped = [[0, 1, 1, 1], [0, 1, 1, 0]]
-    A_unpacked_flipped = unpack_innermost_dim_from_hex_string(
-        A, dtype, shape, 8, reverse_inner=True
-    )
-    assert (A_unpacked_flipped == eA_flipped).all()
-
-    # UINT2
-    B = np.asarray([["0x0f", "0x0f"], ["0x07", "0x0d"]])
-    dtype = DataType.UINT2
-    shape = (1, 2, 2, 2)
-    eB = [[[3, 3], [3, 3]], [[1, 3], [3, 1]]]
-    B_unpacked = unpack_innermost_dim_from_hex_string(B, dtype, shape, 8)
-    assert (B_unpacked == eB).all()
-
-    B = np.asarray([["0x0f", "0x0f"], ["0x07", "0x0d"]])
-    eB_flipped = [[[3, 3], [3, 3]], [[3, 1], [1, 3]]]
-    B_unpacked_flipped = unpack_innermost_dim_from_hex_string(
-        B, dtype, shape, 8, reverse_inner=True
-    )
-    assert (B_unpacked_flipped == eB_flipped).all()
-
-    # INT2
-    C = np.asarray([["0x0f", "0x0f"], ["0x07", "0x0d"]])
-    dtype = DataType.INT2
-    shape = (1, 2, 2, 2)
-    eC = [[[-1, -1], [-1, -1]], [[1, -1], [-1, 1]]]
-    C_unpacked = unpack_innermost_dim_from_hex_string(C, dtype, shape, 8)
-    assert (C_unpacked == eC).all()
-
-    C = np.asarray([["0x0f", "0x0f"], ["0x07", "0x0d"]])
-    dtype = DataType.INT2
-    shape = (1, 2, 2, 2)
-    eC = [[[-1, -1], [-1, -1]], [[-1, 1], [1, -1]]]
-    C_unpacked = unpack_innermost_dim_from_hex_string(
-        C, dtype, shape, 8, reverse_inner=True
-    )
-    assert (C_unpacked == eC).all()
-
-    # INT4
-    D = np.asarray(["0x0e", "0x06"])
-    dtype = DataType.INT4
-    shape = (2, 1)
-    eD = [[-2], [6]]
-    D_unpacked = unpack_innermost_dim_from_hex_string(D, dtype, shape, 8)
-    assert (D_unpacked == eD).all()
-
-    D_unpacked = unpack_innermost_dim_from_hex_string(
-        D, dtype, shape, 8, reverse_inner=True
-    )
-    assert (D_unpacked == eD).all()
-
-    # INT32
-    E = np.asarray(["0xffffffff", "0xfffffffe", "0x02", "0xffffffef"])
-    dtype = DataType.INT32
-    shape = (1, 4, 1)
-    eE = [[[-1], [-2], [2], [-17]]]
-    E_unpacked = unpack_innermost_dim_from_hex_string(E, dtype, shape, 32)
-    assert (E_unpacked == eE).all()
-
-    E_unpacked = unpack_innermost_dim_from_hex_string(
-        E, dtype, shape, 32, reverse_inner=True
-    )
-    assert (E_unpacked == eE).all()
diff --git a/tests/util/test_shape_utils.py b/tests/util/test_shape_utils.py
deleted file mode 100644
index ab58f591f..000000000
--- a/tests/util/test_shape_utils.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2020, Xilinx
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-
-import finn.util.basic as util
-
-
-def test_interleave_matrix_outer_dim_from_partitions():
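-    # row r of A should end up in partition r % n_parts at position
-    # r // n_parts, i.e. consecutive rows are dealt round-robin across
-    # the partitions
-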
-    A = np.eye(10)
-    n_parts = 2
-    Ax = util.interleave_matrix_outer_dim_from_partitions(A, n_parts)
-    part_size = 10 // n_parts
-    assert Ax.shape == (n_parts, part_size, 10)
-    for r_ind in range(A.shape[0]):
-        assert (A[r_ind] == Ax[r_ind % n_parts][r_ind // n_parts]).all()
-- 
GitLab