diff --git a/src/finn/custom_op/fpgadataflow/__init__.py b/src/finn/custom_op/fpgadataflow/__init__.py
index df03a036f9c9932a820a5798336ea893cddd433f..a3b6a8ff6a1395cebf1e37e9f408eb3833dfaaac 100644
--- a/src/finn/custom_op/fpgadataflow/__init__.py
+++ b/src/finn/custom_op/fpgadataflow/__init__.py
@@ -325,7 +325,7 @@ class HLSCustomOp(CustomOp):
         builder = CppBuilder()
         # to enable additional debug features please uncomment the next line
         # builder.append_includes("-DDEBUG")
-        builder.append_includes("-I/workspace/finn/src/finn/data/cpp")
+        builder.append_includes("-I/workspace/finn/src/finn/qnn-data/cpp")
         builder.append_includes("-I/workspace/cnpy/")
         builder.append_includes("-I/workspace/finn-hlslib")
         builder.append_includes("-I{}/include".format(os.environ["VIVADO_PATH"]))
diff --git a/src/finn/data/cifar10/cifar10-test-data-class3.npz b/src/finn/data/cifar10/cifar10-test-data-class3.npz
deleted file mode 100644
index 697d53830465fd26030b6e66444b42061b29392e..0000000000000000000000000000000000000000
Binary files a/src/finn/data/cifar10/cifar10-test-data-class3.npz and /dev/null differ
diff --git a/src/finn/data/cpp/npy2apintstream.hpp b/src/finn/data/cpp/npy2apintstream.hpp
deleted file mode 100644
index f3afbc5bfb16e2423184e334e78b96a8cdeef45c..0000000000000000000000000000000000000000
--- a/src/finn/data/cpp/npy2apintstream.hpp
+++ /dev/null
@@ -1,70 +0,0 @@
-#include <iostream>
-#include "cnpy.h"
-#include "hls_stream.h"
-#include "ap_int.h"
-#include <vector>
-
-#ifdef DEBUG
-#define DEBUG_NPY2APINTSTREAM(x) std::cout << "[npy2apintstream] " << x << std::endl;
-#define DEBUG_APINTSTREAM2NPY(x) std::cout << "[apintstream2npy] " << x << std::endl;
-#else
-#define DEBUG_NPY2APINTSTREAM(x) ;
-#define DEBUG_APINTSTREAM2NPY(x) ;
-#endif
-
-template <typename PackedT, typename ElemT, int ElemBits, typename NpyT>
-void npy2apintstream(const char * npy_path, hls::stream<PackedT> & out_stream, bool reverse_inner = true, size_t numReps = 1) {
-  for(size_t rep = 0; rep < numReps; rep++) {
-    cnpy::NpyArray arr = cnpy::npy_load(npy_path);
-    DEBUG_NPY2APINTSTREAM("word_size " << arr.word_size << " num_vals " << arr.num_vals)
-    if(arr.word_size != sizeof(NpyT)) {
-      throw "Npy array word size and specified NpyT size do not match";
-    }
-    NpyT* loaded_data = arr.data<NpyT>();
-    size_t outer_dim_elems = 1;
-    for(size_t dim = 0; dim < arr.shape.size()-1; dim++) {
-      outer_dim_elems *= arr.shape[dim];
-    }
-    size_t inner_dim_elems = arr.shape[arr.shape.size()-1];
-    DEBUG_NPY2APINTSTREAM("n_outer " << outer_dim_elems << " n_inner " << inner_dim_elems)
-    for(size_t outer_elem = 0; outer_elem < outer_dim_elems; outer_elem++) {
-      PackedT packed_elem = 0;
-      for(size_t ii = 0; ii < inner_dim_elems; ii++) {
-        size_t i = reverse_inner ? inner_dim_elems-ii-1 : ii;
-        NpyT loaded_elem_npyt = *loaded_data;
-        ElemT loaded_elem = (ElemT) loaded_elem_npyt;
-        DEBUG_NPY2APINTSTREAM("NpyT " << loaded_elem_npyt << " elem " << loaded_elem)
-        packed_elem((i+1)*ElemBits-1, i*ElemBits) = loaded_elem;
-        loaded_data++;
-      }
-      DEBUG_NPY2APINTSTREAM("packed hls elem " << std::hex << packed_elem << std::dec)
-      out_stream << packed_elem;
-    }
-  }
-}
-
-template <typename PackedT, typename ElemT, int ElemBits, typename NpyT>
-void apintstream2npy(hls::stream<PackedT> & in_stream, const std::vector<size_t> & shape, const char * npy_path, bool reverse_inner = true, size_t numReps = 1) {
-  for(size_t rep = 0; rep < numReps; rep++) {
-    std::vector<NpyT> data_to_save;
-    size_t outer_dim_elems = 1;
-    for(size_t dim = 0; dim < shape.size()-1; dim++) {
-      outer_dim_elems *= shape[dim];
-    }
-    size_t inner_dim_elems = shape[shape.size()-1];
-    DEBUG_APINTSTREAM2NPY("n_outer " << outer_dim_elems << " n_inner " << inner_dim_elems)
-    for(size_t outer_elem = 0; outer_elem < outer_dim_elems; outer_elem++) {
-      PackedT packed_elem;
-      in_stream >> packed_elem;
-      DEBUG_APINTSTREAM2NPY("packed hls elem " << std::hex << packed_elem << std::dec)
-      for(size_t ii = 0; ii < inner_dim_elems; ii++) {
-        size_t i = reverse_inner ? inner_dim_elems-ii-1 : ii;
-        ElemT elem = packed_elem((i+1)*ElemBits-1, i*ElemBits);
-        NpyT npyt = (NpyT) elem;
-        DEBUG_APINTSTREAM2NPY("elem " << elem << " NpyT " << npyt)
-        data_to_save.push_back(npyt);
-      }
-    }
-    cnpy::npy_save(npy_path, &data_to_save[0], shape, "w");
-  }
-}
diff --git a/src/finn/data/onnx/finn-hls-model/finn-hls-onnx-model.onnx b/src/finn/data/onnx/finn-hls-model/finn-hls-onnx-model.onnx
deleted file mode 100644
index c2db9153f4a0269025da64f54b491ee6d511dbdd..0000000000000000000000000000000000000000
Binary files a/src/finn/data/onnx/finn-hls-model/finn-hls-onnx-model.onnx and /dev/null differ
diff --git a/src/finn/data/onnx/finn-hls-model/tfc_w1_a1_after_conv_to_hls.onnx b/src/finn/data/onnx/finn-hls-model/tfc_w1_a1_after_conv_to_hls.onnx
deleted file mode 100644
index aada6f07e9d3910122d2eb357d8a8c1224e9fbab..0000000000000000000000000000000000000000
Binary files a/src/finn/data/onnx/finn-hls-model/tfc_w1_a1_after_conv_to_hls.onnx and /dev/null differ
diff --git a/src/finn/util/test.py b/src/finn/util/test.py
index 32c6a0a3a3bb19b95590181dbe447e82cf9966a2..132b700a263247c8ca29d34b41747749d27c3ab7 100644
--- a/src/finn/util/test.py
+++ b/src/finn/util/test.py
@@ -139,11 +139,13 @@ def get_example_input(topology):
     "Get example numpy input tensor for given topology."
 
     if "fc" in topology:
-        raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
+        raw_i = get_data("finn.base-data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
         onnx_tensor = onnx.load_tensor_from_string(raw_i)
         return nph.to_array(onnx_tensor)
     elif topology == "cnv":
-        fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
+        fn = pk.resource_filename(
+            "finn.qnn-data", "cifar10/cifar10-test-data-class3.npz"
+        )
         input_tensor = np.load(fn)["arr_0"].astype(np.float32)
         return input_tensor
     else:
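For reference, here is a minimal sketch of the two data-loading idioms this hunk switches over to. It assumes the renamed finn.base-data and finn.qnn-data packages are resolvable inside the FINN container and that get_data is the finn.util.basic helper used elsewhere in these tests.

```python
# Minimal sketch of the two loading idioms used above, assuming the renamed
# data packages are installed and resolvable in the FINN environment.
import numpy as np
import onnx
import onnx.numpy_helper as nph
import pkg_resources as pk

from finn.util.basic import get_data  # assumed location of the helper

# protobuf tensor shipped with the base data package (MNIST example input)
raw_i = get_data("finn.base-data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
mnist_input = nph.to_array(onnx.load_tensor_from_string(raw_i))

# npz file shipped with the qnn-data package (CIFAR-10 example input)
fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz")
cifar_input = np.load(fn)["arr_0"].astype(np.float32)
```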
diff --git a/tests/brevitas/test_brevitas_cnv.py b/tests/brevitas/test_brevitas_cnv.py
index 120c67646de08a1a9875b76bedd3a0130792b487..4b072535bdfe102a6c59ebd4c730de9ae827c00e 100644
--- a/tests/brevitas/test_brevitas_cnv.py
+++ b/tests/brevitas/test_brevitas_cnv.py
@@ -58,7 +58,7 @@ def test_brevitas_cnv_export_exec(wbits, abits):
     model = model.transform(RemoveStaticGraphInputs())
     assert len(model.graph.input) == 1
     assert len(model.graph.output) == 1
-    fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
+    fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz")
     input_tensor = np.load(fn)["arr_0"].astype(np.float32)
     input_tensor = input_tensor / 255
     assert input_tensor.shape == (1, 3, 32, 32)
diff --git a/tests/brevitas/test_brevitas_debug.py b/tests/brevitas/test_brevitas_debug.py
index 50d0ca44cd0befe5d08b5c1b45edf602457bda19..2ebaa251919a5c62f8998f6e64101129afc59db2 100644
--- a/tests/brevitas/test_brevitas_debug.py
+++ b/tests/brevitas/test_brevitas_debug.py
@@ -55,7 +55,7 @@ def test_brevitas_debug():
     assert len(model.graph.input) == 1
     assert len(model.graph.output) == 1
     # load one of the test vectors
-    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
+    raw_i = get_data("finn.base-data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
     input_tensor = onnx.load_tensor_from_string(raw_i)
     # run using FINN-based execution
     input_dict = {"0": nph.to_array(input_tensor)}
diff --git a/tests/brevitas/test_brevitas_fc.py b/tests/brevitas/test_brevitas_fc.py
index 9369b25385080875efcb286c02291fc579a15a34..8397aaceb239b63ecfe54b86c680ae68e6d10ae1 100644
--- a/tests/brevitas/test_brevitas_fc.py
+++ b/tests/brevitas/test_brevitas_fc.py
@@ -68,7 +68,7 @@ def test_brevitas_fc_onnx_export_and_exec(size, wbits, abits):
     assert len(model.graph.input) == 1
     assert len(model.graph.output) == 1
     # load one of the test vectors
-    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
+    raw_i = get_data("finn.base-data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
     input_tensor = onnx.load_tensor_from_string(raw_i)
     # run using FINN-based execution
     input_dict = {"0": nph.to_array(input_tensor)}
diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py
index e8b50efef0723c1394c2bdd438a87e090071507d..20751a5877a879eeabf1ed6b67a7573208cf9367 100644
--- a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py
+++ b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py
@@ -73,7 +73,7 @@ def test_convert_to_hls_layers_cnv_w1a1(fused_activation):
     model = model.transform(InferDataLayouts())
     # model.save("golden.onnx")
     # load one of the test vectors
-    fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
+    fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz")
     input_tensor = np.load(fn)["arr_0"].astype(np.float32)
     input_tensor = input_tensor / 255
     assert input_tensor.shape == (1, 3, 32, 32)
diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_fc.py b/tests/fpgadataflow/test_convert_to_hls_layers_fc.py
index bd600c6c57d00d5fc03152f75b9f2f8c6beeeb2c..252ea05fb29eac09d51ca79181a1cf9562dc7928 100644
--- a/tests/fpgadataflow/test_convert_to_hls_layers_fc.py
+++ b/tests/fpgadataflow/test_convert_to_hls_layers_fc.py
@@ -110,7 +110,7 @@ def test_convert_to_hls_layers_tfc_w1a1():
     model = model.transform(CompileCppSim())
     model = model.transform(SetExecMode("cppsim"))
 
-    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
+    raw_i = get_data("finn.base-data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
     input_tensor = onnx.load_tensor_from_string(raw_i)
     # run using FINN-based execution
     input_dict = {"global_in": nph.to_array(input_tensor)}
@@ -175,7 +175,7 @@ def test_convert_to_hls_layers_tfc_w1a2():
     model = model.transform(PrepareCppSim())
     model = model.transform(CompileCppSim())
     model = model.transform(SetExecMode("cppsim"))
-    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
+    raw_i = get_data("finn.base-data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
     input_tensor = onnx.load_tensor_from_string(raw_i)
     # run using FINN-based execution
     input_dict = {"global_in": nph.to_array(input_tensor)}
diff --git a/tests/fpgadataflow/test_create_dataflow_partition.py b/tests/fpgadataflow/test_create_dataflow_partition.py
index c4f748051ff038371353574298580f3bf9e05e9f..6732b92ae0865e390002bd3c65dfefe3890610e2 100644
--- a/tests/fpgadataflow/test_create_dataflow_partition.py
+++ b/tests/fpgadataflow/test_create_dataflow_partition.py
@@ -45,7 +45,7 @@ build_dir = make_build_dir("test_dataflow_partition_")
 def test_dataflow_partition_create():
     # load the onnx model
     raw_m = get_data(
-        "finn", "data/onnx/finn-hls-model/tfc_w1_a1_after_conv_to_hls.onnx"
+        "finn.qnn-data", "onnx/finn-hls-model/tfc_w1_a1_after_conv_to_hls.onnx"
     )
     model = ModelWrapper(raw_m)
     model = model.transform(CreateDataflowPartition())
diff --git a/tests/transformation/streamline/test_streamline_cnv.py b/tests/transformation/streamline/test_streamline_cnv.py
index 82a38636e3927e17e5e2a3e8714f46082bba10e4..ca8cf3b1ceba6943828f47bcbcf974aa5b368c4e 100644
--- a/tests/transformation/streamline/test_streamline_cnv.py
+++ b/tests/transformation/streamline/test_streamline_cnv.py
@@ -67,7 +67,7 @@ def test_streamline_cnv(size, wbits, abits):
     model = model.transform(GiveReadableTensorNames())
     model = model.transform(RemoveStaticGraphInputs())
     # load one of the test vectors
-    fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
+    fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz")
     input_tensor = np.load(fn)["arr_0"].astype(np.float32)
     input_tensor = input_tensor / 255
     assert input_tensor.shape == (1, 3, 32, 32)
diff --git a/tests/transformation/streamline/test_streamline_fc.py b/tests/transformation/streamline/test_streamline_fc.py
index 9ce98066cfbf9d1c64514b957d8a260705fd0d7c..008ea6f1b822e1c7a1978857b25e5a5794380e99 100644
--- a/tests/transformation/streamline/test_streamline_fc.py
+++ b/tests/transformation/streamline/test_streamline_fc.py
@@ -72,7 +72,7 @@ def test_streamline_fc(size, wbits, abits):
     model = model.transform(GiveReadableTensorNames())
     model = model.transform(RemoveStaticGraphInputs())
     # load one of the test vectors
-    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
+    raw_i = get_data("finn.base-data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
     input_tensor = onnx.load_tensor_from_string(raw_i)
     # run using FINN-based execution
     input_dict = {"global_in": nph.to_array(input_tensor)}
diff --git a/tests/transformation/test_batchnorm_to_affine.py b/tests/transformation/test_batchnorm_to_affine.py
index a3df5ae9bbd3f99bc29bc088a5f461122af06d81..6cb6e35eb323a7170f7a98200b60bbae8be8c9f6 100644
--- a/tests/transformation/test_batchnorm_to_affine.py
+++ b/tests/transformation/test_batchnorm_to_affine.py
@@ -51,7 +51,7 @@ def test_batchnorm_to_affine_cnv_w1a1():
     model = ModelWrapper(export_onnx_path)
     model = model.transform(InferShapes())
     model = model.transform(FoldConstants())
-    fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
+    fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz")
     input_tensor = np.load(fn)["arr_0"].astype(np.float32)
     input_tensor = input_tensor / 255
     assert input_tensor.shape == (1, 3, 32, 32)
@@ -77,7 +77,7 @@ def test_batchnorm_to_affine_lfc_w1a1():
     model = model.transform(FoldConstants())
     new_model = model.transform(BatchNormToAffine())
     # load one of the test vectors
-    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
+    raw_i = get_data("finn.base-data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
     input_tensor = onnx.load_tensor_from_string(raw_i)
     input_dict = {"0": nph.to_array(input_tensor)}
     assert oxe.compare_execution(model, new_model, input_dict)
diff --git a/tests/transformation/test_conv_lowering.py b/tests/transformation/test_conv_lowering.py
index b6ab634b374dea3ba309bbf12654c73c0a90e36c..9cfc45d0b99ca6cf3e2a4b68bdde98dfaf77c9ed 100644
--- a/tests/transformation/test_conv_lowering.py
+++ b/tests/transformation/test_conv_lowering.py
@@ -54,7 +54,7 @@ def test_conv_lowering_cnv_w1a1():
     model = ModelWrapper(export_onnx_path)
     model = model.transform(InferShapes())
     model = model.transform(FoldConstants())
-    fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
+    fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz")
     input_tensor = np.load(fn)["arr_0"].astype(np.float32)
     input_tensor = input_tensor / 255
     assert input_tensor.shape == (1, 3, 32, 32)
diff --git a/tests/transformation/test_fold_constants.py b/tests/transformation/test_fold_constants.py
index a976ffd62bce744a474a6fac2a61a6478526777f..34d7d80169101b5151712871a70f591750540bfd 100644
--- a/tests/transformation/test_fold_constants.py
+++ b/tests/transformation/test_fold_constants.py
@@ -44,12 +44,12 @@ export_onnx_path = "test_fold_constants.onnx"
 
 
 def test_const_folding():
-    raw_m = get_data("finn", "data/onnx/mnist-conv/model.onnx")
+    raw_m = get_data("finn.base-data", "onnx/mnist-conv/model.onnx")
     model = ModelWrapper(raw_m)
     model = model.transform(InferShapes())
     model = model.transform(FoldConstants())
-    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
-    raw_o = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/output_0.pb")
+    raw_i = get_data("finn.base-data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
+    raw_o = get_data("finn.base-data", "onnx/mnist-conv/test_data_set_0/output_0.pb")
     input_tensor = onnx.load_tensor_from_string(raw_i)
     output_tensor = onnx.load_tensor_from_string(raw_o)
     input_dict = {"Input3": np_helper.to_array(input_tensor)}
diff --git a/tests/transformation/test_sign_to_thres.py b/tests/transformation/test_sign_to_thres.py
index a92f839e5f6ca8b45eadf939fa35973ac153e0b1..aecaf0cca59051b0eebac821c1f2c2d7d894ddc6 100644
--- a/tests/transformation/test_sign_to_thres.py
+++ b/tests/transformation/test_sign_to_thres.py
@@ -52,7 +52,7 @@ def test_sign_to_thres():
     new_model = model.transform(ConvertSignToThres())
     assert new_model.graph.node[3].op_type == "MultiThreshold"
     # load one of the test vectors
-    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
+    raw_i = get_data("finn.base-data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
     input_tensor = onnx.load_tensor_from_string(raw_i)
     input_dict = {"0": nph.to_array(input_tensor)}
     assert oxe.compare_execution(model, new_model, input_dict)
diff --git a/tests/transformation/test_topk_insert.py b/tests/transformation/test_topk_insert.py
index a9faac4df0caf973d9aae6430e007eac349a7c43..bed9372dbe3b549bccf70e1f1815019c5d37979e 100644
--- a/tests/transformation/test_topk_insert.py
+++ b/tests/transformation/test_topk_insert.py
@@ -36,7 +36,7 @@ def test_topk_insert(k):
 
     # verification: generate random input, run through net, streamline,
     # run again, check that output is top-k
-    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
+    raw_i = get_data("finn.base-data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
     input_tensor = onnx.load_tensor_from_string(raw_i)
     input_brevitas = torch.from_numpy(nph.to_array(input_tensor)).float()
     output_golden = tfc.forward(input_brevitas).detach().numpy()
diff --git a/tests/util/test_data_packing.py b/tests/util/test_data_packing.py
index 7b77c4be20c1f41c11b53a9b65b79441c9bbbe47..f96ce276efecb1ffa6ca2a0f3916bc406022cdfe 100644
--- a/tests/util/test_data_packing.py
+++ b/tests/util/test_data_packing.py
@@ -95,7 +95,7 @@ def test_npy2apintstream(test_shape, dtype):
         f.write("\n".join(test_app_string))
     cmd_compile = """
 g++ -o test_npy2apintstream test.cpp /workspace/cnpy/cnpy.cpp \
--I/workspace/cnpy/ -I{}/include -I/workspace/finn/src/finn/data/cpp \
+-I/workspace/cnpy/ -I{}/include -I/workspace/finn/src/finn/qnn-data/cpp \
 --std=c++11 -lz""".format(
         os.environ["VIVADO_PATH"]
     )