diff --git a/docker/finn_entrypoint.sh b/docker/finn_entrypoint.sh
index 72751817383dbdb441970e5816247cfa7760ef5b..84b3b6ea2983732890021517d9dcd2e0cf22fb4b 100644
--- a/docker/finn_entrypoint.sh
+++ b/docker/finn_entrypoint.sh
@@ -16,10 +16,9 @@ BREVITAS_COMMIT=f9a27226d4acf1661dd38bc449f71f89e0983cce
 CNPY_COMMIT=4e8810b1a8637695171ed346ce68f6984e585ef4
 HLSLIB_COMMIT=cfafe11a93b79ab1af7529d68f08886913a6466e
 PYVERILATOR_COMMIT=c97a5ba41bbc7c419d6f25c74cdf3bdc3393174f
-PYNQSHELL_COMMIT=0c82a61b0ec1a07fa275a14146233824ded7a13d
+PYNQSHELL_COMMIT=bf281fc3a44eca29efbcbefd63f1196d82c7c255
 OMX_COMMIT=1bae737669901e762f581af73348332b5c4b2ada
 
-
 gecho "Setting up known-good commit versions for FINN dependencies"
 # Brevitas
 gecho "brevitas @ $BREVITAS_COMMIT"
diff --git a/docker/quicktest.sh b/docker/quicktest.sh
index 75d07d15338fd422bc6749b0a61b392616c61c5a..02e014cd3cc7bb88eebd02f03ff599913079152b 100755
--- a/docker/quicktest.sh
+++ b/docker/quicktest.sh
@@ -9,7 +9,7 @@ if [ -z $1 ]; then
   python setup.py test --addopts "-m 'not (vivado or slow or vitis)' --dist=loadfile -n $PYTEST_PARALLEL"
 elif [ $1 = "main" ]; then
   echo "Running main test suite: not (rtlsim or end2end) with pytest-xdist"
-  python setup.py test --addopts "-k not (rtlsim or end2end) --dist=loadfile -n $PYTEST_PARALLEL"
+  python setup.py test --addopts "-k 'not (rtlsim or end2end)' --dist=loadfile -n $PYTEST_PARALLEL"
 elif [ $1 = "rtlsim" ]; then
   echo "Running rtlsim test suite with pytest-parallel"
   python setup.py test --addopts "-k rtlsim --workers $PYTEST_PARALLEL"
diff --git a/docs/finn/getting_started.rst b/docs/finn/getting_started.rst
index 323692897800d45c6e6cf55b688a2c7b2b9a5277..8a20dad0e47b9458989039184cfa0e5d01d48aa2 100644
--- a/docs/finn/getting_started.rst
+++ b/docs/finn/getting_started.rst
@@ -92,7 +92,14 @@ These are summarized below:
 * `JUPYTER_PORT` (default 8888) changes the port for Jupyter inside Docker
 * `NETRON_PORT` (default 8081) changes the port for Netron inside Docker
 * `NUM_DEFAULT_WORKERS` (default 1) specifies the degree of parallelization for the transformations that can be run in parallel
-* `PYNQ_BOARD` specifies the type of PYNQ board used (Pynq-Z1, Pynq-Z2, Ultra96, ZCU104) for the test suite
+* `PYNQ_BOARD` specifies the type of PYNQ board used (see "supported hardware" below) for the test suite
 * `PYNQ_IP` and `PYNQ_PORT` specify ip address and port number to access the PYNQ board
 * `PYNQ_USERNAME` and `PYNQ_PASSWORD` specify the PYNQ board access credentials for the test suite
 * `PYNQ_TARGET_DIR` specifies the target dir on the PYNQ board for the test suite
+
+Supported Hardware
+===================
+**End-to-end support including driver:** For quick deployment, FINN targets boards supported by `PYNQ <https://pynq.io/>`_. For these platforms, we can build a full bitfile including DMAs to move data into and out of the FINN-generated accelerator, as well as a Python driver to launch the accelerator. We support the Pynq-Z1, Pynq-Z2, Ultra96, ZCU102 and ZCU104 boards.
+
+**Vivado IPI support for any Xilinx FPGA:** FINN generates a Vivado IP Integrator (IPI) design from the neural network with AXI stream (FIFO) in-out interfaces, which can be integrated onto any Xilinx FPGA as part of a larger system. It's up to you to take the FINN-generated accelerator (what we call "stitched IP" in the tutorials) and wire it up to your FPGA design.
+
diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
index 7b929edc4e672199a2eb6d7c8f427365af0dd9f5..e6dca0e4b05f943c971bc0f97af03f5038fd0dab 100644
--- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
+++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
@@ -29,6 +29,7 @@
 
 from onnx import helper, TensorProto
 import numpy as np
+import warnings
 
 from finn.core.datatype import DataType
 from finn.transformation import Transformation
diff --git a/src/finn/util/basic.py b/src/finn/util/basic.py
index 91ff811069369383099f5ae5aebf3228fbdbaae5..6c92e9b2765b1c2be6f95ee148964bccfb3cd7be 100644
--- a/src/finn/util/basic.py
+++ b/src/finn/util/basic.py
@@ -42,6 +42,7 @@ pynq_part_map = dict()
 pynq_part_map["Ultra96"] = "xczu3eg-sbva484-1-e"
 pynq_part_map["Pynq-Z1"] = "xc7z020clg400-1"
 pynq_part_map["Pynq-Z2"] = "xc7z020clg400-1"
+pynq_part_map["ZCU102"] = "xczu9eg-ffvb1156-2-e"
 pynq_part_map["ZCU104"] = "xczu7ev-ffvc1156-2-e"
 
 # native AXI HP port width (in bits) for PYNQ boards
@@ -49,6 +50,7 @@ pynq_native_port_width = dict()
 pynq_native_port_width["Pynq-Z1"] = 64
 pynq_native_port_width["Pynq-Z2"] = 64
 pynq_native_port_width["Ultra96"] = 128
+pynq_native_port_width["ZCU102"] = 128
 pynq_native_port_width["ZCU104"] = 128
 
 # Alveo device and platform mappings
diff --git a/tests/transformation/test_absorb_transp_into_flatten.py b/tests/transformation/test_absorb_transp_into_flatten.py
index fbfa15277717c554da01e38608601997407803b2..cbbb33b4606acf55ace662da0986105f8c456b39 100644
--- a/tests/transformation/test_absorb_transp_into_flatten.py
+++ b/tests/transformation/test_absorb_transp_into_flatten.py
@@ -57,9 +57,9 @@ def test_absorb_transp_into_flatten(perm, shape, ishape, data_layout):
     model = model.transform(InferDataLayouts())
     model = model.transform(GiveUniqueNodeNames())
     model = model.transform(GiveReadableTensorNames())
-    model.save("test.onnx")
+    # model.save("test.onnx")
     model_transformed = model.transform(AbsorbTransposeIntoFlatten())
-    model_transformed.save("test2.onnx")
+    # model_transformed.save("test2.onnx")
 
     # verify transformation
     inp_values = np.random.uniform(low=-1, high=1, size=tuple(ishape)).astype(
diff --git a/tests/transformation/test_topk_insert.py b/tests/transformation/test_topk_insert.py
index b85ed4aa6999faf751e535c1cc687d639c4eb74f..a18e63384150f140cb63ec7b438283eb4797266c 100644
--- a/tests/transformation/test_topk_insert.py
+++ b/tests/transformation/test_topk_insert.py
@@ -1,4 +1,4 @@
-# import os
+import os
 import onnx
 from finn.util.test import get_test_model_trained
 import brevitas.onnx as bo
@@ -57,4 +57,4 @@ def test_topk_insert(k):
     output_pysim_topk = output_pysim_topk.astype(np.int).flatten()
 
     assert np.array_equal(output_golden_topk, output_pysim_topk)
-    # os.remove(export_onnx_path)
+    os.remove(export_onnx_path)
diff --git a/tests/util/test_create.py b/tests/util/test_create.py
index 7173add35abf04a35c33b0ef10b42ffdb296a653..4e236978592b02e1c18b03aba56ff8b2369311a6 100644
--- a/tests/util/test_create.py
+++ b/tests/util/test_create.py
@@ -61,4 +61,4 @@ def test_hls_random_mlp_maker(bitwidth):
 
     ret = create.hls_random_mlp_maker(layer_spec)
     assert len(ret.graph.node) == 5
-    ret.save("mlp-%s.onnx" % str(bitwidth))
+    # ret.save("mlp-%s.onnx" % str(bitwidth))