diff --git a/src/finn/core/remote_exec.py b/src/finn/core/remote_exec.py
index ee07201315db23a9a1f8d0b7f1392d99517a8b63..2e139065ec0eff8cdbdb402f80113b039deed4da 100644
--- a/src/finn/core/remote_exec.py
+++ b/src/finn/core/remote_exec.py
@@ -44,7 +44,7 @@ def remote_exec(model, execution_context):
     pynq_target_dir = model.get_metadata_prop("pynq_target_dir")
     deployment_dir = model.get_metadata_prop("pynq_deploy_dir")
     platform = model.get_metadata_prop("platform")
-    assert platform in ["alveo", "zynq", "zynq-iodma"]
+    assert platform in ["alveo", "zynq-iodma"]
     bitfile = model.get_metadata_prop("bitfile")
     bitfile = os.path.basename(bitfile)
     if pynq_password == "":
diff --git a/src/finn/core/throughput_test.py b/src/finn/core/throughput_test.py
index 77c7b6c00179343048c52494fbffacd4c7447d7f..1306edfa23a9b25de41d0592796b4a03ad4e6508 100644
--- a/src/finn/core/throughput_test.py
+++ b/src/finn/core/throughput_test.py
@@ -49,7 +49,7 @@ def throughput_test_remote(model, batchsize=1000):
     # extracting last folder of absolute path (deployment_dir)
     deployment_folder = os.path.basename(os.path.normpath(deployment_dir))
     platform = model.get_metadata_prop("platform")
-    assert platform in ["alveo", "zynq", "zynq-iodma"]
+    assert platform in ["alveo", "zynq-iodma"]
     bitfile = model.get_metadata_prop("bitfile")
     bitfile = os.path.basename(bitfile)
     if pynq_password == "":
diff --git a/src/finn/transformation/fpgadataflow/make_pynq_driver.py b/src/finn/transformation/fpgadataflow/make_pynq_driver.py
index fc326b4a25a9784f3919b4246ec2b8f54fb881f4..0e50213ee6feee5f45c18f87cb31a5faf5fb1c50 100644
--- a/src/finn/transformation/fpgadataflow/make_pynq_driver.py
+++ b/src/finn/transformation/fpgadataflow/make_pynq_driver.py
@@ -28,7 +28,6 @@
 
 
 import shutil
-from finn.custom_op.registry import getCustomOp
 from finn.transformation import Transformation
 from finn.util.basic import gen_finn_dt_tensor, get_finn_root, make_build_dir
 from finn.util.data_packing import finnpy_to_packed_bytearray
@@ -41,7 +40,7 @@ class MakePYNQDriver(Transformation):
     accelerator, including data packing/unpacking. The MakePYNQProject
     transformation must have been already applied.
 
-    platform: one of ["zynq", "zynq-iodma", "alveo"]
+    platform: one of ["zynq-iodma", "alveo"]
 
     Outcome if successful: sets the pynq_driver_dir attribute in the ONNX
     ModelProto's metadata_props field, with the created driver dir as the
@@ -65,20 +64,15 @@ class MakePYNQDriver(Transformation):
         o_tensor_shape_normal = tuple(model.get_tensor_shape(o_tensor_name))
         i_tensor_dt = model.get_tensor_datatype(i_tensor_name)
         o_tensor_dt = model.get_tensor_datatype(o_tensor_name)
-        # handle folded i/o shapes due to differences in DMA engines
-        if self.platform == "zynq":
-            # extract HLSCustomOp instances to get folded i/o shapes
-            first_node = getCustomOp(model.find_consumer(i_tensor_name))
-            last_node = getCustomOp(model.find_producer(o_tensor_name))
-            i_tensor_shape_folded = tuple(first_node.get_folded_input_shape())
-            o_tensor_shape_folded = tuple(last_node.get_folded_output_shape())
-        else:
-            i_tensor_shape_folded = list(i_tensor_shape_normal)
-            i_tensor_shape_folded.insert(-1, 1)
-            i_tensor_shape_folded = tuple(i_tensor_shape_folded)
-            o_tensor_shape_folded = list(o_tensor_shape_normal)
-            o_tensor_shape_folded.insert(-1, 1)
-            o_tensor_shape_folded = tuple(o_tensor_shape_folded)
+        # folded shapes for i/o simply derived from regular tensor shapes
+        # this used to be extracted from first/last node folded shapes, but
+        # can't do this anymore due to IODMAs
+        i_tensor_shape_folded = list(i_tensor_shape_normal)
+        i_tensor_shape_folded.insert(-1, 1)
+        i_tensor_shape_folded = tuple(i_tensor_shape_folded)
+        o_tensor_shape_folded = list(o_tensor_shape_normal)
+        o_tensor_shape_folded.insert(-1, 1)
+        o_tensor_shape_folded = tuple(o_tensor_shape_folded)
 
         # generate dummy folded i/o tensors and their packed versions
         i_tensor_dummy_folded = gen_finn_dt_tensor(i_tensor_dt, i_tensor_shape_folded)
diff --git a/src/finn/transformation/fpgadataflow/templates.py b/src/finn/transformation/fpgadataflow/templates.py
index d22cd031546bcfd836c859ad45c9c483a05a96af..66580c70d23a2d2b19bfe8d94fefcd39a3208bcb 100644
--- a/src/finn/transformation/fpgadataflow/templates.py
+++ b/src/finn/transformation/fpgadataflow/templates.py
@@ -126,18 +126,7 @@ class FINNAccelDriver():
         self.itersPerSample = self.oshape_packed[-2]
         # clock frequency as specified by user
         self.fclk_mhz = $CLOCK_FREQ_MHZ$
-        if self.platform == "zynq":
-            # set the clock frequency as specified by user during transformations
-            if self.fclk_mhz > 0:
-                Clocks.$CLK_NAME$ = self.fclk_mhz
-            self.dma = self.ol.axi_dma_0
-            self.ctrl_regs = self.ol.resize_accel_0
-            # AXI lite register offset for number of iterations
-            # used by TLastMarker to signal end of transmission for AXI CDMA
-            self.REG_OFFSET_NUM_ITERS = 0x10
-            # set up TLastMarker with correct num. samples
-            self.ctrl_regs.write(self.REG_OFFSET_NUM_ITERS, self.N*self.itersPerSample)
-        elif self.platform == "alveo":
+        if self.platform == "alveo":
             self.idma = self.ol.idma0
             self.odma = self.ol.odma0
         elif self.platform == "zynq-iodma":
@@ -147,7 +136,7 @@ class FINNAccelDriver():
             if self.fclk_mhz > 0:
                 Clocks.$CLK_NAME$ = self.fclk_mhz
         else:
-            raise ValueError("Supported platforms are zynq zynq-iodma alveo")
+            raise ValueError("Supported platforms are zynq-iodma alveo")
 
         # allocate a PYNQ buffer for the packed input and buffer
         if self.platform == "alveo":
@@ -203,13 +192,7 @@ class FINNAccelDriver():
         \"\"\"Executes accelerator by setting up the DMA(s) and
         waiting until all transfers/calls complete. Uses only member variables and
         returns nothing.\"\"\"
-        if self.platform == "zynq":
-            dma = self.dma
-            dma.sendchannel.transfer(self.ibuf_packed_device)
-            dma.recvchannel.transfer(self.obuf_packed_device)
-            dma.sendchannel.wait()
-            dma.recvchannel.wait()
-        elif self.platform == "zynq-iodma":
+        if self.platform == "zynq-iodma":
             # manually launch IODMAs since signatures are missing
             self.idma.write(0x10, self.ibuf_packed_device.device_address)
             self.idma.write(0x1c, self.N)
@@ -231,7 +214,7 @@ class FINNAccelDriver():
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description='Set exec mode, batchsize N, bitfile name, inputfile name and outputfile name')
     parser.add_argument('--exec_mode', help='Please select functional verification ("execute") or throughput test ("throughput_test")', default="execute")
-    parser.add_argument('--platform', help='Target platform: zynq zynq-iodma alveo', default="zynq")
+    parser.add_argument('--platform', help='Target platform: zynq-iodma alveo', default="zynq-iodma")
     parser.add_argument('--batchsize', help='number of samples for inference', type=int, default=1)
     parser.add_argument('--bitfile', help='name of bitfile (i.e. "resizer.bit")', default="resizer.bit")
     parser.add_argument('--inputfile', help='name of input npy file (i.e. "input.npy")', default="input.npy")