Skip to content
Snippets Groups Projects
Commit 07b3b2db authored by Yaman Umuroglu's avatar Yaman Umuroglu
Browse files

[Driver] flesh out and fill driver template

parent d3083c0e
No related merge requests found
......@@ -2,7 +2,8 @@ import os
import shutil
from finn.transformation import Transformation
from finn.util.basic import get_finn_root, make_build_dir
from finn.util.basic import gen_finn_dt_tensor, get_finn_root, make_build_dir
from finn.util.data_packing import finnpy_to_packed_bytearray
from . import templates
......@@ -30,13 +31,40 @@ class MakePYNQDriver(Transformation):
pynq_driver_dir = make_build_dir(prefix="pynq_driver_")
model.set_metadata_prop("pynq_driver_dir", pynq_driver_dir)
# generate the driver
# extract input-output shapes from the graph
# TODO convert this to an analysis pass
i_tensor_name = model.graph.input[0].name
o_tensor_name = model.graph.output[0].name
i_tensor_shape = model.get_tensor_shape(i_tensor_name)
o_tensor_shape = model.get_tensor_shape(o_tensor_name)
i_tensor_dt = model.get_tensor_datatype(i_tensor_name)
o_tensor_dt = model.get_tensor_datatype(o_tensor_name)
# generate dummy i/o tensors and their packed versions
i_tensor_dummy = gen_finn_dt_tensor(i_tensor_dt, i_tensor_shape)
o_tensor_dummy = gen_finn_dt_tensor(o_tensor_dt, o_tensor_shape)
i_tensor_dummy_packed = finnpy_to_packed_bytearray(i_tensor_dummy, i_tensor_dt)
o_tensor_dummy_packed = finnpy_to_packed_bytearray(o_tensor_dummy, o_tensor_dt)
i_tensor_shape_packed = i_tensor_dummy_packed.shape
o_tensor_shape_packed = o_tensor_dummy_packed.shape
# fill in the driver template
driver_py = pynq_driver_dir + "/driver.py"
driver = templates.pynq_driver_template
driver = driver.replace("$INPUT_FINN_DATATYPE$", str(i_tensor_dt))
driver = driver.replace("$INPUT_SHAPE_UNPACKED$", str(i_tensor_shape))
driver = driver.replace("$INPUT_SHAPE_PACKED$", str(i_tensor_shape_packed))
driver = driver.replace("$OUTPUT_FINN_DATATYPE$", str(o_tensor_dt))
driver = driver.replace("$OUTPUT_SHAPE_PACKED$", str(o_tensor_shape_packed))
driver = driver.replace("$OUTPUT_SHAPE_UNPACKED$", str(o_tensor_shape))
with open(driver_py, "w") as f:
f.write(templates.pynq_driver_template)
f.write(driver)
# copy all the dependencies into the driver folder
shutil.copytree(
get_finn_root() + "/src/finn/util", pynq_driver_dir + "/finn/util"
)
shutil.copytree(
get_finn_root() + "/src/finn/core", pynq_driver_dir + "/finn/core"
)
return (model, False)
......@@ -51,28 +51,51 @@ pynq_driver_template = """
from pynq import Overlay
import numpy as np
from pynq import allocate
from finn.util.data_packing import (
finnpy_to_packed_bytearray,
packed_bytearray_to_finnpy
)
from finn.core.datatype import DataType
bitfile_path = "/home/xilinx/finn/resizer.bit"
bitfile_path = "resizer.bit"
ol = Overlay(bitfile_path)
dma=ol.axi_dma_0
ibuf = np.load("input.npy")
idt = DataType.INT2
ishape_packed = (1,)
ibuf_packed = npy2packedbytes(ibuf, idt)
ibuf_packed_device = allocate(shape=ishape_packed, dtype=np.int8)
# declare input/output types and shapes for the accelerator
# input FINN DataType
idt = $INPUT_FINN_DATATYPE$
# unpacked and packed input shapes
ishape_unpacked = $INPUT_SHAPE_UNPACKED$
ishape_packed = $INPUT_SHAPE_PACKED$
# output FINN DataType
odt = $OUTPUT_FINN_DATATYPE$
# unpacked and packed output shapes
oshape_packed = $OUTPUT_SHAPE_PACKED$
oshape_unpacked = $OUTPUT_SHAPE_UNPACKED$
# load desired input .npy file
ibuf_unpacked = np.load("input.npy")
# ensure that shape is as expected
assert ibuf_unpacked.shape == ishape_unpacked
# pack the input buffer
ibuf_packed = finnpy_to_packed_bytearray(ibuf_unpacked, idt)
# allocate a PYNQ buffer for the packed input buffer
ibuf_packed_device = allocate(shape=ishape_packed, dtype=np.uint8)
# copy the packed data into the PYNQ buffer
# TODO optimization: pack directly into the PYNQ buffer?
np.copyto(ibuf_packed_device, ibuf_packed)
odt = DataType.INT32
oshape_packed = (16,)
obuf_packed = allocate(shape=oshape_packed, dtype=np.int8)
# allocate a PYNQ buffer for the returned packed output buffer
obuf_packed = allocate(shape=oshape_packed, dtype=np.uint8)
# set up the DMA and wait until all transfers complete
dma.sendchannel.transfer(ibuf_packed_device)
dma.recvchannel.transfer(obuf_packed)
dma.sendchannel.wait()
dma.recvchannel.wait()
obuf = packedbytes2npy(obuf_packed, odt)
np.save("output.npy", obuf)
# unpack the packed output buffer from accelerator
obuf_unpacked = packed_bytearray_to_finnpy(obuf_packed, odt, oshape_unpacked)
np.save("output.npy", obuf_unpacked)
"""
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment