Commit ddf9b50f authored by Yaman Umuroglu

[PYNQ] fix i/o shape extraction for project creation and driver

parent 6bcf9a12
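The commit separates three views of each i/o tensor: the "normal" shape the user sees, the "folded" shape that matches the accelerator's per-cycle parallelism, and the "packed" byte-level shape that actually travels over the DMA. A minimal sketch of how the three relate, with an assumed folding factor and bitwidth (illustration only, not values from this commit):

import math

normal = (1, 64)   # user-visible shape: batch x channels (assumed)
pe = 8             # elements consumed per cycle (assumed folding factor)
bits = 4           # FINN datatype bitwidth (assumed, e.g. a 4-bit type)
folded = (1, normal[1] // pe, pe)                   # (1, 8, 8)
packed = folded[:-1] + (math.ceil(pe * bits / 8),)  # (1, 8, 4), in bytes
print(folded, packed)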
 import os
 import shutil
+from finn.custom_op.registry import getCustomOp
 from finn.transformation import Transformation
 from finn.util.basic import gen_finn_dt_tensor, get_finn_root, make_build_dir
 from finn.util.data_packing import finnpy_to_packed_bytearray
@@ -18,9 +19,8 @@ class MakePYNQDriver(Transformation):
     value.
     """
 
-    def __init__(self, platform):
+    def __init__(self):
         super().__init__()
-        self.platform = platform
 
     def apply(self, model):
         vivado_pynq_proj = model.get_metadata_prop("vivado_pynq_proj")
@@ -35,15 +35,24 @@ class MakePYNQDriver(Transformation):
         # TODO convert this to an analysis pass
         i_tensor_name = model.graph.input[0].name
         o_tensor_name = model.graph.output[0].name
-        i_tensor_shape = tuple(model.get_tensor_shape(i_tensor_name))
-        o_tensor_shape = tuple(model.get_tensor_shape(o_tensor_name))
+        i_tensor_shape_normal = tuple(model.get_tensor_shape(i_tensor_name))
+        o_tensor_shape_normal = tuple(model.get_tensor_shape(o_tensor_name))
         i_tensor_dt = model.get_tensor_datatype(i_tensor_name)
         o_tensor_dt = model.get_tensor_datatype(o_tensor_name)
-        # generate dummy i/o tensors and their packed versions
-        i_tensor_dummy = gen_finn_dt_tensor(i_tensor_dt, i_tensor_shape)
-        o_tensor_dummy = gen_finn_dt_tensor(o_tensor_dt, o_tensor_shape)
-        i_tensor_dummy_packed = finnpy_to_packed_bytearray(i_tensor_dummy, i_tensor_dt)
-        o_tensor_dummy_packed = finnpy_to_packed_bytearray(o_tensor_dummy, o_tensor_dt)
+        # extract HLSCustomOp instances to get folded i/o shapes
+        first_node = getCustomOp(model.graph.node[0])
+        last_node = getCustomOp(model.graph.node[-1])
+        i_tensor_shape_folded = first_node.get_folded_input_shape()
+        o_tensor_shape_folded = last_node.get_folded_output_shape()
+        # generate dummy folded i/o tensors and their packed versions
+        i_tensor_dummy_folded = gen_finn_dt_tensor(i_tensor_dt, i_tensor_shape_folded)
+        o_tensor_dummy_folded = gen_finn_dt_tensor(o_tensor_dt, o_tensor_shape_folded)
+        i_tensor_dummy_packed = finnpy_to_packed_bytearray(
+            i_tensor_dummy_folded, i_tensor_dt
+        )
+        o_tensor_dummy_packed = finnpy_to_packed_bytearray(
+            o_tensor_dummy_folded, o_tensor_dt
+        )
         i_tensor_shape_packed = i_tensor_dummy_packed.shape
         o_tensor_shape_packed = o_tensor_dummy_packed.shape
@@ -51,11 +60,13 @@ class MakePYNQDriver(Transformation):
         driver_py = pynq_driver_dir + "/driver.py"
         driver = templates.pynq_driver_template
         driver = driver.replace("$INPUT_FINN_DATATYPE$", str(i_tensor_dt))
-        driver = driver.replace("$INPUT_SHAPE_UNPACKED$", str(i_tensor_shape))
+        driver = driver.replace("$INPUT_SHAPE_NORMAL$", str(i_tensor_shape_normal))
+        driver = driver.replace("$INPUT_SHAPE_FOLDED$", str(i_tensor_shape_folded))
         driver = driver.replace("$INPUT_SHAPE_PACKED$", str(i_tensor_shape_packed))
         driver = driver.replace("$OUTPUT_FINN_DATATYPE$", str(o_tensor_dt))
+        driver = driver.replace("$OUTPUT_SHAPE_NORMAL$", str(o_tensor_shape_normal))
+        driver = driver.replace("$OUTPUT_SHAPE_FOLDED$", str(o_tensor_shape_folded))
         driver = driver.replace("$OUTPUT_SHAPE_PACKED$", str(o_tensor_shape_packed))
-        driver = driver.replace("$OUTPUT_SHAPE_UNPACKED$", str(o_tensor_shape))
         with open(driver_py, "w") as f:
             f.write(driver)
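The driver itself is produced by plain string substitution on a template, which works because str() on a shape tuple is already a valid Python literal. A minimal illustration, with an assumed shape value:

template_line = "ishape_normal = $INPUT_SHAPE_NORMAL$"
print(template_line.replace("$INPUT_SHAPE_NORMAL$", str((1, 64))))
# prints: ishape_normal = (1, 64)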
 import os
 import subprocess
+from finn.custom_op.registry import getCustomOp
 from finn.transformation import Transformation
 from finn.util.basic import get_by_name, make_build_dir, roundup_to_integer_multiple
@@ -50,16 +51,11 @@ class MakePYNQProject(Transformation):
         ip_dirs += [ipstitch_path + "/ip"]
         ip_dirs_str = "[%s]" % (" ".join(ip_dirs))
 
-        # extract the actual in-out bytes from graph
-        # TODO convert this to an analysis pass
-        i_tensor_name = model.graph.input[0].name
-        o_tensor_name = model.graph.output[0].name
-        i_tensor_shape = model.get_tensor_shape(i_tensor_name)
-        o_tensor_shape = model.get_tensor_shape(o_tensor_name)
-        i_tensor_dt = model.get_tensor_datatype(i_tensor_name)
-        o_tensor_dt = model.get_tensor_datatype(o_tensor_name)
-        i_bits_per_cycle = i_tensor_dt.bitwidth() * i_tensor_shape[-1]
-        o_bits_per_cycle = o_tensor_dt.bitwidth() * o_tensor_shape[-1]
+        # extract HLSCustomOp instances to get i/o stream widths
+        first_node = getCustomOp(model.graph.node[0])
+        last_node = getCustomOp(model.graph.node[-1])
+        i_bits_per_cycle = first_node.get_instream_width()
+        o_bits_per_cycle = last_node.get_outstream_width()
         # ensure i/o is padded to bytes
         i_bits_per_cycle_padded = roundup_to_integer_multiple(i_bits_per_cycle, 8)
         o_bits_per_cycle_padded = roundup_to_integer_multiple(o_bits_per_cycle, 8)
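The padded widths exist because the PYNQ shell's DMA engines move whole bytes, so a stream narrower than a byte multiple must be widened. A sketch of the assumed behavior of the roundup_to_integer_multiple helper imported above (the real implementation lives in finn.util.basic):

def roundup_to_integer_multiple(x, factor):
    # round x up to the nearest multiple of factor (assumed semantics)
    rem = x % factor
    return x if rem == 0 else x + factor - rem

assert roundup_to_integer_multiple(9, 8) == 16    # 9-bit stream widens to 16 bits
assert roundup_to_integer_multiple(16, 8) == 16   # already byte-aligned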
@@ -64,22 +64,26 @@ dma=ol.axi_dma_0
 # declare input/output types and shapes for the accelerator
 # input FINN DataType
 idt = $INPUT_FINN_DATATYPE$
-# unpacked and packed input shapes
-ishape_unpacked = $INPUT_SHAPE_UNPACKED$
+# normal, folded and packed input shapes
+ishape_normal = $INPUT_SHAPE_NORMAL$
+ishape_folded = $INPUT_SHAPE_FOLDED$
 ishape_packed = $INPUT_SHAPE_PACKED$
 # output FINN DataType
 odt = $OUTPUT_FINN_DATATYPE$
-# unpacked and packed output shapes
+# normal, folded and packed output shapes
+oshape_normal = $OUTPUT_SHAPE_NORMAL$
+oshape_folded = $OUTPUT_SHAPE_FOLDED$
 oshape_packed = $OUTPUT_SHAPE_PACKED$
-oshape_unpacked = $OUTPUT_SHAPE_UNPACKED$
 # load desired input .npy file
-ibuf_unpacked = np.load("input.npy")
+ibuf_normal = np.load("input.npy")
 # ensure that shape is as expected
-assert ibuf_unpacked.shape == ishape_unpacked
+assert ibuf_normal.shape == ishape_normal
+# convert to folded form
+ibuf_folded = ibuf_normal.reshape(ishape_folded)
 # pack the input buffer
-ibuf_packed = finnpy_to_packed_bytearray(ibuf_unpacked, idt)
+ibuf_packed = finnpy_to_packed_bytearray(ibuf_folded, idt)
 # allocate a PYNQ buffer for the packed input buffer
 ibuf_packed_device = allocate(shape=ishape_packed, dtype=np.uint8)
 # copy the packed data into the PYNQ buffer
@@ -96,6 +100,8 @@ dma.sendchannel.wait()
 dma.recvchannel.wait()
 # unpack the packed output buffer from accelerator
-obuf_unpacked = packed_bytearray_to_finnpy(obuf_packed, odt, oshape_unpacked)
-np.save("output.npy", obuf_unpacked)
+obuf_folded = packed_bytearray_to_finnpy(obuf_packed, odt, oshape_folded)
+# reshape to normal form and save
+obuf_normal = obuf_folded.reshape(oshape_normal)
+np.save("output.npy", obuf_normal)
 """
@@ -259,7 +259,7 @@ def test_fpgadataflow_ipstitch_pynq_synth():
 @pytest.mark.dependency(depends=["test_fpgadataflow_ipstitch_pynq_projgen"])
 def test_fpgadataflow_ipstitch_pynq_driver():
     model = ModelWrapper(ip_stitch_model_dir + "/test_fpgadataflow_pynq_projgen.onnx")
-    model = model.transform(MakePYNQDriver(test_pynq_board))
+    model = model.transform(MakePYNQDriver())
     driver_dir = model.get_metadata_prop("pynq_driver_dir")
     assert driver_dir is not None
     assert os.path.isdir(driver_dir)