Skip to content
Snippets Groups Projects
Commit 3086e249 authored by Yaman Umuroglu's avatar Yaman Umuroglu
Browse files

Merge branch 'feature/notebook_end_to_end_flow' into dev

parents d5785d44 c2435a76
No related branches found
No related tags found
No related merge requests found
Showing
with 683 additions and 196 deletions
......@@ -25,6 +25,7 @@ ENV PYTHONPATH "${PYTHONPATH}:/workspace/brevitas_cnv_lfc/training_scripts"
ENV PYTHONPATH "${PYTHONPATH}:/workspace/brevitas"
ENV PYTHONPATH "${PYTHONPATH}:/workspace/pyverilator"
ENV PYNQSHELL_PATH "/workspace/PYNQ-HelloWorld/boards"
ENV PYNQ_BOARD "Pynq-Z1"
ARG GID
ARG GNAME
......
This diff is collapsed.
notebooks/pynq_shell_project.png

90.5 KiB

notebooks/stitched_ip.png

43.3 KiB

......@@ -14,7 +14,9 @@ DOCKER_PASSWD="finn"
# containers from the same user
DOCKER_RND=$(shuf -i0-32768 -n1)
DOCKER_TAG="finn_${DOCKER_UNAME}"
DOCKER_INST_NAME="finn_${DOCKER_UNAME}_${DOCKER_RND}"
# uncomment to run multiple instances with different names
# DOCKER_INST_NAME="finn_${DOCKER_UNAME}_${DOCKER_RND}"
DOCKER_INST_NAME="finn_${DOCKER_UNAME}"
: ${JUPYTER_PORT=8888}
: ${NETRON_PORT=8081}
......@@ -38,6 +40,7 @@ PYVERILATOR_LOCAL=$SCRIPTPATH/pyverilator
PYNQSHELL_LOCAL=$SCRIPTPATH/PYNQ-HelloWorld
BUILD_LOCAL=/tmp/$DOCKER_INST_NAME
VIVADO_HLS_LOCAL=$VIVADO_PATH
: ${VIVADO_IP_CACHE=$BUILD_LOCAL/vivado_ip_cache}
# clone dependency repos
git clone --branch feature/finn_onnx_export $BREVITAS_REPO $BREVITAS_LOCAL || git -C "$BREVITAS_LOCAL" pull
......@@ -49,6 +52,7 @@ git clone $PYNQSHELL_REPO $PYNQSHELL_LOCAL || git -C "$PYNQSHELL_LOCAL" pull
# ensure build dir exists locally
mkdir -p $BUILD_LOCAL
mkdir -p $VIVADO_IP_CACHE
echo "Instance is named as $DOCKER_INST_NAME"
echo "Mounting $SCRIPTPATH into /workspace/finn"
......@@ -62,6 +66,7 @@ echo "Mounting $BUILD_LOCAL into $BUILD_LOCAL"
echo "Mounting $VIVADO_PATH into $VIVADO_PATH"
echo "Port-forwarding for Jupyter $JUPYTER_PORT:$JUPYTER_PORT"
echo "Port-forwarding for Netron $NETRON_PORT:$NETRON_PORT"
echo "Vivado IP cache dir is at $VIVADO_IP_CACHE"
if [ "$1" = "test" ]; then
echo "Running test suite"
......@@ -101,6 +106,7 @@ docker run -t --rm --name $DOCKER_INST_NAME -it \
-e VIVADO_PATH=$VIVADO_PATH \
-e FINN_INST_NAME=$DOCKER_INST_NAME \
-e FINN_ROOT="/workspace/finn" \
-e VIVADO_IP_CACHE="$VIVADO_IP_CACHE" \
-p $JUPYTER_PORT:$JUPYTER_PORT \
-p $NETRON_PORT:$NETRON_PORT \
$DOCKER_TAG bash -c "$DOCKER_CMD"
......@@ -299,3 +299,15 @@ compilation transformations?
@abstractmethod
def pragmas(self):
pass
def get_folded_input_shape(self):
    """Return the folded (streamed) input shape; concrete ops must override.

    Raises NotImplementedError (a subclass of Exception, so existing
    broad handlers still catch it) to signal the missing override.
    """
    raise NotImplementedError("get_folded_input_shape not implemented for this op")
def get_folded_output_shape(self):
    """Return the folded (streamed) output shape; concrete ops must override.

    Raises NotImplementedError (still caught by handlers of generic
    Exception) to signal the missing override.
    """
    raise NotImplementedError("get_folded_output_shape not implemented for this op")
def get_instream_width(self):
    """Return the input stream width in bits; concrete ops must override.

    Raises NotImplementedError (still caught by handlers of generic
    Exception) to signal the missing override.
    """
    raise NotImplementedError("get_instream_width not implemented for this op")
def get_outstream_width(self):
    """Return the output stream width in bits; concrete ops must override.

    Raises NotImplementedError (still caught by handlers of generic
    Exception) to signal the missing override.
    """
    raise NotImplementedError("get_outstream_width not implemented for this op")
......@@ -208,10 +208,21 @@ class StreamingFCLayer_Batch(HLSCustomOp):
o_bits = self.get_output_datatype().bitwidth()
return o_bits * self.get_nodeattr("PE")
def get_number_output_values(self):
def get_folded_input_shape(self):
    """Folded input shape: (1, synapse-fold, SIMD).

    The MW input elements are streamed SIMD at a time, so the fold
    dimension is MW // SIMD.
    """
    simd = self.get_nodeattr("SIMD")
    synapse_fold = self.get_nodeattr("MW") // simd
    return (1, synapse_fold, simd)
def get_folded_output_shape(self):
    """Folded output shape: (1, neuron-fold, PE).

    The MH output elements are produced PE at a time, so the fold
    dimension is MH // PE. (A stray early ``return mh // pe`` -- a
    merge artifact -- previously returned a bare int and made the
    tuple return unreachable.)
    """
    mh = self.get_nodeattr("MH")
    pe = self.get_nodeattr("PE")
    nf = mh // pe
    return (1, nf, pe)
def get_number_output_values(self):
    """Number of output stream words: the neuron-fold dimension of the
    folded output shape."""
    return self.get_folded_output_shape()[1]
def get_template_param_values(self):
ret = dict()
......
......@@ -8,8 +8,10 @@ class TLastMarker(HLSCustomOp):
def get_nodeattr_types(self):
    """Node attribute types for TLastMarker, merged with the base op's."""
    own_attrs = {
        "NumIters": ("i", True, 0),
        # width of input-output data streams, in bits
        "StreamWidth": ("i", True, 0),
        # width of individual element in stream, in bits
        "ElemWidth": ("i", True, 0),
    }
    # base-class attributes take precedence on any key clash, matching
    # the original dict.update() semantics
    return {**own_attrs, **super().get_nodeattr_types()}
......@@ -87,3 +89,21 @@ class TLastMarker(HLSCustomOp):
def get_number_output_values(self):
return self.get_nodeattr("NumIters")
def get_folded_input_shape(self):
    """Folded input shape: (1, NumIters, elements packed per stream word).

    Each stream word carries StreamWidth // ElemWidth elements.
    """
    packed_per_word = self.get_nodeattr("StreamWidth") // self.get_nodeattr("ElemWidth")
    return (1, self.get_nodeattr("NumIters"), packed_per_word)
def get_folded_output_shape(self):
    # TLastMarker is a pass-through: folded output shape equals folded input shape
    return self.get_folded_input_shape()
def get_instream_width(self):
    """Input stream width in bits (the StreamWidth node attribute)."""
    return self.get_nodeattr("StreamWidth")
def get_outstream_width(self):
    """Output stream width in bits (same StreamWidth attribute as the input)."""
    return self.get_nodeattr("StreamWidth")
from onnx import TensorProto
from onnx import helper as oh
from finn.custom_op.registry import getCustomOp
from finn.transformation import Transformation
class InsertTLastMarker(Transformation):
    """Ensure that the graph is terminated with a TLastMarker node, inserting
    one if necessary."""

    def __init__(self):
        super().__init__()

    def apply(self, model):
        # TODO only makes sense for a pure fpgadataflow graph -- check!
        graph_out_name = model.graph.output[0].name
        final_node = model.find_producer(graph_out_name)
        # guard: graph already terminated, nothing to do
        if final_node.op_type == "TLastMarker":
            # TODO maybe check the correctness of properties
            return (model, False)
        # query the current final node for the stream parameters the
        # TLastMarker needs
        producer_op = getCustomOp(final_node)
        num_iters = int(producer_op.get_number_output_values())
        stream_width = int(producer_op.get_outstream_width())
        out_shape = model.get_tensor_shape(graph_out_name)
        out_dtype = model.get_tensor_datatype(graph_out_name)
        elem_width = out_dtype.bitwidth()
        # make a fresh intermediate tensor between producer and marker
        intermediate = oh.make_tensor_value_info(
            model.make_new_valueinfo_name(), TensorProto.FLOAT, out_shape
        )
        model.graph.value_info.append(intermediate)
        model.set_tensor_datatype(intermediate.name, out_dtype)
        # reroute the final node's output into the new intermediate tensor
        final_node.output[0] = intermediate.name
        # the marker consumes the intermediate and drives the graph output
        tlast_node = oh.make_node(
            "TLastMarker",
            [intermediate.name],
            [graph_out_name],
            NumIters=num_iters,
            StreamWidth=stream_width,
            ElemWidth=elem_width,
            domain="finn",
            backend="fpgadataflow",
        )
        model.graph.node.append(tlast_node)
        return (model, True)
import os
from distutils.dir_util import copy_tree
from shutil import copy
from finn.transformation import Transformation
from finn.util.basic import make_build_dir
class DeployToPYNQ(Transformation):
    """Collects all necessary files for deployment and copies them to the PYNQ board.

    Expects information about the PYNQ board to make scp possible:

    * ip address of board
    * username and password for board
    * target directory where the files are stored on the board
    """

    def __init__(self, ip, username, password, target_dir):
        super().__init__()
        self.ip = ip
        self.username = username
        self.password = password
        self.target_dir = target_dir

    def apply(self, model):
        # set metadata properties according to user input specifications
        model.set_metadata_prop("pynq_ip", self.ip)
        model.set_metadata_prop("pynq_username", self.username)
        model.set_metadata_prop("pynq_password", self.password)
        model.set_metadata_prop("pynq_target_dir", self.target_dir)
        # create directory for deployment files
        deployment_dir = make_build_dir(prefix="pynq_deployment_")
        model.set_metadata_prop("pynq_deployment_dir", deployment_dir)
        # get and copy necessary files
        # .bit and .hwh file from the Vivado PYNQ project output
        vivado_pynq_proj = model.get_metadata_prop("vivado_pynq_proj")
        bitfile = None
        hwhfile = None
        for fname in os.listdir(vivado_pynq_proj):
            if fname.endswith(".bit"):
                bitfile = os.path.join(vivado_pynq_proj, fname)
            elif fname.endswith(".hwh"):
                hwhfile = os.path.join(vivado_pynq_proj, fname)
        # fail with a clear message instead of a NameError when the
        # expected synthesis outputs are missing
        if bitfile is None or hwhfile is None:
            raise Exception(
                "Expected one .bit and one .hwh file in %s" % vivado_pynq_proj
            )
        copy(bitfile, deployment_dir)
        copy(hwhfile, deployment_dir)
        # driver.py and python libraries
        pynq_driver_dir = model.get_metadata_prop("pynq_driver_dir")
        copy_tree(pynq_driver_dir, deployment_dir)
        # NOTE(review): both "pynq_deployment_dir" (above) and
        # "pynq_deploy_dir" are set to the same value; kept for
        # compatibility with existing readers of either key
        model.set_metadata_prop("pynq_deploy_dir", deployment_dir)
        return (model, False)
import os
import shutil
from finn.custom_op.registry import getCustomOp
from finn.transformation import Transformation
from finn.util.basic import gen_finn_dt_tensor, get_finn_root, make_build_dir
from finn.util.data_packing import finnpy_to_packed_bytearray
......@@ -18,9 +19,8 @@ class MakePYNQDriver(Transformation):
value.
"""
def __init__(self, platform):
def __init__(self):
super().__init__()
self.platform = platform
def apply(self, model):
vivado_pynq_proj = model.get_metadata_prop("vivado_pynq_proj")
......@@ -35,15 +35,24 @@ class MakePYNQDriver(Transformation):
# TODO convert this to an analysis pass
i_tensor_name = model.graph.input[0].name
o_tensor_name = model.graph.output[0].name
i_tensor_shape = tuple(model.get_tensor_shape(i_tensor_name))
o_tensor_shape = tuple(model.get_tensor_shape(o_tensor_name))
i_tensor_shape_normal = tuple(model.get_tensor_shape(i_tensor_name))
o_tensor_shape_normal = tuple(model.get_tensor_shape(o_tensor_name))
i_tensor_dt = model.get_tensor_datatype(i_tensor_name)
o_tensor_dt = model.get_tensor_datatype(o_tensor_name)
# generate dummy i/o tensors and their packed versions
i_tensor_dummy = gen_finn_dt_tensor(i_tensor_dt, i_tensor_shape)
o_tensor_dummy = gen_finn_dt_tensor(o_tensor_dt, o_tensor_shape)
i_tensor_dummy_packed = finnpy_to_packed_bytearray(i_tensor_dummy, i_tensor_dt)
o_tensor_dummy_packed = finnpy_to_packed_bytearray(o_tensor_dummy, o_tensor_dt)
# extract HLSCustomOp instances to get folded i/o shapes
first_node = getCustomOp(model.find_consumer(i_tensor_name))
last_node = getCustomOp(model.find_producer(o_tensor_name))
i_tensor_shape_folded = first_node.get_folded_input_shape()
o_tensor_shape_folded = last_node.get_folded_output_shape()
# generate dummy folded i/o tensors and their packed versions
i_tensor_dummy_folded = gen_finn_dt_tensor(i_tensor_dt, i_tensor_shape_folded)
o_tensor_dummy_folded = gen_finn_dt_tensor(o_tensor_dt, o_tensor_shape_folded)
i_tensor_dummy_packed = finnpy_to_packed_bytearray(
i_tensor_dummy_folded, i_tensor_dt
)
o_tensor_dummy_packed = finnpy_to_packed_bytearray(
o_tensor_dummy_folded, o_tensor_dt
)
i_tensor_shape_packed = i_tensor_dummy_packed.shape
o_tensor_shape_packed = o_tensor_dummy_packed.shape
......@@ -51,11 +60,13 @@ class MakePYNQDriver(Transformation):
driver_py = pynq_driver_dir + "/driver.py"
driver = templates.pynq_driver_template
driver = driver.replace("$INPUT_FINN_DATATYPE$", str(i_tensor_dt))
driver = driver.replace("$INPUT_SHAPE_UNPACKED$", str(i_tensor_shape))
driver = driver.replace("$INPUT_SHAPE_NORMAL$", str(i_tensor_shape_normal))
driver = driver.replace("$INPUT_SHAPE_FOLDED$", str(i_tensor_shape_folded))
driver = driver.replace("$INPUT_SHAPE_PACKED$", str(i_tensor_shape_packed))
driver = driver.replace("$OUTPUT_FINN_DATATYPE$", str(o_tensor_dt))
driver = driver.replace("$OUTPUT_SHAPE_NORMAL$", str(o_tensor_shape_normal))
driver = driver.replace("$OUTPUT_SHAPE_FOLDED$", str(o_tensor_shape_folded))
driver = driver.replace("$OUTPUT_SHAPE_PACKED$", str(o_tensor_shape_packed))
driver = driver.replace("$OUTPUT_SHAPE_UNPACKED$", str(o_tensor_shape))
with open(driver_py, "w") as f:
f.write(driver)
......
import os
import subprocess
from finn.custom_op.registry import getCustomOp
from finn.transformation import Transformation
from finn.util.basic import get_by_name, make_build_dir, roundup_to_integer_multiple
......@@ -50,16 +51,13 @@ class MakePYNQProject(Transformation):
ip_dirs += [ipstitch_path + "/ip"]
ip_dirs_str = "[%s]" % (" ".join(ip_dirs))
# extract the actual in-out bytes from graph
# TODO convert this to an analysis pass
# extract HLSCustomOp instances to get i/o stream widths
i_tensor_name = model.graph.input[0].name
o_tensor_name = model.graph.output[0].name
i_tensor_shape = model.get_tensor_shape(i_tensor_name)
o_tensor_shape = model.get_tensor_shape(o_tensor_name)
i_tensor_dt = model.get_tensor_datatype(i_tensor_name)
o_tensor_dt = model.get_tensor_datatype(o_tensor_name)
i_bits_per_cycle = i_tensor_dt.bitwidth() * i_tensor_shape[-1]
o_bits_per_cycle = o_tensor_dt.bitwidth() * o_tensor_shape[-1]
first_node = getCustomOp(model.find_consumer(i_tensor_name))
last_node = getCustomOp(model.find_producer(o_tensor_name))
i_bits_per_cycle = first_node.get_instream_width()
o_bits_per_cycle = last_node.get_outstream_width()
# ensure i/o is padded to bytes
i_bits_per_cycle_padded = roundup_to_integer_multiple(i_bits_per_cycle, 8)
o_bits_per_cycle_padded = roundup_to_integer_multiple(o_bits_per_cycle, 8)
......@@ -71,6 +69,7 @@ class MakePYNQProject(Transformation):
out_if_name = "out_r_0"
clk_name = "ap_clk_0"
nrst_name = "ap_rst_n_0"
vivado_ip_cache = os.getenv("VIVADO_IP_CACHE", default="")
# create a temporary folder for the project
vivado_pynq_proj_dir = make_build_dir(prefix="vivado_pynq_proj_")
......@@ -87,6 +86,7 @@ class MakePYNQProject(Transformation):
out_if_name,
clk_name,
nrst_name,
vivado_ip_cache,
)
with open(vivado_pynq_proj_dir + "/ip_config.tcl", "w") as f:
......
......@@ -9,6 +9,7 @@ variable config_ip_axis_name_out
variable config_ip_use_axilite
variable config_ip_project_dir
variable config_output_products_dir
variable config_remote_cache
# for arguments involving paths below: use absolute paths or relative to the
# platform/overlay/bitstream folder
......@@ -36,6 +37,8 @@ set config_ip_clk_name %s
set config_ip_nrst_name %s
# whether the IP needs an AXI Lite interface for control
set config_ip_use_axilite 0
# Vivado OOC IP cache
set config_remote_cache "%s"
"""
call_pynqshell_makefile_template = """
......@@ -64,22 +67,26 @@ dma=ol.axi_dma_0
# declare input/output types and shapes for the accelerator
# input FINN DataType
idt = $INPUT_FINN_DATATYPE$
# unpacked and packed input shapes
ishape_unpacked = $INPUT_SHAPE_UNPACKED$
# normal, folded and packed input shapes
ishape_normal = $INPUT_SHAPE_NORMAL$
ishape_folded = $INPUT_SHAPE_FOLDED$
ishape_packed = $INPUT_SHAPE_PACKED$
# output FINN DataType
odt = $OUTPUT_FINN_DATATYPE$
# unpacked and packed output shapes
# normal, folded and packed output shapes
oshape_normal = $OUTPUT_SHAPE_NORMAL$
oshape_folded = $OUTPUT_SHAPE_FOLDED$
oshape_packed = $OUTPUT_SHAPE_PACKED$
oshape_unpacked = $OUTPUT_SHAPE_UNPACKED$
# load desired input .npy file
ibuf_unpacked = np.load("input.npy")
ibuf_normal = np.load("input.npy")
# ensure that shape is as expected
assert ibuf_unpacked.shape == ishape_unpacked
assert ibuf_normal.shape == ishape_normal
# convert to folded form
ibuf_folded = ibuf_normal.reshape(ishape_folded)
# pack the input buffer
ibuf_packed = finnpy_to_packed_bytearray(ibuf_unpacked, idt)
ibuf_packed = finnpy_to_packed_bytearray(ibuf_folded, idt)
# allocate a PYNQ buffer for the packed input buffer
ibuf_packed_device = allocate(shape=ishape_packed, dtype=np.uint8)
# copy the packed data into the PYNQ buffer
......@@ -96,6 +103,8 @@ dma.sendchannel.wait()
dma.recvchannel.wait()
# unpack the packed output buffer from accelerator
obuf_unpacked = packed_bytearray_to_finnpy(obuf_packed, odt, oshape_unpacked)
np.save("output.npy", obuf_unpacked)
obuf_folded = packed_bytearray_to_finnpy(obuf_packed, odt, oshape_folded)
# convert to normal reshape and save
obuf_normal = obuf_folded.reshape(oshape_normal)
np.save("output.npy", obuf_normal)
"""
......@@ -8,6 +8,11 @@ import numpy as np
from finn.core.datatype import DataType
# mapping from PYNQ board names to FPGA part names
pynq_part_map = {
    "Ultra96": "xczu3eg-sbva484-1-e",
    "Pynq-Z1": "xc7z020clg400-1",
}
def get_finn_root():
"Return the root directory that FINN is cloned into."
......
......@@ -277,7 +277,7 @@ def finnpy_to_packed_bytearray(ndarray, dtype):
return np.apply_along_axis(fn, packed_hexstring.ndim - 1, packed_hexstring)
def packed_bytearray_to_finnpy(packed_bytearray, dtype, output_shape=None):
def packed_bytearray_to_finnpy(packed_bytearray, dtype, output_shape=None, reverse_inner=False):
"""Given a packed numpy uint8 ndarray, unpack it into a FINN array of
given DataType. output_shape can be specified to remove padding from the
packed dimension, or set to None to be inferred from the input."""
......@@ -300,6 +300,6 @@ def packed_bytearray_to_finnpy(packed_bytearray, dtype, output_shape=None):
packed_hexstring = np.apply_along_axis(
npbytearray2hexstring, packed_dim, packed_bytearray
)
ret = unpack_innermost_dim_from_hex_string(packed_hexstring, dtype, output_shape)
ret = unpack_innermost_dim_from_hex_string(packed_hexstring, dtype, output_shape, reverse_inner)
return ret
import os.path
from pkgutil import get_data
import pytest
from finn.core.modelwrapper import ModelWrapper
from finn.custom_op.registry import getCustomOp
from finn.transformation.fpgadataflow.create_dataflow_partition import (
CreateDataflowPartition,
)
from finn.transformation.fpgadataflow.insert_tlastmarker import InsertTLastMarker
from finn.util.basic import make_build_dir
build_dir = make_build_dir("test_dataflow_partition_")
def test_create_dataflow_partition():
@pytest.mark.dependency()
def test_dataflow_partition_create():
# load the onnx model
raw_m = get_data(
"finn", "data/onnx/finn-hls-model/tfc_w1_a1_after_conv_to_hls.onnx"
......@@ -19,3 +26,21 @@ def test_create_dataflow_partition():
sdp_node = getCustomOp(model.graph.node[2])
assert sdp_node.__class__.__name__ == "StreamingDataflowPartition"
assert os.path.isfile(sdp_node.get_nodeattr("model"))
model.save(build_dir + "/test_dataflow_partition_create.onnx")
@pytest.mark.dependency(depends=["test_dataflow_partition_create"])
def test_dataflow_partition_tlastmarker():
    """Apply InsertTLastMarker to the dataflow partition produced by the
    previous test and check the marker node and its attributes."""
    # load the parent model saved by test_dataflow_partition_create, then
    # open the child dataflow model referenced by its partition node
    model = ModelWrapper(build_dir + "/test_dataflow_partition_create.onnx")
    model_path = getCustomOp(model.graph.node[2]).get_nodeattr("model")
    model = ModelWrapper(model_path)
    model = model.transform(InsertTLastMarker())
    # the marker must now be the last node in the graph
    assert model.graph.node[-1].op_type == "TLastMarker"
    assert model.graph.node[-1].domain == "finn"
    tl_node = getCustomOp(model.graph.node[-1])
    assert tl_node.get_nodeattr("NumIters") == 1
    assert tl_node.get_nodeattr("StreamWidth") == 320
    assert tl_node.get_nodeattr("ElemWidth") == 32
    model.save(build_dir + "/test_dataflow_partition_tlastmarker.onnx")
    # second application should be a no-op since InsertTLastMarker returns
    # early when a TLastMarker already terminates the graph
    model = model.transform(InsertTLastMarker())
    model.save(build_dir + "/test_dataflow_partition_tlastmarker2.onnx")
import shutil
import os
import subprocess
import numpy as np
......@@ -59,8 +60,8 @@ def make_npy2apintstream_testcase(ndarray, dtype):
f.write("\n".join(test_app_string))
cmd_compile = """
g++ -o test_npy2apintstream test.cpp /workspace/cnpy/cnpy.cpp \
-I/workspace/cnpy/ -I/workspace/vivado-hlslib -I/workspace/finn/src/finn/data/cpp \
--std=c++11 -lz"""
-I/workspace/cnpy/ -I{}/include -I/workspace/finn/src/finn/data/cpp \
--std=c++11 -lz""".format(os.environ["VIVADO_PATH"])
with open(test_dir + "/compile.sh", "w") as f:
f.write(cmd_compile)
compile = subprocess.Popen(
......
import os.path
# import os.path
import os
import pytest
......@@ -10,6 +11,7 @@ from finn.core.modelwrapper import ModelWrapper
from finn.transformation.fpgadataflow.codegen_ipgen import CodeGen_ipgen
from finn.transformation.fpgadataflow.codegen_ipstitch import CodeGen_ipstitch
from finn.transformation.fpgadataflow.hlssynth_ipgen import HLSSynth_IPGen
from finn.transformation.fpgadataflow.make_deployment import DeployToPYNQ
from finn.transformation.fpgadataflow.make_pynq_driver import MakePYNQDriver
from finn.transformation.fpgadataflow.make_pynq_proj import MakePYNQProject
from finn.transformation.fpgadataflow.synth_pynq_proj import SynthPYNQProject
......@@ -18,16 +20,11 @@ from finn.util.basic import (
calculate_signed_dot_prod_range,
gen_finn_dt_tensor,
make_build_dir,
pynq_part_map,
)
# TODO control board/part for tests from a global place
# settings for Ultra96
test_fpga_part = "xczu3eg-sbva484-1-e"
test_pynq_board = "Ultra96"
# settings for PYNQ-Z1
# test_fpga_part = "xc7z020clg400-1"
# test_pynq_board = "Pynq-Z1"
test_pynq_board = os.getenv("PYNQ_BOARD", default="Pynq-Z1")
test_fpga_part = pynq_part_map[test_pynq_board]
ip_stitch_model_dir = make_build_dir("test_fpgadataflow_ipstitch")
......@@ -57,7 +54,7 @@ def create_one_fc_model():
MW=m,
MH=m,
SIMD=m,
PE=m,
PE=m // 2,
inputDataType=idt.name,
weightDataType=wdt.name,
outputDataType=odt.name,
......@@ -72,7 +69,8 @@ def create_one_fc_model():
["outp_tlast"],
domain="finn",
backend="fpgadataflow",
NumIters=1,
NumIters=2,
ElemWidth=odt.bitwidth(),
StreamWidth=odt.bitwidth() * m,
)
......@@ -162,6 +160,7 @@ def create_two_fc_model():
backend="fpgadataflow",
NumIters=m,
StreamWidth=2,
ElemWidth=odt.bitwidth(),
)
graph = helper.make_graph(
......@@ -255,8 +254,35 @@ def test_fpgadataflow_ipstitch_pynq_synth():
@pytest.mark.dependency(depends=["test_fpgadataflow_ipstitch_pynq_projgen"])
def test_fpgadataflow_ipstitch_pynq_driver():
    """Generate the PYNQ Python driver for the stitched-IP project and
    check that the driver directory metadata points at a real directory."""
    model = ModelWrapper(ip_stitch_model_dir + "/test_fpgadataflow_pynq_projgen.onnx")
    model = model.transform(MakePYNQDriver())
    driver_dir = model.get_metadata_prop("pynq_driver_dir")
    assert driver_dir is not None
    assert os.path.isdir(driver_dir)
    # save for the downstream deployment test
    model.save(ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_pynq_driver.onnx")
@pytest.mark.dependency(depends=["test_fpgadataflow_ipstitch_pynq_driver"])
def test_fpgadataflow_ipstitch_pynq_deployment_folder():
    """Run DeployToPYNQ and verify the board metadata round-trips and a
    local deployment directory was created."""
    model = ModelWrapper(
        ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_pynq_driver.onnx"
    )
    # NOTE(review): board address and credentials are hard-coded to a
    # specific lab setup; consider reading these from the environment
    ip = "172.21.165.113"
    username = "xilinx"
    password = "xilinx"
    target_dir = "/home/xilinx/" + os.environ["FINN_INST_NAME"]
    model = model.transform(DeployToPYNQ(ip, username, password, target_dir))
    # the transform stores the connection info as metadata props
    pynq_ip = model.get_metadata_prop("pynq_ip")
    pynq_username = model.get_metadata_prop("pynq_username")
    pynq_password = model.get_metadata_prop("pynq_password")
    pynq_target_dir = model.get_metadata_prop("pynq_target_dir")
    assert pynq_ip == ip
    assert pynq_username == username
    assert pynq_password == password
    assert pynq_target_dir == target_dir
    # the local staging dir for the copied deployment files must exist
    deployment_dir = model.get_metadata_prop("pynq_deploy_dir")
    assert deployment_dir is not None
    assert os.path.isdir(deployment_dir)
    model.save(ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_pynq_deployment.onnx")
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment