Unverified commit 9187e247, authored by auphelia, committed by GitHub

Merge pull request #649 from Xilinx/feature/decoupled-vvau

Decoupled mode support for VVAU
Parents: 233d089a, 49c89688
@@ -32,7 +32,7 @@ FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366"
 BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03"
 PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2"
 CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4"
-HLSLIB_COMMIT="36e6c8cb1019ba0307e1886011692a58e02f3bfa"
+HLSLIB_COMMIT="bb43a97f799b63f536885919f03ecdfcfb04f405"
 OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc"
 AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b"
 XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e"
(This diff is collapsed and not shown here.)
@@ -910,6 +910,10 @@ class InferVectorVectorActivation(Transformation):
     a depthwise convolution. Any immediately following MultiThreshold
     layers will also be absorbed into the VVAU."""
 
+    def __init__(self, mem_mode="const"):
+        super().__init__()
+        self.mem_mode = mem_mode
+
     def apply(self, model):
         graph = model.graph
         node_ind = 0
@@ -1010,6 +1014,7 @@
                         ActVal=actval,
                         noActivation=0,
                         name="VectorVectorActivation_" + n.name,
+                        mem_mode=self.mem_mode,
                     )
                     graph.node.insert(node_ind, new_node)
                     # remove old nodes
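For context, a minimal usage sketch of the new option (not part of the diff above): the InferVectorVectorActivation transform can now be asked to emit VVAU nodes in decoupled weight-streaming mode instead of the previous const-only behaviour. Import paths follow the usual FINN layout and may differ between releases; the model file names are hypothetical.

from qonnx.core.modelwrapper import ModelWrapper  # finn.core.modelwrapper on older FINN releases
import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls

# Hypothetical depthwise-convolution model; mem_mode defaults to "const",
# while "decoupled" selects runtime weight streaming for the generated VVAU nodes.
model = ModelWrapper("depthwise_model.onnx")
model = model.transform(to_hls.InferVectorVectorActivation(mem_mode="decoupled"))
model.save("depthwise_model_vvau.onnx")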
@@ -75,7 +75,19 @@ def _calculate_dot_prod_range(dt_a, dt_b, len):
 
 
 def _make_single_vvau_modelwrapper(
-    W, pe, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, T=None, tdt=None
+    W,
+    pe,
+    k_h,
+    k_w,
+    channels,
+    dim_h,
+    dim_w,
+    wdt,
+    idt,
+    odt,
+    T=None,
+    tdt=None,
+    mem_mode="const",
 ):
     in_shape = [1, dim_h, dim_w, k_h * k_w * channels]  # [N, H, W, K*K*CH]
     out_shape = [
@@ -113,6 +125,7 @@ def _make_single_vvau_modelwrapper(
         weightDataType=wdt.name,
         outputDataType=odt.name,
         noActivation=no_act,
+        mem_mode=mem_mode,
     )
 
     graph = helper.make_graph(
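Because mem_mode is stored as an ordinary node attribute, it can also be inspected or overridden on an existing VectorVectorActivation node through the standard custom-op accessors. A small sketch, assuming vvau_node is such a node taken from a converted graph (the registry import lives in qonnx on current FINN, finn.custom_op.registry on older versions):

from qonnx.custom_op.registry import getCustomOp

vvau_inst = getCustomOp(vvau_node)           # wrap the ONNX node in its FINN custom op
print(vvau_inst.get_nodeattr("mem_mode"))    # e.g. "decoupled"
vvau_inst.set_nodeattr("mem_mode", "const")  # switch back to embedded weights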
@@ -140,7 +153,7 @@ def prepare_inputs(input_tensor):
     return {"inp": input_tensor}
 
-
+# mem_mode: const or decoupled
 # input datatype
 @pytest.mark.parametrize("idt", [DataType["UINT4"], DataType["UINT8"]])
 # weight datatype
 @pytest.mark.parametrize("wdt", [DataType["INT4"]])
@@ -156,13 +169,15 @@ def prepare_inputs(input_tensor):
 @pytest.mark.parametrize("k_w", [3, 1])
 # Number of input and output channels
 @pytest.mark.parametrize("channels", [3, 4])
+# memory mode
+@pytest.mark.parametrize("mem_mode", ["const", "decoupled"])
 # execution mode
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
 @pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_fpgadataflow_vvau(
-    idt, wdt, act, pe, dim_h, dim_w, k_h, k_w, channels, exec_mode
+    idt, wdt, act, pe, dim_h, dim_w, k_h, k_w, channels, mem_mode, exec_mode
 ):
     if pe == "channels":
         pe = channels
@@ -198,7 +213,7 @@ def test_fpgadataflow_vvau(
     tdt = DataType["INT32"]
 
     model = _make_single_vvau_modelwrapper(
-        W, pe, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, T, tdt
+        W, pe, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, T, tdt, mem_mode
    )
 
     if exec_mode == "cppsim":
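As a rough illustration of what one point of the extended parametrization exercises, the test helper can be driven directly with the new argument. This is a sketch only: _make_single_vvau_modelwrapper is local to this test module, the shapes below are hypothetical, and gen_finn_dt_tensor comes from qonnx.util.basic (finn.util.basic on older FINN versions).

from qonnx.core.datatype import DataType
from qonnx.util.basic import gen_finn_dt_tensor

idt, wdt, odt = DataType["UINT4"], DataType["INT4"], DataType["INT32"]
k_h = k_w = 3
channels = pe = 3
dim_h = dim_w = 10  # hypothetical spatial dims
W = gen_finn_dt_tensor(wdt, (channels, 1, k_h, k_w))  # depthwise weights [CH, 1, K, K]

# no activation: T/tdt keep their defaults, only mem_mode is new here
model = _make_single_vvau_modelwrapper(
    W, pe, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, mem_mode="decoupled"
)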