From 87529826556586ab49d91dd8ad5847f28653690b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu <maltanar@gmail.com> Date: Fri, 22 Jul 2022 19:07:27 +0200 Subject: [PATCH] [InferVVAU] add support for setting mem_mode at conversion time --- .../transformation/fpgadataflow/convert_to_hls_layers.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 429bc34ff..7f3f8bff9 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -870,6 +870,10 @@ class InferVectorVectorActivation(Transformation): a depthwise convolution. Any immediately following MultiThreshold layers will also be absorbed into the VVAU.""" + def __init__(self, mem_mode="const"): + super().__init__() + self.mem_mode = mem_mode + def apply(self, model): graph = model.graph node_ind = 0 @@ -970,6 +974,7 @@ class InferVectorVectorActivation(Transformation): ActVal=actval, noActivation=0, name="VectorVectorActivation_" + n.name, + mem_mode=self.mem_mode, ) graph.node.insert(node_ind, new_node) # remove old nodes -- GitLab