diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
index 113ea8ea02dc64aacf92b3fc3f5dda6417e25517..dbd98623c4cdf5baca9fa9c137debf8be0f70981 100644
--- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
+++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
@@ -139,6 +139,10 @@ class InferBinaryStreamingFCLayer(Transformation):
     StreamingFCLayer_Batch layers. Any immediately following MultiThreshold
     layers will also be absorbed into the MVTU."""
 
+    def __init__(self, mem_mode="const"):
+        super().__init__()
+        self.mem_mode = mem_mode
+
     def apply(self, model):
         graph = model.graph
         node_ind = 0
@@ -219,6 +223,7 @@ class InferBinaryStreamingFCLayer(Transformation):
                         binaryXnorMode=1,
                         noActivation=0,
                         numInputVectors=list(mm_in_shape[:-1]),
+                        mem_mode=self.mem_mode,
                     )
                     graph.node.insert(node_ind, new_node)
                     # remove old nodes
@@ -249,6 +254,7 @@ class InferBinaryStreamingFCLayer(Transformation):
                         binaryXnorMode=1,
                         noActivation=1,
                         numInputVectors=list(mm_in_shape[:-1]),
+                        mem_mode=self.mem_mode,
                     )
                     graph.node.insert(node_ind, new_node)
                     # remove old node
@@ -265,6 +271,10 @@ class InferQuantizedStreamingFCLayer(Transformation):
     StreamingFCLayer_Batch layers. Any immediately following MultiThreshold
     layers will also be absorbed into the MVTU."""
 
+    def __init__(self, mem_mode="const"):
+        super().__init__()
+        self.mem_mode = mem_mode
+
     def apply(self, model):
         graph = model.graph
         node_ind = 0
@@ -347,6 +357,7 @@ class InferQuantizedStreamingFCLayer(Transformation):
                         binaryXnorMode=0,
                         noActivation=0,
                         numInputVectors=list(mm_in_shape[:-1]),
+                        mem_mode=self.mem_mode,
                     )
                     graph.node.insert(node_ind, new_node)
                     # remove old nodes
@@ -377,6 +388,7 @@ class InferQuantizedStreamingFCLayer(Transformation):
                         binaryXnorMode=0,
                         noActivation=1,
                         numInputVectors=list(mm_in_shape[:-1]),
+                        mem_mode=self.mem_mode,
                     )
                     graph.node.insert(node_ind, new_node)
                     # remove old node
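
Usage note (not part of the patch): a minimal sketch of how the new mem_mode constructor argument could be passed when applying these transformations. The ModelWrapper instance model, the to_hls import alias, and the "decoupled" mode value are assumptions based on common FINN conventions, not defined by this diff.

# Minimal usage sketch; assumes `model` is a FINN ModelWrapper whose
# MatMul / XnorPopcountMatMul (+ MultiThreshold) patterns are ready for
# conversion to StreamingFCLayer_Batch nodes.
import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls

# Default keeps the previous behaviour: mem_mode="const" is forwarded to
# every StreamingFCLayer_Batch node the transformation creates.
model = model.transform(to_hls.InferQuantizedStreamingFCLayer())

# With this patch, a different memory mode can be chosen per conversion
# (the "decoupled" value here is an assumed example, not set by this diff).
model = model.transform(to_hls.InferBinaryStreamingFCLayer(mem_mode="decoupled"))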