From 49055cf229f1b959ba38916d5c25cd3a7036d35e Mon Sep 17 00:00:00 2001
From: icolbert <Ian.Colbert@amd.com>
Date: Tue, 21 Feb 2023 11:13:36 -0800
Subject: [PATCH] Remove MinimizeAccumulatorWidth from convert_to_hls layers

---
 .../fpgadataflow/convert_to_hls_layers.py | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
index 7b8a1bf6b..3029e09d4 100644
--- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
+++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
@@ -40,10 +40,6 @@ from qonnx.transformation.infer_shapes import InferShapes
 from qonnx.util.basic import get_by_name
 from qonnx.util.onnx import nchw_to_nhwc
 
-from finn.transformation.fpgadataflow.minimize_accumulator_width import (
-    MinimizeAccumulatorWidth,
-)
-
 
 class InferConvInpGen(Transformation):
     """Convert Im2Col layers to ConvolutionInputGenerator layers."""
@@ -761,7 +757,6 @@ class InferBinaryMatrixVectorActivation(Transformation):
                 graph.node.remove(n)
                 graph_modified = True
         if graph_modified:
-            model = model.transform(MinimizeAccumulatorWidth())
             model = model.transform(InferShapes())
             model = model.transform(InferDataTypes())
         return (model, graph_modified)
@@ -904,7 +899,6 @@ class InferQuantizedMatrixVectorActivation(Transformation):
                 graph.node.remove(n)
                 graph_modified = True
         if graph_modified:
-            model = model.transform(MinimizeAccumulatorWidth())
             model = model.transform(InferShapes())
             model = model.transform(InferDataTypes())
         return (model, graph_modified)
@@ -1057,7 +1051,6 @@ class InferVectorVectorActivation(Transformation):
                 graph.node.remove(n)
                 graph_modified = True
         if graph_modified:
-            model = model.transform(MinimizeAccumulatorWidth())
             model = model.transform(InferShapes())
             model = model.transform(InferDataTypes())
         return (model, graph_modified)
@@ -1135,7 +1128,7 @@ class InferThresholdingLayer(Transformation):
                 PE=pe,
                 numSteps=thl_thres_shape[1],
                 inputDataType=idt.name,
-                weightDataType=idt.name,  # will be set by MinimizeAccumulatorWidth
+                weightDataType=idt.name,  # can be tightened by MinimizeAccumulatorWidth
                 outputDataType=odt.name,
                 numInputVectors=list(thl_in_shape[:-1]),
                 ActVal=actval,
@@ -1148,7 +1141,6 @@
                 graph_modified = True
 
         if graph_modified:
-            model = model.transform(MinimizeAccumulatorWidth())
             model = model.transform(InferShapes())
             model = model.transform(InferDataTypes())
         return (model, graph_modified)
--
GitLab
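
Note (not part of the patch): with this change, the convert-to-HLS transformations no longer run MinimizeAccumulatorWidth() internally, so a build flow that relies on minimized accumulator widths has to apply it as an explicit step after conversion. The sketch below is a minimal illustration under that assumption, not the project's documented flow; "model.onnx" is a hypothetical, already-prepared input model, and the import paths and transformation classes are the ones referenced in the diff above.

    # Minimal sketch: apply MinimizeAccumulatorWidth explicitly after the
    # HLS conversion transforms, since the implicit call has been removed.
    from qonnx.core.modelwrapper import ModelWrapper

    import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls
    from finn.transformation.fpgadataflow.minimize_accumulator_width import (
        MinimizeAccumulatorWidth,
    )

    model = ModelWrapper("model.onnx")  # hypothetical input model
    model = model.transform(to_hls.InferQuantizedMatrixVectorActivation())
    model = model.transform(to_hls.InferThresholdingLayer())
    # Previously triggered inside the transforms above; now a separate step.
    model = model.transform(MinimizeAccumulatorWidth())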