diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn
index 5ce7cf3553ffab2dd0b928823157995f698b0dd8..1572ba2872a46a8eaa9331a601a77aada0b0aa1c 100644
--- a/docker/Dockerfile.finn
+++ b/docker/Dockerfile.finn
@@ -86,7 +86,7 @@ RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg
 
 # git-based Python repo dependencies
 # these are installed in editable mode for easier co-development
-ARG FINN_BASE_COMMIT="1fdf06c068f77ed5a312cd3a6edad098f64b09ed"
+ARG FINN_BASE_COMMIT="7c2603a95e90e4de2575020e575c24eab6a15889"
 ARG FINN_EXP_COMMIT="f82c0d9868bb88ea045dfadb28508d327d287221"
 ARG BREVITAS_COMMIT="462f86cdc60f9915baf13afd1676fb21da44c2ee"
 ARG PYVERILATOR_COMMIT="0c3eb9343500fc1352a02c020a736c8c2db47e8e"
diff --git a/src/finn/custom_op/fpgadataflow/addstreams_batch.py b/src/finn/custom_op/fpgadataflow/addstreams_batch.py
index 856f84fae0cb90abcc08fc098d684872b1a6a6a1..fa80e47485eef4f289b0272fd73ac185bd1c2c5e 100644
--- a/src/finn/custom_op/fpgadataflow/addstreams_batch.py
+++ b/src/finn/custom_op/fpgadataflow/addstreams_batch.py
@@ -29,7 +29,6 @@
 import numpy as np
 import os
 import warnings
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -84,19 +83,7 @@ class AddStreams_Batch(HLSCustomOp):
         assert ishape == exp_ishape, "Unexpected input1 shape."
         ishape = tuple(model.get_tensor_shape(self.onnx_node.input[1]))
         assert ishape == exp_ishape, "Unexpected input2 shape."
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py
index 3cd6a7dfdbb2db7df08e1c726ee522b3c2ed20a0..4961f6148231252d255c1830ced418308032ce41 100644
--- a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py
+++ b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py
@@ -30,7 +30,6 @@ import numpy as np
 import os
 import warnings
 from math import ceil
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -125,18 +124,7 @@ class ChannelwiseOp_Batch(HLSCustomOp):
     def make_shape_compatible_op(self, model):
         oshape = self.get_normal_output_shape()
         # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py
index 19732e44398665cfd9b97f9a1abcec56372e2523..a4018836846257c15ad203b1cef54c03cd081e45 100644
--- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py
+++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py
@@ -29,7 +29,6 @@
 import math
 import numpy as np
 import os
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -148,18 +147,7 @@ class ConvolutionInputGenerator(HLSCustomOp):
         ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
         assert ishape == exp_ishape, "Unexpect input shape for ConvInpGen."
         # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py
index 1f9fcade0f974df2b2f21171d42e63f4af5e7eac..c4cf804126328b27fd56091d70f0b6e658b5b3c1 100644
--- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py
+++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py
@@ -29,7 +29,6 @@
 import math
 import numpy as np
 import os
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -137,19 +136,7 @@ class ConvolutionInputGenerator1D(HLSCustomOp):
         oshape = self.get_normal_output_shape()
         ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
         assert ishape == exp_ishape, "Unexpect input shape for ConvInpGen."
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/downsampler.py b/src/finn/custom_op/fpgadataflow/downsampler.py
index e8948c322a2543ee8ecbf682ee4a7989270ee3ad..6a0667f67dc9f127637bd80cc3e88f682a320cbb 100644
--- a/src/finn/custom_op/fpgadataflow/downsampler.py
+++ b/src/finn/custom_op/fpgadataflow/downsampler.py
@@ -1,7 +1,6 @@
 import numpy as np
 import os
 import warnings
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -83,19 +82,7 @@ class DownSampler(HLSCustomOp):
         oshape = self.get_normal_output_shape()
         ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
         assert ishape == exp_ishape, "Unexpect input shape for DownSampler."
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py
index 03d3436346aa716e9ea9e49027fdbb17bee74311..f29ea431ffc95d6291407c35c17d943647cf45df 100644
--- a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py
+++ b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py
@@ -1,7 +1,6 @@
 import numpy as np
 import os
 import warnings
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -99,19 +98,7 @@ class FMPadding_Batch(HLSCustomOp):
         oshape = self.get_normal_output_shape()
         ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
         assert ishape == exp_ishape, "Unexpect input shape for SameResize."
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py b/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py
index eabdcf599d23d35ed13069cb81afa3ec4999e8e7..6d4a55ee5c86b68776f4c7c2e58930034bb0be02 100644
--- a/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py
+++ b/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py
@@ -29,7 +29,6 @@
 import numpy as np
 import os
 import warnings
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -95,19 +94,7 @@ class GlobalAccPool_Batch(HLSCustomOp):
         oshape = self.get_normal_output_shape()
         ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
         assert ishape == exp_ishape, "Unexpected input shape."
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten(),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/iodma.py b/src/finn/custom_op/fpgadataflow/iodma.py
index 4fa74e35dbec902d6c4d980c48ffaa69cbd5ccd9..802c7e78515336ef884e5ff09356085b5cc6069f 100644
--- a/src/finn/custom_op/fpgadataflow/iodma.py
+++ b/src/finn/custom_op/fpgadataflow/iodma.py
@@ -29,7 +29,6 @@
 import math
 import numpy as np
 import warnings
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -146,19 +145,7 @@ class IODMA(HLSCustomOp):
         oshape = self.get_normal_output_shape()
         ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
         assert ishape == exp_ishape, "Unexpected input shape."
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/labelselect_batch.py b/src/finn/custom_op/fpgadataflow/labelselect_batch.py
index d70d0f6a9b0cacb491ce748b84c8c7c474605170..1eb5962fdbc54092eaeb4796806b3a623c65aea8 100644
--- a/src/finn/custom_op/fpgadataflow/labelselect_batch.py
+++ b/src/finn/custom_op/fpgadataflow/labelselect_batch.py
@@ -102,18 +102,14 @@ class LabelSelect_Batch(HLSCustomOp):
         oshape = self.get_normal_output_shape()
         ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
         assert ishape == exp_ishape, "Unexpected input shape."
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.int64)
         return helper.make_node(
-            "Constant",
+            "RandomNormal",
             inputs=[],
             outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.INT64,
-                dims=values.shape,
-                vals=values.flatten(),
-            ),
+            mean=0.0,
+            scale=1.0,
+            dtype=TensorProto.INT64,
+            shape=list(oshape),
         )
 
     def infer_node_datatype(self, model):
diff --git a/src/finn/custom_op/fpgadataflow/pool_batch.py b/src/finn/custom_op/fpgadataflow/pool_batch.py
index f4638e6de3616c568da295f091a1ad39262e6dd8..ba8a446f2cf7541c0bd2e1dff731afe2397942ef 100644
--- a/src/finn/custom_op/fpgadataflow/pool_batch.py
+++ b/src/finn/custom_op/fpgadataflow/pool_batch.py
@@ -28,7 +28,6 @@
 
 import numpy as np
 import os
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -163,19 +162,7 @@ class Pool_Batch(HLSCustomOp):
         oshape = self.get_normal_output_shape()
         ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
         assert ishape == exp_ishape, "Unexpected input shape for Pool_Batch."
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
index 11809b9bc267d00c8cd630163cc969187efc7417..1791706afa217d5eb453064547c1ea66b306d227 100644
--- a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
@@ -30,7 +30,6 @@ import math
 import numpy as np
 import os
 import warnings
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -165,19 +164,7 @@ class StreamingDataWidthConverter_Batch(HLSCustomOp):
         oshape = self.get_normal_output_shape()
         ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
         assert ishape == tuple(exp_ishape), "Unexpect input shape for StreamingDWC."
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
index 968c9a6bad0364813c4e70829628da3d07152fbd..90abb66e66bc54bc9d1f4c7a08c58ca58e6d1741 100644
--- a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
@@ -31,7 +31,6 @@ import numpy as np
 import os
 import textwrap
 import warnings
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -151,19 +150,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
 
     def make_shape_compatible_op(self, model):
         oshape = self.get_normal_output_shape()
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/streamingfifo.py b/src/finn/custom_op/fpgadataflow/streamingfifo.py
index c8ae83cc90e1199831da5286d5051fdb969e825a..91f6ed5b8d29fd72ea1fbb8a3da94cfc103af88e 100644
--- a/src/finn/custom_op/fpgadataflow/streamingfifo.py
+++ b/src/finn/custom_op/fpgadataflow/streamingfifo.py
@@ -30,7 +30,6 @@ import numpy as np
 import os
 import subprocess
 import warnings
-from onnx import TensorProto, helper
 from shutil import copy
 
 from finn.core.datatype import DataType
@@ -78,19 +77,7 @@ class StreamingFIFO(HLSCustomOp):
         oshape = self.get_normal_output_shape()
         ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
         assert ishape == tuple(exp_ishape), "Unexpect input shape for StreamingFIFO."
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
index 87ecde8f9c7c50b7a22213db2b856d7439050421..1e66a5c204cc62bb7620907f82fcd5b2072bc184 100644
--- a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
@@ -29,7 +29,6 @@
 import numpy as np
 import os
 import warnings
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -140,19 +139,7 @@ class StreamingMaxPool_Batch(HLSCustomOp):
         oshape = self.get_normal_output_shape()
         ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
         assert ishape == exp_ishape, "Unexpect input shape for StreamingMaxPool."
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py
index bd136d000e675c6b2e1d7fd84ecdd6ac5002ff06..610139f44ee7e8be1320b47c99222667fa6ed850 100644
--- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py
+++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py
@@ -31,7 +31,6 @@ import os
 import textwrap
 import warnings
 from math import ceil, log2
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -112,19 +111,7 @@ class Thresholding_Batch(HLSCustomOp):
 
     def make_shape_compatible_op(self, model):
         oshape = self.get_normal_output_shape()
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/upsampler.py b/src/finn/custom_op/fpgadataflow/upsampler.py
index 8331610dc11bb440e6bb56924bbf8efeed2653c2..d5f809305b6bf30285e58e1c702bfe9d21f5fa03 100644
--- a/src/finn/custom_op/fpgadataflow/upsampler.py
+++ b/src/finn/custom_op/fpgadataflow/upsampler.py
@@ -1,7 +1,6 @@
 import numpy as np
 import os
 import warnings
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -69,19 +68,7 @@ class UpsampleNearestNeighbour_Batch(HLSCustomOp):
         assert (
             ishape == exp_ishape
         ), "Unexpect input shape for UpsampleNearestNeighbour_Batch."
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
diff --git a/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py b/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py
index fa990f28087c0ec47a071e85ed1af6ff9328f70f..c67eb0f21bd3a3a21bd92d3b4595ab66bd703b93 100644
--- a/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py
+++ b/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py
@@ -2,7 +2,6 @@ import math
 import numpy as np
 import os
 import warnings
-from onnx import TensorProto, helper
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -129,19 +128,7 @@ class Vector_Vector_Activate_Batch(HLSCustomOp):
 
     def make_shape_compatible_op(self, model):
         oshape = self.get_normal_output_shape()
-        # implement tensor with correct shape
-        values = np.random.randn(*oshape).astype(np.float32)
-        return helper.make_node(
-            "Constant",
-            inputs=[],
-            outputs=[self.onnx_node.output[0]],
-            value=helper.make_tensor(
-                name="const_tensor",
-                data_type=TensorProto.FLOAT,
-                dims=values.shape,
-                vals=values.flatten().astype(float),
-            ),
-        )
+        return super().make_const_shape_op(oshape)
 
     def infer_node_datatype(self, model):
         node = self.onnx_node
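
Note: the boilerplate removed above is replaced by a single shared helper, make_const_shape_op(),
inherited from the finn-base custom-op base class (hence the FINN_BASE_COMMIT bump in
docker/Dockerfile.finn). finn-base itself is not part of this diff, so the snippet below is only an
illustrative sketch of what that helper presumably does, inferred from the LabelSelect_Batch hunk;
the class name and the float default are assumptions, not verbatim finn-base code.

    from onnx import TensorProto, helper

    class CustomOpSketch:
        """Hypothetical stand-in for the finn-base custom-op base class."""

        def __init__(self, onnx_node):
            self.onnx_node = onnx_node

        def make_const_shape_op(self, shape):
            """Return a dummy node that only carries the output shape.

            A RandomNormal node with an explicit `shape` attribute is enough for
            ONNX shape inference and avoids embedding a (potentially large)
            constant tensor, which is all make_shape_compatible_op needs.
            """
            return helper.make_node(
                "RandomNormal",
                inputs=[],
                outputs=[self.onnx_node.output[0]],
                mean=0.0,
                scale=1.0,
                dtype=TensorProto.FLOAT,  # assumed float default
                shape=list(shape),
            )

LabelSelect_Batch keeps its own helper.make_node() call rather than using the shared helper because
its output holds INT64 indices, so it passes dtype=TensorProto.INT64 explicitly instead of the
(presumed) float default shown above.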