Unverified commit 9bcc269c, authored by Yaman Umuroglu, committed by GitHub

Merge pull request #325 from Xilinx/feature/update-pytorch-brevitas

Update PyTorch and Brevitas
parents e5da788b 79f0bb5d
Showing changed files with 445 additions and 175 deletions
@@ -27,15 +27,26 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 FROM pytorch/pytorch:1.1.0-cuda10.0-cudnn7.5-devel
-MAINTAINER Yaman Umuroglu <yamanu@xilinx.com>
+LABEL maintainer="Yaman Umuroglu <yamanu@xilinx.com>"
 WORKDIR /workspace
 RUN apt-get update
 RUN apt-get -y upgrade
-RUN apt-get install -y build-essential libglib2.0-0 libsm6 libxext6 libxrender-dev
-RUN apt-get install -y verilator zsh nano rsync
-RUN apt-get install -y sshpass wget unzip
+RUN apt-get install -y build-essential
+RUN apt-get install -y libglib2.0-0
+RUN apt-get install -y libsm6
+RUN apt-get install -y libxext6
+RUN apt-get install -y libxrender-dev
+RUN apt-get install -y verilator
+RUN apt-get install -y nano
+RUN apt-get install -y zsh
+RUN apt-get install -y rsync
+RUN apt-get install -y git
+RUN apt-get install -y sshpass
+RUN apt-get install -y wget
+RUN apt-get install -y unzip
+RUN apt-get install -y zip
 RUN echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config
 # XRT deps
......
@@ -26,8 +26,8 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-FROM pytorch/pytorch:1.1.0-cuda10.0-cudnn7.5-devel
-MAINTAINER Yaman Umuroglu <yamanu@xilinx.com>
+FROM pytorch/pytorch:1.7.1-cuda11.0-cudnn8-runtime
+LABEL maintainer="Yaman Umuroglu <yamanu@xilinx.com>"
 ARG GID
 ARG GNAME
 ARG UNAME
@@ -38,9 +38,20 @@ WORKDIR /workspace
 RUN apt-get update
 RUN apt-get -y upgrade
-RUN apt-get install -y build-essential libglib2.0-0 libsm6 libxext6 libxrender-dev
-RUN apt-get install -y verilator nano zsh rsync
-RUN apt-get -y install sshpass wget unzip
+RUN apt-get install -y build-essential
+RUN apt-get install -y libglib2.0-0
+RUN apt-get install -y libsm6
+RUN apt-get install -y libxext6
+RUN apt-get install -y libxrender-dev
+RUN apt-get install -y verilator
+RUN apt-get install -y nano
+RUN apt-get install -y zsh
+RUN apt-get install -y rsync
+RUN apt-get install -y git
+RUN apt-get install -y sshpass
+RUN apt-get install -y wget
+RUN apt-get install -y unzip
+RUN apt-get install -y zip
 RUN echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config
COPY requirements.txt .
@@ -60,16 +71,18 @@ RUN pip install scikit-learn==0.24.1
 RUN pip install tqdm==4.31.1
 RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading
 # switch user
 RUN groupadd -g $GID $GNAME
 RUN useradd -M -u $UID $UNAME -g $GNAME
 RUN usermod -aG sudo $UNAME
 RUN echo "$UNAME:$PASSWD" | chpasswd
 RUN echo "root:$PASSWD" | chpasswd
 RUN chown -R $UNAME:$GNAME /workspace
+RUN ln -s /workspace /home/$UNAME
+RUN chown -R $UNAME:$GNAME /home/$UNAME
 USER $UNAME
 # cloning dependency repos (as user)
 # finn-base
 RUN git clone https://github.com/Xilinx/finn-base.git /workspace/finn-base
......
@@ -14,7 +14,7 @@ gecho () {
 # the repos themselves are cloned in the Dockerfile
 FINN_BASE_COMMIT=8908c6a3f6674c4fa790954bd41c23ee5bf053df
 FINN_EXP_COMMIT=e9f97dcdb4db2f889b0f36af079a6a1792b7d4de
-BREVITAS_COMMIT=aff49758ec445d77c75721c7de3091a2a1797ca8
+BREVITAS_COMMIT=14abbe1e7ef82485d79415871fcf5766b0a40a00
CNPY_COMMIT=4e8810b1a8637695171ed346ce68f6984e585ef4
HLSLIB_COMMIT=2e49322d1bbc4969ca293843bda1f3f9c05456fc
PYVERILATOR_COMMIT=e2ff74030de3992dcac54bf1b6aad2915946e8cb
......
@@ -158,6 +158,7 @@ def step_streamline(model: ModelWrapper, cfg: DataflowBuildConfig):
     topologies.
     """
     model = model.transform(absorb.AbsorbSignBiasIntoMultiThreshold())
+    model = model.transform(MoveScalarLinearPastInvariants())
     model = model.transform(Streamline())
     need_lowering = len(model.get_nodes_by_op_type("Conv")) > 0
......
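For context, the new MoveScalarLinearPastInvariants pass can also be exercised standalone on an exported model. A minimal sketch, assuming the import paths of FINN's streamline package as used elsewhere in this PR; the file names are hypothetical:

from finn.core.modelwrapper import ModelWrapper
from finn.transformation.streamline import Streamline
from finn.transformation.streamline.reorder import MoveScalarLinearPastInvariants
import finn.transformation.streamline.absorb as absorb

model = ModelWrapper("exported_model.onnx")  # hypothetical exported model
model = model.transform(absorb.AbsorbSignBiasIntoMultiThreshold())
# the new step: move scalar mul/add past shape-invariant ops (e.g. Reshape)
# so that input scaling can be streamlined as well
model = model.transform(MoveScalarLinearPastInvariants())
model = model.transform(Streamline())
model.save("streamlined.onnx")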
@@ -32,6 +32,23 @@ from finn.transformation.infer_shapes import InferShapes
 import numpy as np

+def _remove_node_and_rewire(model, node):
+    producer = model.find_producer(node.input[0])
+    if producer is not None:
+        # wire output tensor to
+        # output of producer node
+        producer.output[0] = node.output[0]
+    else:
+        # node is first in graph
+        consumer = model.find_consumer(node.output[0])
+        assert consumer is not None, "Whole graph is identity"
+        assert consumer.input[0] == node.output[0]
+        # rewire consumer's input directly to graph input
+        consumer.input[0] = node.input[0]
+    # remove node
+    model.graph.node.remove(node)

 class RemoveIdentityOps(Transformation):
     """Remove identity ops like Add/Sub with zero or Mul/Div with one"""
@@ -48,11 +65,7 @@ class RemoveIdentityOps(Transformation):
             ):
                 A = model.get_initializer(n.input[1])
                 if A is not None and (A == np.zeros_like(A)).all():
-                    producer = model.find_producer(n.input[0])
-                    # remove node and wire output tensor to
-                    # output of producer node
-                    producer.output[0] = n.output[0]
-                    graph.node.remove(n)
+                    _remove_node_and_rewire(model, n)
             elif (
                 n.op_type in ["Mul", "Div"]
@@ -61,10 +74,6 @@ class RemoveIdentityOps(Transformation):
             ):
                 A = model.get_initializer(n.input[1])
                 if A is not None and (A == np.ones_like(A)).all():
-                    producer = model.find_producer(n.input[0])
-                    # remove node and wire output tensor to
-                    # output of producer node
-                    producer.output[0] = n.output[0]
-                    graph.node.remove(n)
+                    _remove_node_and_rewire(model, n)
         model = model.transform(InferShapes())
         return (model, graph_modified)
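As a usage note, the refactored transformation is applied like any other FINN graph transformation. A short sketch; the model file name is hypothetical:

from finn.core.modelwrapper import ModelWrapper
from finn.transformation.streamline.remove import RemoveIdentityOps

model = ModelWrapper("model_with_identity_ops.onnx")  # hypothetical input
n_before = len(model.graph.node)
model = model.transform(RemoveIdentityOps())
# Add/Sub-with-zero and Mul/Div-with-one nodes are now gone; with the new
# helper this also works when the identity op is the first node in the graph
assert len(model.graph.node) <= n_before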
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest
import os
import numpy as np
......@@ -19,8 +47,11 @@ export_onnx_path = "test_brevitas_conv.onnx"
@pytest.mark.parametrize("dw", [False, True])
@pytest.mark.parametrize("bias", [True, False])
@pytest.mark.parametrize("in_channels", [32])
def test_brevitas_QConv2d(dw, in_channels):
def test_brevitas_QConv2d(dw, bias, in_channels):
if bias:
pytest.xfail("bias export bug")
ishape = (1, 32, 111, 111)
if dw is True:
groups = in_channels
@@ -45,10 +76,8 @@ def test_brevitas_QConv2d(dw, in_channels):
         kernel_size=kernel_size,
         padding=padding,
         stride=stride,
-        bias=False,
+        bias=bias,
         bias_quant_type=QuantType.FP,
-        compute_output_bit_width=False,
-        compute_output_scale=False,
         weight_bit_width=4,
         weight_quant_type=QuantType.INT,
         weight_scaling_impl_type=ScalingImplType.STATS,
@@ -60,7 +89,7 @@ def test_brevitas_QConv2d(dw, in_channels):
     )
     weight_tensor = gen_finn_dt_tensor(DataType.INT4, w_shape)
     b_conv.weight = torch.nn.Parameter(torch.from_numpy(weight_tensor).float())
+    b_conv.eval()
     bo.export_finn_onnx(b_conv, ishape, export_onnx_path)
     model = ModelWrapper(export_onnx_path)
     model = model.transform(InferShapes())
@@ -69,7 +98,6 @@ def test_brevitas_QConv2d(dw, in_channels):
     odict = oxe.execute_onnx(model, idict, True)
     produced = odict[model.graph.output[0].name]
     inp_tensor = torch.from_numpy(inp_tensor).float()
-    b_conv.eval()
     expected = b_conv.forward(inp_tensor).detach().numpy()
     assert np.isclose(produced, expected, atol=1e-3).all()
......
# [Copyright (c) 2020, Xilinx; standard FINN BSD-3-Clause license header, identical to the one above]
import os
import onnx  # noqa
import torch
import numpy as np
-import brevitas.onnx as bo
-from brevitas.nn import QuantAvgPool2d
-from brevitas.quant_tensor import pack_quant_tensor
-from brevitas.core.quant import QuantType
-import pytest
-import finn.core.onnx_exec as oxe
from finn.core.modelwrapper import ModelWrapper
from finn.core.datatype import DataType
from finn.transformation.infer_shapes import InferShapes
from finn.transformation.infer_datatypes import InferDataTypes
from finn.util.basic import gen_finn_dt_tensor
+import finn.core.onnx_exec as oxe
+import pytest
+from brevitas.export import FINNManager
+from brevitas.nn import QuantAvgPool2d
+from brevitas.quant_tensor import QuantTensor
export_onnx_path = "test_brevitas_avg_pool_export.onnx"
@pytest.mark.parametrize("kernel_size", [2, 3])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("signed", [False, True])
@pytest.mark.parametrize("signed", [True, False])
@pytest.mark.parametrize("bit_width", [2, 4])
@pytest.mark.parametrize("input_bit_width", [4, 8, 16])
@pytest.mark.parametrize("channels", [2, 4])
......@@ -29,73 +55,46 @@ export_onnx_path = "test_brevitas_avg_pool_export.onnx"
def test_brevitas_avg_pool_export(
kernel_size, stride, signed, bit_width, input_bit_width, channels, idim
):
ishape = (1, channels, idim, idim)
ibw_tensor = torch.Tensor([input_bit_width])
b_avgpool = QuantAvgPool2d(
kernel_size=kernel_size,
stride=stride,
bit_width=bit_width,
quant_type=QuantType.INT,
)
# call forward pass manually once to cache scale factor and bitwidth
input_tensor = torch.from_numpy(np.zeros(ishape)).float()
scale = np.ones((1, channels, 1, 1))
output_scale = torch.from_numpy(scale).float()
input_quant_tensor = pack_quant_tensor(
tensor=input_tensor, scale=output_scale, bit_width=ibw_tensor, signed=signed
quant_avgpool = QuantAvgPool2d(
kernel_size=kernel_size, stride=stride, bit_width=bit_width
)
bo.export_finn_onnx(b_avgpool, ishape, export_onnx_path, input_t=input_quant_tensor)
model = ModelWrapper(export_onnx_path)
quant_avgpool.eval()
# determine input FINN datatype
if signed is True:
prefix = "INT"
else:
prefix = "UINT"
# determine input
prefix = "INT" if signed else "UINT"
dt_name = prefix + str(input_bit_width)
dtype = DataType[dt_name]
model = model.transform(InferShapes())
model = model.transform(InferDataTypes())
# execution with input tensor using integers and scale = 1
# calculate golden output
inp = gen_finn_dt_tensor(dtype, ishape)
input_tensor = torch.from_numpy(inp).float()
input_quant_tensor = pack_quant_tensor(
tensor=input_tensor, scale=output_scale, bit_width=ibw_tensor, signed=signed
)
b_avgpool.eval()
expected = b_avgpool.forward(input_quant_tensor).tensor.detach().numpy()
# finn execution
idict = {model.graph.input[0].name: inp}
odict = oxe.execute_onnx(model, idict, True)
produced = odict[model.graph.output[0].name]
assert (expected == produced).all()
# execution with input tensor using float and scale != 1
scale = np.random.uniform(low=0, high=1, size=(1, channels, 1, 1)).astype(
input_shape = (1, channels, idim, idim)
input_array = gen_finn_dt_tensor(dtype, input_shape)
# Brevitas QuantAvgPool layers need QuantTensors to export correctly
# which requires setting up a QuantTensor instance with the scale
# factor, zero point, bitwidth and signedness
scale_array = np.random.uniform(low=0, high=1, size=(1, channels, 1, 1)).astype(
np.float32
)
inp_tensor = inp * scale
input_tensor = torch.from_numpy(inp_tensor).float()
input_scale = torch.from_numpy(scale).float()
input_quant_tensor = pack_quant_tensor(
tensor=input_tensor, scale=input_scale, bit_width=ibw_tensor, signed=signed
input_tensor = torch.from_numpy(input_array * scale_array).float()
scale_tensor = torch.from_numpy(scale_array).float()
zp = torch.tensor(0.0)
input_quant_tensor = QuantTensor(
input_tensor, scale_tensor, zp, input_bit_width, signed
)
# export
FINNManager.export(
quant_avgpool, export_path=export_onnx_path, input_t=input_quant_tensor
)
# export again to set the scale values correctly
bo.export_finn_onnx(b_avgpool, ishape, export_onnx_path, input_t=input_quant_tensor)
model = ModelWrapper(export_onnx_path)
model = model.transform(InferShapes())
model = model.transform(InferDataTypes())
b_avgpool.eval()
expected = b_avgpool.forward(input_quant_tensor).tensor.detach().numpy()
# finn execution
idict = {model.graph.input[0].name: inp_tensor}
odict = oxe.execute_onnx(model, idict, True)
produced = odict[model.graph.output[0].name]
assert np.isclose(expected, produced).all()
# reference brevitas output
ref_output_array = quant_avgpool(input_quant_tensor).tensor.detach().numpy()
# finn output
idict = {model.graph.input[0].name: input_array}
odict = oxe.execute_onnx(model, idict, True)
finn_output = odict[model.graph.output[0].name]
# compare outputs
assert np.isclose(ref_output_array, finn_output).all()
# cleanup
os.remove(export_onnx_path)
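The key API change in this test is the move from pack_quant_tensor plus brevitas.onnx export to QuantTensor plus FINNManager. A distilled sketch of the new export flow, assuming the Brevitas commit pinned above; the layer choice, input shape, and output file name are illustrative only:

import torch
from brevitas.export import FINNManager
from brevitas.nn import QuantReLU
from brevitas.quant_tensor import QuantTensor

layer = QuantReLU(bit_width=4)
layer.eval()
x = torch.randn(1, 2, 4, 4)
# wrap the example input with its quantization metadata, positionally
# (tensor, scale, zero_point, bit_width, signed), as in the test above
qx = QuantTensor(x, torch.tensor(1.0), torch.tensor(0.0), 8, True)
FINNManager.export(layer, export_path="quant_relu.onnx", input_t=qx)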
# [Copyright (c) 2020, Xilinx; standard FINN BSD-3-Clause license header, identical to the one above]
from PIL import Image
import numpy as np
import brevitas.onnx as bo
......
# [Copyright (c) 2020, Xilinx; standard FINN BSD-3-Clause license header, identical to the one above]
import os
import onnx # noqa
import numpy as np
......
# [Copyright (c) 2021, Xilinx; standard FINN BSD-3-Clause license header, as above with updated year]
import pytest
import os
import numpy as np
import torch
import brevitas.onnx as bo
from brevitas.nn import QuantLinear
from brevitas.core.quant import QuantType
from finn.core.modelwrapper import ModelWrapper
from finn.core.datatype import DataType
import finn.core.onnx_exec as oxe
from finn.transformation.infer_shapes import InferShapes
from finn.util.basic import gen_finn_dt_tensor
export_onnx_path = "test_brevitas_qlinear.onnx"
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.parametrize("out_features", [4])
@pytest.mark.parametrize("in_features", [3])
@pytest.mark.parametrize("w_bits", [4])
@pytest.mark.parametrize("i_dtype", [DataType.UINT4])
def test_brevitas_qlinear(bias, out_features, in_features, w_bits, i_dtype):
    if bias:
        pytest.xfail("bias export bug")
    i_shape = (1, in_features)
    w_shape = (out_features, in_features)
    b_linear = QuantLinear(
        out_features=out_features,
        in_features=in_features,
        bias=bias,
        bias_quant_type=QuantType.FP,
        weight_bit_width=w_bits,
        weight_quant_type=QuantType.INT,
        weight_scaling_per_output_channel=True,
    )
    weight_tensor_fp = np.random.uniform(low=-1.0, high=1.0, size=w_shape).astype(
        np.float32
    )
    b_linear.weight.data = torch.from_numpy(weight_tensor_fp)
    b_linear.eval()
    bo.export_finn_onnx(b_linear, i_shape, export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    inp_tensor = gen_finn_dt_tensor(i_dtype, i_shape)
    idict = {model.graph.input[0].name: inp_tensor}
    odict = oxe.execute_onnx(model, idict, True)
    produced = odict[model.graph.output[0].name]
    inp_tensor = torch.from_numpy(inp_tensor).float()
    expected = b_linear.forward(inp_tensor).detach().numpy()
    assert np.isclose(produced, expected, atol=1e-3).all()
    os.remove(export_onnx_path)
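A follow-on check one could add to this test before the os.remove cleanup, in the same spirit as the CybSec MLP assertions further below, is to verify the exported weight datatype. Sketch only; it reuses the test's imports, and the node/tensor indexing assumes this simple single-layer model:

model = ModelWrapper(export_onnx_path)
matmul_node = model.get_nodes_by_op_type("MatMul")[0]
# with w_bits=4, the exporter should annotate the weight tensor as INT4
assert model.get_tensor_datatype(matmul_node.input[1]) == DataType.INT4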
# [Copyright (c) 2020, Xilinx; standard FINN BSD-3-Clause license header, identical to the one above]
import os
import onnx # noqa
import numpy as np
......
# [Copyright (c) 2020, Xilinx; standard FINN BSD-3-Clause license header, identical to the one above]
import onnx # noqa
import os
import numpy as np
......
# [Copyright (c) 2020, Xilinx; standard FINN BSD-3-Clause license header, identical to the one above]
import os
import csv
import numpy as np
......
@@ -129,12 +129,7 @@ def update_dashboard_data(topology, wbits, abits, key, val):
 def fold_tfc(model):
     fc_layers = model.get_nodes_by_op_type("StreamingFCLayer_Batch")
     # (PE, SIMD, ramstyle) for each layer
-    config = [
-        (16, 49, "block"),
-        (8, 8, "auto"),
-        (8, 8, "auto"),
-        (10, 8, "distributed"),
-    ]
+    config = [(16, 49, "block"), (8, 8, "auto"), (8, 8, "auto"), (10, 8, "distributed")]
     for fcl, (pe, simd, ramstyle) in zip(fc_layers, config):
         fcl_inst = getCustomOp(fcl)
         fcl_inst.set_nodeattr("PE", pe)
@@ -372,6 +367,7 @@ class TestEnd2End:
     def test_streamline(self, topology, wbits, abits):
         prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "pre_post")
         model = load_test_checkpoint_or_skip(prev_chkpt_name)
+        model = model.transform(absorb.AbsorbSignBiasIntoMultiThreshold())
         # move past any reshapes to be able to streamline input scaling
         model = model.transform(MoveScalarLinearPastInvariants())
         model = model.transform(Streamline())
......
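Why the (PE, SIMD) folding config in fold_tfc above matters: each StreamingFCLayer_Batch processes SIMD inputs times PE outputs per cycle, so per-layer latency follows FINN's usual folding formula. A rough sketch; the 784-input, 64-output shape of the first tfc layer is an assumption about the tfc topology:

def fc_layer_cycles(mw, mh, simd, pe):
    # weight matrix of shape (MW, MH), processed SIMD x PE elements at a time
    return (mw // simd) * (mh // pe)

# first tfc layer with the config above: SIMD=49, PE=16
print(fc_layer_cycles(784, 64, 49, 16))  # -> 16 * 4 = 64 cycles per frame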
@@ -28,6 +28,7 @@
 import torch
 from brevitas.nn import QuantLinear, QuantReLU
+from brevitas.quant_tensor import QuantTensor
 import torch.nn as nn
 import numpy as np
 from brevitas.core.quant import QuantType
@@ -115,19 +116,32 @@ def test_end2end_cybsec_mlp_export():
     model_for_export = CybSecMLPForExport(model)
     export_onnx_path = get_checkpoint_name("export")
     input_shape = (1, 600)
-    bo.export_finn_onnx(model_for_export, input_shape, export_onnx_path)
+    # create a QuantTensor instance to mark the input as bipolar during export
+    input_a = np.random.randint(0, 1, size=input_shape).astype(np.float32)
+    input_a = 2 * input_a - 1
+    scale = 1.0
+    input_t = torch.from_numpy(input_a * scale)
+    input_qt = QuantTensor(
+        input_t, scale=torch.tensor(scale), bit_width=torch.tensor(1.0), signed=True
+    )
+    bo.export_finn_onnx(
+        model_for_export, export_path=export_onnx_path, input_t=input_qt
+    )
     assert os.path.isfile(export_onnx_path)
-    # fix input datatype
     finn_model = ModelWrapper(export_onnx_path)
     finnonnx_in_tensor_name = finn_model.graph.input[0].name
-    finn_model.set_tensor_datatype(finnonnx_in_tensor_name, DataType.BIPOLAR)
-    finn_model.save(export_onnx_path)
     assert tuple(finn_model.get_tensor_shape(finnonnx_in_tensor_name)) == (1, 600)
-    assert len(finn_model.graph.node) == 30
-    assert finn_model.graph.node[0].op_type == "Add"
-    assert finn_model.graph.node[1].op_type == "Div"
-    assert finn_model.graph.node[2].op_type == "MatMul"
+    # verify a few exported ops
+    assert finn_model.graph.node[1].op_type == "Add"
+    assert finn_model.graph.node[2].op_type == "Div"
+    assert finn_model.graph.node[3].op_type == "MatMul"
+    assert finn_model.graph.node[-1].op_type == "MultiThreshold"
+    # verify datatypes on some tensors
+    assert finn_model.get_tensor_datatype(finnonnx_in_tensor_name) == DataType.BIPOLAR
+    first_matmul_w_name = finn_model.graph.node[3].input[1]
+    assert finn_model.get_tensor_datatype(first_matmul_w_name) == DataType.INT2
@pytest.mark.slow
......
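The bipolar-input trick in the hunk above, shown in isolation: a 1-bit signed QuantTensor with scale 1.0 is what makes the exporter tag the input as BIPOLAR. A sketch using the QuantTensor keyword form from the test; note that np.random.randint's upper bound is exclusive, so an upper bound of 2 is needed to actually draw both 0 and 1:

import numpy as np
import torch
from brevitas.quant_tensor import QuantTensor

input_a = np.random.randint(0, 2, size=(1, 600)).astype(np.float32)
input_a = 2 * input_a - 1  # {0, 1} -> {-1, +1}
input_qt = QuantTensor(
    torch.from_numpy(input_a),
    scale=torch.tensor(1.0),
    bit_width=torch.tensor(1.0),
    signed=True,
)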
@@ -11,7 +11,7 @@ from finn.transformation.streamline.remove import RemoveIdentityOps
 from finn.util.basic import gen_finn_dt_tensor

-def insert_identity_op(model, op):
+def insert_identity_op(model, op, as_first_node):
     if op in ["Add", "Sub"]:
         val = np.asarray([0.0], dtype=np.float32)
     elif op in ["Mul", "Div"]:
@@ -19,10 +19,15 @@ def insert_identity_op(model, op):
     else:
         return

-    identity_node = helper.make_node(op, ["div_out", "value"], ["ident_out"])
     graph = model.graph
-    graph.node.insert(3, identity_node)
-    graph.node[-1].input[0] = "ident_out"
+    if as_first_node:
+        identity_node = helper.make_node(op, ["inp", "value"], ["ident_out"])
+        graph.node.insert(0, identity_node)
+        graph.node[1].input[0] = "ident_out"
+    else:
+        identity_node = helper.make_node(op, ["div_out", "value"], ["ident_out"])
+        graph.node.insert(3, identity_node)
+        graph.node[-1].input[0] = "ident_out"
     model.set_initializer("value", val)
     return model
@@ -30,7 +35,8 @@ def insert_identity_op(model, op):
 # identity operations to be inserted
 @pytest.mark.parametrize("op", ["Add", "Sub", "Mul", "Div"])
-def test_remove_identity_ops(op):
+@pytest.mark.parametrize("as_first_node", [False, True])
+def test_remove_identity_ops(op, as_first_node):
     # set up onnx model
     inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, 4, 1, 1])
@@ -64,7 +70,7 @@ def test_remove_identity_ops(op):
     model.set_initializer("shape", shape_values)
     model.set_initializer("div", div_values)
     model.set_initializer("matmul", matmul_values)
-    insert_identity_op(model, op)
+    insert_identity_op(model, op, as_first_node)
     model = model.transform(InferShapes())
     model = model.transform(InferDataTypes())
     idict = {"inp": inp_values}
......