Unverified commit 7b4bfe45 authored by Yaman Umuroglu, committed by GitHub

Merge pull request #216 from Xilinx/feature/brevitas_finn_codebug

Brevitas-FINN co-debug
parents 98899203 b4e09c46
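
This change wires Brevitas debug output through to FINN execution: a BrevitasDebugHook records intermediate activations during the PyTorch forward pass, while DebugMarker custom ops expose the matching tensors in the FINN execution context, so both sides can be compared tensor-by-tensor under their export_debug_name keys. A minimal sketch of the intended flow (model, input_shape, input_dict and example_input are placeholders for a concrete Brevitas network and its input; the new test below is the runnable version):

    import brevitas.onnx as bo
    import finn.core.onnx_exec as oxe
    from finn.core.modelwrapper import ModelWrapper
    from finn.util.pytorch import BrevitasDebugHook

    dbg_hook = BrevitasDebugHook()
    bo.enable_debug(model, dbg_hook)       # attach hooks / debug markers before export
    bo.export_finn_onnx(model, input_shape, "debug.onnx")
    model.forward(example_input)           # fills dbg_hook.outputs
    ctx = oxe.execute_onnx(
        ModelWrapper("debug.onnx"), input_dict, return_full_exec_context=True
    )
    # tensors recorded on both sides share their export_debug_name keys
    common = set(dbg_hook.outputs).intersection(ctx)
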
@@ -12,7 +12,7 @@ gecho () {
# checkout the correct dependency repo commits
# the repos themselves are cloned in the Dockerfile
-BREVITAS_COMMIT=f9a27226d4acf1661dd38bc449f71f89e0983cce
+BREVITAS_COMMIT=172e423164402a07826877fa9730063bee10a208
CNPY_COMMIT=4e8810b1a8637695171ed346ce68f6984e585ef4
HLSLIB_COMMIT=cfafe11a93b79ab1af7529d68f08886913a6466e
PYVERILATOR_COMMIT=c97a5ba41bbc7c419d6f25c74cdf3bdc3393174f
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from finn.custom_op import CustomOp
from onnx import helper


class DebugMarker(CustomOp):
    def get_nodeattr_types(self):
        return {"export_debug_name": ("s", True, "")}

    def make_shape_compatible_op(self, model):
        node = self.onnx_node
        return helper.make_node("Identity", [node.input[0]], [node.output[0]])

    def infer_node_datatype(self, model):
        node = self.onnx_node
        # data type stays the same
        dtype = model.get_tensor_datatype(node.input[0])
        model.set_tensor_datatype(node.output[0], dtype)
        # create quantization annotation for debug marker
        model.set_tensor_datatype(self.get_nodeattr("export_debug_name"), dtype)

    def execute_node(self, context, graph):
        node = self.onnx_node
        inp_name = node.input[0]
        out_name = node.output[0]
        inp = context[inp_name]
        context[out_name] = inp
        # insert debug marker output as separate tensor
        context[self.get_nodeattr("export_debug_name")] = inp

    def verify_node(self):
        info_messages = []
        # verify that "domain" is set to "finn"
        domain_value = self.onnx_node.domain
        if domain_value == "finn":
            info_messages.append("Attribute domain is set correctly")
        else:
            info_messages.append('Attribute domain should be set to "finn"')
        return info_messages
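
For orientation, a DebugMarker behaves as an identity op in the "finn" domain; its export_debug_name attribute names the extra tensor that execute_node writes into the execution context. A hypothetical hand-built node (illustrative only, the tensor names below are made up) would look like:

    from onnx import helper

    dbg_node = helper.make_node(
        "DebugMarker",
        ["act0"],                       # input tensor, passed through unchanged
        ["act0_out"],                   # regular output
        domain="finn",
        export_debug_name="dbg_act0",   # name of the extra debug tensor
    )
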
@@ -57,6 +57,7 @@ from finn.custom_op.fpgadataflow.vector_vector_activate_batch import (
)
from finn.custom_op.fpgadataflow.channelwise_op_batch import ChannelwiseOp_Batch
from finn.custom_op.fpgadataflow.iodma import IODMA
from finn.custom_op.debugmarker import DebugMarker
# create a mapping of all known CustomOp names and classes
custom_op = {}
@@ -84,6 +85,7 @@ custom_op["DuplicateStreams_Batch"] = DuplicateStreams_Batch
custom_op["Vector_Vector_Activate_Batch"] = Vector_Vector_Activate_Batch
custom_op["ChannelwiseOp_Batch"] = ChannelwiseOp_Batch
custom_op["IODMA"] = IODMA
custom_op["DebugMarker"] = DebugMarker
def getCustomOp(node):
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
from torch.nn import Module, Sequential
from brevitas.quant_tensor import QuantTensor


class Normalize(Module):
    def __init__(self, mean, std, channels):
        super(Normalize, self).__init__()
        self.mean = mean
        self.std = std
        self.channels = channels

    def forward(self, x):
        x = x - torch.tensor(self.mean, device=x.device).reshape(1, self.channels, 1, 1)
        x = x / self.std
        return x


class ToTensor(Module):
    def __init__(self):
        super(ToTensor, self).__init__()

    def forward(self, x):
        x = x / 255
        return x


class NormalizePreProc(Module):
    def __init__(self, mean, std, channels):
        super(NormalizePreProc, self).__init__()
        self.features = Sequential()
        scaling = ToTensor()
        self.features.add_module("scaling", scaling)
        normalize = Normalize(mean, std, channels)
        self.features.add_module("normalize", normalize)

    def forward(self, x):
        return self.features(x)
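
A usage sketch for the preprocessing wrapper above (the mean/std below are assumed ImageNet-style statistics, not values taken from this change):

    preproc = NormalizePreProc(mean=[0.485, 0.456, 0.406], std=0.226, channels=3)
    img = torch.randint(0, 256, (1, 3, 224, 224)).float()  # image in uint8 range
    out = preproc(img)  # divide by 255, subtract per-channel mean, divide by std
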
class BrevitasDebugHook:
    """Records module outputs during the Brevitas forward pass, keyed by export_debug_name."""

    def __init__(self):
        self.outputs = {}

    def __call__(self, module, module_in, module_out):
        # called with the PyTorch forward-hook signature (module, input, output);
        # if the module returns a QuantTensor, keep only the value tensor
        tensor = module_out
        if isinstance(module_out, QuantTensor):
            tensor = module_out[0]
        self.outputs[module.export_debug_name] = tensor.detach().numpy()

    def clear(self):
        self.outputs = {}
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from pkgutil import get_data
import os
import brevitas.onnx as bo
import numpy as np
import onnx
import onnx.numpy_helper as nph
import torch
import finn.core.onnx_exec as oxe
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.fold_constants import FoldConstants
from finn.transformation.general import RemoveStaticGraphInputs
from finn.transformation.infer_shapes import InferShapes
from finn.util.test import get_test_model_trained
from finn.util.pytorch import BrevitasDebugHook
def test_brevitas_debug():
    finn_onnx = "test_brevitas_debug.onnx"
    fc = get_test_model_trained("TFC", 2, 2)
    dbg_hook = BrevitasDebugHook()
    bo.enable_debug(fc, dbg_hook)
    bo.export_finn_onnx(fc, (1, 1, 28, 28), finn_onnx)
    model = ModelWrapper(finn_onnx)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(RemoveStaticGraphInputs())
    assert len(model.graph.input) == 1
    assert len(model.graph.output) == 1
    # load one of the test vectors
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    # run using FINN-based execution
    input_dict = {"0": nph.to_array(input_tensor)}
    output_dict = oxe.execute_onnx(model, input_dict, return_full_exec_context=True)
    produced = output_dict[model.graph.output[0].name]
    # run using PyTorch/Brevitas
    input_tensor = torch.from_numpy(nph.to_array(input_tensor)).float()
    assert input_tensor.shape == (1, 1, 28, 28)
    # do forward pass in PyTorch/Brevitas
    expected = fc.forward(input_tensor).detach().numpy()
    assert np.isclose(produced, expected, atol=1e-3).all()
    # check all tensors at debug markers
    names_brevitas = set(dbg_hook.outputs.keys())
    names_finn = set(output_dict.keys())
    names_common = names_brevitas.intersection(names_finn)
    assert len(names_common) == 8
    for dbg_name in names_common:
        assert (dbg_hook.outputs[dbg_name] == output_dict[dbg_name]).all()
    os.remove(finn_onnx)