Skip to content
Snippets Groups Projects
Commit 0ef15992 authored by auphelia's avatar auphelia
Browse files

Merge branch 'dev' into feature/weight_streamers

parents 10fdc992 855d9de6
No related branches found
No related tags found
No related merge requests found
......@@ -86,7 +86,7 @@ VIVADO_IP_CACHE=$BUILD_LOCAL/vivado_ip_cache
git clone --branch feature/finn_onnx_export $BREVITAS_REPO $BREVITAS_LOCAL || git -C "$BREVITAS_LOCAL" pull
git clone $EXAMPLES_REPO $EXAMPLES_LOCAL || git -C "$EXAMPLES_LOCAL" pull
git clone $CNPY_REPO $CNPY_LOCAL || git -C "$CNPY_LOCAL" pull
git clone $FINN_HLS_REPO $FINN_HLS_LOCAL; git -C "$FINN_HLS_LOCAL" checkout b5dc957a16017b8356a7010144b0a4e2f8cfd124 || git -C "$FINN_HLS_LOCAL" checkout b5dc957a16017b8356a7010144b0a4e2f8cfd124
git clone $FINN_HLS_REPO $FINN_HLS_LOCAL || git -C "$FINN_HLS_LOCAL" checkout master; git -C "$FINN_HLS_LOCAL" pull
git clone $PYVERILATOR_REPO $PYVERILATOR_LOCAL || git -C "$PYVERILATOR_LOCAL" pull
git clone $PYNQSHELL_REPO $PYNQSHELL_LOCAL || git -C "$PYNQSHELL_LOCAL" pull
......
......@@ -280,13 +280,13 @@ class StreamingFCLayer_Batch(HLSCustomOp):
out_hls_str = self.get_output_datatype().get_hls_datatype_str()
wt_hls_str = self.get_weight_datatype().get_hls_datatype_str()
inp_is_binary = self.get_input_datatype() == DataType.BINARY
out_is_binary = self.get_output_datatype() == DataType.BINARY
# out_is_binary = self.get_output_datatype() == DataType.BINARY
wt_is_binary = self.get_weight_datatype() == DataType.BINARY
bin_xnor_mode = self.get_nodeattr("binaryXnorMode") == 1
if (inp_is_binary or wt_is_binary) and (not bin_xnor_mode):
raise Exception("True binary (non-bipolar) inputs not yet supported")
inp_is_bipolar = self.get_input_datatype() == DataType.BIPOLAR
out_is_bipolar = self.get_output_datatype() == DataType.BIPOLAR
# out_is_bipolar = self.get_output_datatype() == DataType.BIPOLAR
wt_is_bipolar = self.get_weight_datatype() == DataType.BIPOLAR
# reinterpret inp/wt as bipolar if bin_xnor_mode is set
inp_is_bipolar = inp_is_bipolar or (inp_is_binary and bin_xnor_mode)
......@@ -324,10 +324,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
ret["TWeightI"] = "Slice<%s>" % wt_hls_str
# fill in TDstI
if out_is_bipolar or out_is_binary:
ret["TDstI"] = "Identity"
else:
ret["TDstI"] = "Slice<%s>" % out_hls_str
ret["TDstI"] = "Slice<%s>" % out_hls_str
return ret
def get_hls_compatible_weight_tensor(self, orig_weight_matrix):
......
......@@ -67,6 +67,16 @@ class BatchNormToAffine(Transformation):
if consumer.op_type == "Squeeze":
bn_output = consumer.output[0]
data_shape = model.get_tensor_shape(bn_input)
assert A.ndim == B.ndim, "Unexpected mul/add dims in BatchNormToAffine"
assert (
len(data_shape) >= A.ndim
), "Unexpected number of dims found in BatchNormToAffine"
# reshape the mul/add constants to match the data shape/dims
# by adding (1,) dimensions to the right
n_spatial_dims = len(data_shape) - 2
target_shape = (1, -1) + tuple(1 for i in range(n_spatial_dims))
A = A.reshape(target_shape)
B = B.reshape(target_shape)
# create value_info and initializers for Mul and Add constants
mul_const = oh.make_tensor_value_info(
model.make_new_valueinfo_name(), TensorProto.FLOAT, A.shape
......
......@@ -28,10 +28,12 @@
import os
from pkgutil import get_data
import pkg_resources as pk
import brevitas.onnx as bo
import onnx
import onnx.numpy_helper as nph
import numpy as np
import finn.core.onnx_exec as oxe
from finn.core.modelwrapper import ModelWrapper
......@@ -58,18 +60,23 @@ def test_batchnorm_to_affine_lfc_w1a1():
os.remove(export_onnx_path)
# cnv batchnorm to affine not yet supported
# def test_batchnorm_to_affine_cnv_w1a1():
# lfc = get_test_model_trained("CNV", 1, 1)
# bo.export_finn_onnx(lfc, (1, 3, 32, 32), export_onnx_path)
# model = ModelWrapper(export_onnx_path)
# model = model.transform(InferShapes())
# model = model.transform(FoldConstants())
# # TODO shape inference failing on transformed model below -- needs debug
# new_model = model.transform(BatchNormToAffine())
# # check that there are no BN nodes left
# # TODO replace this with execution test
# op_types = list(map(lambda x: x.op_type, new_model.graph.node))
# assert "BatchNormalization" not in op_types
# os.remove(export_onnx_path)
def test_batchnorm_to_affine_cnv_w1a1():
    """End-to-end check of BatchNormToAffine on the CNV-w1a1 model.

    Exports the trained binarized CNV network to FINN-ONNX, executes it on
    one bundled CIFAR-10 test image before and after the transformation,
    and verifies that (a) no BatchNormalization nodes remain and (b) the
    transformed model produces the same output as the original.
    """
    cnv = get_test_model_trained("CNV", 1, 1)
    bo.export_finn_onnx(cnv, (1, 3, 32, 32), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    # single CIFAR-10 test image shipped with the finn package
    fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
    input_tensor = np.load(fn)["arr_0"].astype(np.float32)
    assert input_tensor.shape == (1, 3, 32, 32)
    input_dict = {"0": input_tensor}
    # reference output from the untransformed model
    ref_outputs = oxe.execute_onnx(model, input_dict)
    expected = ref_outputs[list(ref_outputs.keys())[0]]
    new_model = model.transform(BatchNormToAffine())
    # every BN node must have been replaced by Mul/Add pairs
    remaining_op_types = [node.op_type for node in new_model.graph.node]
    assert "BatchNormalization" not in remaining_op_types
    new_outputs = oxe.execute_onnx(new_model, input_dict)
    produced = new_outputs[list(new_outputs.keys())[0]]
    assert np.isclose(expected, produced).all()
    os.remove(export_onnx_path)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment