Commit 80fb658e authored by Lucian Petrica's avatar Lucian Petrica

Various fixes for review

parent 1860f80f
@@ -222,12 +222,7 @@ class Thresholding_Batch(HLSCustomOp):
         inp_hls_str = self.get_input_datatype().get_hls_datatype_str()
         out_hls_str = self.get_output_datatype().get_hls_datatype_str()
         # fill in TSrcI
-        # TODO checks/adjustments for bipolar inputs
-        inp_is_bipolar = self.get_input_datatype() == DataType.BIPOLAR
-        if inp_is_bipolar:
-            ret["TSrcI"] = "Recast<Binary>"
-        else:
-            ret["TSrcI"] = "Slice<%s>" % inp_hls_str
+        ret["TSrcI"] = "Slice<%s>" % inp_hls_str
         # fill in TDstI
         ret["TDstI"] = "Slice<%s>" % out_hls_str
@@ -237,7 +232,7 @@ class Thresholding_Batch(HLSCustomOp):
         """Convert the original numpy weight matrix orig_weight_matrix into
         a form suitable for passing to the hlslib call:
         * ensure MH % PE == 0
-        * for bipolar weights&inputs, ensure thresholds are positive
+        * for unsigned inputs, ensure thresholds are positive
         * interleave rows between PEs
         * reshape into (PE, TMEM, n_thres_steps) and return
         """
@@ -250,12 +245,11 @@ class Thresholding_Batch(HLSCustomOp):
         ), """Threshold matrix dimension is
         not as expected (2)."""
         n_thres_steps = orig_thres_matrix.shape[1]
-        inp_is_bipolar = self.get_input_datatype() == DataType.BIPOLAR
-        if inp_is_bipolar:
+        if not self.get_input_datatype().signed():
             # ensure all thresholds are nonnegative
             assert (orig_thres_matrix >= 0).all()
         # ensure all thresholds are integer
         assert (orig_thres_matrix.astype(np.int32) == orig_thres_matrix).all()
         ret = orig_thres_matrix
         # ensure channels = mh , duplicating if necessary
         if ret.shape[0] == 1:
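The guard now keys off signedness rather than the BIPOLAR type specifically. Assuming DataType.signed() reports whether the type can represent negative values (and that the import path below is correct; neither is shown in this diff), the new check behaves roughly as follows:

from finn.core.datatype import DataType  # import path assumed, not part of this diff

# Assumption: signed() is true exactly when the datatype's minimum value is negative.
for dt in [DataType.UINT16, DataType.INT16, DataType.BIPOLAR]:
    print(dt, "needs nonnegative thresholds:", not dt.signed())
# Expected under that assumption: UINT16 -> True, INT16 -> False,
# and BIPOLAR ({-1, +1}) -> False, i.e. it no longer triggers the check.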
@@ -286,10 +280,6 @@ class Thresholding_Batch(HLSCustomOp):
         threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds)
         tdt = DataType.INT32
-        # use UINT32 threshold export for bipolar times bipolar
-        inp_is_bipolar = self.get_input_datatype() == DataType.BIPOLAR
-        if inp_is_bipolar:
-            tdt = DataType.UINT32
         thresholds_hls_code = numpy_to_hls_code(
             threshold_tensor, tdt, "thresholds", False, True
         )
@@ -426,9 +416,6 @@ class Thresholding_Batch(HLSCustomOp):
     def read_npy_data(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
         dtype = self.get_input_datatype()
-        if dtype == DataType.BIPOLAR:
-            # use binary for bipolar storage
-            dtype = DataType.BINARY
         elem_bits = dtype.bitwidth()
         packed_bits = self.get_instream_width()
         packed_hls_type = "ap_uint<%d>" % packed_bits
...
@@ -33,6 +33,7 @@ from finn.transformation import Transformation
 from finn.custom_op.registry import getCustomOp
 from finn.transformation.infer_shapes import InferShapes
 from finn.transformation.infer_datatypes import InferDataTypes
+import finn.core.data_layout as DataLayout


 class InferConvInpGen(Transformation):
@@ -414,25 +415,24 @@ class InferThresholdingLayer(Transformation):
                 thl_threshold = node.input[1]
                 thl_output = node.output[0]
                 thl_in_shape = model.get_tensor_shape(thl_input)
-                thl_out_shape = model.get_tensor_shape(thl_output)
                 idt = model.get_tensor_datatype(thl_input)
                 # skip conversion for layers with float input
                 if not idt.is_integer():
                     continue
-                # extract weight shape, note that ONNX and finn-hlslib
-                # make different assumptions about dim order here
-                # ONNX assumes W has (in, out) shape
-                # finn-hlslib assumes W has (out, in) shape
+                # skip conversion if input is not NHWC or NC
+                thl_in_layout = model.get_tensor_layout(thl_input)
+                if thl_in_layout != DataLayout.NHWC and thl_in_layout != DataLayout.NC:
+                    continue
+                # now safe to assume number of channels is in last dimension
                 ifc = int(thl_in_shape[-1])
                 # create node with no parallelization first
                 pe = 1
                 assert ifc % pe == 0, "Requirement IFC divisable by PE is violated."
                 odt = model.get_tensor_datatype(thl_output)
-                model.set_tensor_shape(thl_input, thl_in_shape)
-                model.set_tensor_shape(thl_output, thl_out_shape)
                 # create and insert new StreamingFCLayer node
                 new_node = helper.make_node(
                     "Thresholding_Batch",
...
@@ -39,6 +39,7 @@ from finn.core.modelwrapper import ModelWrapper
 from finn.transformation.fold_constants import FoldConstants
 from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames
 from finn.transformation.infer_shapes import InferShapes
+from finn.transformation.infer_data_layouts import InferDataLayouts
 from finn.transformation.streamline import Streamline
 from finn.util.test import get_test_model_trained
 from finn.transformation.double_to_single_float import DoubleToSingleFloat
@@ -71,6 +72,7 @@ def test_convert_to_hls_layers_cnv_w1a1(fused_activation):
     model = model.transform(absorb.AbsorbTransposeIntoMultiThreshold())
     model = model.transform(ConvertBipolarMatMulToXnorPopcount())
     model = model.transform(Streamline())
+    model = model.transform(InferDataLayouts())
     # model.save("golden.onnx")
     # load one of the test vectors
     fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
...
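The test change above mirrors the new requirement in InferThresholdingLayer: layout annotations must exist before the NHWC/NC check can pass. A minimal ordering sketch (the to_hls module alias and the surrounding model setup are assumptions, not shown in this diff):

import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls
from finn.transformation.infer_data_layouts import InferDataLayouts

def lower_thresholds(model):
    # annotate tensor layouts (NHWC / NC) first; otherwise the layout guard
    # in InferThresholdingLayer skips the MultiThreshold nodes entirely
    model = model.transform(InferDataLayouts())
    model = model.transform(to_hls.InferThresholdingLayer())
    return model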
@@ -86,25 +86,16 @@ def make_single_thresholding_modelwrapper(T, pe, idt, odt):
     return model


-def prepare_inputs(input_tensor, idt):
-    if idt == DataType.BIPOLAR:
-        # convert bipolar to binary
-        return {"inp": (input_tensor + 1) / 2}
-    else:
-        return {"inp": input_tensor}
-
-
-# TODO binary/bipolar inputs/outputs
 # activation: None or DataType
-@pytest.mark.parametrize("act", [DataType.INT4])
+@pytest.mark.parametrize("act", [DataType.INT4, DataType.BIPOLAR])
 # input datatype
-@pytest.mark.parametrize("idt", [DataType.INT2, DataType.INT4])
+@pytest.mark.parametrize("idt", [DataType.INT16, DataType.UINT16])
 # folding, -1 is maximum possible
 @pytest.mark.parametrize("nf", [-1, 2, 1])
 # number of input features
 @pytest.mark.parametrize("ich", [16])
 # execution mode
-@pytest.mark.parametrize("exec_mode", ["rtlsim", "cppsim"])
+@pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
 @pytest.mark.vivado
 @pytest.mark.slow
 def test_fpgadataflow_thresholding(idt, act, nf, ich, exec_mode):
@@ -121,11 +112,6 @@ def test_fpgadataflow_thresholding(idt, act, nf, ich, exec_mode):
     T = np.random.randint(idt.min(), idt.max() + 1, (ich, n_steps)).astype(np.float32)
     # provide non-decreasing thresholds
     T = np.sort(T, axis=1)
-    # generate thresholds for activation
-    if idt == DataType.BIPOLAR:
-        # bias thresholds to be positive
-        T = np.ceil((T + ich) / 2)
-        assert (T >= 0).all()

     model = make_single_thresholding_modelwrapper(T, pe, idt, odt)
@@ -143,8 +129,8 @@ def test_fpgadataflow_thresholding(idt, act, nf, ich, exec_mode):
     else:
         raise Exception("Unknown exec_mode")

-    # prepare input data
-    input_dict = prepare_inputs(x, idt)
+    # package input data as dictionary
+    input_dict = {"inp": x}

     y = multithreshold(x, T)
     if act == DataType.BIPOLAR:
...
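For reference, the golden output computed by multithreshold(x, T) in the test is a per-channel count of thresholds met or exceeded, which is why the test sorts T to be non-decreasing. A simplified stand-in (not the FINN implementation; scale and bias handling are omitted):

import numpy as np

def multithreshold_ref(x, T):
    # x: (N, ich), T: (ich, n_steps); output counts thresholds with x >= T
    return np.sum(x[:, :, np.newaxis] >= T[np.newaxis, :, :], axis=-1).astype(np.float32)

x = np.array([[-2.0, 0.0, 3.0]])
T = np.array([[-1.0, 1.0], [-1.0, 1.0], [-1.0, 1.0]])
print(multithreshold_ref(x, T))  # [[0. 1. 2.]]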