Commit ccc634cd authored by Yaman Umuroglu

Merge branch 'feature/exec_hlslib_streamingfclayer' into feature/npy2apint_stream

parents 486a3a23 c8d9a0fc
@@ -175,18 +175,21 @@ def pad_tensor_to_multiple_of(ndarray, pad_to_dims, val=0, distr_pad=False):
assert (np.asarray(ret.shape, dtype=np.int32) == desired).all()
return ret
def gen_FINN_dt_tensor(FINN_dt, tensor_shape):
def gen_finn_dt_tensor(finn_dt, tensor_shape):
# generates random tensor in given shape and with given FINN data type
if FINN_dt == DataType.BIPOLAR:
if finn_dt == DataType.BIPOLAR:
tensor_values = np.random.randint(2, size=tensor_shape)
tensor_values = 2 * tensor_values - 1
elif FINN_dt == DataType.BINARY:
elif finn_dt == DataType.BINARY:
tensor_values = np.random.randint(2, size=tensor_shape)
elif FINN_dt == DataType.TERNARY:
elif finn_dt == DataType.TERNARY:
tensor_values = np.random.randint(-1, high=2, size=tensor_shape)
elif FINN_dt == DataType.INT2:
elif finn_dt == DataType.INT2:
tensor_values = np.random.randint(-2, high=2, size=tensor_shape)
else:
raise ValueError("Datatype {} is not supported, no tensor could be generated".format(FINN_dt))
return tensor_values
raise ValueError(
"Datatype {} is not supported, no tensor could be generated".format(finn_dt)
)
# always use float type as container
return tensor_values.astype(np.float32)
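A minimal usage sketch of the renamed helper, for reference only (imports mirror the ones used in the tests further down; this snippet is not part of the diff):

# Sketch: generate a small BIPOLAR tensor and sanity-check it.
import numpy as np
from finn.core.datatype import DataType
from finn.core.utils import gen_finn_dt_tensor

t = gen_finn_dt_tensor(DataType.BIPOLAR, [2, 3])
assert t.dtype == np.float32  # values are kept in a float container
assert all(DataType.BIPOLAR.allowed(v) for v in t.flatten())  # only -1/+1 values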
@@ -112,7 +112,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
# for the code generation
weights = np.expand_dims(weights, 0)
weights = numpy_to_hls_code(
weights, DataType.BINARY, "weights", True, True
weights, self.get_weight_datatype(), "weights", True, True
)
# write weights into params.h
@@ -132,7 +132,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
thresholds = context[inputs]
thresholds = np.expand_dims(thresholds, 0)
thresholds = numpy_to_hls_code(
thresholds, DataType.BINARY, "thresholds", True, True
thresholds, DataType.UINT32, "thresholds", True, True
)
# write weights into thresh.h
......
@@ -2,11 +2,14 @@ import numpy as np
from onnx import TensorProto, helper
import finn.core.onnx_exec as oxe
import finn.custom_op.xnorpopcount as xp
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.core.utils import interleave_matrix_outer_dim_from_partitions
from finn.core.utils import (
gen_finn_dt_tensor,
interleave_matrix_outer_dim_from_partitions
)
from finn.custom_op.multithreshold import multithreshold
import finn.custom_op.xnorpopcount as xp
def make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T=None, tdt=None):
@@ -79,57 +82,176 @@ def make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T=None, tdt=None):
return model
def prepare_inputs(model, input_tensor, idt):
ishape = model.get_tensor_shape("inp")
input_tensor = (np.asarray(input_tensor, dtype=np.float32)).reshape(*ishape)
return {"inp": input_tensor}
def create_noactivation_testcases(idt, wdt, odt):
mh = 8
mw = 8
# generate weights
W = gen_finn_dt_tensor(wdt, [mh, mw])
# generate input data
x = gen_finn_dt_tensor(idt, mw)
# set up layers with different pe and simd
pe_values = [1, int(mh / 2), mh]
simd_values = [1, int(mw / 2), mw]
for pe in pe_values:
for simd in simd_values:
model = make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt)
# prepare input data
input_dict = prepare_inputs(model, x, idt)
# # execute model
# produced = oxe.execute_onnx(model, input_dict)["outp"]
# expected output
if wdt == DataType.BIPOLAR:
W_expected = 2 * W - 1
else:
W_expected = W
if idt == DataType.BIPOLAR:
x_expected = 2 * x - 1
else:
x_expected = x
oshape = model.get_tensor_shape("outp")
y = np.dot(W, x).reshape(oshape)
# XnorMul produces positive outputs only, adjust expectation accordingly
expected = 2 * y - mw
# execute model
produced = oxe.execute_onnx(model, input_dict)["outp"]
assert (produced.reshape(expected.shape) == expected).all()
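The 2 * y - mw adjustment above appears to be based on the standard XNOR-popcount identity for bipolar vectors; a tiny plain-numpy check of that identity (illustration only, not part of the diff):

# For a, b in {-1,+1}^N: dot(a, b) = 2 * matches - N, where matches is the
# popcount of XNOR(a01, b01) and a01 = (a + 1) / 2 is the {0,1} encoding.
import numpy as np

N = 4
a = np.array([+1, -1, +1, +1])
b = np.array([+1, +1, -1, +1])
a01, b01 = (a + 1) // 2, (b + 1) // 2
matches = int(np.sum(a01 == b01))  # number of agreeing positions = XNOR popcount
assert a.dot(b) == 2 * matches - N  # both sides are 0 for this example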
# no activation cases
# no act - all bipolar
def test_fpgadataflow_fclayer_ibp_wbp_noact():
mh = 4
mw = 4
pe = 4
simd = 4
wdt = idt = DataType.BIPOLAR
odt = DataType.UINT32
# generate weights
W = np.random.randint(2, size=(mh, mw))
model = make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt)
# generate input data
x = np.random.randint(2, size=mw)
ishape = model.get_tensor_shape("inp")
oshape = model.get_tensor_shape("outp")
input_tensor = (np.asarray(x, dtype=np.float32)).reshape(*ishape)
input_dict = {"inp": input_tensor}
produced = oxe.execute_onnx(model, input_dict)["outp"]
# convert to bipolar values
Wb = 2 * W - 1
xb = 2 * x - 1
yb = np.dot(Wb, xb).reshape(oshape.shape)
# XnorMul produces positive outputs only, adjust expectation accordingly
expected = 2 * yb - mw
assert (produced == expected).all()
create_noactivation_testcases(idt, wdt, odt)
# no act - all signed
def test_fpgadataflow_fclayer_ibint2_wbint2_noact():
wdt = idt = DataType.INT2
odt = DataType.INT32
create_noactivation_testcases(idt, wdt, odt)
# no act - all ternary
def test_fpgadataflow_fclayer_ibt_wbt_noact():
wdt = idt = DataType.TERNARY
odt = DataType.INT32
create_noactivation_testcases(idt, wdt, odt)
def test_fpgadataflow_fclayer_all_bipolar():
mh = 4
mw = 4
pe = 4
simd = 4
mh = 8
mw = 8
wdt = idt = odt = DataType.BIPOLAR
tdt = DataType.UINT32
# generate weights
W = np.random.randint(2, size=(mh, mw))
W = gen_finn_dt_tensor(wdt, [mh, mw])
# single global threshold at zero
T = np.zeros((1, 1))
# generate input data
x = np.random.randint(2, size=mw)
x = gen_finn_dt_tensor(idt, mw)
# set up layers with different pe and simd
pe_values = [1, int(mh/2), mh]
simd_values = [1, int(mw/2), mw]
pe_values = [1, int(mh / 2), mh]
simd_values = [1, int(mw / 2), mw]
for pe in pe_values:
for simd in simd_values:
model = make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T, tdt)
ishape = model.get_tensor_shape("inp")
input_tensor = (np.asarray(x, dtype=np.float32)).reshape(*ishape)
input_dict = {"inp": input_tensor}
# prepare input data
input_dict = prepare_inputs(model, x, idt)
# execute model
produced = oxe.execute_onnx(model, input_dict)["outp"]
y = xp.xnorpopcountmatmul(W, x.reshape(-1,1))
# expected output
# correction of bipolar values to enable xnorpopcountmatmul
Wb = (W + 1) * 0.5
xb = (x + 1) * 0.5
y = xp.xnorpopcountmatmul(Wb, xb.reshape(-1, 1))
expected = multithreshold(y.reshape(1, mh), T)
assert (produced.reshape(expected.shape) == expected).all()
def test_fpgadataflow_fclayer_all_signed():
mh = 8
mw = 8
wdt = idt = odt = DataType.INT2
tdt = DataType.INT32
# generate weights
W = gen_finn_dt_tensor(wdt, [mh, mw])
# single global threshold at zero
T = np.zeros((1, 1))
# generate input data
x = gen_finn_dt_tensor(idt, mw)
# set up layers with different pe and simd
pe_values = [1, int(mh / 2), mh]
simd_values = [1, int(mw / 2), mw]
for pe in pe_values:
for simd in simd_values:
model = make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T, tdt)
# prepare input data
input_dict = prepare_inputs(model, x, idt)
# execute model
produced = oxe.execute_onnx(model, input_dict)["outp"]
# expected output
oshape = model.get_tensor_shape("outp")
y = np.dot(W, x).reshape(oshape)
expected = multithreshold(y.reshape(1, mh), T)
assert (produced.reshape(expected.shape) == expected).all()
def test_fpgadataflow_fclayer_all_ternary():
mh = 8
mw = 8
wdt = idt = odt = DataType.TERNARY
tdt = DataType.INT32
# generate weights
W = gen_finn_dt_tensor(wdt, [mh, mw])
# single global threshold at zero
T = np.zeros((1, 1))
# generate input data
x = gen_finn_dt_tensor(idt, mw)
# set up layers with different pe and simd
pe_values = [1, int(mh / 2), mh]
simd_values = [1, int(mw / 2), mw]
for pe in pe_values:
for simd in simd_values:
model = make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T, tdt)
# prepare input data
input_dict = prepare_inputs(model, x, idt)
# execute model
produced = oxe.execute_onnx(model, input_dict)["outp"]
# expected output
oshape = model.get_tensor_shape("outp")
y = np.dot(W, x).reshape(oshape)
expected = multithreshold(y.reshape(1, mh), T)
assert (produced.reshape(expected.shape) == expected).all()
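For reference, the single global threshold at zero used by these tests reduces to a sign-style comparison; a plain-numpy sketch of the expected behaviour (the >= convention here is an assumption, check finn.custom_op.multithreshold for the exact semantics):

# Sketch: with T = np.zeros((1, 1)), each output element counts how many
# thresholds the input reaches, i.e. 1 where y >= 0 and 0 elsewhere
# (comparison direction assumed, not taken from this diff).
import numpy as np

y = np.array([[-3.0, 0.0, 2.0, -1.0]])
T = np.zeros((1, 1))
ref = (y >= T[0, 0]).astype(np.float32)  # -> [[0., 1., 1., 0.]]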
import numpy as np
import finn.core.utils as util
from finn.core.datatype import DataType
def test_FINN_tensor_generator():
def test_finn_tensor_generator():
# bipolar
shape_bp = [2,2]
shape_bp = [2, 2]
dt_bp = DataType.BIPOLAR
tensor_bp = util.gen_FINN_dt_tensor(dt_bp, shape_bp)
tensor_bp = util.gen_finn_dt_tensor(dt_bp, shape_bp)
# test shape
for i in range(len(shape_bp)):
assert shape_bp[i] == tensor_bp.shape[i], """Shape of generated tensor
assert (
shape_bp[i] == tensor_bp.shape[i]
), """Shape of generated tensor
does not match the desired shape"""
# test if elements are FINN datatype
for value in tensor_bp.flatten():
assert dt_bp.allowed(value), """Data type of generated tensor
assert dt_bp.allowed(
value
), """Data type of generated tensor
does not match the desired Data type"""
# binary
shape_b = [4,2,3]
shape_b = [4, 2, 3]
dt_b = DataType.BINARY
tensor_b = util.gen_FINN_dt_tensor(dt_b, shape_b)
tensor_b = util.gen_finn_dt_tensor(dt_b, shape_b)
# test shape
for i in range(len(shape_b)):
assert shape_b[i] == tensor_b.shape[i], """Shape of generated tensor
assert (
shape_b[i] == tensor_b.shape[i]
), """Shape of generated tensor
does not match the desired shape"""
# test if elements are FINN datatype
for value in tensor_b.flatten():
assert dt_b.allowed(value), """Data type of generated tensor
assert dt_b.allowed(
value
), """Data type of generated tensor
does not match the desired Data type"""
# ternary
shape_t = [7,1,3,1]
# ternary
shape_t = [7, 1, 3, 1]
dt_t = DataType.TERNARY
tensor_t = util.gen_FINN_dt_tensor(dt_t, shape_t)
tensor_t = util.gen_finn_dt_tensor(dt_t, shape_t)
# test shape
for i in range(len(shape_t)):
assert shape_t[i] == tensor_t.shape[i], """Shape of generated tensor
assert (
shape_t[i] == tensor_t.shape[i]
), """Shape of generated tensor
does not match the desired shape"""
# test if elements are FINN datatype
for value in tensor_t.flatten():
assert dt_t.allowed(value), """Data type of generated tensor
assert dt_t.allowed(
value
), """Data type of generated tensor
does not match the desired Data type"""
# int2
shape_int2 = [7,4]
# int2
shape_int2 = [7, 4]
dt_int2 = DataType.INT2
tensor_int2 = util.gen_FINN_dt_tensor(dt_int2, shape_int2)
tensor_int2 = util.gen_finn_dt_tensor(dt_int2, shape_int2)
# test shape
for i in range(len(shape_int2)):
assert shape_int2[i] == tensor_int2.shape[i], """Shape of generated tensor
assert (
shape_int2[i] == tensor_int2.shape[i]
), """Shape of generated tensor
does not match the desired shape"""
# test if elements are FINN datatype
for value in tensor_int2.flatten():
assert value in [-2, -1, 0, 1], """Data type of generated tensor
assert value in [
-2,
-1,
0,
1,
], """Data type of generated tensor
does not match the desired Data type"""
#import pdb; pdb.set_trace()
# import pdb; pdb.set_trace()
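The four near-identical blocks above could be collapsed into one parametrized test; a possible sketch, assuming pytest is available (not part of the commit):

# Sketch: parametrized shape and value-range check for gen_finn_dt_tensor.
import pytest

import finn.core.utils as util
from finn.core.datatype import DataType


@pytest.mark.parametrize(
    "dt, shape",
    [
        (DataType.BIPOLAR, [2, 2]),
        (DataType.BINARY, [4, 2, 3]),
        (DataType.TERNARY, [7, 1, 3, 1]),
        (DataType.INT2, [7, 4]),
    ],
)
def test_gen_finn_dt_tensor_parametrized(dt, shape):
    tensor = util.gen_finn_dt_tensor(dt, shape)
    assert list(tensor.shape) == shape, "shape does not match the desired shape"
    # assumes DataType.allowed also covers INT2 (the test above checks INT2
    # against an explicit value list instead)
    assert all(dt.allowed(v) for v in tensor.flatten()), "value outside datatype range"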