Commit c125ec55 authored by auphelia

[Test] Changed test layer to streaming FC layer; [Refactoring] Changed generation of weights and thresholds
parent 4d6732c6
@@ -38,11 +38,15 @@ class HLSCustomOp(CustomOp):
         self.code_gen_dict = {}
         self.tmp_dir = ""
-        self.code_gen_dir = (util.get_by_name(onnx_node.attribute, "code_gen_dir")).s
+        self.code_gen_dir = util.get_by_name(onnx_node.attribute, "code_gen_dir")
         self.executable_path = ""
 
-    def code_generation(self):
+    def code_generation(self, context):
         node = self.onnx_node
+        if "weights" in context:
+            self.generate_weights(context)
+        if "thresh" in context:
+            self.generate_thresholds(context)
         self.global_includes()
         self.defines()
         self.read_npy_data()
@@ -62,6 +66,14 @@ class HLSCustomOp(CustomOp):
             f.write(template)
             f.close()
 
+    @abstractmethod
+    def generate_weights(self, context):
+        pass
+
+    @abstractmethod
+    def generate_thresholds(self, context):
+        pass
+
     @abstractmethod
     def global_includes(self):
         pass
...
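For orientation, here is a minimal sketch of the dispatch logic that the new code_generation(context) interface introduces: weight and threshold file generation only runs when the execution context actually carries the corresponding tensors. This sketch is not part of the commit; the class name DummyHLSOp and the print statements are purely illustrative stand-ins for a real HLSCustomOp subclass.

import numpy as np

class DummyHLSOp:  # illustrative stand-in for an HLSCustomOp subclass
    def generate_weights(self, context):
        # a real subclass would write params.h here
        print("would write params.h from weights of shape", context["weights"].shape)

    def generate_thresholds(self, context):
        # a real subclass would write thresh.h here
        print("would write thresh.h from thresholds of shape", context["thresh"].shape)

    def code_generation(self, context):
        # only generate the files whose tensors are present in the context
        if "weights" in context:
            self.generate_weights(context)
        if "thresh" in context:
            self.generate_thresholds(context)
        # global_includes(), defines(), read_npy_data(), ... would follow here

DummyHLSOp().code_generation({"weights": np.ones((8, 8)), "thresh": np.zeros((1, 1))})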
@@ -148,6 +148,71 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         assert ret.shape[2] == n_thres_steps
         return ret
 
+    def generate_weights(self, context):
+        weights = context["weights"]
+        # convert weights into hlslib-compatible format
+        weight_tensor = self.get_hls_compatible_weight_tensor(weights)
+        export_wdt = self.get_weight_datatype()
+        # we have converted bipolar weights to binary for export,
+        # so use it as such for weight generation
+        if self.get_weight_datatype() == DataType.BIPOLAR:
+            export_wdt = DataType.BINARY
+        weight_hls_code = numpy_to_hls_code(
+            weight_tensor, export_wdt, "weights", True, True
+        )
+        # write weights into params.h
+        f_weights = open("{}/params.h".format(self.tmp_dir), "w")
+        if export_wdt.bitwidth() != 1:
+            f_weights.write(
+                "static FixedPointWeights<{},{},{},{}> weights = ".format(
+                    self.get_nodeattr("SIMD"),
+                    export_wdt.get_hls_datatype_str(),
+                    self.get_nodeattr("PE"),
+                    self.get_nodeattr("WMEM"),
+                )
+            )
+        else:
+            f_weights.write(
+                "static BinaryWeights<{},{},{}> weights = ".format(
+                    self.get_nodeattr("SIMD"),
+                    self.get_nodeattr("PE"),
+                    self.get_nodeattr("WMEM"),
+                )
+            )
+        f_weights.write(weight_hls_code)
+        f_weights.close()
+
+    def generate_thresholds(self, context):
+        thresholds = context["thresh"]
+        threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds)
+        tdt = DataType.INT32
+        # use UINT32 threshold export for bipolar times bipolar
+        inp_is_bipolar = self.get_input_datatype() == DataType.BIPOLAR
+        wt_is_bipolar = self.get_weight_datatype() == DataType.BIPOLAR
+        if inp_is_bipolar and wt_is_bipolar:
+            tdt = DataType.UINT32
+        thresholds_hls_code = numpy_to_hls_code(
+            threshold_tensor, tdt, "thresholds", False, True
+        )
+        # write thresholds into thresh.h
+        f_thresh = open("{}/thresh.h".format(self.tmp_dir), "w")
+        tdt_hls = tdt.get_hls_datatype_str()
+        odt_hls = self.get_output_datatype().get_hls_datatype_str()
+        f_thresh.write(
+            "static ThresholdsActivation<{},{},{},{},{},{}> threshs = ".format(
+                self.get_nodeattr("TMEM"),
+                self.get_nodeattr("PE"),
+                threshold_tensor.shape[-1],
+                tdt_hls,
+                odt_hls,
+                self.get_nodeattr("ActVal"),
+            )
+        )
+        f_thresh.write(thresholds_hls_code)
+        f_thresh.close()
+
     def execute_node(self, context, graph):
         node = self.onnx_node
         # make temporary directory for generated files
@@ -180,78 +245,16 @@ class StreamingFCLayer_Batch(HLSCustomOp):
                     context[inputs],
                 )
                 temp_files.append("{}/input_{}.npy".format(self.tmp_dir, in_ind))
-            elif in_ind == 1:
-                weights = context[inputs]
-                # convert weights into hlslib-compatible format
-                weight_tensor = self.get_hls_compatible_weight_tensor(weights)
-                export_wdt = self.get_weight_datatype()
-                # we have converted bipolar weights to binary for export,
-                # so use it as such for weight generation
-                if self.get_weight_datatype() == DataType.BIPOLAR:
-                    export_wdt = DataType.BINARY
-                weight_hls_code = numpy_to_hls_code(
-                    weight_tensor, export_wdt, "weights", True, True
-                )
-                # write weights into params.h
-                f_weights = open("{}/params.h".format(self.tmp_dir), "w")
-                if export_wdt.bitwidth() != 1:
-                    f_weights.write(
-                        "static FixedPointWeights<{},{},{},{}> weights = ".format(
-                            self.get_nodeattr("SIMD"),
-                            export_wdt.get_hls_datatype_str(),
-                            self.get_nodeattr("PE"),
-                            self.get_nodeattr("WMEM"),
-                        )
-                    )
-                else:
-                    f_weights.write(
-                        "static BinaryWeights<{},{},{}> weights = ".format(
-                            self.get_nodeattr("SIMD"),
-                            self.get_nodeattr("PE"),
-                            self.get_nodeattr("WMEM"),
-                        )
-                    )
-                f_weights.write(weight_hls_code)
-                f_weights.close()
-                temp_files.append("{}/params.h".format(self.tmp_dir))
-            elif in_ind == 2:
-                thresholds = context[inputs]
-                threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds)
-                tdt = DataType.INT32
-                # use UINT32 threshold export for bipolar times bipolar
-                inp_is_bipolar = self.get_input_datatype() == DataType.BIPOLAR
-                wt_is_bipolar = self.get_weight_datatype() == DataType.BIPOLAR
-                if inp_is_bipolar and wt_is_bipolar:
-                    tdt = DataType.UINT32
-                thresholds_hls_code = numpy_to_hls_code(
-                    threshold_tensor, tdt, "thresholds", False, True
-                )
-                # write weights into thresh.h
-                f_thresh = open("{}/thresh.h".format(self.tmp_dir), "w")
-                tdt_hls = tdt.get_hls_datatype_str()
-                odt_hls = self.get_output_datatype().get_hls_datatype_str()
-                f_thresh.write(
-                    "static ThresholdsActivation<{},{},{},{},{},{}> threshs = ".format(
-                        self.get_nodeattr("TMEM"),
-                        self.get_nodeattr("PE"),
-                        threshold_tensor.shape[-1],
-                        tdt_hls,
-                        odt_hls,
-                        self.get_nodeattr("ActVal"),
-                    )
-                )
-                f_thresh.write(thresholds_hls_code)
-                f_thresh.close()
-                temp_files.append("{}/thresh.h".format(self.tmp_dir))
-            else:
+            elif in_ind > 2:
                 raise Exception("Unexpected input found for StreamingFCLayer")
             in_ind += 1
+        temp_files.append("{}/params.h".format(self.tmp_dir))
+        temp_files.append("{}/thresh.h".format(self.tmp_dir))
         # code generation
-        self.code_generation()
+        self.code_generation(context)
         # c++ compilation and execution flow
         temp_files.append("{}/execute_{}.cpp".format(self.tmp_dir, node.op_type))
...
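As a concrete illustration of what generate_weights emits, here is a hedged sketch, not part of the commit: the numbers mirror the test configuration further below (SIMD=4, PE=4, WMEM=4), with bipolar weights exported as binary, i.e. an export bitwidth of 1.

# Illustrative only: reproduces the declaration that generate_weights writes
# as the first line of params.h when the bitwidth-1 (BinaryWeights) branch is taken.
simd, pe, wmem = 4, 4, 4      # SIMD, PE and WMEM node attributes from the test below
export_bitwidth = 1           # BIPOLAR weights are exported as BINARY (1 bit)
if export_bitwidth != 1:
    decl = "static FixedPointWeights<{},{},{},{}> weights = ".format(
        simd, "ap_int<4>", pe, wmem  # the datatype string here is a placeholder
    )
else:
    decl = "static BinaryWeights<{},{},{}> weights = ".format(simd, pe, wmem)
print(decl)  # -> static BinaryWeights<4,4,4> weights =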
@@ -12,7 +12,7 @@ def code_gen_transformation(node):
     # get the path of the code generation directory if already set
     # check instance and check node attributes for value
     code_gen_dir = inst.code_gen_dir
-    print(code_gen_dir)
     # parameter is empty
     if not code_gen_dir:
         print("parameter is empty")
...
 import numpy as np
 from onnx import TensorProto, helper
 
+import finn.core.utils as util
 import finn.transformation.code_gen_transformation as cg_trafo
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
 
 
 def test_code_gen_trafo():
-    inp = helper.make_tensor_value_info("in", TensorProto.FLOAT, [2, 2, 4, 4])
-    outp = helper.make_tensor_value_info("out", TensorProto.FLOAT, [2, 2, 2, 2])
-
-    MaxPool_batch_node = helper.make_node(
-        "StreamingMaxPool_Batch",
-        ["in"],
-        ["out"],
+    idt = wdt = odt = DataType.BIPOLAR
+    tdt = DataType.UINT32
+    mw = 8
+    mh = 8
+    pe = 4
+    simd = 4
+    wmem = mw * mh // (pe * simd)
+    assert mw * mh == wmem * pe * simd
+    nf = mh // pe
+    sf = mw // simd
+    tmem = nf
+    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, sf, simd])
+    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, nf, pe])
+    node_inp_list = ["inp", "weights", "thresh"]
+    FCLayer_node = helper.make_node(
+        "StreamingFCLayer_Batch",
+        node_inp_list,
+        ["outp"],
         domain="finn",
         backend="fpgadataflow",
-        code_gen_dir="hifch",
+        code_gen_dir="dummy_directory",
         executable_path="",
-        ImgDim=4,
-        PoolDim=2,
-        NumChannels=2,
+        resType="ap_resource_lut()",
+        MW=mw,
+        MH=mh,
+        SIMD=simd,
+        PE=pe,
+        WMEM=wmem,
+        TMEM=tmem,
+        inputDataType=idt.name,
+        weightDataType=wdt.name,
+        outputDataType=odt.name,
     )
     graph = helper.make_graph(
-        nodes=[MaxPool_batch_node],
-        name="max_pool_batch_graph",
-        inputs=[inp],
-        outputs=[outp],
+        nodes=[FCLayer_node], name="fclayer_graph", inputs=[inp], outputs=[outp]
     )
-    model = helper.make_model(graph, producer_name="finn-hls-onnx-model")
-    model = ModelWrapper(model)
 
-    # set the tensor datatypes (in this case: all to bipolar)
-    for tensor in graph.input:
-        model.set_tensor_datatype(tensor.name, DataType["BIPOLAR"])
-    for tensor in graph.output:
-        model.set_tensor_datatype(tensor.name, DataType["BIPOLAR"])
+    model = helper.make_model(graph, producer_name="fclayer-model")
+    model = ModelWrapper(model)
 
-    input_tensor = np.asarray(
-        [
-            1, 1, 1, 1, 1, 1, 1, 1,
-            0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0, 0, 0, 0, 0, 0,
-            1, 1, 1, 1, 1, 1, 1, 1,
-            1, 1, 1, 1, 1, 1, 1, 1,
-            0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0, 0, 0, 0, 0, 0,
-            1, 1, 1, 1, 1, 1, 1, 1,
-        ],
-        dtype=np.float32,
-    ).reshape(2, 2, 4, 4)
-    input_dict = {"in": input_tensor}
+    model.set_tensor_datatype("inp", idt)
+    model.set_tensor_datatype("outp", odt)
+    model.set_tensor_datatype("weights", wdt)
+    W = util.gen_finn_dt_tensor(wdt, (mh, mw))
+    model.set_initializer("weights", W)
+    model.set_tensor_datatype("thresh", tdt)
+    T = np.zeros((1, 1))
+    model.set_initializer("thresh", T)
 
     for nodes in model.graph.node:
         cg_trafo.code_gen_transformation(nodes)
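The test as committed only drives the transformation; a possible follow-up check (purely hypothetical, not part of the commit, and assuming that code_gen_transformation writes the generated directory back into each node's code_gen_dir attribute) could look like this:

import os

import finn.core.utils as util

# Hypothetical sanity check: every node should now point at an existing
# code generation directory containing the emitted sources.
for node in model.graph.node:
    code_gen_dir = util.get_by_name(node.attribute, "code_gen_dir").s.decode("utf-8")
    assert os.path.isdir(code_gen_dir)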