Commit 2ad5f505 authored by Yaman Umuroglu

Merge branch 'dev' into feature/ip_stitching

parents 84697bd9 b42e6acd
@@ -5,6 +5,7 @@
| Export/Import | x | x | x | | |
| Streamlining | x | x | | | |
| Convert to HLS layers | x | | | | |
| hlslib simulation | x | | | | |
| Monolithic HLS generation | | | | | |
| npysim | x | | | | |
| Stitched IPI design | | | | | |
| rtlsim | | | | | |
| Hardware demo | | | | | |
import finn.backend.fpgadataflow.layers as ly


def strm_decl(model, code_gen_dict):
    # declare one hls::stream per FIFO node; the stream width depends on the
    # FIFO's position in the dataflow pipeline
    num_FIFOs = get_num_of_FIFOs(model)
    code_gen_dict["stream_declarations"] = []
    FIFO_ind = 1
    for node in model.graph.node:
        if node.op_type == "FIFO":
            name = node.name
            if FIFO_ind == 1:
                code_gen_dict["stream_declarations"].append(
                    'hls::stream<ap_uint<L{}_SIMD>> {}("DoCompute.{}");'.format(
                        FIFO_ind - 1, name, name
                    )
                )
            # TODO: check whether the elif and else branches can be merged
            elif FIFO_ind == num_FIFOs:
                code_gen_dict["stream_declarations"].append(
                    'hls::stream<ap_uint<L{}_PE>> {}("DoCompute.{}");'.format(
                        FIFO_ind - 2, name, name
                    )
                )
            else:
                code_gen_dict["stream_declarations"].append(
                    "hls::stream<ap_uint<L{}_PE * (L{}_API + L{}_APF)>> "
                    '{}("DoCompute.{}");'.format(
                        FIFO_ind - 2, FIFO_ind - 2, FIFO_ind - 2, name, name
                    )
                )
            FIFO_ind += 1


def get_num_of_FIFOs(model):
    i = 0
    for node in model.graph.node:
        if node.op_type == "FIFO":
            i += 1
    return i


def strm_prgm(model, code_gen_dict):
    code_gen_dict["stream_pragmas"] = ["#pragma HLS DATAFLOW"]
    for node in model.graph.node:
        if node.op_type == "FIFO":
            name = node.name
            # TODO: FIFO nodes currently have only one attribute (the depth);
            # adjust this lookup if more attributes are added
            depth = node.attribute[0].i
            code_gen_dict["stream_pragmas"].append(
                "#pragma HLS stream depth={} variable={}".format(depth, name)
            )


def computation_cmds(model, all_strmfcl, code_gen_dict):
    code_gen_dict["compute"] = []
    for i in range(len(all_strmfcl)):
        consumer = model.find_consumer(all_strmfcl[i].output)
        output_name = consumer.output[0]
        code_gen_dict["compute"].append(
            "{}<L{}_MW, L{}_MH, L{}_SIMD, L{}_PE, {}> "
            "({}, {}, {}, {}, numReps, {});".format(
                all_strmfcl[i].op_type,
                i,
                i,
                i,
                i,
                all_strmfcl[i].resDataType,
                all_strmfcl[i].input,
                output_name,
                all_strmfcl[i].weights,
                all_strmfcl[i].thresholds,
                all_strmfcl[i].resType,
            )
        )


def config_cmds(model, code_gen_dict):
    all_strmfcl = []
    code_gen_dict["config"] = []

    # TODO: find out these values and add them to get_layer_parameters()
    WPI = 1
    WPF = 0
    APF = 0

    i = -1
    for node in model.graph.node:
        if node.op_type == "StreamingFCLayer_Batch":
            i += 1
            layer = ly.StreamingFCLayer_Batch(node, model)
            code_gen_dict["config"].append(
                "#define L{}_SIMD {} \n "
                "#define L{}_PE {} \n "
                "#define L{}_WMEM {} \n "
                "#define L{}_TMEM {} \n "
                "#define L{}_MW {} \n "
                "#define L{}_MH {} \n "
                "#define L{}_WPI {} \n "
                "#define L{}_API {} \n "
                "#define L{}_WPF {} \n "
                "#define L{}_APF {} \n ".format(
                    i,
                    layer.SIMD,
                    i,
                    layer.PE,
                    i,
                    layer.WMEM,
                    i,
                    layer.TMEM,
                    i,
                    layer.MW,
                    i,
                    layer.MH,
                    i,
                    WPI,
                    i,
                    layer.API,
                    i,
                    WPF,
                    i,
                    APF,
                )
            )
            all_strmfcl.append(layer)
    return all_strmfcl


def code_generation(model):
    code_gen_dict = {}
    # config commands
    all_strmfcl = config_cmds(model, code_gen_dict)
    # stream declarations
    strm_decl(model, code_gen_dict)
    # stream pragmas
    strm_prgm(model, code_gen_dict)
    # computation commands
    computation_cmds(model, all_strmfcl, code_gen_dict)
    # print(code_gen_dict)
    return code_gen_dict
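
code_generation() returns a plain dictionary holding four lists of C++ source strings ("config", "stream_declarations", "stream_pragmas" and "compute"). The sketch below shows one way these fragments could be joined into a single HLS source string; the DoCompute wrapper and the helper name assemble_cpp are assumptions for illustration, not part of this commit.

def assemble_cpp(code_gen_dict):
    # illustrative only: joins the generated fragments into one C++ string;
    # the DoCompute signature and surrounding boilerplate are placeholders
    lines = []
    lines += code_gen_dict["config"]
    lines.append("void DoCompute(/* top-level ports omitted in this sketch */) {")
    lines += ["    " + s for s in code_gen_dict["stream_declarations"]]
    lines += ["    " + s for s in code_gen_dict["stream_pragmas"]]
    lines += ["    " + s for s in code_gen_dict["compute"]]
    lines.append("}")
    return "\n".join(lines)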


class StreamingFCLayer_Batch:
    def __init__(self, node, model):
        self.op_type = "StreamingFCLayer_Batch"
        # layer attributes
        num_attr = len(node.attribute)
        for k in range(num_attr):
            if node.attribute[k].name == "PE":
                self.PE = node.attribute[k].i
            if node.attribute[k].name == "SIMD":
                self.SIMD = node.attribute[k].i
            if node.attribute[k].name == "MH":
                self.MH = node.attribute[k].i
            if node.attribute[k].name == "MW":
                self.MW = node.attribute[k].i
            if node.attribute[k].name == "resDataType":
                self.resDataType = node.attribute[k].s.decode("utf-8")
            if node.attribute[k].name == "resType":
                self.resType = node.attribute[k].s.decode("utf-8")
        # get input and output names
        self.input = node.input[0]
        self.weights = node.input[1]
        self.thresholds = node.input[2]
        self.output = node.output[0]
        # get other parameters from the weight and threshold tensor shapes
        weights_shape = model.get_tensor_shape(self.weights)
        thresholds_shape = model.get_tensor_shape(self.thresholds)
        self.WMEM = weights_shape[2]
        self.TMEM = thresholds_shape[0]
        self.API = thresholds_shape[2]

    def get_PE(self):
        return self.PE

    def get_SIMD(self):
        return self.SIMD

    def get_MH(self):
        return self.MH

    def get_MW(self):
        return self.MW

    def get_resDataType(self):
        return self.resDataType

    def get_resType(self):
        return self.resType

    def get_WMEM(self):
        return self.WMEM

    def get_TMEM(self):
        return self.TMEM

    def get_API(self):
        return self.API

    def get_input_name(self):
        return self.input

    def get_weights_name(self):
        return self.weights

    def get_thresholds_name(self):
        return self.thresholds

    def get_output_name(self):
        return self.output
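
For reference, the kind of ONNX node this wrapper reads can be built with onnx.helper; the attribute values and tensor names below are placeholders, not values taken from this commit. Instantiating the class additionally requires a ModelWrapper whose get_tensor_shape() can resolve the weight and threshold tensors, since WMEM, TMEM and API are derived from those shapes.

from onnx import helper

# placeholder node: documents the attribute names and input order expected by
# StreamingFCLayer_Batch.__init__ (all values below are made up)
fc_node = helper.make_node(
    "StreamingFCLayer_Batch",
    inputs=["inp", "weights", "thresholds"],
    outputs=["outp"],
    PE=16,
    SIMD=16,
    MH=1024,
    MW=832,
    resDataType="ap_uint<16>",
    resType="lut",
)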

from pkgutil import get_data

import finn.backend.fpgadataflow.code_gen as cg
from finn.core.modelwrapper import ModelWrapper


def test_code_generation():
    # load the onnx model and run code generation on it
    raw_m = get_data("finn", "data/onnx/finn-hls-model/finn-hls-onnx-model.onnx")
    model = ModelWrapper(raw_m)
    code_gen_dict = cg.code_generation(model)
    print(code_gen_dict)
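
Since code_generation() creates all four sections unconditionally, the test could also check the returned keys; a possible extension of this test, not part of the commit:

def test_code_generation_sections():
    raw_m = get_data("finn", "data/onnx/finn-hls-model/finn-hls-onnx-model.onnx")
    model = ModelWrapper(raw_m)
    code_gen_dict = cg.code_generation(model)
    # these keys are always created by config_cmds, strm_decl, strm_prgm and
    # computation_cmds, even for models without matching nodes
    for section in ["config", "stream_declarations", "stream_pragmas", "compute"]:
        assert section in code_gen_dict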