Commit 24e15e46 authored by auphelia

[Merge Conflict] Added both modifications

parents 5c2fb59a 1b6fcc3c
import finn.core.utils as util
import finn.custom_op.registry as registry


def res_estimation(model):
    """Estimates the resources needed for the given model.
    Returns {node name : resource estimation}"""
    res_dict = {}
    for node in model.graph.node:
        if node.domain == "finn":
            backend_attribute = util.get_by_name(node.attribute, "backend")
            if backend_attribute is None:
                continue
            backend_value = backend_attribute.s.decode("UTF-8")
            if backend_value == "fpgadataflow":
                op_type = node.op_type
                inst = registry.custom_op[op_type](node)
                res_dict[node.name] = inst.node_res_estimation()
    return res_dict
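As a usage sketch (not part of the commit): the pass is meant to be run through ModelWrapper.analysis, as the test further below does; "model" here is assumed to be a ModelWrapper whose graph contains fpgadataflow nodes with unique names.

from finn.analysis.fpgadataflow.res_estimation import res_estimation

# returns {node name : list of resource strings from node_res_estimation()},
# e.g. {"StreamingFCLayer_Batch_0": ["BRAMs: 1", "LUTs: 370.4"]}
res = model.analysis(res_estimation)
for name, estimate in res.items():
    print(name, estimate)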
@@ -35,6 +35,20 @@ class HLSCustomOp(CustomOp):
"sim_cycles": ("i", False, 0),
}
def node_res_estimation(self):
resources = []
resources.append("BRAMs: " + str(self.bram_estimation()))
resources.append("LUTs: " + str(self.lut_estimation()))
return resources
@abstractmethod
def bram_estimation(self):
pass
@abstractmethod
def lut_estimation(self):
pass
def code_generation_ipgen(self, model, fpgapart, clk):
node = self.onnx_node
......
@@ -36,6 +36,12 @@ class ConvolutionInputGenerator(HLSCustomOp):
    def verify_node(self):
        pass

    def bram_estimation(self):
        pass

    def lut_estimation(self):
        pass

    def get_input_datatype(self):
        return DataType[self.get_nodeattr("inputDataType")]
......
import math
import os
import numpy as np
@@ -146,6 +147,48 @@ class StreamingFCLayer_Batch(HLSCustomOp):
        return info_messages

    def bram_estimation(self):
        """the calculations are based on:
        - FINN-R: An End-to-End Deep-Learning Framework for Fast
          Exploration of Quantized Neural Networks
        - M. Blott, T. B. Preusser, N. J. Fraser, G. Gambardella, K. O'Brien,
          Y. Umuroglu, M. Leeser and K. Vissers
        - 12. Sep 2018
        """
        P = self.get_nodeattr("PE")
        Q = self.get_nodeattr("SIMD")
        wdt = self.get_weight_datatype()
        W = wdt.bitwidth()
        D_in = self.get_instream_width()
        D_out = self.get_outstream_width()
        omega = (D_in * D_out) / (Q * P)
        return P * (math.ceil(omega / 512)) * (math.ceil((Q * W) / 36))

    def lut_estimation(self):
        """the calculations are based on:
        - FINN-R: An End-to-End Deep-Learning Framework for Fast
          Exploration of Quantized Neural Networks
        - M. Blott, T. B. Preusser, N. J. Fraser, G. Gambardella, K. O'Brien,
          Y. Umuroglu, M. Leeser and K. Vissers
        - 12. Sep 2018
        """
        P = self.get_nodeattr("PE")
        Q = self.get_nodeattr("SIMD")
        wdt = self.get_weight_datatype()
        W = wdt.bitwidth()
        # determine tdt with input and weight data types
        idt = self.get_input_datatype()
        if idt == wdt == DataType.BIPOLAR:
            tdt = DataType.UINT32
        else:
            tdt = DataType.INT32
        A = tdt.bitwidth()
        # parameters from experiments in paper mentioned above
        c0 = 300
        c1 = 1.1
        return c0 + c1 * (P * Q) * (W * A)

    def get_input_datatype(self):
        return DataType[self.get_nodeattr("inputDataType")]
......
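A quick hand check of these two formulas (a sketch only, not part of the commit) with the parameters used in the resource-estimation test further down: PE=1, SIMD=1, INT2 weights and inputs, INT32 accumulator. The stream widths here are an assumption, taken as D_in = SIMD * input bitwidth and D_out = PE * output bitwidth.

import math

P, Q, W, A = 1, 1, 2, 32          # PE, SIMD, weight bits, accumulator bits
D_in, D_out = 2, 32               # assumed in/out stream widths for the test case
omega = (D_in * D_out) / (Q * P)  # 64.0
brams = P * math.ceil(omega / 512) * math.ceil((Q * W) / 36)  # -> 1
luts = 300 + 1.1 * (P * Q) * (W * A)                          # -> 370.4
print(brams, luts)

These values match the expected dictionary in the test ("BRAMs: 1", "LUTs: 370.4").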
@@ -69,9 +69,16 @@ class StreamingMaxPool_Batch(HLSCustomOp):
        return info_messages

    def get_number_output_values(self):
        pass

    def bram_estimation(self):
        pass

    def lut_estimation(self):
        pass

    def global_includes(self):
        self.code_gen_dict["$GLOBALS$"] = ['#include "maxpool.h"']
......
from onnx import TensorProto, helper

from finn.analysis.fpgadataflow.res_estimation import res_estimation
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.general import GiveUniqueNodeNames


def check_two_dict_for_equality(dict1, dict2):
    for key in dict1:
        assert key in dict2, "Key: {} is not in both dictionaries".format(key)
        assert (
            dict1[key] == dict2[key]
        ), """Values for key {} are not the same
        in both dictionaries""".format(
            key
        )

    return True


def test_res_estimate():
    mw = mh = 4
    simd = 1
    pe = 1
    idt = DataType.INT2
    wdt = DataType.INT2
    odt = DataType.INT32
    actval = odt.min()

    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, mw])
    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, mh])
    node_inp_list = ["inp", "weights", "thresh"]

    FCLayer_node = helper.make_node(
        "StreamingFCLayer_Batch",
        node_inp_list,
        ["outp"],
        domain="finn",
        backend="fpgadataflow",
        resType="ap_resource_lut()",
        MW=mw,
        MH=mh,
        SIMD=simd,
        PE=pe,
        inputDataType=idt.name,
        weightDataType=wdt.name,
        outputDataType=odt.name,
        ActVal=actval,
        binaryXnorMode=0,
        noActivation=0,
    )
    graph = helper.make_graph(
        nodes=[FCLayer_node], name="fclayer_graph", inputs=[inp], outputs=[outp]
    )

    model = helper.make_model(graph, producer_name="fclayer-model")
    model = ModelWrapper(model)

    model.set_tensor_datatype("inp", idt)
    model.set_tensor_datatype("outp", odt)
    model.set_tensor_datatype("weights", wdt)

    model = model.transform(GiveUniqueNodeNames())
    prod_resource_estimation = model.analysis(res_estimation)
    expect_resource_estimation = {
        "StreamingFCLayer_Batch_0": ["BRAMs: 1", "LUTs: 370.4"]
    }

    assert check_two_dict_for_equality(
        prod_resource_estimation, expect_resource_estimation
    ), """The produced output of
    the resource estimation analysis pass is not equal to the expected one"""