Skip to content
Snippets Groups Projects
Commit d6bf1d2b authored by Yaman Umuroglu's avatar Yaman Umuroglu
Browse files

[Refactor] access customop node instance through member variable

parent f5fbd0bc
No related branches found
No related tags found
No related merge requests found
......@@ -8,7 +8,7 @@ def execute_custom_node(node, context, graph):
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
inst.execute_node(node, context, graph)
inst.execute_node(context, graph)
except KeyError:
# exception if op_type is not supported
raise Exception("Custom op_type %s is currently not supported." % op_type)
......@@ -8,13 +8,13 @@ class CustomOp(ABC):
# TODO consider specifying a list of allowed attributes
@abstractmethod
def make_shape_compatible_op(self, node):
def make_shape_compatible_op(self):
pass
@abstractmethod
def infer_node_datatype(self, node, model):
def infer_node_datatype(self, model):
pass
@abstractmethod
def execute_node(self, node, context, graph):
def execute_node(self, context, graph):
pass
......@@ -38,18 +38,19 @@ class HLSCustomOp(CustomOp):
self.tmp_dir = " "
@abstractmethod
def get_attributes(self, node):
def get_attributes(self):
pass
def code_generation(self, node):
self.get_attributes(node)
self.global_includes(node)
self.defines(node)
self.read_npy_data(node)
self.strm_decl(node)
self.docompute(node)
self.dataoutstrm(node)
self.save_as_npy(node)
def code_generation(self):
node = self.onnx_node
self.get_attributes()
self.global_includes()
self.defines()
self.read_npy_data()
self.strm_decl()
self.docompute()
self.dataoutstrm()
self.save_as_npy()
template = self.docompute_template
......@@ -63,29 +64,29 @@ class HLSCustomOp(CustomOp):
f.close()
@abstractmethod
def global_includes(self, node):
def global_includes(self):
pass
@abstractmethod
def defines(self, node):
def defines(self):
pass
@abstractmethod
def read_npy_data(self, node):
def read_npy_data(self):
pass
@abstractmethod
def strm_decl(self, node):
def strm_decl(self):
pass
@abstractmethod
def docompute(self, node):
def docompute(self):
pass
@abstractmethod
def dataoutstrm(self, node):
def dataoutstrm(self):
pass
@abstractmethod
def save_as_npy(self, node):
def save_as_npy(self):
pass
......@@ -16,13 +16,14 @@ class StreamingFCLayer_Batch(HLSCustomOp):
self.WMEM = 0
self.TMEM = 0
def make_shape_compatible_op(self, node):
def make_shape_compatible_op(self):
pass
def infer_node_datatype(self, node, model):
def infer_node_datatype(self, model):
pass
def execute_node(self, node, context, graph):
def execute_node(self, context, graph):
node = self.onnx_node
# make temporary directory for generated files
self.tmp_dir = tmp.mkdtemp(prefix=str(node.op_type) + "_")
......@@ -31,7 +32,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
temp_files = []
# get attributes for correct packing of weights and thresholds
self.get_attributes(node)
self.get_attributes()
# create an npy file for each input of the node (in_ind is input index)
in_ind = 0
......@@ -89,7 +90,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
in_ind += 1
# code generation
self.code_generation(node)
self.code_generation()
# c++ compilation and execution flow
temp_files.append("{}/execute_{}.cpp".format(self.tmp_dir, node.op_type))
......@@ -114,7 +115,8 @@ class StreamingFCLayer_Batch(HLSCustomOp):
# for temp_file in temp_files:
# os.remove(temp_file)
def get_attributes(self, node):
def get_attributes(self):
node = self.onnx_node
self.resType = utils.get_by_name(node.attribute, "resType").s.decode("utf-8")
self.MW = utils.get_by_name(node.attribute, "MW").i
self.MH = utils.get_by_name(node.attribute, "MH").i
......@@ -124,7 +126,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
"utf-8"
)
def global_includes(self, node):
def global_includes(self):
self.code_gen_dict["$GLOBALS$"] = '#include "weights.hpp" \n'
self.code_gen_dict["$GLOBALS$"] += '#include "activations.hpp" \n'
if self.WMEM != 0:
......@@ -134,7 +136,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
# TODO find a better way of checking for no pregenerated thresholds
self.code_gen_dict["$GLOBALS$"] += '#include "thresh.h" \n'
def defines(self, node):
def defines(self):
numReps = 2
self.code_gen_dict["$DEFINES$"] = [
"""#define MW1 {}\n #define MH1 {}\n #define SIMD1 {}\n
......@@ -144,7 +146,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
)
]
def read_npy_data(self, node):
def read_npy_data(self):
# c++ code to read out an npy file
# and put it in hls::stream in the correct order
self.code_gen_dict["$READNPYDATA$"] = []
......@@ -175,7 +177,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
self.code_gen_dict["$READNPYDATA$"].append("in0 << dat0;")
self.code_gen_dict["$READNPYDATA$"].append("}")
def strm_decl(self, node):
def strm_decl(self):
self.code_gen_dict["$STREAMDECLARATIONS$"] = []
self.code_gen_dict["$STREAMDECLARATIONS$"].append(
'hls::stream<ap_uint<{}>> in0 ("in0");'.format(self.SIMD)
......@@ -184,7 +186,8 @@ class StreamingFCLayer_Batch(HLSCustomOp):
'hls::stream<ap_uint<{}>> out ("out");'.format(self.PE)
)
def docompute(self, node):
def docompute(self):
node = self.onnx_node
self.code_gen_dict["$DOCOMPUTE$"] = [
"""{}<MW1, MH1, SIMD1, PE1, {}>
(in0, out, weights, threshs, numReps, {});""".format(
......@@ -192,7 +195,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
)
]
def dataoutstrm(self, node):
def dataoutstrm(self):
self.code_gen_dict["$DATAOUTSTREAM$"] = [
"ap_uint<{}> out_data;\n std::vector<ap_uint<{}>> out_data_vector;".format(
self.PE, self.PE
......@@ -222,7 +225,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
)
self.code_gen_dict["$DATAOUTSTREAM$"].append("}")
def save_as_npy(self, node):
def save_as_npy(self):
self.code_gen_dict["$SAVEASCNPY$"] = [
"""cnpy::npy_save("{}/output.npy",&output_data_vector[0],
{{1,{},{}}},"w");""".format(
......
......@@ -9,13 +9,14 @@ from finn.custom_op.fpgadataflow import HLSCustomOp
class StreamingMaxPool(HLSCustomOp):
def make_shape_compatible_op(self, node):
def make_shape_compatible_op(self):
pass
def infer_node_datatype(self, node, model):
def infer_node_datatype(self, model):
pass
def execute_node(self, node, context, graph):
def execute_node(self, context, graph):
node = self.onnx_node
# make temporary directory for generated files
self.tmp_dir = tmp.mkdtemp(prefix=str(node.op_type) + "_")
......@@ -34,7 +35,7 @@ class StreamingMaxPool(HLSCustomOp):
in_ind += 1
# code generation
self.code_generation(node)
self.code_generation()
# c++ compilation and execution flow
temp_files.append("{}/execute_{}.cpp".format(self.tmp_dir, node.op_type))
......@@ -59,22 +60,24 @@ class StreamingMaxPool(HLSCustomOp):
# for temp_file in temp_files:
# os.remove(temp_file)
def get_attributes(self, node):
def get_attributes(self):
node = self.onnx_node
self.ImgDim = get_by_name(node.attribute, "ImgDim").i
self.PoolDim = get_by_name(node.attribute, "PoolDim").i
self.NumChannels = get_by_name(node.attribute, "NumChannels").i
def global_includes(self, node):
def global_includes(self):
self.code_gen_dict["$GLOBALS$"] = ['#include "maxpool.h"']
def defines(self, node):
def defines(self):
self.code_gen_dict["$DEFINES$"] = [
"#define ImgDim {}\n #define PoolDim {}\n #define NumChannels {}".format(
self.ImgDim, self.PoolDim, self.NumChannels
)
]
def read_npy_data(self, node):
def read_npy_data(self):
node = self.onnx_node
# c++ code to read out an npy file
# and put it in hls::stream in the correct order
self.code_gen_dict["$READNPYDATA$"] = []
......@@ -113,7 +116,8 @@ class StreamingMaxPool(HLSCustomOp):
self.code_gen_dict["$READNPYDATA$"].append("}")
input_ind += 1
def strm_decl(self, node):
def strm_decl(self):
node = self.onnx_node
self.code_gen_dict["$STREAMDECLARATIONS$"] = []
input_ind = 0
for inputs in node.input:
......@@ -127,12 +131,13 @@ class StreamingMaxPool(HLSCustomOp):
'hls::stream<ap_uint<{}>> out ("out");'.format(self.NumChannels)
)
def docompute(self, node):
def docompute(self):
node = self.onnx_node
self.code_gen_dict["$DOCOMPUTE$"] = [
"{}<ImgDim, PoolDim, NumChannels>(in0, out);".format(node.op_type)
]
def dataoutstrm(self, node):
def dataoutstrm(self):
self.code_gen_dict["$DATAOUTSTREAM$"] = [
"ap_uint<{}> out_data;\n std::vector<ap_uint<{}>> out_data_vector;".format(
self.NumChannels, self.NumChannels
......@@ -162,7 +167,7 @@ class StreamingMaxPool(HLSCustomOp):
)
self.code_gen_dict["$DATAOUTSTREAM$"].append("}")
def save_as_npy(self, node):
def save_as_npy(self):
self.code_gen_dict["$SAVEASCNPY$"] = [
"""cnpy::npy_save("{}/output.npy",&output_data_vector[0],
{{{},{},{}}},"w");""".format(
......
......@@ -9,13 +9,14 @@ from finn.custom_op.fpgadataflow import HLSCustomOp
class StreamingMaxPool_Batch(HLSCustomOp):
def make_shape_compatible_op(self, node):
def make_shape_compatible_op(self):
pass
def infer_node_datatype(self, node, model):
def infer_node_datatype(self, model):
pass
def execute_node(self, node, context, graph):
def execute_node(self, context, graph):
node = self.onnx_node
# make temporary directory for generated files
self.tmp_dir = tmp.mkdtemp(prefix=str(node.op_type) + "_")
......@@ -34,7 +35,7 @@ class StreamingMaxPool_Batch(HLSCustomOp):
in_ind += 1
# code generation
self.code_generation(node)
self.code_generation()
# c++ compilation and execution flow
temp_files.append("{}/execute_{}.cpp".format(self.tmp_dir, node.op_type))
......@@ -59,15 +60,16 @@ class StreamingMaxPool_Batch(HLSCustomOp):
# for temp_file in temp_files:
# os.remove(temp_file)
def get_attributes(self, node):
def get_attributes(self):
node = self.onnx_node
self.ImgDim = get_by_name(node.attribute, "ImgDim").i
self.PoolDim = get_by_name(node.attribute, "PoolDim").i
self.NumChannels = get_by_name(node.attribute, "NumChannels").i
def global_includes(self, node):
def global_includes(self):
self.code_gen_dict["$GLOBALS$"] = ['#include "maxpool.h"']
def defines(self, node):
def defines(self):
numReps = 2
self.code_gen_dict["$DEFINES$"] = [
"""#define ImgDim {}\n #define PoolDim {}\n
......@@ -76,7 +78,8 @@ class StreamingMaxPool_Batch(HLSCustomOp):
)
]
def read_npy_data(self, node):
def read_npy_data(self):
node = self.onnx_node
# c++ code to read out an npy file
# and put it in hls::stream in the correct order
self.code_gen_dict["$READNPYDATA$"] = []
......@@ -115,7 +118,8 @@ class StreamingMaxPool_Batch(HLSCustomOp):
self.code_gen_dict["$READNPYDATA$"].append("}")
input_ind += 1
def strm_decl(self, node):
def strm_decl(self):
node = self.onnx_node
self.code_gen_dict["$STREAMDECLARATIONS$"] = []
input_ind = 0
for inputs in node.input:
......@@ -129,12 +133,13 @@ class StreamingMaxPool_Batch(HLSCustomOp):
'hls::stream<ap_uint<{}>> out ("out");'.format(self.NumChannels)
)
def docompute(self, node):
def docompute(self):
node = self.onnx_node
self.code_gen_dict["$DOCOMPUTE$"] = [
"{}<ImgDim, PoolDim, NumChannels>(in0, out, numReps);".format(node.op_type)
]
def dataoutstrm(self, node):
def dataoutstrm(self):
self.code_gen_dict["$DATAOUTSTREAM$"] = [
"ap_uint<{}> out_data;\n std::vector<ap_uint<{}>> out_data_vector;".format(
self.NumChannels, self.NumChannels
......@@ -164,7 +169,7 @@ class StreamingMaxPool_Batch(HLSCustomOp):
)
self.code_gen_dict["$DATAOUTSTREAM$"].append("}")
def save_as_npy(self, node):
def save_as_npy(self):
numReps = 2
self.code_gen_dict["$SAVEASCNPY$"] = [
"""cnpy::npy_save("{}/output.npy",&output_data_vector[0],
......
......@@ -58,10 +58,12 @@ def multithreshold(v, thresholds, out_scale=None, out_bias=None):
class MultiThreshold(CustomOp):
def make_shape_compatible_op(self, node):
def make_shape_compatible_op(self):
node = self.onnx_node
return helper.make_node("Relu", [node.input[0]], [node.output[0]])
def infer_node_datatype(self, node, model):
def infer_node_datatype(self, model):
node = self.onnx_node
try:
odt = get_by_name(node.attribute, "out_dtype").s.decode("utf-8")
model.set_tensor_datatype(node.output[0], DataType[odt])
......@@ -72,7 +74,8 @@ class MultiThreshold(CustomOp):
odtype = DataType.get_smallest_possible(n_thres)
model.set_tensor_datatype(node.output[0], odtype)
def execute_node(self, node, context, graph):
def execute_node(self, context, graph):
node = self.onnx_node
# save inputs
v = context[node.input[0]]
thresholds = context[node.input[1]]
......
......@@ -31,19 +31,22 @@ def xnorpopcountmatmul(inp0, inp1):
class XnorPopcountMatMul(CustomOp):
def make_shape_compatible_op(self, node):
def make_shape_compatible_op(self):
node = self.onnx_node
return helper.make_node(
"MatMul", [node.input[0], node.input[1]], [node.output[0]]
)
def infer_node_datatype(self, node, model):
def infer_node_datatype(self, model):
node = self.onnx_node
# ensure inputs are binary
assert model.get_tensor_datatype(node.input[0]) == DataType["BINARY"]
assert model.get_tensor_datatype(node.input[1]) == DataType["BINARY"]
# XNOR-popcount produces unsigned integers, assume uint32
model.set_tensor_datatype(node.output[0], DataType["UINT32"])
def execute_node(self, node, context, graph):
def execute_node(self, context, graph):
node = self.onnx_node
# save inputs
inp0 = context[node.input[0]]
inp1 = context[node.input[1]]
......
......@@ -14,7 +14,7 @@ def _infer_node_datatype(model, node):
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
inst.infer_node_datatype(node, model)
inst.infer_node_datatype(model)
except KeyError:
# exception if op_type is not supported
raise Exception("Custom op_type %s is currently not supported." % op_type)
......
......@@ -13,7 +13,7 @@ def _make_shape_compatible_op(node):
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
return inst.make_shape_compatible_op(node)
return inst.make_shape_compatible_op()
except KeyError:
# exception if op_type is not supported
raise Exception("Custom op_type %s is currently not supported." % op_type)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment