Commit 1db41884 authored by auphelia

[code generation] Added "_npysim" to all involved files

parent 4b5f0d59
Showing with 35 additions and 41 deletions
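This commit renames the npysim (C++ simulation) code generation pieces throughout the codebase: the node attribute "code_gen_dir" becomes "code_gen_dir_npysim", HLSCustomOp.code_generation() becomes code_generation_npysim(), and the CodeGen transformation becomes CodeGen_npysim. A minimal usage sketch (not part of the commit; it assumes "model" is an existing ModelWrapper containing fpgadataflow nodes), mirroring the updated tests below:

from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
from finn.transformation.fpgadataflow.compile import Compile

# CodeGen_npysim writes C++ simulation sources for each fpgadataflow node into
# a temporary directory and records its path in the node attribute
# "code_gen_dir_npysim"; Compile then builds the single-node executable and
# fills in "executable_path".
model = model.transform(CodeGen_npysim())
model = model.transform(Compile())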
@@ -42,11 +42,11 @@ class HLSCustomOp(CustomOp):
def get_nodeattr_types(self):
return {
"backend": ("s", True, "fpgadataflow"),
"code_gen_dir": ("s", False, ""),
"code_gen_dir_npysim": ("s", False, ""),
"executable_path": ("s", False, ""),
}
- def code_generation(self, model):
+ def code_generation_npysim(self, model):
node = self.onnx_node
self.generate_params(model)
self.global_includes()
@@ -63,13 +63,13 @@ class HLSCustomOp(CustomOp):
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
- code_gen_dir = self.get_nodeattr("code_gen_dir")
+ code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
f = open(os.path.join(code_gen_dir, "execute_{}.cpp".format(node.op_type)), "w")
f.write(template)
f.close()
def compile_singlenode_code(self):
- code_gen_dir = self.get_nodeattr("code_gen_dir")
+ code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
builder = CppBuilder()
# to enable additional debug features please uncomment the next line
# builder.append_includes("-DDEBUG")
@@ -87,7 +87,7 @@ class HLSCustomOp(CustomOp):
def dynamic_input_to_npy(self, context, count):
node = self.onnx_node
- code_gen_dir = self.get_nodeattr("code_gen_dir")
+ code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
if code_gen_dir == "":
raise Exception(
"""
@@ -106,7 +106,7 @@ Found no codegen dir for this node, did you run the codegen transformation?
def npy_to_dynamic_output(self, context):
# TODO support multi-output nodes as needed
node = self.onnx_node
- code_gen_dir = self.get_nodeattr("code_gen_dir")
+ code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
output = np.load("{}/output.npy".format(code_gen_dir))
context[node.output[0]] = output
@@ -14,9 +14,6 @@ class StreamingFCLayer_Batch(HLSCustomOp):
def get_nodeattr_types(self):
my_attrs = {
# "backend": ("s", True, "fpgadataflow"),
# "code_gen_dir": ("s", True, ""),
# "executable_path": ("s", True, ""),
"PE": ("i", True, 0),
"SIMD": ("i", True, 0),
"MW": ("i", True, 0),
@@ -91,7 +88,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
# verify that all necessary attributes exist
try:
- self.get_nodeattr("code_gen_dir")
+ self.get_nodeattr("code_gen_dir_npysim")
self.get_nodeattr("executable_path")
self.get_nodeattr("resType")
self.get_nodeattr("MW")
@@ -109,7 +106,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
info_messages.append(
"""The necessary attributes do not exist.
StreamingFCLayer_Batch needs the following attributes:
- code_gen_dir, executable_path, resType, MW, MH, SIMD, PE,
+ code_gen_dir_npysim, executable_path, resType, MW, MH, SIMD, PE,
inputDataType, weightDataType, outputDataType, ActVal,
binaryXnorMode, noActivation"""
)
@@ -283,7 +280,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
weight_tensor, export_wdt, "weights", True, True
)
# write weights into params.h
- code_gen_dir = self.get_nodeattr("code_gen_dir")
+ code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
f_weights = open("{}/params.h".format(code_gen_dir), "w")
if export_wdt.bitwidth() != 1:
@@ -324,7 +321,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
threshold_tensor, tdt, "thresholds", False, True
)
# write thresholds into thresh.h
- code_gen_dir = self.get_nodeattr("code_gen_dir")
+ code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
f_thresh = open("{}/thresh.h".format(code_gen_dir), "w")
tdt_hls = tdt.get_hls_datatype_str()
# use binary to export bipolar activations
@@ -357,7 +354,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
nf = mh // pe
# TODO ensure codegen dir exists
- code_gen_dir = self.get_nodeattr("code_gen_dir")
+ code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
# create an npy file for each input of the node (in_ind is input index)
in_ind = 0
for inputs in node.input:
@@ -419,7 +416,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
]
def read_npy_data(self):
- code_gen_dir = self.get_nodeattr("code_gen_dir")
+ code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
dtype = self.get_input_datatype()
if dtype == DataType.BIPOLAR:
# use binary for bipolar storage
@@ -466,7 +463,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
]
def dataoutstrm(self):
- code_gen_dir = self.get_nodeattr("code_gen_dir")
+ code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
dtype = self.get_output_datatype()
if dtype == DataType.BIPOLAR:
# use binary for bipolar storage
@@ -4,9 +4,6 @@ from finn.custom_op.fpgadataflow import HLSCustomOp
class StreamingMaxPool_Batch(HLSCustomOp):
def get_nodeattr_types(self):
my_attrs = {
# "backend": ("s", True, "fpgadataflow"),
# "code_gen_dir": ("s", True, ""),
# "executable_path": ("s", True, ""),
"ImgDim": ("i", True, 0),
"PoolDim": ("i", True, 0),
"NumChannels": ("i", True, 0),
@@ -51,7 +48,7 @@ class StreamingMaxPool_Batch(HLSCustomOp):
# verify that all necessary attributes exist
try:
- self.get_nodeattr("code_gen_dir")
+ self.get_nodeattr("code_gen_dir_npysim")
self.get_nodeattr("executable_path")
self.get_nodeattr("ImgDim")
self.get_nodeattr("PoolDim")
@@ -61,7 +58,7 @@ class StreamingMaxPool_Batch(HLSCustomOp):
info_messages.append(
"""The necessary attributes do not exist.
StreamingMaxPool_Batch needs the following attributes:
- code_gen_dir, executable_path, ImgDim, PoolDim, NumChannels"""
+ code_gen_dir_npysim, executable_path, ImgDim, PoolDim, NumChannels"""
)
# verify the number of inputs
@@ -89,7 +86,7 @@ class StreamingMaxPool_Batch(HLSCustomOp):
def read_npy_data(self):
node = self.onnx_node
- code_gen_dir = self.get_nodeattr("code_gen_dir")
+ code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
# c++ code to read out an npy file
# and put it in hls::stream in the correct order
self.code_gen_dict["$READNPYDATA$"] = []
@@ -188,7 +185,7 @@ class StreamingMaxPool_Batch(HLSCustomOp):
self.code_gen_dict["$DATAOUTSTREAM$"].append("}")
def save_as_npy(self):
- code_gen_dir = self.get_nodeattr("code_gen_dir")
+ code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
numReps = 1
self.code_gen_dict["$SAVEASCNPY$"] = [
"""cnpy::npy_save("{}/output.npy",&output_data_vector[0],
@@ -22,10 +22,10 @@ class CleanUp(Transformation):
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
- code_gen_dir = inst.get_nodeattr("code_gen_dir")
+ code_gen_dir = inst.get_nodeattr("code_gen_dir_npysim")
if os.path.isdir(code_gen_dir):
shutil.rmtree(code_gen_dir)
- inst.set_nodeattr("code_gen_dir", "")
+ inst.set_nodeattr("code_gen_dir_npysim", "")
inst.set_nodeattr("executable_path", "")
except KeyError:
# exception if op_type is not supported
@@ -14,19 +14,19 @@ def _codegen_single_node(node, model):
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
# get the path of the code generation directory
- code_gen_dir = inst.get_nodeattr("code_gen_dir")
+ code_gen_dir = inst.get_nodeattr("code_gen_dir_npysim")
# ensure that there is a directory
if code_gen_dir == "" or not os.path.isdir(code_gen_dir):
- code_gen_dir = tmp.mkdtemp(prefix="code_gen_" + str(node.op_type) + "_")
- inst.set_nodeattr("code_gen_dir", code_gen_dir)
+ code_gen_dir = tmp.mkdtemp(prefix="code_gen_npysim" + str(node.op_type) + "_")
+ inst.set_nodeattr("code_gen_dir_npysim", code_gen_dir)
# ensure that there is generated code inside the dir
- inst.code_generation(model)
+ inst.code_generation_npysim(model)
except KeyError:
# exception if op_type is not supported
raise Exception("Custom op_type %s is currently not supported." % op_type)
- class CodeGen(Transformation):
+ class CodeGen_npysim(Transformation):
"""Code generation for all nodes in model"""
def apply(self, model):
@@ -22,7 +22,7 @@ class Compile(Transformation):
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
# ensure that code is generated
- assert inst.get_nodeattr("code_gen_dir") != ""
+ assert inst.get_nodeattr("code_gen_dir_npysim") != ""
# call the compilation function for this node
inst.compile_singlenode_code()
# ensure that executable path is now set
@@ -6,7 +6,7 @@ import finn.core.utils as util
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.fpgadataflow.cleanup import CleanUp
- from finn.transformation.fpgadataflow.codegen import CodeGen
+ from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
def test_code_gen_trafo():
@@ -50,7 +50,7 @@ def test_code_gen_trafo():
W = util.gen_finn_dt_tensor(wdt, (mw, mh))
model.set_initializer("weights", W)
- model = model.transform(CodeGen())
+ model = model.transform(CodeGen_npysim())
for node in model.graph.node:
code_gen_attribute = util.get_by_name(node.attribute, "code_gen_dir")
tmp_dir = code_gen_attribute.s.decode("UTF-8")
@@ -6,7 +6,7 @@ import finn.core.utils as util
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.fpgadataflow.cleanup import CleanUp
- from finn.transformation.fpgadataflow.codegen import CodeGen
+ from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
from finn.transformation.fpgadataflow.compile import Compile
@@ -51,7 +51,7 @@ def test_compilation_trafo():
W = util.gen_finn_dt_tensor(wdt, (mw, mh))
model.set_initializer("weights", W)
- model = model.transform(CodeGen())
+ model = model.transform(CodeGen_npysim())
model = model.transform(Compile())
for node in model.graph.node:
compilation_attribute = util.get_by_name(node.attribute, "executable_path")
@@ -15,7 +15,7 @@ from finn.core.modelwrapper import ModelWrapper
from finn.custom_op.fpgadataflow.streamingfclayer_batch import StreamingFCLayer_Batch
from finn.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount
from finn.transformation.fold_constants import FoldConstants
- from finn.transformation.fpgadataflow.codegen import CodeGen
+ from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
from finn.transformation.fpgadataflow.compile import Compile
from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames
from finn.transformation.infer_shapes import InferShapes
@@ -82,7 +82,7 @@ def test_convert_to_hls_layers_lfc_w1a1():
fc3w.set_nodeattr("SIMD", 1024)
fc3w.set_nodeattr("PE", 10)
- model = model.transform(CodeGen())
+ model = model.transform(CodeGen_npysim())
model = model.transform(Compile())
raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
@@ -10,7 +10,7 @@ from finn.core.modelwrapper import ModelWrapper
from finn.core.utils import calculate_signed_dot_prod_range, gen_finn_dt_tensor
from finn.custom_op.multithreshold import multithreshold
from finn.transformation.fpgadataflow.cleanup import CleanUp
- from finn.transformation.fpgadataflow.codegen import CodeGen
+ from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
from finn.transformation.fpgadataflow.compile import Compile
@@ -147,7 +147,7 @@ def test_fpgadataflow_fclayer(idt, wdt, act, nf, sf, mw, mh):
else:
tdt = DataType.INT32
model = make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T, tdt)
- model = model.transform(CodeGen())
+ model = model.transform(CodeGen_npysim())
model = model.transform(Compile())
# prepare input data
input_dict = prepare_inputs(x, idt, wdt)
@@ -5,7 +5,7 @@ import finn.core.onnx_exec as oxe
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.fpgadataflow.cleanup import CleanUp
- from finn.transformation.fpgadataflow.codegen import CodeGen
+ from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
from finn.transformation.fpgadataflow.compile import Compile
@@ -112,7 +112,7 @@ def test_layer_streaming_maxpool_batch():
).reshape(2, 2, 4, 4)
print(input_tensor)
- model = model.transform(CodeGen())
+ model = model.transform(CodeGen_npysim())
model = model.transform(Compile())
input_dict = {"in": input_tensor}