diff --git a/src/finn/custom_op/fpgadataflow/__init__.py b/src/finn/custom_op/fpgadataflow/__init__.py
index f275263a4ed094516250b79adaac4ce87d896384..c0d5d6cc1984ab3d73bd5cb9c3bcfea5aab4773e 100644
--- a/src/finn/custom_op/fpgadataflow/__init__.py
+++ b/src/finn/custom_op/fpgadataflow/__init__.py
@@ -42,11 +42,11 @@ class HLSCustomOp(CustomOp):
     def get_nodeattr_types(self):
         return {
             "backend": ("s", True, "fpgadataflow"),
-            "code_gen_dir": ("s", False, ""),
+            "code_gen_dir_npysim": ("s", False, ""),
             "executable_path": ("s", False, ""),
         }
 
-    def code_generation(self, model):
+    def code_generation_npysim(self, model):
         node = self.onnx_node
         self.generate_params(model)
         self.global_includes()
@@ -63,13 +63,13 @@ class HLSCustomOp(CustomOp):
             # transform list into long string separated by '\n'
             code_gen_line = "\n".join(self.code_gen_dict[key])
             template = template.replace(key, code_gen_line)
-        code_gen_dir = self.get_nodeattr("code_gen_dir")
+        code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
         f = open(os.path.join(code_gen_dir, "execute_{}.cpp".format(node.op_type)), "w")
         f.write(template)
         f.close()
 
     def compile_singlenode_code(self):
-        code_gen_dir = self.get_nodeattr("code_gen_dir")
+        code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
         builder = CppBuilder()
         # to enable additional debug features please uncommand the next line
         # builder.append_includes("-DDEBUG")
@@ -87,7 +87,7 @@ class HLSCustomOp(CustomOp):
 
     def dynamic_input_to_npy(self, context, count):
         node = self.onnx_node
-        code_gen_dir = self.get_nodeattr("code_gen_dir")
+        code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
         if code_gen_dir == "":
             raise Exception(
                 """
@@ -106,7 +106,7 @@ Found no codegen dir for this node, did you run the codegen transformation?
 
     def npy_to_dynamic_output(self, context):
         # TODO support multi-output nodes as needed
         node = self.onnx_node
-        code_gen_dir = self.get_nodeattr("code_gen_dir")
+        code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
         output = np.load("{}/output.npy".format(code_gen_dir))
         context[node.output[0]] = output
diff --git a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
index 975da666f0ba728f542b06865aaa2c66c5f07c07..081020fffa3e8356124465272287399aca753f47 100644
--- a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
@@ -14,9 +14,6 @@ class StreamingFCLayer_Batch(HLSCustomOp):
 
     def get_nodeattr_types(self):
         my_attrs = {
-            # "backend": ("s", True, "fpgadataflow"),
-            # "code_gen_dir": ("s", True, ""),
-            # "executable_path": ("s", True, ""),
             "PE": ("i", True, 0),
             "SIMD": ("i", True, 0),
             "MW": ("i", True, 0),
@@ -91,7 +88,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
 
         # verify that all necessary attributes exist
         try:
-            self.get_nodeattr("code_gen_dir")
+            self.get_nodeattr("code_gen_dir_npysim")
             self.get_nodeattr("executable_path")
             self.get_nodeattr("resType")
             self.get_nodeattr("MW")
@@ -109,7 +106,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
             info_messages.append(
                 """The necessary attributes do not exist.
                 StreamingFCLayer_Batch needs the following attributes:
-                code_gen_dir, executable_path, resType, MW, MH, SIMD, PE,
+                code_gen_dir_npysim, executable_path, resType, MW, MH, SIMD, PE,
                 inputDataType, weightDataType, outputDataType, ActVal,
                 binaryXnorMode, noActivation"""
             )
@@ -283,7 +280,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
             weight_tensor, export_wdt, "weights", True, True
         )
         # write weights into params.h
-        code_gen_dir = self.get_nodeattr("code_gen_dir")
+        code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
         f_weights = open("{}/params.h".format(code_gen_dir), "w")
 
         if export_wdt.bitwidth() != 1:
@@ -324,7 +321,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
                 threshold_tensor, tdt, "thresholds", False, True
             )
             # write thresholds into thresh.h
-            code_gen_dir = self.get_nodeattr("code_gen_dir")
+            code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
             f_thresh = open("{}/thresh.h".format(code_gen_dir), "w")
             tdt_hls = tdt.get_hls_datatype_str()
             # use binary to export bipolar activations
@@ -357,7 +354,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         nf = mh // pe
 
         # TODO ensure codegen dir exists
-        code_gen_dir = self.get_nodeattr("code_gen_dir")
+        code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
         # create a npy file fore each input of the node (in_ind is input index)
         in_ind = 0
         for inputs in node.input:
@@ -419,7 +416,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         ]
 
     def read_npy_data(self):
-        code_gen_dir = self.get_nodeattr("code_gen_dir")
+        code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
         dtype = self.get_input_datatype()
         if dtype == DataType.BIPOLAR:
             # use binary for bipolar storage
@@ -466,7 +463,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         ]
 
     def dataoutstrm(self):
-        code_gen_dir = self.get_nodeattr("code_gen_dir")
+        code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
         dtype = self.get_output_datatype()
         if dtype == DataType.BIPOLAR:
             # use binary for bipolar storage
diff --git a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
index 92f499b6771efe4455e7259a8eb62ab9c636cb1f..d331ffe04eb2ea4c445ee4fe1148eed262b23aa7 100644
--- a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
@@ -4,9 +4,6 @@ from finn.custom_op.fpgadataflow import HLSCustomOp
 class StreamingMaxPool_Batch(HLSCustomOp):
     def get_nodeattr_types(self):
         my_attrs = {
-            # "backend": ("s", True, "fpgadataflow"),
-            # "code_gen_dir": ("s", True, ""),
-            # "executable_path": ("s", True, ""),
             "ImgDim": ("i", True, 0),
             "PoolDim": ("i", True, 0),
             "NumChannels": ("i", True, 0),
@@ -51,7 +48,7 @@ class StreamingMaxPool_Batch(HLSCustomOp):
 
         # verify that all necessary attributes exist
        try:
-            self.get_nodeattr("code_gen_dir")
+            self.get_nodeattr("code_gen_dir_npysim")
             self.get_nodeattr("executable_path")
             self.get_nodeattr("ImgDim")
             self.get_nodeattr("PoolDim")
@@ -61,7 +58,7 @@ class StreamingMaxPool_Batch(HLSCustomOp):
             info_messages.append(
                 """The necessary attributes do not exist.
                 StreamingMaxPool_Batch needs the following attributes:
-                code_gen_dir, executable_path, ImgDim, PoolDim, NumChannels"""
+                code_gen_dir_npysim, executable_path, ImgDim, PoolDim, NumChannels"""
             )
 
         # verify the number of inputs
@@ -89,7 +86,7 @@ class StreamingMaxPool_Batch(HLSCustomOp):
 
     def read_npy_data(self):
         node = self.onnx_node
-        code_gen_dir = self.get_nodeattr("code_gen_dir")
+        code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
         # c++ code to read out an npy file
         # and put it in hls::stream in the correct order
         self.code_gen_dict["$READNPYDATA$"] = []
@@ -188,7 +185,7 @@ class StreamingMaxPool_Batch(HLSCustomOp):
         self.code_gen_dict["$DATAOUTSTREAM$"].append("}")
 
     def save_as_npy(self):
-        code_gen_dir = self.get_nodeattr("code_gen_dir")
+        code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
         numReps = 1
         self.code_gen_dict["$SAVEASCNPY$"] = [
             """cnpy::npy_save("{}/output.npy",&output_data_vector[0],
diff --git a/src/finn/transformation/fpgadataflow/cleanup.py b/src/finn/transformation/fpgadataflow/cleanup.py
index 1632d3443a3bf79e55a4b877ae182964ff7caaed..d0090f3acd6b4a2cfc47c32ab06bd6885e0feb88 100644
--- a/src/finn/transformation/fpgadataflow/cleanup.py
+++ b/src/finn/transformation/fpgadataflow/cleanup.py
@@ -22,10 +22,10 @@ class CleanUp(Transformation):
                 try:
                     # lookup op_type in registry of CustomOps
                     inst = registry.custom_op[op_type](node)
-                    code_gen_dir = inst.get_nodeattr("code_gen_dir")
+                    code_gen_dir = inst.get_nodeattr("code_gen_dir_npysim")
                     if os.path.isdir(code_gen_dir):
                         shutil.rmtree(code_gen_dir)
-                    inst.set_nodeattr("code_gen_dir", "")
+                    inst.set_nodeattr("code_gen_dir_npysim", "")
                     inst.set_nodeattr("executable_path", "")
                 except KeyError:
                     # exception if op_type is not supported
diff --git a/src/finn/transformation/fpgadataflow/codegen.py b/src/finn/transformation/fpgadataflow/codegen_npysim.py
similarity index 81%
rename from src/finn/transformation/fpgadataflow/codegen.py
rename to src/finn/transformation/fpgadataflow/codegen_npysim.py
index 84078d90a573faf4d014c5e280e22e41061f0aff..45a21b8b188ff05b81250ddbf88405b365ef311b 100644
--- a/src/finn/transformation/fpgadataflow/codegen.py
+++ b/src/finn/transformation/fpgadataflow/codegen_npysim.py
@@ -14,19 +14,19 @@ def _codegen_single_node(node, model):
         # lookup op_type in registry of CustomOps
         inst = registry.custom_op[op_type](node)
         # get the path of the code generation directory
-        code_gen_dir = inst.get_nodeattr("code_gen_dir")
+        code_gen_dir = inst.get_nodeattr("code_gen_dir_npysim")
         # ensure that there is a directory
         if code_gen_dir == "" or not os.path.isdir(code_gen_dir):
-            code_gen_dir = tmp.mkdtemp(prefix="code_gen_" + str(node.op_type) + "_")
-            inst.set_nodeattr("code_gen_dir", code_gen_dir)
+            code_gen_dir = tmp.mkdtemp(prefix="code_gen_npysim" + str(node.op_type) + "_")
+            inst.set_nodeattr("code_gen_dir_npysim", code_gen_dir)
         # ensure that there is generated code inside the dir
-        inst.code_generation(model)
+        inst.code_generation_npysim(model)
     except KeyError:
         # exception if op_type is not supported
         raise Exception("Custom op_type %s is currently not supported." % op_type)
 
 
-class CodeGen(Transformation):
+class CodeGen_npysim(Transformation):
     """Code generation for all nodes in model"""
 
     def apply(self, model):
diff --git a/src/finn/transformation/fpgadataflow/compile.py b/src/finn/transformation/fpgadataflow/compile.py
index 37df1c61dfc101111b1ab8623dcee9a5f1697489..c4f6a1a094f0963845440280add791f2a349ba9d 100644
--- a/src/finn/transformation/fpgadataflow/compile.py
+++ b/src/finn/transformation/fpgadataflow/compile.py
@@ -22,7 +22,7 @@ class Compile(Transformation):
                     # lookup op_type in registry of CustomOps
                     inst = registry.custom_op[op_type](node)
                     # ensure that code is generated
-                    assert inst.get_nodeattr("code_gen_dir") != ""
+                    assert inst.get_nodeattr("code_gen_dir_npysim") != ""
                     # call the compilation function for this node
                     inst.compile_singlenode_code()
                     # ensure that executable path is now set
diff --git a/tests/fpgadataflow/test_code_gen_trafo.py b/tests/fpgadataflow/test_code_gen_trafo.py
index 533710605e2bc514ba0fb0c8784c378d07451951..308f2c3b278fd9ef56d413ce94775a35e8240101 100644
--- a/tests/fpgadataflow/test_code_gen_trafo.py
+++ b/tests/fpgadataflow/test_code_gen_trafo.py
@@ -6,7 +6,7 @@ import finn.core.utils as util
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
 from finn.transformation.fpgadataflow.cleanup import CleanUp
-from finn.transformation.fpgadataflow.codegen import CodeGen
+from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
 
 
 def test_code_gen_trafo():
@@ -50,7 +50,7 @@ def test_code_gen_trafo():
     W = util.gen_finn_dt_tensor(wdt, (mw, mh))
     model.set_initializer("weights", W)
 
-    model = model.transform(CodeGen())
+    model = model.transform(CodeGen_npysim())
     for node in model.graph.node:
         code_gen_attribute = util.get_by_name(node.attribute, "code_gen_dir")
         tmp_dir = code_gen_attribute.s.decode("UTF-8")
diff --git a/tests/fpgadataflow/test_compilation_trafo.py b/tests/fpgadataflow/test_compilation_trafo.py
index f84ce34b54b3496f7e277e55ac574124e09c25d3..d6e5d3f111d4c0595305fc54653b925bdefc2157 100644
--- a/tests/fpgadataflow/test_compilation_trafo.py
+++ b/tests/fpgadataflow/test_compilation_trafo.py
@@ -6,7 +6,7 @@ import finn.core.utils as util
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
 from finn.transformation.fpgadataflow.cleanup import CleanUp
-from finn.transformation.fpgadataflow.codegen import CodeGen
+from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
 from finn.transformation.fpgadataflow.compile import Compile
 
 
@@ -51,7 +51,7 @@ def test_compilation_trafo():
     W = util.gen_finn_dt_tensor(wdt, (mw, mh))
     model.set_initializer("weights", W)
 
-    model = model.transform(CodeGen())
+    model = model.transform(CodeGen_npysim())
     model = model.transform(Compile())
     for node in model.graph.node:
         compilation_attribute = util.get_by_name(node.attribute, "executable_path")
diff --git a/tests/fpgadataflow/test_convert_to_hls_layers.py b/tests/fpgadataflow/test_convert_to_hls_layers.py
index 32792e1364229199286a7012105442f3bbfb05df..21dda4481ae7f6cfbb46b422045eb37e4e3db3a3 100644
--- a/tests/fpgadataflow/test_convert_to_hls_layers.py
+++ b/tests/fpgadataflow/test_convert_to_hls_layers.py
@@ -15,7 +15,7 @@ from finn.core.modelwrapper import ModelWrapper
 from finn.custom_op.fpgadataflow.streamingfclayer_batch import StreamingFCLayer_Batch
 from finn.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount
 from finn.transformation.fold_constants import FoldConstants
-from finn.transformation.fpgadataflow.codegen import CodeGen
+from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
 from finn.transformation.fpgadataflow.compile import Compile
 from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames
 from finn.transformation.infer_shapes import InferShapes
@@ -82,7 +82,7 @@ def test_convert_to_hls_layers_lfc_w1a1():
     fc3w.set_nodeattr("SIMD", 1024)
     fc3w.set_nodeattr("PE", 10)
 
-    model = model.transform(CodeGen())
+    model = model.transform(CodeGen_npysim())
     model = model.transform(Compile())
 
     raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
diff --git a/tests/fpgadataflow/test_fpgadataflow_fclayer.py b/tests/fpgadataflow/test_fpgadataflow_fclayer.py
index 0df66c4af2cfbadead8e95322c433cf69d4d2715..00ba44e6fd102e6663ec2a9308534d17fb6f778a 100644
--- a/tests/fpgadataflow/test_fpgadataflow_fclayer.py
+++ b/tests/fpgadataflow/test_fpgadataflow_fclayer.py
@@ -10,7 +10,7 @@ from finn.core.modelwrapper import ModelWrapper
 from finn.core.utils import calculate_signed_dot_prod_range, gen_finn_dt_tensor
 from finn.custom_op.multithreshold import multithreshold
 from finn.transformation.fpgadataflow.cleanup import CleanUp
-from finn.transformation.fpgadataflow.codegen import CodeGen
+from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
 from finn.transformation.fpgadataflow.compile import Compile
 
 
@@ -147,7 +147,7 @@ def test_fpgadataflow_fclayer(idt, wdt, act, nf, sf, mw, mh):
     else:
         tdt = DataType.INT32
     model = make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T, tdt)
-    model = model.transform(CodeGen())
+    model = model.transform(CodeGen_npysim())
     model = model.transform(Compile())
     # prepare input data
     input_dict = prepare_inputs(x, idt, wdt)
diff --git a/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py b/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py
index 4a7ca1b5c0473c520f0e2ea775f7c8950eb16695..32b1c60fc714794e39fe1ade2d0252895bb33025 100644
--- a/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py
+++ b/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py
@@ -5,7 +5,7 @@ import finn.core.onnx_exec as oxe
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
 from finn.transformation.fpgadataflow.cleanup import CleanUp
-from finn.transformation.fpgadataflow.codegen import CodeGen
+from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
 from finn.transformation.fpgadataflow.compile import Compile
 
 
@@ -112,7 +112,7 @@ def test_layer_streaming_maxpool_batch():
     ).reshape(2, 2, 4, 4)
     print(input_tensor)
 
-    model = model.transform(CodeGen())
+    model = model.transform(CodeGen_npysim())
     model = model.transform(Compile())
 
     input_dict = {"in": input_tensor}
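
Usage note (not part of the patch itself): after this rename, the npysim code generation pass is invoked as CodeGen_npysim instead of CodeGen, while Compile and CleanUp keep their names. The minimal sketch below mirrors the flow exercised by the updated tests; the helper name run_npysim and its signature are hypothetical, and it assumes `model` is a ModelWrapper containing fpgadataflow nodes and `input_dict` maps the graph inputs to numpy arrays, as the tests above construct them.

# Hypothetical helper, not part of this diff: exercises the renamed npysim
# flow the same way the updated tests do.
import finn.core.onnx_exec as oxe
from finn.transformation.fpgadataflow.cleanup import CleanUp
from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim
from finn.transformation.fpgadataflow.compile import Compile


def run_npysim(model, input_dict):
    """Generate npysim C++ code, compile it, execute the model, then clean up."""
    # emits per-node C++ sources into a fresh code_gen_dir_npysim directory
    model = model.transform(CodeGen_npysim())
    # compiles the generated code and fills in each node's executable_path
    model = model.transform(Compile())
    # runs the compiled node executables through the ONNX execution wrapper
    output_dict = oxe.execute_onnx(model, input_dict)
    # deletes the generated directories and resets both node attributes
    model = model.transform(CleanUp())
    return output_dict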