From be3a041880431d2ac5ee330dd9b42c79ae22df18 Mon Sep 17 00:00:00 2001
From: Yaman Umuroglu <maltanar@gmail.com>
Date: Tue, 3 Dec 2019 00:26:11 +0000
Subject: [PATCH] [Refactor] use correct attribute accessors for custom op
 codegen

---
 src/finn/custom_op/fpgadataflow/__init__.py   |  9 +--
 .../fpgadataflow/streamingfclayer_batch.py    | 60 ++++++-------------
 .../fpgadataflow/streamingmaxpool_batch.py    | 57 ++++++------------
 .../fpgadataflow/code_gen_transformation.py   | 52 +++-------------
 4 files changed, 46 insertions(+), 132 deletions(-)

diff --git a/src/finn/custom_op/fpgadataflow/__init__.py b/src/finn/custom_op/fpgadataflow/__init__.py
index f9d2c6ba5..ae8d8aa69 100644
--- a/src/finn/custom_op/fpgadataflow/__init__.py
+++ b/src/finn/custom_op/fpgadataflow/__init__.py
@@ -1,7 +1,6 @@
 from abc import abstractmethod
 import os
 from finn.custom_op import CustomOp
-import finn.core.utils as util
 
 
 class HLSCustomOp(CustomOp):
@@ -37,10 +36,6 @@ class HLSCustomOp(CustomOp):
         """
         self.code_gen_dict = {}
 
-        self.tmp_dir = ""
-        self.code_gen_dir = util.get_by_name(onnx_node.attribute, "code_gen_dir")
-        self.executable_path = ""
-
     def get_nodeattr_types(self):
         return {"code_gen_dir": ("s", False, ""), "executable_path": ("s", False, "")}
 
@@ -62,8 +57,8 @@ class HLSCustomOp(CustomOp):
             # transform list into long string separated by '\n'
             code_gen_line = "\n".join(self.code_gen_dict[key])
             template = template.replace(key, code_gen_line)
-
-        f = open(os.path.join(self.tmp_dir, "execute_{}.cpp".format(node.op_type)), "w")
+        code_gen_dir = self.get_nodeattr("code_gen_dir")
+        f = open(os.path.join(code_gen_dir, "execute_{}.cpp".format(node.op_type)), "w")
         f.write(template)
         f.close()
 
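For orientation, a minimal sketch (illustration only; get_str_attr / set_str_attr are hypothetical helper names, not FINN API) of what the get_nodeattr / set_nodeattr accessors used above do for the string attributes declared in get_nodeattr_types: string values live in the .s field of an ONNX AttributeProto on the node.

    # Illustration only: direct attribute access on an ONNX NodeProto, roughly
    # what get_nodeattr("code_gen_dir") / set_nodeattr("code_gen_dir", ...) wrap.
    from onnx import helper

    # hypothetical node; "code_gen_dir" starts out empty, matching the default above
    node = helper.make_node(
        "StreamingMaxPool_Batch", ["in0"], ["out0"],
        domain="finn", backend="fpgadataflow", code_gen_dir="",
    )

    def get_str_attr(node, name, default=""):
        # string attributes are stored as bytes in the .s field
        for a in node.attribute:
            if a.name == name:
                return a.s.decode("utf-8")
        return default

    def set_str_attr(node, name, value):
        # drop any existing attribute of that name, then append a fresh one
        for a in list(node.attribute):
            if a.name == name:
                node.attribute.remove(a)
        node.attribute.append(helper.make_attribute(name, value))

    set_str_attr(node, "code_gen_dir", "/tmp/code_gen_StreamingMaxPool_Batch_xyz")
    print(get_str_attr(node, "code_gen_dir"))
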
diff --git a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
index 3e06a3e51..97831485d 100644
--- a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
@@ -1,6 +1,5 @@
 import os
 import subprocess
-import tempfile as tmp
 
 import numpy as np
 
@@ -151,7 +150,6 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         return ret
 
     def generate_weights(self, model):
-
         weights = model.get_initializer(self.onnx_node.input[1])
         # convert weights into hlslib-compatible format
         weight_tensor = self.get_hls_compatible_weight_tensor(weights)
@@ -164,7 +162,8 @@ class StreamingFCLayer_Batch(HLSCustomOp):
             weight_tensor, export_wdt, "weights", True, True
         )
         # write weights into params.h
-        f_weights = open("{}/params.h".format(self.tmp_dir), "w")
+        code_gen_dir = self.get_nodeattr("code_gen_dir")
+        f_weights = open("{}/params.h".format(code_gen_dir), "w")
 
         if export_wdt.bitwidth() != 1:
             f_weights.write(
@@ -200,7 +199,8 @@ class StreamingFCLayer_Batch(HLSCustomOp):
                 threshold_tensor, tdt, "thresholds", False, True
             )
             # write thresholds into thresh.h
-            f_thresh = open("{}/thresh.h".format(self.tmp_dir), "w")
+            code_gen_dir = self.get_nodeattr("code_gen_dir")
+            f_thresh = open("{}/thresh.h".format(code_gen_dir), "w")
             tdt_hls = tdt.get_hls_datatype_str()
             odt_hls = self.get_output_datatype().get_hls_datatype_str()
             f_thresh.write(
@@ -218,13 +218,8 @@ class StreamingFCLayer_Batch(HLSCustomOp):
 
     def execute_node(self, context, graph):
         node = self.onnx_node
-        # make temporary directory for generated files
-        self.tmp_dir = tmp.mkdtemp(prefix=str(node.op_type) + "_")
-
-        # create empty list for temporary files to enable the option
-        # to delete the files after the execution
-        temp_files = []
-
+        # TODO ensure codegen dir exists
+        code_gen_dir = self.get_nodeattr("code_gen_dir")
         # create an npy file for each input of the node (in_ind is input index)
         in_ind = 0
         for inputs in node.input:
@@ -239,48 +234,25 @@ class StreamingFCLayer_Batch(HLSCustomOp):
                 if self.get_input_datatype() == DataType.BIPOLAR:
                     # store bipolar activations as binary
                     np.save(
-                        os.path.join(self.tmp_dir, "input_{}.npy".format(in_ind)),
+                        os.path.join(code_gen_dir, "input_{}.npy".format(in_ind)),
                         (context[inputs] + 1) / 2,
                     )
                 else:
                     np.save(
-                        os.path.join(self.tmp_dir, "input_{}.npy".format(in_ind)),
+                        os.path.join(code_gen_dir, "input_{}.npy".format(in_ind)),
                         context[inputs],
                     )
-                temp_files.append("{}/input_{}.npy".format(self.tmp_dir, in_ind))
             elif in_ind > 2:
                 raise Exception("Unexpected input found for StreamingFCLayer")
-
             in_ind += 1
-
-        temp_files.append("{}/params.h".format(self.tmp_dir))
-        temp_files.append("{}/thresh.h".format(self.tmp_dir))
-
-        # code generation
-        self.code_generation(context)
-
-        # c++ compilation and execution flow
-        temp_files.append("{}/execute_{}.cpp".format(self.tmp_dir, node.op_type))
-        bash_compile = """g++ -o {}/execute_{} {}/execute_{}.cpp
-        /workspace/cnpy/cnpy.cpp -I/workspace/finn/src/finn/data/cpp -I/workspace/cnpy/
-        -I/workspace/finn-hlslib -I/workspace/vivado-hlslib
-        --std=c++11 -lz""".format(
-            self.tmp_dir, node.op_type, self.tmp_dir, node.op_type
-        )
-        process_compile = subprocess.Popen(bash_compile.split(), stdout=subprocess.PIPE)
-        process_compile.communicate()
-        bash_execute = "{}/execute_{}".format(self.tmp_dir, node.op_type)
-        process_execute = subprocess.Popen(bash_execute.split(), stdout=subprocess.PIPE)
+        # run the precompiled executable
+        executable_path = self.get_nodeattr("executable_path")
+        # TODO sanity check executable
+        process_execute = subprocess.Popen(executable_path, stdout=subprocess.PIPE)
         process_execute.communicate()
-        temp_files.append("{}/execute_{}".format(self.tmp_dir, node.op_type))
-        temp_files.append("{}/output.npy".format(self.tmp_dir))
-
         # load output npy file
-        output = np.load("{}/output.npy".format(self.tmp_dir))
+        output = np.load("{}/output.npy".format(code_gen_dir))
         context[node.output[0]] = output
-        # deleting temporary files
-        # for temp_file in temp_files:
-        #    os.remove(temp_file)
 
     def global_includes(self):
         self.code_gen_dict["$GLOBALS$"] = ['#include "weights.hpp"']
@@ -309,13 +281,14 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         ]
 
     def read_npy_data(self):
+        code_gen_dir = self.get_nodeattr("code_gen_dir")
         dtype = self.get_input_datatype()
         elem_bits = dtype.bitwidth()
         packed_bits = self.get_instream_width()
         packed_hls_type = "ap_uint<%d>" % packed_bits
         elem_hls_type = dtype.get_hls_datatype_str()
         npy_type = "float"
-        npy_in = "%s/input_0.npy" % self.tmp_dir
+        npy_in = "%s/input_0.npy" % code_gen_dir
         self.code_gen_dict["$READNPYDATA$"] = []
         self.code_gen_dict["$READNPYDATA$"].append(
             'npy2apintstream<%s, %s, %d, %s>("%s", in0);'
@@ -352,13 +325,14 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         ]
 
     def dataoutstrm(self):
+        code_gen_dir = self.get_nodeattr("code_gen_dir")
         dtype = self.get_output_datatype()
         elem_bits = dtype.bitwidth()
         packed_bits = self.get_outstream_width()
         packed_hls_type = "ap_uint<%d>" % packed_bits
         elem_hls_type = dtype.get_hls_datatype_str()
         npy_type = "float"
-        npy_out = "%s/output.npy" % self.tmp_dir
+        npy_out = "%s/output.npy" % code_gen_dir
         nf = int(self.get_nodeattr("MH") / self.get_nodeattr("PE"))
         shape = (1, nf, self.get_nodeattr("PE"))
         shape_cpp_str = str(shape).replace("(", "{").replace(")", "}")
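
For reference, a minimal sketch of the execution contract the hunks above move to: inputs are saved into the node's code_gen_dir, the precompiled binary recorded in executable_path is run, and it is expected to write output.npy back into the same directory. run_precompiled_node is a hypothetical helper for illustration, not part of this patch.

    import os
    import subprocess

    import numpy as np

    def run_precompiled_node(code_gen_dir, executable_path, input_arrays):
        # write each input where the generated execute_<op_type>.cpp expects it
        for i, arr in enumerate(input_arrays):
            np.save(os.path.join(code_gen_dir, "input_%d.npy" % i), arr)
        # run the already-compiled simulation executable; by convention it
        # writes output.npy back into code_gen_dir
        subprocess.run([executable_path], check=True, stdout=subprocess.PIPE)
        return np.load(os.path.join(code_gen_dir, "output.npy"))
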
diff --git a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
index e21ffeae1..56d43cac7 100644
--- a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
@@ -1,6 +1,5 @@
 import os
 import subprocess
-import tempfile as tmp
 
 import numpy as np
 
@@ -25,48 +24,26 @@ class StreamingMaxPool_Batch(HLSCustomOp):
 
     def execute_node(self, context, graph):
         node = self.onnx_node
-        # make temporary directory for generated files
-        self.tmp_dir = tmp.mkdtemp(prefix=str(node.op_type) + "_")
-
-        # create empty list for temporary files to enable the option
-        # to delete the files after the execution
-        temp_files = []
-
+        code_gen_dir = self.get_nodeattr("code_gen_dir")
         # create an npy file for each input of the node (in_ind is input index)
         in_ind = 0
         for inputs in node.input:
-            np.save(
-                os.path.join(self.tmp_dir, "input_{}.npy".format(in_ind)),
-                context[inputs],
-            )
-            temp_files.append("{}/input_{}.npy".format(self.tmp_dir, in_ind))
+            if in_ind == 0:
+                np.save(
+                    os.path.join(code_gen_dir, "input_{}.npy".format(in_ind)),
+                    context[inputs],
+                )
+            else:
+                raise Exception("Unexpected input found for StreamingMaxPool_Batch")
             in_ind += 1
-
-        # code generation
-        self.code_generation()
-
-        # c++ compilation and execution flow
-        temp_files.append("{}/execute_{}.cpp".format(self.tmp_dir, node.op_type))
-        bash_compile = """g++ -o {}/execute_{} {}/execute_{}.cpp
-        /workspace/cnpy/cnpy.cpp -I/workspace/cnpy/
-        -I/workspace/finn-hlslib -I/workspace/vivado-hlslib
-        --std=c++11 -lz""".format(
-            self.tmp_dir, node.op_type, self.tmp_dir, node.op_type
-        )
-        process_compile = subprocess.Popen(bash_compile.split(), stdout=subprocess.PIPE)
-        process_compile.communicate()
-        bash_execute = "{}/execute_{}".format(self.tmp_dir, node.op_type)
-        process_execute = subprocess.Popen(bash_execute.split(), stdout=subprocess.PIPE)
+        # run the precompiled executable
+        executable_path = self.get_nodeattr("executable_path")
+        # TODO sanity check executable
+        process_execute = subprocess.Popen(executable_path, stdout=subprocess.PIPE)
         process_execute.communicate()
-        temp_files.append("{}/execute_{}".format(self.tmp_dir, node.op_type))
-        temp_files.append("{}/output.npy".format(self.tmp_dir))
-
         # load output npy file
-        output = np.load("{}/output.npy".format(self.tmp_dir))
+        output = np.load("{}/output.npy".format(code_gen_dir))
         context[node.output[0]] = output
-        # deleting temporary files
-        # for temp_file in temp_files:
-        #    os.remove(temp_file)
 
     def global_includes(self):
         self.code_gen_dict["$GLOBALS$"] = ['#include "maxpool.h"']
@@ -85,13 +62,14 @@ class StreamingMaxPool_Batch(HLSCustomOp):
 
     def read_npy_data(self):
         node = self.onnx_node
+        code_gen_dir = self.get_nodeattr("code_gen_dir")
         # c++ code to read out an npy file
         # and put it in hls::stream in the correct order
         self.code_gen_dict["$READNPYDATA$"] = []
         input_ind = 0
         input_file_names = []
         for inputs in node.input:
-            input_file_names.append("{}/input_{}.npy".format(self.tmp_dir, input_ind))
+            input_file_names.append("{}/input_{}.npy".format(code_gen_dir, input_ind))
             input_ind += 1
 
         input_ind = 0
@@ -183,11 +161,12 @@ class StreamingMaxPool_Batch(HLSCustomOp):
         self.code_gen_dict["$DATAOUTSTREAM$"].append("}")
 
     def save_as_npy(self):
-        numReps = 2
+        code_gen_dir = self.get_nodeattr("code_gen_dir")
+        numReps = 1
         self.code_gen_dict["$SAVEASCNPY$"] = [
             """cnpy::npy_save("{}/output.npy",&output_data_vector[0],
             {{{},{},{}}},"w");""".format(
-                self.tmp_dir,
+                code_gen_dir,
                 numReps,
                 self.get_nodeattr("NumChannels"),
                 int(self.get_nodeattr("ImgDim") / self.get_nodeattr("PoolDim")),
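
The cnpy::npy_save call generated above writes output.npy with shape {numReps, NumChannels, ImgDim/PoolDim}. A small sketch (hypothetical check, assuming numReps = 1 as set above) of validating that shape after loading the result on the Python side:

    import numpy as np

    def check_maxpool_output_shape(output, num_channels, img_dim, pool_dim, num_reps=1):
        # shape written by the generated cnpy::npy_save call
        expected = (num_reps, num_channels, img_dim // pool_dim)
        assert output.shape == expected, "got %s, expected %s" % (
            output.shape,
            expected,
        )

    # usage sketch, with made-up parameter values:
    # out = np.load("%s/output.npy" % code_gen_dir)
    # check_maxpool_output_shape(out, num_channels=2, img_dim=4, pool_dim=2)
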
diff --git a/src/finn/transformation/fpgadataflow/code_gen_transformation.py b/src/finn/transformation/fpgadataflow/code_gen_transformation.py
index 313112690..2aec5a2ea 100644
--- a/src/finn/transformation/fpgadataflow/code_gen_transformation.py
+++ b/src/finn/transformation/fpgadataflow/code_gen_transformation.py
@@ -6,55 +6,21 @@ from finn.core.utils import get_by_name
 from finn.transformation import Transformation
 
 
-def code_gen_transformation(node, model):
+def _codegen_single_node(node, model):
     """Call custom implementation to generate code for single custom node
     and create folder that contains all the generated files"""
     op_type = node.op_type
     try:
         # lookup op_type in registry of CustomOps
         inst = registry.custom_op[op_type](node)
-
         # get the path of the code generation directory
-        code_gen_dir = inst.code_gen_dir
-        code_gen_dir = code_gen_dir.s.decode("UTF-8")
-
-        # parameter is empty
-        if not code_gen_dir:
-            tmp_dir = tmp.mkdtemp(prefix="code_gen_" + str(node.op_type) + "_")
-            inst.tmp_dir = tmp_dir
-            inst.code_generation(model)
-            # check if directory exists
-            if os.path.isdir(tmp_dir):
-                if len(os.listdir(tmp_dir)) == 0:
-                    raise Exception("Code was not generated!")
-                else:
-                    inst.code_gen_dir = tmp_dir
-                    model.set_attribute(node, "code_gen_dir", tmp_dir)
-            else:
-                raise Exception("Code was not generated!")
-
-        # there is already a code gen directory
-        else:
-            # check directory for files
-            if os.path.isdir(code_gen_dir):
-                if len(os.listdir(code_gen_dir)) == 0:
-                    os.rmdir(code_gen_dir)
-                    tmp_dir = tmp.mkdtemp(prefix="code_gen_" + str(node.op_type) + "_")
-                    inst.tmp_dir = tmp_dir
-                    inst.code_generation(model)
-                    if os.path.isdir(tmp_dir):
-                        if len(os.listdir(tmp_dir)) == 0:
-                            raise Exception("Code was not generated!")
-                        else:
-                            inst.code_gen_dir = tmp_dir
-                            model.set_attribute(node, "code_gen_dir", tmp_dir)
-                    else:
-                        raise Exception("Code was not generated!")
-                # else: attribute is correctly set
-            else:
-                inst.code_gen_dir = tmp_dir
-                model.set_attribute(node, "code_gen_dir", tmp_dir)
-
+        code_gen_dir = inst.get_nodeattr("code_gen_dir")
+        # ensure that there is a directory
+        if code_gen_dir == "" or not os.path.isdir(code_gen_dir):
+            code_gen_dir = tmp.mkdtemp(prefix="code_gen_" + str(node.op_type) + "_")
+            inst.set_nodeattr("code_gen_dir", code_gen_dir)
+        # ensure that there is generated code inside the dir
+        inst.code_generation(model)
     except KeyError:
         # exception if op_type is not supported
         raise Exception("Custom op_type %s is currently not supported." % op_type)
@@ -69,5 +35,5 @@ class CodeGen(Transformation):
                 backend_attribute = get_by_name(node.attribute, "backend")
                 backend_value = backend_attribute.s.decode("UTF-8")
                 if backend_value == "fpgadataflow":
-                    code_gen_transformation(node, model)
+                    _codegen_single_node(node, model)
         return (model, False)
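
Finally, a usage sketch of the refactored transformation, assuming the FINN Transformation contract visible above (apply(model) returns (model, graph_modified)); the model file name is hypothetical:

    import finn.custom_op.registry as registry
    from finn.core.modelwrapper import ModelWrapper
    from finn.transformation.fpgadataflow.code_gen_transformation import CodeGen

    model = ModelWrapper("dataflow_model.onnx")  # hypothetical input model
    model, _ = CodeGen().apply(model)  # fills "code_gen_dir" on fpgadataflow nodes

    for node in model.graph.node:
        if node.op_type in ["StreamingFCLayer_Batch", "StreamingMaxPool_Batch"]:
            inst = registry.custom_op[node.op_type](node)
            # each node should now point at a directory containing the
            # generated execute_<op_type>.cpp
            print(node.op_type, inst.get_nodeattr("code_gen_dir"))
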
-- 
GitLab