Skip to content
Snippets Groups Projects
Commit 8255062d authored by auphelia's avatar auphelia
Browse files

[Code gen] Detach execute node function from __init__.py to every node class...

[Code gen] Detach execute node function from __init__.py to every node class to make different treating of thresholds and weights possible
parent 2d27c9b7
No related branches found
No related tags found
No related merge requests found
......@@ -37,33 +37,6 @@ class HLSCustomOp(CustomOp):
"""
self.code_gen_dict = {}
def execute_node(self, node, context, graph):
    """Execute this node via generated C++: save inputs as .npy files,
    generate and compile execute_<op_type>.cpp with g++, run the binary,
    and load its output.npy result into the execution context.

    Args:
        node: the ONNX node to execute.
        context: dict mapping tensor names to numpy arrays; inputs are
            read from it and node.output[0] is written back.
        graph: the enclosing ONNX graph (unused here, kept for the
            CustomOp execute_node interface).

    Raises:
        RuntimeError: if compilation or execution of the generated
            binary returns a non-zero exit code.
    """
    temp_files = []
    try:
        # save each input tensor so the generated C++ can load it via cnpy
        for in_ind, inputs in enumerate(node.input):
            input_file = "input_{}.npy".format(in_ind)
            np.save(input_file, context[inputs])
            temp_files.append(input_file)
        # emit execute_<op_type>.cpp for this node
        self.code_generation(node)
        temp_files.append("execute_{}.cpp".format(node.op_type))
        bash_compile = """g++ -o execute_{} execute_{}.cpp
        /workspace/cnpy/cnpy.cpp -I/workspace/cnpy/
        -I/workspace/finn-hlslib -I/workspace/vivado-hlslib
        --std=c++11 -lz""".format(
            node.op_type, node.op_type
        )
        process_compile = subprocess.Popen(bash_compile.split(), stdout=subprocess.PIPE)
        process_compile.communicate()
        # fail loudly here instead of later on a missing binary/output file
        if process_compile.returncode != 0:
            raise RuntimeError(
                "Compilation failed for node {}".format(node.op_type)
            )
        # register the binary before running it so it is cleaned up even
        # if execution fails
        temp_files.append("execute_{}".format(node.op_type))
        bash_execute = "./execute_{}".format(node.op_type)
        process_execute = subprocess.Popen(bash_execute.split(), stdout=subprocess.PIPE)
        process_execute.communicate()
        if process_execute.returncode != 0:
            raise RuntimeError(
                "Execution failed for node {}".format(node.op_type)
            )
        temp_files.append("output.npy")
        context[node.output[0]] = np.load("output.npy")
    finally:
        # best-effort cleanup; some files may not exist if an earlier
        # step failed, so guard each removal
        for temp_file in temp_files:
            if os.path.exists(temp_file):
                os.remove(temp_file)
@abstractmethod
def get_attributes(self, node):
......
......@@ -9,6 +9,35 @@ class StreamingFCLayer_Batch(HLSCustomOp):
def infer_node_datatype(self, node, model):
pass
def execute_node(self, node, context, graph):
    """Execute this node via generated C++: save inputs as .npy files,
    generate and compile execute_<op_type>.cpp with g++, run the binary,
    and load its output.npy result into the execution context.

    Args:
        node: the ONNX node to execute.
        context: dict mapping tensor names to numpy arrays; inputs are
            read from it and node.output[0] is written back.
        graph: the enclosing ONNX graph (unused here, kept for the
            CustomOp execute_node interface).

    Raises:
        RuntimeError: if compilation or execution of the generated
            binary returns a non-zero exit code.
    """
    temp_files = []
    try:
        # save each input tensor so the generated C++ can load it via cnpy
        for in_ind, inputs in enumerate(node.input):
            input_file = "input_{}.npy".format(in_ind)
            np.save(input_file, context[inputs])
            temp_files.append(input_file)
        # emit execute_<op_type>.cpp for this node
        self.code_generation(node)
        temp_files.append("execute_{}.cpp".format(node.op_type))
        bash_compile = """g++ -o execute_{} execute_{}.cpp
        /workspace/cnpy/cnpy.cpp -I/workspace/cnpy/
        -I/workspace/finn-hlslib -I/workspace/vivado-hlslib
        --std=c++11 -lz""".format(
            node.op_type, node.op_type
        )
        process_compile = subprocess.Popen(bash_compile.split(), stdout=subprocess.PIPE)
        process_compile.communicate()
        # fail loudly here instead of later on a missing binary/output file
        if process_compile.returncode != 0:
            raise RuntimeError(
                "Compilation failed for node {}".format(node.op_type)
            )
        # register the binary before running it so it is cleaned up even
        # if execution fails
        temp_files.append("execute_{}".format(node.op_type))
        bash_execute = "./execute_{}".format(node.op_type)
        process_execute = subprocess.Popen(bash_execute.split(), stdout=subprocess.PIPE)
        process_execute.communicate()
        if process_execute.returncode != 0:
            raise RuntimeError(
                "Execution failed for node {}".format(node.op_type)
            )
        temp_files.append("output.npy")
        context[node.output[0]] = np.load("output.npy")
    finally:
        # best-effort cleanup; some files may not exist if an earlier
        # step failed, so guard each removal
        for temp_file in temp_files:
            if os.path.exists(temp_file):
                os.remove(temp_file)
def get_attributes(self, node):
self.resType = get_by_name(node.attribute, "resType").s.decode("utf-8")
self.MW = get_by_name(node.attribute, "MW").i
......
import os
import numpy as np
import subprocess
from finn.core.utils import get_by_name
from finn.custom_op.fpgadataflow import HLSCustomOp
......@@ -9,6 +14,35 @@ class StreamingMaxPool(HLSCustomOp):
def infer_node_datatype(self, node, model):
pass
def execute_node(self, node, context, graph):
    """Execute this node via generated C++: save inputs as .npy files,
    generate and compile execute_<op_type>.cpp with g++, run the binary,
    and load its output.npy result into the execution context.

    Args:
        node: the ONNX node to execute.
        context: dict mapping tensor names to numpy arrays; inputs are
            read from it and node.output[0] is written back.
        graph: the enclosing ONNX graph (unused here, kept for the
            CustomOp execute_node interface).

    Raises:
        RuntimeError: if compilation or execution of the generated
            binary returns a non-zero exit code.
    """
    temp_files = []
    try:
        # save each input tensor so the generated C++ can load it via cnpy
        for in_ind, inputs in enumerate(node.input):
            input_file = "input_{}.npy".format(in_ind)
            np.save(input_file, context[inputs])
            temp_files.append(input_file)
        # emit execute_<op_type>.cpp for this node
        self.code_generation(node)
        temp_files.append("execute_{}.cpp".format(node.op_type))
        bash_compile = """g++ -o execute_{} execute_{}.cpp
        /workspace/cnpy/cnpy.cpp -I/workspace/cnpy/
        -I/workspace/finn-hlslib -I/workspace/vivado-hlslib
        --std=c++11 -lz""".format(
            node.op_type, node.op_type
        )
        process_compile = subprocess.Popen(bash_compile.split(), stdout=subprocess.PIPE)
        process_compile.communicate()
        # fail loudly here instead of later on a missing binary/output file
        if process_compile.returncode != 0:
            raise RuntimeError(
                "Compilation failed for node {}".format(node.op_type)
            )
        # register the binary before running it so it is cleaned up even
        # if execution fails
        temp_files.append("execute_{}".format(node.op_type))
        bash_execute = "./execute_{}".format(node.op_type)
        process_execute = subprocess.Popen(bash_execute.split(), stdout=subprocess.PIPE)
        process_execute.communicate()
        if process_execute.returncode != 0:
            raise RuntimeError(
                "Execution failed for node {}".format(node.op_type)
            )
        temp_files.append("output.npy")
        context[node.output[0]] = np.load("output.npy")
    finally:
        # best-effort cleanup; some files may not exist if an earlier
        # step failed, so guard each removal
        for temp_file in temp_files:
            if os.path.exists(temp_file):
                os.remove(temp_file)
def get_attributes(self, node):
self.ImgDim = get_by_name(node.attribute, "ImgDim").i
self.PoolDim = get_by_name(node.attribute, "PoolDim").i
......
import os
import numpy as np
import subprocess
from finn.core.utils import get_by_name
from finn.custom_op.fpgadataflow import HLSCustomOp
......@@ -8,6 +12,35 @@ class StreamingMaxPool_Batch(HLSCustomOp):
def infer_node_datatype(self, node, model):
pass
def execute_node(self, node, context, graph):
    """Execute this node via generated C++: save inputs as .npy files,
    generate and compile execute_<op_type>.cpp with g++, run the binary,
    and load its output.npy result into the execution context.

    Args:
        node: the ONNX node to execute.
        context: dict mapping tensor names to numpy arrays; inputs are
            read from it and node.output[0] is written back.
        graph: the enclosing ONNX graph (unused here, kept for the
            CustomOp execute_node interface).

    Raises:
        RuntimeError: if compilation or execution of the generated
            binary returns a non-zero exit code.
    """
    temp_files = []
    try:
        # save each input tensor so the generated C++ can load it via cnpy
        for in_ind, inputs in enumerate(node.input):
            input_file = "input_{}.npy".format(in_ind)
            np.save(input_file, context[inputs])
            temp_files.append(input_file)
        # emit execute_<op_type>.cpp for this node
        self.code_generation(node)
        temp_files.append("execute_{}.cpp".format(node.op_type))
        bash_compile = """g++ -o execute_{} execute_{}.cpp
        /workspace/cnpy/cnpy.cpp -I/workspace/cnpy/
        -I/workspace/finn-hlslib -I/workspace/vivado-hlslib
        --std=c++11 -lz""".format(
            node.op_type, node.op_type
        )
        process_compile = subprocess.Popen(bash_compile.split(), stdout=subprocess.PIPE)
        process_compile.communicate()
        # fail loudly here instead of later on a missing binary/output file
        if process_compile.returncode != 0:
            raise RuntimeError(
                "Compilation failed for node {}".format(node.op_type)
            )
        # register the binary before running it so it is cleaned up even
        # if execution fails
        temp_files.append("execute_{}".format(node.op_type))
        bash_execute = "./execute_{}".format(node.op_type)
        process_execute = subprocess.Popen(bash_execute.split(), stdout=subprocess.PIPE)
        process_execute.communicate()
        if process_execute.returncode != 0:
            raise RuntimeError(
                "Execution failed for node {}".format(node.op_type)
            )
        temp_files.append("output.npy")
        context[node.output[0]] = np.load("output.npy")
    finally:
        # best-effort cleanup; some files may not exist if an earlier
        # step failed, so guard each removal
        for temp_file in temp_files:
            if os.path.exists(temp_file):
                os.remove(temp_file)
def get_attributes(self, node):
self.ImgDim = get_by_name(node.attribute, "ImgDim").i
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment