Skip to content
Snippets Groups Projects
Commit 86c8b655 authored by auphelia's avatar auphelia
Browse files

Merge dev into feature/streaming_fifo

parents 6cd2a7ac 79d3e878
No related branches found
No related tags found
No related merge requests found
Showing
with 400 additions and 291 deletions
......@@ -43,30 +43,19 @@ RUN echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config
# cloning dependency repos
# Brevitas
RUN git clone --branch feature/finn_onnx_export https://github.com/Xilinx/brevitas.git /workspace/brevitas
RUN git -C /workspace/brevitas checkout 215cf44c76d562339fca368c8c3afee3110033e8
RUN git clone https://github.com/Xilinx/brevitas.git /workspace/brevitas
# Brevitas examples
RUN git clone --branch feature/rework_scaling_clipping https://github.com/maltanar/brevitas_cnv_lfc.git /workspace/brevitas_cnv_lfc
RUN git -C /workspace/brevitas_cnv_lfc checkout 2059f96bd576bf71f32c757e7f92617a70190c90
RUN git clone https://github.com/maltanar/brevitas_cnv_lfc.git /workspace/brevitas_cnv_lfc
# CNPY
RUN git clone https://github.com/rogersce/cnpy.git /workspace/cnpy
RUN git -C /workspace/cnpy checkout 4e8810b1a8637695171ed346ce68f6984e585ef4
# FINN hlslib
RUN git clone https://github.com/maltanar/finn-hlslib.git /workspace/finn-hlslib
RUN git -C /workspace/finn-hlslib checkout b139bf051ac8f8e0a3625509247f714127cf3317
# PyVerilator
RUN git clone https://github.com/maltanar/pyverilator /workspace/pyverilator
RUN git -C /workspace/pyverilator checkout 307fc5c82db748620836307a2002fdc9fe170226
# PYNQ-HelloWorld
RUN git clone --branch feature/synth_rpt https://github.com/maltanar/PYNQ-HelloWorld.git /workspace/PYNQ-HelloWorld
RUN git -C /workspace/PYNQ-HelloWorld checkout db7e418767ce2a8e08fe732ddb3aa56ee79b7560
RUN git clone https://github.com/maltanar/PYNQ-HelloWorld.git /workspace/PYNQ-HelloWorld
# FINN
# checkout desired FINN branch for testing
RUN git clone --branch $FINN_CI_BRANCH https://github.com/Xilinx/finn /workspace/finn
RUN pip install -r /workspace/finn/requirements.txt
......
......@@ -60,10 +60,6 @@ RUN pip install pytest-dependency
RUN pip install sphinx
RUN pip install sphinx_rtd_theme
# copy entrypoint script
COPY docker/finn_entrypoint.sh /usr/local/bin/
RUN chmod 755 /usr/local/bin/finn_entrypoint.sh
# switch user
RUN groupadd -g $GID $GNAME
RUN useradd -M -u $UID $UNAME -g $GNAME
......@@ -76,32 +72,20 @@ USER $UNAME
# cloning dependency repos (as user)
# Brevitas
RUN git clone --branch feature/finn_onnx_export https://github.com/Xilinx/brevitas.git /workspace/brevitas
RUN git -C /workspace/brevitas checkout 215cf44c76d562339fca368c8c3afee3110033e8
RUN git clone https://github.com/Xilinx/brevitas.git /workspace/brevitas
# Brevitas examples
RUN git clone --branch feature/rework_scaling_clipping https://github.com/maltanar/brevitas_cnv_lfc.git /workspace/brevitas_cnv_lfc
RUN git -C /workspace/brevitas_cnv_lfc checkout 2059f96bd576bf71f32c757e7f92617a70190c90
RUN git clone https://github.com/maltanar/brevitas_cnv_lfc.git /workspace/brevitas_cnv_lfc
# CNPY
RUN git clone https://github.com/rogersce/cnpy.git /workspace/cnpy
RUN git -C /workspace/cnpy checkout 4e8810b1a8637695171ed346ce68f6984e585ef4
# FINN hlslib
RUN git clone https://github.com/maltanar/finn-hlslib.git /workspace/finn-hlslib
RUN git -C /workspace/finn-hlslib checkout b139bf051ac8f8e0a3625509247f714127cf3317
# PyVerilator
RUN git clone https://github.com/maltanar/pyverilator /workspace/pyverilator
RUN git -C /workspace/pyverilator checkout 307fc5c82db748620836307a2002fdc9fe170226
# PYNQ-HelloWorld
RUN git clone --branch feature/synth_rpt https://github.com/maltanar/PYNQ-HelloWorld.git /workspace/PYNQ-HelloWorld
RUN git -C /workspace/PYNQ-HelloWorld checkout db7e418767ce2a8e08fe732ddb3aa56ee79b7560
RUN git clone https://github.com/maltanar/PYNQ-HelloWorld.git /workspace/PYNQ-HelloWorld
# Note that we expect the cloned finn directory on the host to be
# mounted on /workspace/finn -- see run-docker.sh for an example
# of how to do this.
# for this developer-oriented Docker container we assume the FINN repo is cloned and mounted from the host
# at /workspace/finn -- see run-docker.sh for an example of how to do this.
ENV PYTHONPATH "${PYTHONPATH}:/workspace/finn/src"
ENV PYTHONPATH "${PYTHONPATH}:/workspace/brevitas_cnv_lfc/training_scripts"
ENV PYTHONPATH "${PYTHONPATH}:/workspace/brevitas"
......@@ -110,6 +94,13 @@ ENV PYNQSHELL_PATH "/workspace/PYNQ-HelloWorld/boards"
WORKDIR /home/$UNAME/finn
RUN echo "PS1='\[\033[1;36m\]\u\[\033[1;31m\]@\[\033[1;32m\]\h:\[\033[1;35m\]\w\[\033[1;31m\]\$\[\033[0m\] '" >> /home/$UNAME/.bashrc
RUN echo "source \$VIVADO_PATH/settings64.sh" >> /home/$UNAME/.bashrc
# copy entrypoint script
USER root
COPY docker/finn_entrypoint.sh /usr/local/bin/
RUN chmod 755 /usr/local/bin/finn_entrypoint.sh
USER $UNAME
ENTRYPOINT ["finn_entrypoint.sh"]
CMD ["bash"]
......@@ -4,6 +4,43 @@ export XILINX_VIVADO=$VIVADO_PATH
export SHELL=/bin/bash
export FINN_ROOT=/workspace/finn
GREEN='\033[0;32m'
NC='\033[0m' # No Color
# gecho MSG -- print MSG wrapped in the GREEN/NC color codes defined above.
# Uses `echo -e` so the color escape sequences are interpreted.
gecho () {
  echo -e "${GREEN}$1${NC}"
}
# Pin each dependency repo to a known-good commit at container startup.
# The repos themselves are cloned (unpinned) in the Dockerfile; doing the
# checkout here keeps the commit hashes in one place instead of duplicating
# them across the CI and developer Dockerfiles.
BREVITAS_COMMIT=215cf44c76d562339fca368c8c3afee3110033e8
BREVITAS_EXAMPLES_COMMIT=2059f96bd576bf71f32c757e7f92617a70190c90
CNPY_COMMIT=4e8810b1a8637695171ed346ce68f6984e585ef4
HLSLIB_COMMIT=b139bf051ac8f8e0a3625509247f714127cf3317
PYVERILATOR_COMMIT=307fc5c82db748620836307a2002fdc9fe170226
PYNQSHELL_COMMIT=db7e418767ce2a8e08fe732ddb3aa56ee79b7560

gecho "Setting up known-good commit versions for FINN dependencies"
# Brevitas
gecho "brevitas @ $BREVITAS_COMMIT"
git -C /workspace/brevitas checkout $BREVITAS_COMMIT --quiet
# Brevitas examples
gecho "brevitas_cnv_lfc @ $BREVITAS_EXAMPLES_COMMIT"
git -C /workspace/brevitas_cnv_lfc checkout $BREVITAS_EXAMPLES_COMMIT --quiet
# CNPY
gecho "cnpy @ $CNPY_COMMIT"
git -C /workspace/cnpy checkout $CNPY_COMMIT --quiet
# FINN hlslib
gecho "finn-hlslib @ $HLSLIB_COMMIT"
git -C /workspace/finn-hlslib checkout $HLSLIB_COMMIT --quiet
# PyVerilator
gecho "PyVerilator @ $PYVERILATOR_COMMIT"
git -C /workspace/pyverilator checkout $PYVERILATOR_COMMIT --quiet
# PYNQ-HelloWorld
gecho "PYNQ shell @ $PYNQSHELL_COMMIT"
git -C /workspace/PYNQ-HelloWorld checkout $PYNQSHELL_COMMIT --quiet
# source the Vivado environment variables (settings64.sh sets up PATH etc.)
source $VIVADO_PATH/settings64.sh
......
......@@ -25,12 +25,11 @@
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import os
import xml.etree.ElementTree as ET
import finn.custom_op.registry as registry
import finn.util.basic as util
from finn.util.fpgadataflow import is_fpgadataflow_node
def hls_synth_res_estimation(model):
......@@ -40,50 +39,30 @@ def hls_synth_res_estimation(model):
res_dict = {}
for node in model.graph.node:
if node.domain == "finn":
backend_attribute = util.get_by_name(node.attribute, "backend")
if backend_attribute is None:
continue
backend_value = backend_attribute.s.decode("UTF-8")
if backend_value == "fpgadataflow":
op_type = node.op_type
inst = registry.custom_op[op_type](node)
code_gen_dir = inst.get_nodeattr("code_gen_dir_ipgen")
if code_gen_dir == "":
res_dict[node.name] = dict()
res_dict[node.name]["BRAM_18K"] = 0
res_dict[node.name]["FF"] = 0
res_dict[node.name]["LUT"] = 0
res_dict[node.name]["DSP48E"] = 0
res_dict[node.name]["URAM"] = 0
warnings.warn(
"""Could not find report files, values will be set to zero
for this node. Please run "CodeGen_ipgen" transformation and
if is_fpgadataflow_node(node) is True:
op_type = node.op_type
inst = registry.custom_op[op_type](node)
code_gen_dir = inst.get_nodeattr("code_gen_dir_ipgen")
if code_gen_dir == "":
raise Exception(
"""Please run "CodeGen_ipgen" transformation and
"HLSSynth_IPGen" first to generate the report files"""
)
else:
xmlfile = "{}/project_{}/sol1/syn/report/{}_csynth.xml".format(
code_gen_dir, node.name, node.name
)
)
else:
xmlfile = "{}/project_{}/sol1/syn/report/{}_csynth.xml".format(
code_gen_dir, node.name, node.name
)
if os.path.isfile(xmlfile):
res_dict[node.name] = dict()
tree = ET.parse(xmlfile)
root = tree.getroot()
for item in root.findall("AreaEstimates/Resources"):
for child in item:
res_dict[node.name][child.tag] = child.text
else:
res_dict[node.name] = dict()
res_dict[node.name]["BRAM_18K"] = 0
res_dict[node.name]["FF"] = 0
res_dict[node.name]["LUT"] = 0
res_dict[node.name]["DSP48E"] = 0
res_dict[node.name]["URAM"] = 0
warnings.warn(
"""Could not find report files, values will be set to zero
for this node. Please run "HLSSynth_IPGen" first
if os.path.isfile(xmlfile):
res_dict[node.name] = dict()
tree = ET.parse(xmlfile)
root = tree.getroot()
for item in root.findall("AreaEstimates/Resources"):
for child in item:
res_dict[node.name][child.tag] = child.text
else:
raise Exception(
"""Please run "HLSSynth_IPGen" first
to generate the report files"""
)
)
return res_dict
......@@ -27,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import finn.custom_op.registry as registry
import finn.util.basic as util
from finn.util.fpgadataflow import is_fpgadataflow_node
def res_estimation(model):
......@@ -37,14 +37,9 @@ def res_estimation(model):
res_dict = {}
for node in model.graph.node:
if node.domain == "finn":
backend_attribute = util.get_by_name(node.attribute, "backend")
if backend_attribute is None:
continue
backend_value = backend_attribute.s.decode("UTF-8")
if backend_value == "fpgadataflow":
op_type = node.op_type
inst = registry.custom_op[op_type](node)
res_dict[node.name] = inst.node_res_estimation()
if is_fpgadataflow_node(node) is True:
op_type = node.op_type
inst = registry.custom_op[op_type](node)
res_dict[node.name] = inst.node_res_estimation()
return res_dict
......@@ -253,14 +253,12 @@ class ModelWrapper:
return None
def find_producer(self, tensor_name):
"""Finds and returns the node that produces the tensor with given name.
Currently only works for linear graphs."""
all_outputs = [x.output[0] for x in self._model_proto.graph.node]
try:
producer_ind = all_outputs.index(tensor_name)
return self._model_proto.graph.node[producer_ind]
except ValueError:
return None
"""Finds and returns the node that produces the tensor with given name."""
ret = None
for x in self._model_proto.graph.node:
if tensor_name in x.output:
ret = x
return ret
def find_upstream(self, tensor_name, finder_fxn):
"""Follow the producer chain upstream, calling finder_fxn on each upstream
......
......@@ -61,6 +61,10 @@ def execute_node(node, context, graph):
# onnxruntime unfortunately does not implement run_node as defined by ONNX,
# it can only execute entire models -- so we create a model which solely
# consists of our current node.
# note: ensure that the same ValueInfo does not appear both in
# graph.value_info as well as graph.output or graph.input
# nodes with multiple outputs that are a mix of value_info and
# input/outputs may get them reordered below
node_inputs = list(filter(lambda x: x.name in node.input, graph.input))
node_inputs += list(
filter(lambda x: x.name in node.input, graph.value_info)
......@@ -84,17 +88,25 @@ def execute_node(node, context, graph):
output_list = sess.run(None, input_dict)
for output_ind in range(len(node.output)):
# get the name of the target buffer from node.output
outp = node.output[output_ind]
if output_list[output_ind].shape != context[outp].shape:
# retrieve the index of that name in node_outputs
for i in range(len(node_outputs)):
if outp == node_outputs[i].name:
list_ind = i
# use that index to index output_list
if output_list[list_ind].shape != context[outp].shape:
raise Exception(
"""Output shapes disagree after node execution:
found %s vs expected %s"""
% (
str(output_list[output_ind].shape.shape),
str(output_list[list_ind].shape.shape),
str(context[outp].shape),
)
)
context[outp] = output_list[output_ind]
context[outp] = output_list[list_ind]
def execute_onnx(model, input_dict, return_full_exec_context=False):
......
......@@ -30,7 +30,7 @@ import os
import shutil
import finn.custom_op.registry as registry
import finn.util.basic as util
from finn.util.fpgadataflow import is_fpgadataflow_node
from finn.transformation import Transformation
......@@ -53,36 +53,33 @@ class CleanUp(Transformation):
model.set_metadata_prop("vivado_stitch_proj", "")
for node in model.graph.node:
op_type = node.op_type
if node.domain == "finn":
backend_attribute = util.get_by_name(node.attribute, "backend")
backend_value = backend_attribute.s.decode("UTF-8")
if backend_value == "fpgadataflow":
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
# delete code_gen_dir from npysim
code_gen_dir = inst.get_nodeattr("code_gen_dir_npysim")
if os.path.isdir(code_gen_dir):
shutil.rmtree(code_gen_dir)
inst.set_nodeattr("code_gen_dir_npysim", "")
inst.set_nodeattr("executable_path", "")
# delete code_gen_dir from ipgen and project folder
code_gen_dir = inst.get_nodeattr("code_gen_dir_ipgen")
ipgen_path = inst.get_nodeattr("ipgen_path")
if os.path.isdir(code_gen_dir):
shutil.rmtree(code_gen_dir)
if os.path.isdir(ipgen_path):
shutil.rmtree(ipgen_path)
inst.set_nodeattr("code_gen_dir_ipgen", "")
inst.set_nodeattr("ipgen_path", "")
# delete Java HotSpot Performance data log
for d_name in os.listdir("/tmp/"):
if "hsperfdata" in d_name:
shutil.rmtree("/tmp/" + str(d_name))
if is_fpgadataflow_node(node) is True:
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
# delete code_gen_dir from npysim
code_gen_dir = inst.get_nodeattr("code_gen_dir_npysim")
if os.path.isdir(code_gen_dir):
shutil.rmtree(code_gen_dir)
inst.set_nodeattr("code_gen_dir_npysim", "")
inst.set_nodeattr("executable_path", "")
# delete code_gen_dir from ipgen and project folder
code_gen_dir = inst.get_nodeattr("code_gen_dir_ipgen")
ipgen_path = inst.get_nodeattr("ipgen_path")
if os.path.isdir(code_gen_dir):
shutil.rmtree(code_gen_dir)
if os.path.isdir(ipgen_path):
shutil.rmtree(ipgen_path)
inst.set_nodeattr("code_gen_dir_ipgen", "")
inst.set_nodeattr("ipgen_path", "")
# delete Java HotSpot Performance data log
for d_name in os.listdir("/tmp/"):
if "hsperfdata" in d_name:
shutil.rmtree("/tmp/" + str(d_name))
except KeyError:
# exception if op_type is not supported
raise Exception(
"Custom op_type %s is currently not supported." % op_type
)
except KeyError:
# exception if op_type is not supported
raise Exception(
"Custom op_type %s is currently not supported." % op_type
)
return (model, False)
......@@ -30,7 +30,8 @@ import os
import finn.custom_op.registry as registry
from finn.transformation import Transformation
from finn.util.basic import get_by_name, make_build_dir
from finn.util.basic import make_build_dir
from finn.util.fpgadataflow import is_fpgadataflow_node
def _codegen_single_node(node, model, fpgapart, clk):
......@@ -77,11 +78,6 @@ class CodeGen_ipgen(Transformation):
def apply(self, model):
for node in model.graph.node:
if node.domain == "finn":
backend_attribute = get_by_name(node.attribute, "backend")
if backend_attribute is None:
continue
backend_value = backend_attribute.s.decode("UTF-8")
if backend_value == "fpgadataflow":
_codegen_single_node(node, model, self.fpgapart, self.clk)
if is_fpgadataflow_node(node) is True:
_codegen_single_node(node, model, self.fpgapart, self.clk)
return (model, False)
......@@ -30,7 +30,8 @@ import os
import finn.custom_op.registry as registry
from finn.transformation import Transformation
from finn.util.basic import get_by_name, make_build_dir
from finn.util.basic import make_build_dir
from finn.util.fpgadataflow import is_fpgadataflow_node
def _codegen_single_node(node, model):
......@@ -67,11 +68,6 @@ class CodeGen_npysim(Transformation):
def apply(self, model):
for node in model.graph.node:
if node.domain == "finn":
backend_attribute = get_by_name(node.attribute, "backend")
if backend_attribute is None:
continue
backend_value = backend_attribute.s.decode("UTF-8")
if backend_value == "fpgadataflow":
_codegen_single_node(node, model)
if is_fpgadataflow_node(node) is True:
_codegen_single_node(node, model)
return (model, False)
......@@ -27,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import finn.custom_op.registry as registry
import finn.util.basic as util
from finn.util.fpgadataflow import is_fpgadataflow_node
from finn.transformation import NodeLocalTransformation
......@@ -49,31 +49,27 @@ class Compile(NodeLocalTransformation):
def applyNodeLocal(self, node):
op_type = node.op_type
if node.domain == "finn":
backend_attribute = util.get_by_name(node.attribute, "backend")
if backend_attribute is not None:
backend_value = backend_attribute.s.decode("UTF-8")
if backend_value == "fpgadataflow":
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
# ensure that code is generated
assert (
inst.get_nodeattr("code_gen_dir_npysim") != ""
), """Node
attribute "code_gen_dir_npysim" is not set. Please run
Transformation CodeGen_npysim first."""
# call the compilation function for this node
inst.compile_singlenode_code()
# ensure that executable path is now set
assert (
inst.get_nodeattr("executable_path") != ""
), """Transformation
compile was not successful, there is no path to executables set
in node attribute "executable_path"."""
except KeyError:
# exception if op_type is not supported
raise Exception(
"Custom op_type %s is currently not supported." % op_type
)
if is_fpgadataflow_node(node) is True:
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
# ensure that code is generated
assert (
inst.get_nodeattr("code_gen_dir_npysim") != ""
), """Node
attribute "code_gen_dir_npysim" is not set. Please run
Transformation CodeGen_npysim first."""
# call the compilation function for this node
inst.compile_singlenode_code()
# ensure that executable path is now set
assert (
inst.get_nodeattr("executable_path") != ""
), """Transformation
compile was not successful, there is no path to executables set
in node attribute "executable_path"."""
except KeyError:
# exception if op_type is not supported
raise Exception(
"Custom op_type %s is currently not supported." % op_type
)
return (node, False)
......@@ -27,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import finn.custom_op.registry as registry
import finn.util.basic as util
from finn.util.fpgadataflow import is_fpgadataflow_node
from finn.transformation import NodeLocalTransformation
......@@ -49,33 +49,27 @@ class HLSSynth_IPGen(NodeLocalTransformation):
def applyNodeLocal(self, node):
op_type = node.op_type
if node.domain == "finn":
backend_attribute = util.get_by_name(node.attribute, "backend")
if backend_attribute is None:
return (node, False)
backend_value = backend_attribute.s.decode("UTF-8")
if backend_value == "fpgadataflow":
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
# ensure that code is generated
assert (
inst.get_nodeattr("code_gen_dir_ipgen") != ""
), """Node
attribute "code_gen_dir_ipgen" is empty. Please run
transformation CodeGen_ipgen first."""
# call the compilation function for this node
inst.ipgen_singlenode_code()
# ensure that executable path is now set
assert (
inst.get_nodeattr("ipgen_path") != ""
), """Transformation
HLSSynth_IPGen was not successful. Node attribute "ipgen_path"
is empty."""
except KeyError:
# exception if op_type is not supported
raise Exception(
"Custom op_type %s is currently not supported." % op_type
)
if is_fpgadataflow_node(node) is True:
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
# ensure that code is generated
assert (
inst.get_nodeattr("code_gen_dir_ipgen") != ""
), """Node
attribute "code_gen_dir_ipgen" is empty. Please run
transformation CodeGen_ipgen first."""
# call the compilation function for this node
inst.ipgen_singlenode_code()
# ensure that executable path is now set
assert (
inst.get_nodeattr("ipgen_path") != ""
), """Transformation
HLSSynth_IPGen was not successful. Node attribute "ipgen_path"
is empty."""
except KeyError:
# exception if op_type is not supported
raise Exception(
"Custom op_type %s is currently not supported." % op_type
)
return (node, False)
......@@ -3,7 +3,7 @@ from onnx import helper as oh
from finn.custom_op.registry import getCustomOp
from finn.transformation import Transformation
from finn.util.basic import get_by_name
from finn.util.fpgadataflow import is_fpgadataflow_node
def _is_dwc_node(node):
......@@ -13,21 +13,9 @@ def _is_dwc_node(node):
return False
def _is_fpgadataflow_node(node):
if node.domain == "finn":
n_backend = get_by_name(node.attribute, "backend")
if n_backend is None:
return False
backend_value = n_backend.s.decode("UTF-8")
if backend_value == "fpgadataflow":
return True
else:
return False
def _suitable_node(node):
if node is not None:
if _is_fpgadataflow_node(node) is True:
if is_fpgadataflow_node(node) is True:
if _is_dwc_node(node) is False:
return True
else:
......
......@@ -27,7 +27,8 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import finn.custom_op.registry as registry
import finn.util.basic as util
from finn.util.fpgadataflow import is_fpgadataflow_node
from finn.transformation import NodeLocalTransformation
try:
......@@ -55,22 +56,18 @@ class PrepareRTLSim(NodeLocalTransformation):
def applyNodeLocal(self, node):
op_type = node.op_type
if node.domain == "finn":
backend_attribute = util.get_by_name(node.attribute, "backend")
if backend_attribute is not None:
backend_value = backend_attribute.s.decode("UTF-8")
if backend_value == "fpgadataflow":
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
inst.prepare_rtlsim()
# ensure that executable path is now set
assert (
inst.get_nodeattr("rtlsim_so") != ""
), "Failed to prepare RTLSim, no rtlsim_so attribute found."
except KeyError:
# exception if op_type is not supported
raise Exception(
"Custom op_type %s is currently not supported." % op_type
)
if is_fpgadataflow_node(node) is True:
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
inst.prepare_rtlsim()
# ensure that executable path is now set
assert (
inst.get_nodeattr("rtlsim_so") != ""
), "Failed to prepare RTLSim, no rtlsim_so attribute found."
except KeyError:
# exception if op_type is not supported
raise Exception(
"Custom op_type %s is currently not supported." % op_type
)
return (node, False)
......@@ -29,7 +29,7 @@
import os
import finn.custom_op.registry as registry
import finn.util.basic as util
from finn.util.fpgadataflow import is_fpgadataflow_node
from finn.transformation import Transformation
......@@ -42,32 +42,27 @@ class ReplaceVerilogRelPaths(Transformation):
def apply(self, model):
for node in model.graph.node:
op_type = node.op_type
if node.domain == "finn":
backend_attribute = util.get_by_name(node.attribute, "backend")
if backend_attribute is None:
continue
backend_value = backend_attribute.s.decode("UTF-8")
if backend_value == "fpgadataflow":
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
# find the IP gen dir
ipgen_path = inst.get_nodeattr("ipgen_path")
if ipgen_path is not None and os.path.isdir(ipgen_path):
for dname, dirs, files in os.walk(ipgen_path):
for fname in files:
if fname.endswith(".v"):
fpath = os.path.join(dname, fname)
with open(fpath, "r") as f:
s = f.read()
old = '$readmemh(".'
new = '$readmemh("%s' % dname
s = s.replace(old, new)
old = '"./'
new = '"%s/' % dname
s = s.replace(old, new)
with open(fpath, "w") as f:
f.write(s)
except KeyError:
pass
if is_fpgadataflow_node(node) is True:
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
# find the IP gen dir
ipgen_path = inst.get_nodeattr("ipgen_path")
if ipgen_path is not None and os.path.isdir(ipgen_path):
for dname, dirs, files in os.walk(ipgen_path):
for fname in files:
if fname.endswith(".v"):
fpath = os.path.join(dname, fname)
with open(fpath, "r") as f:
s = f.read()
old = '$readmemh(".'
new = '$readmemh("%s' % dname
s = s.replace(old, new)
old = '"./'
new = '"%s/' % dname
s = s.replace(old, new)
with open(fpath, "w") as f:
f.write(s)
except KeyError:
pass
return (model, False)
......@@ -27,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import finn.custom_op.registry as registry
import finn.util.basic as util
from finn.util.fpgadataflow import is_fpgadataflow_node
from finn.transformation import Transformation
......@@ -42,25 +42,20 @@ class SetExecMode(Transformation):
def apply(self, model):
for node in model.graph.node:
op_type = node.op_type
if node.domain == "finn":
backend_attribute = util.get_by_name(node.attribute, "backend")
if backend_attribute is None:
continue
backend_value = backend_attribute.s.decode("UTF-8")
if backend_value == "fpgadataflow":
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
# set sim_mode accordingly to argument mode
inst.set_nodeattr("exec_mode", self.mode)
# ensure that sim_mode is now set
assert (
inst.get_nodeattr("exec_mode") != ""
), """Transformation
if is_fpgadataflow_node(node) is True:
try:
# lookup op_type in registry of CustomOps
inst = registry.custom_op[op_type](node)
# set sim_mode accordingly to argument mode
inst.set_nodeattr("exec_mode", self.mode)
# ensure that sim_mode is now set
assert (
inst.get_nodeattr("exec_mode") != ""
), """Transformation
was not successful. Node attribute "exec_mode" is not set"""
except KeyError:
# exception if op_type is not supported
raise Exception(
"Custom op_type %s is currently not supported." % op_type
)
except KeyError:
# exception if op_type is not supported
raise Exception(
"Custom op_type %s is currently not supported." % op_type
)
return (model, False)
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from onnx import TensorProto
from onnx import helper as oh
from finn.transformation import Transformation
from finn.core.datatype import DataType
class InsertTopK(Transformation):
    """Add a TopK node at the network output and replace the graph output with
    the TopK indices, so the model emits the top-k class indices instead of
    raw scores. Idempotent: does nothing if the final node is already a TopK.
    """

    def __init__(self, k=5, axis=-1, largest=1, sorted=1):
        # k: number of top elements to select
        # axis / largest / sorted: forwarded verbatim as ONNX TopK attributes
        super().__init__()
        self.k = k
        self.axis = axis
        self.largest = largest
        self.sorted = sorted

    def apply(self, model):
        """Returns (model, graph_modified); modified is False when a TopK
        node already produces the graph output."""
        # get name of the (single, index-0) graph output tensor
        graph_out_name = model.graph.output[0].name
        # find the node that currently produces the graph output
        final_node = model.find_producer(graph_out_name)
        # if a top-select op is already present, do nothing
        if final_node.op_type == "TopK":
            return (model, False)
        else:
            out_shape = model.get_tensor_shape(graph_out_name)
            out_dtype = model.get_tensor_datatype(graph_out_name)
            # the TopK outputs have the same shape except the selected axis,
            # which shrinks to k
            out_shape[self.axis] = self.k
            # ONNX TopK takes k as a 1-element int64 input tensor, not as an
            # attribute -- create an initializer-backed value info for it
            k_tensor = np.array([self.k]).astype(np.int64)
            k_value = oh.make_tensor_value_info(
                model.make_new_valueinfo_name(), TensorProto.INT64, [1]
            )
            topk_values = oh.make_tensor_value_info(
                model.make_new_valueinfo_name(), TensorProto.FLOAT, out_shape
            )
            topk_indices = oh.make_tensor_value_info(
                model.make_new_valueinfo_name(), TensorProto.INT64, out_shape
            )
            model.graph.value_info.append(k_value)
            model.set_tensor_datatype(k_value.name, out_dtype)  # TODO set to int64
            model.graph.value_info.append(topk_values)
            model.set_tensor_datatype(topk_values.name, out_dtype)
            # create and append the TopK node consuming the old graph output
            model.set_initializer(k_value.name, k_tensor)
            topk_node = oh.make_node(
                "TopK",
                inputs=[graph_out_name, k_value.name],
                outputs=[topk_values.name, topk_indices.name],
                axis=self.axis,
                largest=self.largest,
                sorted=self.sorted,
            )
            model.graph.node.append(topk_node)
            # replace the existing output definition with the topk indices:
            # insert at index 0, then pop the old output (now at index 1)
            model.graph.output.insert(0, topk_indices)
            model.graph.output.pop(1)
            # set quantization annotation for indices
            # minimal output dtype for TopK indices depends on num. classes
            # assuming UINT32 is large enough for now (FINN has currently no
            # DataType.INT64)
            model.set_tensor_datatype(topk_indices.name, DataType.UINT32)
            return (model, True)
import onnx
from finn.util.test import get_test_model_trained
import brevitas.onnx as bo
import numpy as np
import onnx.numpy_helper as nph
import torch
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames
from finn.transformation.infer_shapes import InferShapes
from finn.transformation.infer_datatypes import InferDataTypes
from finn.transformation.fold_constants import FoldConstants
from finn.transformation.insert_topk import InsertTopK
import finn.core.onnx_exec as oxe
from pkgutil import get_data
import pytest
export_onnx_path = "test_output_lfc.onnx"
@pytest.mark.parametrize("k", [1, 5, 10])
def test_topk_insert(k):
    """Export a trained TFC model to FINN-ONNX, apply the InsertTopK
    transformation, and check that executing the transformed model returns
    the same top-k class indices as running the Brevitas model directly."""
    tfc = get_test_model_trained("TFC", 1, 1)
    bo.export_finn_onnx(tfc, (1, 1, 28, 28), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    # do basic cleanup transformations (no topk yet)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(InferDataTypes())
    # golden reference: run the Brevitas model on a bundled MNIST sample and
    # compute its top-k indices with numpy
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    input_brevitas = torch.from_numpy(nph.to_array(input_tensor)).float()
    output_golden = tfc.forward(input_brevitas).detach().numpy()
    # argsort is ascending, so flip to get largest-first before slicing k
    output_golden_topk = np.flip(output_golden.flatten().argsort())[:k]
    output_golden_topk = output_golden_topk.flatten()
    input_dict = {"global_in": nph.to_array(input_tensor)}
    # insert top-k node and re-run cleanup so it gets named/shaped
    model = model.transform(InsertTopK(k))
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(InferShapes())
    # execute the transformed model and compare indices with the golden ones
    output_dict_topk = oxe.execute_onnx(model, input_dict)
    output_pysim_topk = output_dict_topk[list(output_dict_topk.keys())[0]]
    # note: np.int is a removed alias (NumPy >= 1.24); use the concrete
    # np.int64 dtype, matching the int64 indices produced by argsort
    output_pysim_topk = output_pysim_topk.astype(np.int64).flatten()
    assert np.array_equal(output_golden_topk, output_pysim_topk)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment