Skip to content
Snippets Groups Projects
Commit 9e8d7351 authored by Yaman Umuroglu's avatar Yaman Umuroglu
Browse files

[Refactor] simplify fpgadataflow transformation names

parent a86dd25c
No related branches found
No related tags found
No related merge requests found
......@@ -3,8 +3,8 @@ import finn.custom_op.registry as registry
from finn.transformation import Transformation
class Compilation(Transformation):
"""Compilation for all nodes in model"""
class Compile(Transformation):
"""Compile for all nodes in model"""
def __init__(self):
super().__init__()
......
......@@ -8,8 +8,8 @@ import finn.custom_op.xnorpopcount as xp
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.core.utils import gen_finn_dt_tensor
from finn.transformation.fpgadataflow.code_gen_transformation import CodeGen
from finn.transformation.fpgadataflow.compilation_transformation import Compilation
from finn.transformation.fpgadataflow.codegen import CodeGen
from finn.transformation.fpgadataflow.compile import Compile
def make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T=None, tdt=None):
......@@ -107,7 +107,7 @@ def test_fpgadataflow_fclayer_noact(idt, wdt, nf, sf, mw, mh):
x = gen_finn_dt_tensor(idt, (1, mw))
model = make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt)
model = model.transform(CodeGen())
model = model.transform(Compilation())
model = model.transform(Compile())
# prepare input data
input_dict = prepare_inputs(model, x, idt)
if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
......
......@@ -4,8 +4,8 @@ from onnx import TensorProto, helper
import finn.core.onnx_exec as oxe
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.fpgadataflow.code_gen_transformation import CodeGen
from finn.transformation.fpgadataflow.compilation_transformation import Compilation
from finn.transformation.fpgadataflow.codegen import CodeGen
from finn.transformation.fpgadataflow.compile import Compile
def test_layer_streaming_maxpool_batch():
......@@ -112,7 +112,7 @@ def test_layer_streaming_maxpool_batch():
print(input_tensor)
model = model.transform(CodeGen())
model = model.transform(Compilation())
model = model.transform(Compile())
input_dict = {"in": input_tensor}
output_dict = oxe.execute_onnx(model, input_dict)
......
import os
import numpy as np
from onnx import TensorProto, helper
import finn.core.utils as util
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.fpgadataflow.code_gen_transformation import CodeGen
from finn.transformation.fpgadataflow.codegen import CodeGen
def test_code_gen_trafo():
......
import os
import numpy as np
from onnx import TensorProto, helper
import finn.core.utils as util
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.fpgadataflow.code_gen_transformation import CodeGen
from finn.transformation.fpgadataflow.compilation_transformation import Compilation
from finn.transformation.fpgadataflow.codegen import CodeGen
from finn.transformation.fpgadataflow.compile import Compile
def test_compilation_trafo():
......@@ -19,7 +18,6 @@ def test_compilation_trafo():
wmem = mw * mh // (pe * simd)
nf = mh // pe
sf = mw // simd
tmem = nf
inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, sf, simd])
outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, nf, pe])
......@@ -57,7 +55,7 @@ def test_compilation_trafo():
model.set_initializer("weights", W)
model = model.transform(CodeGen())
model = model.transform(Compilation())
model = model.transform(Compile())
for node in model.graph.node:
compilation_attribute = util.get_by_name(node.attribute, "executable_path")
executable = compilation_attribute.s.decode("UTF-8")
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment