diff --git a/src/finn/transformation/fpgadataflow/code_gen_transformation.py b/src/finn/transformation/fpgadataflow/codegen.py
similarity index 100%
rename from src/finn/transformation/fpgadataflow/code_gen_transformation.py
rename to src/finn/transformation/fpgadataflow/codegen.py
diff --git a/src/finn/transformation/fpgadataflow/compilation_transformation.py b/src/finn/transformation/fpgadataflow/compile.py
similarity index 94%
rename from src/finn/transformation/fpgadataflow/compilation_transformation.py
rename to src/finn/transformation/fpgadataflow/compile.py
index 8d158c4a5561bfeec4c17b9e5fa5c3df5a7a96bd..ee02ce7695fd4583a8e58e0661d9711d6a714709 100644
--- a/src/finn/transformation/fpgadataflow/compilation_transformation.py
+++ b/src/finn/transformation/fpgadataflow/compile.py
@@ -3,8 +3,8 @@
 import finn.custom_op.registry as registry
 from finn.transformation import Transformation
 
-class Compilation(Transformation):
-    """Compilation for all nodes in model"""
+class Compile(Transformation):
+    """Compile for all nodes in model"""
 
     def __init__(self):
         super().__init__()
diff --git a/tests/fpgadataflow/test_fpgadataflow_fclayer.py b/tests/fpgadataflow/test_fpgadataflow_fclayer.py
index 056f57d579a06ffaca1d69dc2490d2996eb1ac03..22034834fbc365caf7f863cdd148916983d31153 100644
--- a/tests/fpgadataflow/test_fpgadataflow_fclayer.py
+++ b/tests/fpgadataflow/test_fpgadataflow_fclayer.py
@@ -8,8 +8,8 @@ import finn.custom_op.xnorpopcount as xp
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
 from finn.core.utils import gen_finn_dt_tensor
-from finn.transformation.fpgadataflow.code_gen_transformation import CodeGen
-from finn.transformation.fpgadataflow.compilation_transformation import Compilation
+from finn.transformation.fpgadataflow.codegen import CodeGen
+from finn.transformation.fpgadataflow.compile import Compile
 
 
 def make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T=None, tdt=None):
@@ -107,7 +107,7 @@ def test_fpgadataflow_fclayer_noact(idt, wdt, nf, sf, mw, mh):
     x = gen_finn_dt_tensor(idt, (1, mw))
     model = make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt)
     model = model.transform(CodeGen())
-    model = model.transform(Compilation())
+    model = model.transform(Compile())
     # prepare input data
     input_dict = prepare_inputs(model, x, idt)
     if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
diff --git a/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py b/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py
index 9b5b5ecb8297489d1ae1f5f222f0ba1ad8c9a037..2b69e9d8149d0f875c1b225f97aee01751a98f06 100644
--- a/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py
+++ b/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py
@@ -4,8 +4,8 @@ from onnx import TensorProto, helper
 import finn.core.onnx_exec as oxe
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.fpgadataflow.code_gen_transformation import CodeGen
-from finn.transformation.fpgadataflow.compilation_transformation import Compilation
+from finn.transformation.fpgadataflow.codegen import CodeGen
+from finn.transformation.fpgadataflow.compile import Compile
 
 
 def test_layer_streaming_maxpool_batch():
@@ -112,7 +112,7 @@ def test_layer_streaming_maxpool_batch():
     print(input_tensor)
 
     model = model.transform(CodeGen())
-    model = model.transform(Compilation())
+    model = model.transform(Compile())
 
     input_dict = {"in": input_tensor}
     output_dict = oxe.execute_onnx(model, input_dict)
diff --git a/tests/transformation/test_code_gen_trafo.py b/tests/transformation/test_code_gen_trafo.py
index b5205c5211007c18a564b7a4c20e37de207b0708..e2651acfd0345156809e9c82f55694c0d48af4f9 100644
--- a/tests/transformation/test_code_gen_trafo.py
+++ b/tests/transformation/test_code_gen_trafo.py
@@ -1,12 +1,11 @@
 import os
 
-import numpy as np
 from onnx import TensorProto, helper
 
 import finn.core.utils as util
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.fpgadataflow.code_gen_transformation import CodeGen
+from finn.transformation.fpgadataflow.codegen import CodeGen
 
 
 def test_code_gen_trafo():
diff --git a/tests/transformation/test_compilation_trafo.py b/tests/transformation/test_compilation_trafo.py
index 9578d93d6d5bea2869e99ba442f0048a933d41c7..e1418b4febbfe1315198e19848dbf81a4e04658a 100644
--- a/tests/transformation/test_compilation_trafo.py
+++ b/tests/transformation/test_compilation_trafo.py
@@ -1,13 +1,12 @@
 import os
 
-import numpy as np
 from onnx import TensorProto, helper
 
 import finn.core.utils as util
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
-from finn.transformation.fpgadataflow.code_gen_transformation import CodeGen
-from finn.transformation.fpgadataflow.compilation_transformation import Compilation
+from finn.transformation.fpgadataflow.codegen import CodeGen
+from finn.transformation.fpgadataflow.compile import Compile
 
 
 def test_compilation_trafo():
@@ -19,7 +18,6 @@ def test_compilation_trafo():
     wmem = mw * mh // (pe * simd)
     nf = mh // pe
     sf = mw // simd
-    tmem = nf
 
     inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, sf, simd])
     outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, nf, pe])
@@ -57,7 +55,7 @@ def test_compilation_trafo():
     model.set_initializer("weights", W)
 
     model = model.transform(CodeGen())
-    model = model.transform(Compilation())
+    model = model.transform(Compile())
     for node in model.graph.node:
         compilation_attribute = util.get_by_name(node.attribute, "executable_path")
         executable = compilation_attribute.s.decode("UTF-8")
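
Usage note: a minimal sketch of how the renamed transformations are intended to be chained, mirroring the updated tests above. The file name "model.onnx" is a placeholder for a model whose nodes are fpgadataflow custom ops; only the CodeGen/Compile chaining and the executable_path attribute read-back are taken from the tests in this patch, the rest is an assumption about the surrounding flow.

# sketch.py -- illustrative only, not part of the patch
import finn.core.utils as util
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.fpgadataflow.codegen import CodeGen
from finn.transformation.fpgadataflow.compile import Compile

# placeholder: a saved model containing fpgadataflow custom op nodes
model = ModelWrapper("model.onnx")

# generate code for each fpgadataflow node, then compile it
model = model.transform(CodeGen())
model = model.transform(Compile())

# after Compile, each node carries an executable_path attribute
# (this is what test_compilation_trafo asserts on above)
for node in model.graph.node:
    exe = util.get_by_name(node.attribute, "executable_path").s.decode("UTF-8")
    print(node.name, exe)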