From 1aa05dc1dfd48b9bcea847bc0d75811706a508c7 Mon Sep 17 00:00:00 2001
From: Yaman Umuroglu <yamanu@xilinx.com>
Date: Fri, 20 May 2022 13:47:01 +0200
Subject: [PATCH] [Refactor] explicitly specify np datatype for new arrays

---
 .../custom_op/fpgadataflow/channelwise_op_batch.py  |  2 +-
 .../fpgadataflow/convert_to_hls_layers.py           |  6 ++++--
 .../test_convert_to_hls_conv_fc_transition.py       |  2 +-
 .../test_convert_to_hls_layers_synthetic.py         | 12 ++++++------
 .../streamline/test_linear_past_eltwise.py          |  9 +++++----
 .../test_move_maxpool_past_multithreshold.py        |  2 +-
 6 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py
index f6c562454..630a231c8 100644
--- a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py
+++ b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py
@@ -51,7 +51,7 @@ from . import templates
 def get_smallest_possible(vals):
     """Returns smallest (fewest bits) possible DataType that can represent
     value. Prefers unsigned integers where possible."""
-    vals = np.array(vals)
+    vals = np.array(vals, dtype=np.float64)
     for v in vals:
         assert int(v) == v, "Error float value"
 
diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
index 46a97f6fa..a1a0c393f 100644
--- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
+++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
@@ -1269,7 +1269,7 @@ class InferChannelwiseLinearLayer(Transformation):
     def get_smallest_possible(self, vals):
         """Returns smallest (fewest bits) possible DataType that can represent
         value. Prefers unsigned integers where possible."""
-        vals = np.array(vals)
+        vals = np.array(vals, dtype=np.float64)
         for v in vals:
             assert int(v) == v, "Error float value"
 
@@ -1545,7 +1545,9 @@ class InferGlobalAccPoolLayer(Transformation):
                     model.make_new_valueinfo_name(), TensorProto.FLOAT, [1]
                 )
                 model.graph.value_info.append(mul_value)
-                model.set_initializer(mul_value.name, np.array(1 / (vecs[1] * vecs[2])))
+                model.set_initializer(
+                    mul_value.name, np.array(1 / (vecs[1] * vecs[2]), dtype=np.float32)
+                )
                 new_mul = helper.make_node(
                     "Mul",
                     [pool_out, mul_value.name],
diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py b/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py
index 9b0f3d68a..64d0b3a84 100755
--- a/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py
+++ b/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py
@@ -201,7 +201,7 @@ def test_convert_to_hls_conv_fc_transition(conv_config, depthwise, use_reshape):
     model.set_initializer(
         "matmul_param", gen_finn_dt_tensor(fc_weight_dt, fc_param_shape)
     )
-    model.set_initializer("reshape_shape", np.array([1, -1]))
+    model.set_initializer("reshape_shape", np.array([1, -1], dtype=np.int64))
 
     model = model.transform(InferShapes())
     model = model.transform(InferDataTypes())
diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py b/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py
index 608990156..f1d3bf5aa 100644
--- a/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py
+++ b/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py
@@ -127,12 +127,12 @@ def make_model(ch, ifmdim):
     model = ModelWrapper(model)
 
     # set initializers for scalar add/mul nodes
-    model.set_initializer(add0_node.input[1], np.array([0.0]))
-    model.set_initializer(add1_node.input[1], np.array([7.0]))
-    model.set_initializer(add2_node.input[1], np.array([8.0]))
-    model.set_initializer(mul1_node.input[1], np.array([2.0]))
-    model.set_initializer(mul2_node.input[1], np.array([2.0]))
-    model.set_initializer(reshape_node.input[1], np.array([1, -1]))
+    model.set_initializer(add0_node.input[1], np.array([0.0], dtype=np.float32))
+    model.set_initializer(add1_node.input[1], np.array([7.0], dtype=np.float32))
+    model.set_initializer(add2_node.input[1], np.array([8.0], dtype=np.float32))
+    model.set_initializer(mul1_node.input[1], np.array([2.0], dtype=np.float32))
+    model.set_initializer(mul2_node.input[1], np.array([2.0], dtype=np.float32))
+    model.set_initializer(reshape_node.input[1], np.array([1, -1], dtype=np.int64))
 
     return model
 
diff --git a/tests/transformation/streamline/test_linear_past_eltwise.py b/tests/transformation/streamline/test_linear_past_eltwise.py
index 098b3f9d4..2cdb9dfc5 100644
--- a/tests/transformation/streamline/test_linear_past_eltwise.py
+++ b/tests/transformation/streamline/test_linear_past_eltwise.py
@@ -40,6 +40,7 @@ from finn.transformation.infer_shapes import InferShapes
 from finn.transformation.streamline.reorder import MoveLinearPastEltwiseAdd
 
 export_onnx_path = "test_linear_past_eltwise.onnx"
+np_default_dtype = np.float32
 
 # construct a synthetic graph to test:
 # topk insertion, topk conversion to hls, add conversion to hls
@@ -81,10 +82,10 @@ def make_model(shape):
     model = ModelWrapper(model)
 
     # set initializers for scalar add/mul nodes
-    model.set_initializer(add1_node.input[1], np.array([7.0]))
-    model.set_initializer(add2_node.input[1], np.array([8.0]))
-    model.set_initializer(mul1_node.input[1], np.array([3.0]))
-    model.set_initializer(mul2_node.input[1], np.array([3.0]))
+    model.set_initializer(add1_node.input[1], np.array([7.0], dtype=np_default_dtype))
+    model.set_initializer(add2_node.input[1], np.array([8.0], dtype=np_default_dtype))
+    model.set_initializer(mul1_node.input[1], np.array([3.0], dtype=np_default_dtype))
+    model.set_initializer(mul2_node.input[1], np.array([3.0], dtype=np_default_dtype))
 
     return model
 
diff --git a/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py b/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py
index fca05afa5..d07f5d275 100644
--- a/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py
+++ b/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py
@@ -82,7 +82,7 @@ def test_move_maxpool_past_multithreshold():
     model = model.transform(InferShapes())
     model = model.transform(InferDataTypes())
 
-    model.set_initializer("thres1", np.array([[0]]))
+    model.set_initializer("thres1", np.array([[0]], dtype=np.float32))
     model.set_initializer(
         "thres2", get_multithreshold_rand_params(*thres2_shape, seed=0)
     )
-- 
GitLab
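
For context, the behaviour this patch pins down is NumPy's dtype inference: with no explicit
dtype argument, np.array() infers float64 for float literals (while ONNX TensorProto.FLOAT
initializers are float32) and the platform default integer type for integer literals (while the
ONNX Reshape operator requires an int64 shape tensor). The sketch below is a minimal, NumPy-only
illustration of that inference; it deliberately does not call FINN's ModelWrapper.set_initializer,
which appears only in the diff above.

    # Sketch: NumPy dtype inference vs. explicit dtypes (plain NumPy, no FINN involved).
    import numpy as np

    scalar_implicit = np.array([7.0])                    # inferred dtype: float64
    scalar_explicit = np.array([7.0], dtype=np.float32)  # matches ONNX TensorProto.FLOAT

    shape_implicit = np.array([1, -1])                   # platform default int (int32 on some Windows/NumPy combos)
    shape_explicit = np.array([1, -1], dtype=np.int64)   # what ONNX Reshape expects for its shape input

    print(scalar_implicit.dtype, scalar_explicit.dtype)  # float64 float32
    print(shape_implicit.dtype, shape_explicit.dtype)    # e.g. int64 int64 (int32 int64 on some Windows builds)

Pinning the dtype at array creation keeps the initializer element types identical across platforms
and NumPy versions, which is presumably the intent behind the refactor named in the commit subject.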