Skip to content
Snippets Groups Projects
Commit c2e51d31 authored by Yaman Umuroglu's avatar Yaman Umuroglu
Browse files

Merge branch 'feature/onnx_export_quant_avg_pool' of...

Merge branch 'feature/onnx_export_quant_avg_pool' of https://github.com/Xilinx/finn into feature/onnx_export_quant_avg_pool
parents e0f7e034 7102f289
No related branches found
No related tags found
No related merge requests found
......@@ -106,6 +106,14 @@ def get_finn_root():
)
def get_execution_error_thresh():
    """Return the max error that is allowed for rounding in FINN execution.

    The threshold can be overridden via the ERROR_THRESH environment
    variable; when it is unset, a default of 1e-2 is used. Raises
    ValueError if ERROR_THRESH is set to a non-numeric string.
    """
    # os.environ.get with a default replaces the try/except KeyError idiom.
    return float(os.environ.get("ERROR_THRESH", 1e-2))
def make_build_dir(prefix=""):
"""Creates a temporary folder with given prefix to be used as a build dir.
Use this function instead of tempfile.mkdtemp to ensure any generated files
......@@ -305,7 +313,7 @@ def sanitize_quant_values(model, node_tensors, execution_context, check_values=F
)
# check if rounded values are not too far from original values
max_error = max(np.abs(current_values - updated_values).flatten())
if max_error <= 1e-4:
if max_error <= get_execution_error_thresh():
if check_values is True:
# check again if values can now be represented with set finn datatype
# TODO: vectorize with numpy
......
......@@ -55,10 +55,9 @@ def test_brevitas_avg_pool_export(
prefix = "INT"
else:
prefix = "UINT"
dt_name = prefix + str(input_bit_width)
dt_name = prefix + str(input_bit_width // 2)
dtype = DataType[dt_name]
model = model.transform(InferShapes())
model.set_tensor_datatype(model.graph.input[0].name, dtype)
model = model.transform(InferDataTypes())
# execution with input tensor using integers and scale = 1
......@@ -91,6 +90,7 @@ def test_brevitas_avg_pool_export(
bo.export_finn_onnx(b_avgpool, ishape, export_onnx_path, input_t=input_quant_tensor)
model = ModelWrapper(export_onnx_path)
model = model.transform(InferShapes())
model = model.transform(InferDataTypes())
b_avgpool.eval()
expected = b_avgpool.forward(input_quant_tensor).tensor.detach().numpy()
# finn execution
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment