Commit 1b447c76 authored by Yaman Umuroglu

[Test] add test_convert_to_hls_layers_tfc_w1a2

parent e4b82b6a
@@ -27,8 +27,8 @@ export_onnx_path = "test_output_tfc.onnx"
 def test_convert_to_hls_layers_tfc_w1a1():
-    lfc = get_test_model_trained("TFC", 1, 1)
-    bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path)
+    tfc = get_test_model_trained("TFC", 1, 1)
+    bo.export_finn_onnx(tfc, (1, 1, 28, 28), export_onnx_path)
     model = ModelWrapper(export_onnx_path)
     model = model.transform(InferShapes())
     model = model.transform(FoldConstants())
@@ -91,7 +91,7 @@ def test_convert_to_hls_layers_tfc_w1a1():
     input_tensor = torch.from_numpy(nph.to_array(input_tensor)).float()
     assert input_tensor.shape == (1, 1, 28, 28)
     # do forward pass in PyTorch/Brevitas
-    expected = lfc.forward(input_tensor).detach().numpy()
+    expected = tfc.forward(input_tensor).detach().numpy()
     assert np.isclose(produced, expected, atol=1e-3).all()
@@ -129,38 +129,34 @@ def test_convert_to_hls_layers_tfc_w1a2():
    assert fc3.op_type == "StreamingFCLayer_Batch"
    assert model.get_tensor_shape(fc3.input[0]) == [1, 64]
    assert model.get_tensor_shape(fc3.input[1]) == [64, 10]
    os.remove(export_onnx_path)
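    # set the SIMD/PE folding attributes on each of the four StreamingFCLayer_Batch nodes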
    fc0w = getCustomOp(fc0)
    fc0w.set_nodeattr("SIMD", 784)
    fc0w.set_nodeattr("PE", 16)
    fc1w = getCustomOp(fc1)
    fc1w.set_nodeattr("SIMD", 16)
    fc1w.set_nodeattr("PE", 16)
    fc2w = getCustomOp(fc2)
    fc2w.set_nodeattr("SIMD", 16)
    fc2w.set_nodeattr("PE", 16)
    fc3w = getCustomOp(fc3)
    fc3w.set_nodeattr("SIMD", 16)
    fc3w.set_nodeattr("PE", 10)
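    # generate C++ simulation (npysim) code, compile it, and select it as the execution mode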
    model = model.transform(CodeGen_npysim())
    model = model.transform(Compile())
    model = model.transform(SetExecMode("npysim"))
    # model.save("tfc.onnx")
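    # load a single MNIST image from the bundled ONNX test data as the test input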
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    # run using FINN-based execution
    input_dict = {"global_in": nph.to_array(input_tensor)}
    output_dict = oxe.execute_onnx(model, input_dict)
    produced = output_dict[list(output_dict.keys())[0]]
    # run using PyTorch/Brevitas
    input_tensor = torch.from_numpy(nph.to_array(input_tensor)).float()
    assert input_tensor.shape == (1, 1, 28, 28)
    # do forward pass in PyTorch/Brevitas
    expected = tfc.forward(input_tensor).detach().numpy()
    output_dict = oxe.execute_onnx(model, input_dict, True)
    produced = output_dict[model.graph.output[0].name]
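    # golden reference: re-import the exported model and run it in its streamlined (pre-HLS) form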
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(Streamline())
    golden_output_dict = oxe.execute_onnx(model, input_dict, True)
    expected = golden_output_dict[model.graph.output[0].name]
    assert np.isclose(produced, expected, atol=1e-3).all()
    os.remove(export_onnx_path)