Commit 5c5dc983 authored by auphelia

Merge branch 'dev' into feature/exec_hlslib_nodes

parents bfeffc65 bfb87861
@@ -6,11 +6,15 @@ from finn.core.datatype import DataType
 from finn.core.utils import pack_innermost_dim_as_hex_string


-def numpy_to_hls_code(ndarray, dtype, hls_var_name, pack_innermost_dim=True):
+def numpy_to_hls_code(
+    ndarray, dtype, hls_var_name, pack_innermost_dim=True, no_decl=False
+):
     """Return C++ code representation of a numpy ndarray with FINN DataType
     dtype, using hls_var_name as the resulting C++ variable name. If
     pack_innermost_dim is specified, the innermost dimension of the ndarray
-    will be packed into a hex string using array2hexstring.
+    will be packed into a hex string using array2hexstring. If no_decl is
+    set to True, no variable name and type will be generated as part of the
+    emitted string.
     """
     hls_dtype = dtype.get_hls_datatype_str()
     if type(ndarray) != np.ndarray or ndarray.dtype != np.float32:
@@ -47,5 +51,8 @@ def numpy_to_hls_code(ndarray, dtype, hls_var_name, pack_innermost_dim=True):
     strarr = np.array2string(ndarray, separator=", ", formatter={"all": elem2str})
     np.set_printoptions(**orig_printops)
     strarr = strarr.replace("[", "{").replace("]", "}")
-    ret = ret + " = \n" + strarr + ";"
+    if no_decl:
+        ret = strarr + ";"
+    else:
+        ret = ret + " = \n" + strarr + ";"
     return ret
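
A quick orientation sketch for the new no_decl flag follows. It is not part of the commit, and the import path for numpy_to_hls_code is an assumption, since the file being modified is not named in this view.

import numpy as np
from finn.core.datatype import DataType
# assumed module path for the function modified above; adjust to your checkout
from finn.backend.fpgadataflow.utils import numpy_to_hls_code

# 2x2x2 float32 array holding 2-bit values; with pack_innermost_dim=True the
# two innermost UINT2 elements are packed into one 4-bit ap_uint literal.
B = np.asarray([[[3, 3], [3, 3]], [[1, 3], [3, 1]]], dtype=np.float32)

# Default (no_decl=False): returns a full C++ declaration with initializer,
# roughly "<hls type> test[2][2] = { ... };"
decl_and_init = numpy_to_hls_code(B, DataType.UINT2, "test")

# no_decl=True: returns only the brace initializer plus trailing semicolon,
# for cases where the variable has already been declared in generated code.
init_only = numpy_to_hls_code(B, DataType.UINT2, "test", True, True)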
File added
import os
import pkg_resources as pk
import brevitas.onnx as bo
import numpy as np
import torch
from models.CNV import CNV

import finn.core.onnx_exec as oxe
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.fold_constants import FoldConstants
from finn.transformation.infer_shapes import InferShapes

export_onnx_path = "test_output_cnv.onnx"
# TODO get from config instead, hardcoded to Docker path for now
trained_cnv_checkpoint = (
    "/workspace/brevitas_cnv_lfc/pretrained_models/CNV_1W1A/checkpoints/best.tar"
)


def test_brevitas_cnv_w1a1_export():
    cnv = CNV(weight_bit_width=1, act_bit_width=1, in_bit_width=1, in_ch=3).eval()
    bo.export_finn_onnx(cnv, (1, 3, 32, 32), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    assert model.graph.node[2].op_type == "Sign"
    assert model.graph.node[3].op_type == "Conv"
    conv0_wname = model.graph.node[3].input[1]
    assert list(model.get_initializer(conv0_wname).shape) == [64, 3, 3, 3]
    assert model.graph.node[4].op_type == "Mul"
    os.remove(export_onnx_path)


def test_brevitas_cnv_w1a1_export_exec():
    cnv = CNV(weight_bit_width=1, act_bit_width=1, in_bit_width=1, in_ch=3).eval()
    checkpoint = torch.load(trained_cnv_checkpoint, map_location="cpu")
    cnv.load_state_dict(checkpoint["state_dict"])
    bo.export_finn_onnx(cnv, (1, 3, 32, 32), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model.save(export_onnx_path)
    fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
    input_tensor = np.load(fn)["arr_0"].astype(np.float32)
    assert input_tensor.shape == (1, 3, 32, 32)
    # run using FINN-based execution
    input_dict = {"0": input_tensor}
    output_dict = oxe.execute_onnx(model, input_dict)
    produced = output_dict[list(output_dict.keys())[0]]
    # do forward pass in PyTorch/Brevitas
    input_tensor = torch.from_numpy(input_tensor).float()
    expected = cnv.forward(input_tensor).detach().numpy()
    assert np.isclose(produced, expected, atol=1e-3).all()
    os.remove(export_onnx_path)


def test_brevitas_trained_cnv_w1a1_pytorch():
    # load pretrained weights into CNV-w1a1
    cnv = CNV(weight_bit_width=1, act_bit_width=1, in_bit_width=1, in_ch=3).eval()
    checkpoint = torch.load(trained_cnv_checkpoint, map_location="cpu")
    cnv.load_state_dict(checkpoint["state_dict"])
    fn = pk.resource_filename("finn", "data/cifar10/cifar10-test-data-class3.npz")
    input_tensor = np.load(fn)["arr_0"]
    input_tensor = torch.from_numpy(input_tensor).float()
    assert input_tensor.shape == (1, 3, 32, 32)
    # do forward pass in PyTorch/Brevitas
    produced = cnv.forward(input_tensor).detach().numpy()
    expected = np.asarray(
        [
            [
                3.7939777,
                -2.3108773,
                0.06898145,
                0.55185133,
                0.37939775,
                -1.9659703,
                -0.3104164,
                -2.828238,
                2.6902752,
                0.48286998,
            ]
        ],
        dtype=np.float32,
    )
    assert np.isclose(produced, expected, atol=1e-3).all()
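
The second test above doubles as a recipe for validating any Brevitas export against FINN's ONNX execution. Below is a minimal sketch of that flow as a standalone helper; the helper name and temporary ONNX path are hypothetical, while every call mirrors the test.

import brevitas.onnx as bo
import torch

import finn.core.onnx_exec as oxe
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.fold_constants import FoldConstants
from finn.transformation.infer_shapes import InferShapes


def run_finn_and_brevitas(
    brevitas_model, input_tensor, ishape, onnx_path="tmp_export.onnx"
):
    """Export brevitas_model to FINN-ONNX, execute it with FINN, run the
    PyTorch/Brevitas forward pass, and return both outputs as numpy arrays.
    Hypothetical helper that mirrors test_brevitas_cnv_w1a1_export_exec."""
    bo.export_finn_onnx(brevitas_model, ishape, onnx_path)
    model = ModelWrapper(onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    # FINN-based execution; the exported graph's input tensor is named "0"
    output_dict = oxe.execute_onnx(model, {"0": input_tensor})
    finn_out = output_dict[list(output_dict.keys())[0]]
    # reference forward pass in PyTorch/Brevitas
    torch_out = brevitas_model.forward(torch.from_numpy(input_tensor).float())
    return finn_out, torch_out.detach().numpy()

With the CNV-w1a1 model and the CIFAR-10 sample used above, the two outputs are expected to agree to within atol=1e-3, which is exactly what the test asserts.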
@@ -41,3 +41,7 @@ def test_numpy_to_hls_code():
     {{ap_uint<4>("f", 16), ap_uint<4>("f", 16)},
      {ap_uint<4>("7", 16), ap_uint<4>("d", 16)}};"""
     assert remove_all_whitespace(ret) == remove_all_whitespace(eB)
+    ret = numpy_to_hls_code(B, DataType.UINT2, "test", True, True)
+    eB = """{{ap_uint<4>("f", 16), ap_uint<4>("f", 16)},
+    {ap_uint<4>("7", 16), ap_uint<4>("d", 16)}};"""
+    assert remove_all_whitespace(ret) == remove_all_whitespace(eB)