diff --git a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py
index 073d6620ac3c2a4f62ac544e74ecf21b6e36d58f..ad91013a2e796a60df30bd42595d41e46c1c2ee6 100644
--- a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py
+++ b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py
@@ -59,7 +59,7 @@ def get_smallest_possible(vals):
     for k in DataType.__members__:
         dt = DataType[k]
 
-        if dt in [DataType.BIPOLAR, DataType.TERNARY, DataType.FLOAT32]:
+        if dt in [DataType["BIPOLAR"], DataType["TERNARY"], DataType["FLOAT32"]]:
             # not currently supported
             continue
 
@@ -75,9 +75,9 @@ def get_smallest_possible(vals):
     )
 
     if (0 <= vals).all():
-        return DataType.UINT64
+        return DataType["UINT64"]
    else:
-        return DataType.INT64
+        return DataType["INT64"]
 
 
 class ChannelwiseOp_Batch(HLSCustomOp):
@@ -347,8 +347,8 @@ class ChannelwiseOp_Batch(HLSCustomOp):
         )
         # get input data type
         export_idt = self.get_input_datatype()
-        if self.get_input_datatype() == DataType.BIPOLAR:
-            export_idt = DataType.BINARY
+        if self.get_input_datatype() == DataType["BIPOLAR"]:
+            export_idt = DataType["BINARY"]
         idt_hls = export_idt.get_hls_datatype_str()
 
         # write parameters into params.h
@@ -356,8 +356,8 @@ class ChannelwiseOp_Batch(HLSCustomOp):
         pdt_hls = pdt.get_hls_datatype_str()
         # use binary to export bipolar activations
         export_odt = self.get_output_datatype()
-        if self.get_output_datatype() == DataType.BIPOLAR:
-            export_odt = DataType.BINARY
+        if self.get_output_datatype() == DataType["BIPOLAR"]:
+            export_odt = DataType["BINARY"]
         odt_hls = export_odt.get_hls_datatype_str()
         # get desired function
         func = self.get_nodeattr("Func")
@@ -438,7 +438,7 @@ class ChannelwiseOp_Batch(HLSCustomOp):
            # load output npy file
             super().npy_to_dynamic_output(context)
             # reinterpret binary output as bipolar where needed
-            if self.get_output_datatype() == DataType.BIPOLAR:
+            if self.get_output_datatype() == DataType["BIPOLAR"]:
                 out = context[node.output[0]]
                 out = 2 * out - 1
                 context[node.output[0]] = out
@@ -546,9 +546,9 @@ class ChannelwiseOp_Batch(HLSCustomOp):
     def dataoutstrm(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
         dtype = self.get_output_datatype()
-        if dtype == DataType.BIPOLAR:
+        if dtype == DataType["BIPOLAR"]:
             # use binary for bipolar storage
-            dtype = DataType.BINARY
+            dtype = DataType["BINARY"]
         elem_bits = dtype.bitwidth()
         packed_bits = self.get_outstream_width()
         packed_hls_type = "ap_uint<%d>" % packed_bits
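
Note: the hunks above show the one mechanical substitution that repeats through every file in this diff: FINN's DataType is now looked up by name with subscript syntax instead of as an enum attribute. A minimal sketch of the two access styles (import path assumed; both forms resolve to the same datatype object):

    from finn.core.datatype import DataType  # import path assumed

    # old style, removed by this diff:
    #     dt = DataType.INT4
    # new style, used throughout this diff:
    dt = DataType["INT4"]
    assert dt.bitwidth() == 4
    assert (dt.min(), dt.max()) == (-8, 7)

    # the subscript form also composes with names held in plain strings,
    # e.g. DataType[self.get_nodeattr("dataType")] in streamingfifo.py below
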
diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py
index 9ec7bc662d95b1c94ca17bc3c9a1a7b6199cc18a..19732e44398665cfd9b97f9a1abcec56372e2523 100644
--- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py
+++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py
@@ -319,10 +319,10 @@ class ConvolutionInputGenerator(HLSCustomOp):
             inp.shape == exp_ishape
         ), """Input shape doesn't
         match expected shape (1, ifm_dim_h, ifm_dim_w, ifm_ch)."""
-        if self.get_input_datatype() == DataType.BIPOLAR:
+        if self.get_input_datatype() == DataType["BIPOLAR"]:
             # store bipolar activations as binary
             inp = (inp + 1) / 2
-            export_idt = DataType.BINARY
+            export_idt = DataType["BINARY"]
         else:
             export_idt = self.get_input_datatype()
         # reshape input into folded form
@@ -370,7 +370,7 @@ class ConvolutionInputGenerator(HLSCustomOp):
                 )
             )
         # binary -> bipolar if needed
-        if self.get_output_datatype() == DataType.BIPOLAR:
+        if self.get_output_datatype() == DataType["BIPOLAR"]:
             out = context[node.output[0]]
             out = 2 * out - 1
             context[node.output[0]] = out
@@ -404,9 +404,9 @@ class ConvolutionInputGenerator(HLSCustomOp):
     def read_npy_data(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
         dtype = self.get_input_datatype()
-        if dtype == DataType.BIPOLAR:
+        if dtype == DataType["BIPOLAR"]:
             # use binary for bipolar storage
-            dtype = DataType.BINARY
+            dtype = DataType["BINARY"]
         elem_bits = dtype.bitwidth()
         packed_bits = self.get_instream_width()
         packed_hls_type = "ap_uint<%d>" % packed_bits
@@ -465,9 +465,9 @@ class ConvolutionInputGenerator(HLSCustomOp):
     def dataoutstrm(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
         dtype = self.get_output_datatype()
-        if dtype == DataType.BIPOLAR:
+        if dtype == DataType["BIPOLAR"]:
             # use binary for bipolar storage
-            dtype = DataType.BINARY
+            dtype = DataType["BINARY"]
         elem_bits = dtype.bitwidth()
         packed_bits = self.get_outstream_width()
         packed_hls_type = "ap_uint<%d>" % packed_bits
diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py
index b428210acfd70186f68e7f1b35cfcd945d0a77d9..1f9fcade0f974df2b2f21171d42e63f4af5e7eac 100644
--- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py
+++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py
@@ -345,10 +345,10 @@ class ConvolutionInputGenerator1D(HLSCustomOp):
             inp.shape == exp_ishape
         ), """Input shape doesn't
         match expected shape (1, ifm_dim, ifm_dim, ifm_ch)."""
-        if self.get_input_datatype() == DataType.BIPOLAR:
+        if self.get_input_datatype() == DataType["BIPOLAR"]:
             # store bipolar activations as binary
             inp = (inp + 1) / 2
-            export_idt = DataType.BINARY
+            export_idt = DataType["BINARY"]
         else:
             export_idt = self.get_input_datatype()
         # reshape input into folded form
@@ -396,7 +396,7 @@ class ConvolutionInputGenerator1D(HLSCustomOp):
                 )
             )
         # binary -> bipolar if needed
-        if self.get_output_datatype() == DataType.BIPOLAR:
+        if self.get_output_datatype() == DataType["BIPOLAR"]:
             out = context[node.output[0]]
             out = 2 * out - 1
             context[node.output[0]] = out
@@ -502,9 +502,9 @@ class ConvolutionInputGenerator1D(HLSCustomOp):
     def read_npy_data(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
         dtype = self.get_input_datatype()
-        if dtype == DataType.BIPOLAR:
+        if dtype == DataType["BIPOLAR"]:
             # use binary for bipolar storage
-            dtype = DataType.BINARY
+            dtype = DataType["BINARY"]
         elem_bits = dtype.bitwidth()
         packed_bits = self.get_instream_width()
         packed_hls_type = "ap_uint<%d>" % packed_bits
@@ -572,9 +572,9 @@ class ConvolutionInputGenerator1D(HLSCustomOp):
     def dataoutstrm(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
         dtype = self.get_output_datatype()
-        if dtype == DataType.BIPOLAR:
+        if dtype == DataType["BIPOLAR"]:
             # use binary for bipolar storage
-            dtype = DataType.BINARY
+            dtype = DataType["BINARY"]
         elem_bits = dtype.bitwidth()
         packed_bits = self.get_outstream_width()
         packed_hls_type = "ap_uint<%d>" % packed_bits
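
Note: the execute_node hunks above all use the same encoding trick: bipolar {-1, +1} activations are stored as binary {0, 1} for npy export, then decoded after simulation. A self-contained sketch of the round trip:

    import numpy as np

    bipolar = np.array([-1.0, 1.0, 1.0, -1.0])
    # store bipolar activations as binary (matches "inp = (inp + 1) / 2")
    binary = (bipolar + 1) / 2
    # reinterpret binary output as bipolar (matches "out = 2 * out - 1")
    restored = 2 * binary - 1
    assert (restored == bipolar).all()
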
DataType["BIPOLAR"]: # use binary for bipolar storage - dtype = DataType.BINARY + dtype = DataType["BINARY"] elem_bits = dtype.bitwidth() packed_bits = self.get_instream_width() packed_hls_type = "ap_uint<%d>" % packed_bits @@ -197,9 +197,9 @@ class DownSampler(HLSCustomOp): def dataoutstrm(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") dtype = self.get_output_datatype() - if dtype == DataType.BIPOLAR: + if dtype == DataType["BIPOLAR"]: # use binary for bipolar storage - dtype = DataType.BINARY + dtype = DataType["BINARY"] elem_bits = dtype.bitwidth() packed_bits = self.get_outstream_width() packed_hls_type = "ap_uint<%d>" % packed_bits diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py index ca0b2f12ab6e84bab0b87e5a34917619c2ba289d..03d3436346aa716e9ea9e49027fdbb17bee74311 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py @@ -210,9 +210,9 @@ class FMPadding_Batch(HLSCustomOp): def read_npy_data(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") dtype = self.get_input_datatype() - if dtype == DataType.BIPOLAR: + if dtype == DataType["BIPOLAR"]: # use binary for bipolar storage - dtype = DataType.BINARY + dtype = DataType["BINARY"] elem_bits = dtype.bitwidth() packed_bits = self.get_instream_width() packed_hls_type = "ap_uint<%d>" % packed_bits @@ -261,9 +261,9 @@ class FMPadding_Batch(HLSCustomOp): def dataoutstrm(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") dtype = self.get_output_datatype() - if dtype == DataType.BIPOLAR: + if dtype == DataType["BIPOLAR"]: # use binary for bipolar storage - dtype = DataType.BINARY + dtype = DataType["BINARY"] elem_bits = dtype.bitwidth() packed_bits = self.get_outstream_width() packed_hls_type = "ap_uint<%d>" % packed_bits diff --git a/src/finn/custom_op/fpgadataflow/pool_batch.py b/src/finn/custom_op/fpgadataflow/pool_batch.py index cef964acd5192ad254e1086dacead590b51e7ec1..f4638e6de3616c568da295f091a1ad39262e6dd8 100644 --- a/src/finn/custom_op/fpgadataflow/pool_batch.py +++ b/src/finn/custom_op/fpgadataflow/pool_batch.py @@ -235,9 +235,9 @@ class Pool_Batch(HLSCustomOp): def read_npy_data(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") dtype = self.get_input_datatype() - if dtype == DataType.BIPOLAR: + if dtype == DataType["BIPOLAR"]: # use binary for bipolar storage - dtype = DataType.BINARY + dtype = DataType["BINARY"] elem_bits = dtype.bitwidth() packed_bits = self.get_instream_width() packed_hls_type = "ap_uint<%d>" % packed_bits @@ -296,9 +296,9 @@ class Pool_Batch(HLSCustomOp): def dataoutstrm(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") dtype = self.get_output_datatype() - if dtype == DataType.BIPOLAR: + if dtype == DataType["BIPOLAR"]: # use binary for bipolar storage - dtype = DataType.BINARY + dtype = DataType["BINARY"] elem_bits = dtype.bitwidth() packed_bits = self.get_outstream_width() packed_hls_type = "ap_uint<%d>" % packed_bits diff --git a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py index 67e3cd36549d34cab55a931cc040fee5d14ca06f..11809b9bc267d00c8cd630163cc969187efc7417 100644 --- a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py @@ -228,9 +228,9 @@ class StreamingDataWidthConverter_Batch(HLSCustomOp): def read_npy_data(self): 
diff --git a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
index 67e3cd36549d34cab55a931cc040fee5d14ca06f..11809b9bc267d00c8cd630163cc969187efc7417 100644
--- a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
@@ -228,9 +228,9 @@ class StreamingDataWidthConverter_Batch(HLSCustomOp):
     def read_npy_data(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
         dtype = self.get_input_datatype()
-        if dtype == DataType.BIPOLAR:
+        if dtype == DataType["BIPOLAR"]:
             # use binary for bipolar storage
-            dtype = DataType.BINARY
+            dtype = DataType["BINARY"]
         elem_bits = dtype.bitwidth()
         packed_bits = self.get_instream_width()
         packed_hls_type = "ap_uint<%d>" % packed_bits
@@ -262,9 +262,9 @@ class StreamingDataWidthConverter_Batch(HLSCustomOp):
     def dataoutstrm(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
         dtype = self.get_output_datatype()
-        if dtype == DataType.BIPOLAR:
+        if dtype == DataType["BIPOLAR"]:
             # use binary for bipolar storage
-            dtype = DataType.BINARY
+            dtype = DataType["BINARY"]
         elem_bits = dtype.bitwidth()
         packed_bits = self.get_outstream_width()
         packed_hls_type = "ap_uint<%d>" % packed_bits
@@ -331,10 +331,10 @@ class StreamingDataWidthConverter_Batch(HLSCustomOp):
             exp_shape
         ), "Input shape does not match expected shape."
 
-        if self.get_input_datatype() == DataType.BIPOLAR:
+        if self.get_input_datatype() == DataType["BIPOLAR"]:
             # store bipolar activations as binary
             inp = (inp + 1) / 2
-            export_idt = DataType.BINARY
+            export_idt = DataType["BINARY"]
         else:
             export_idt = self.get_input_datatype()
         # reshape input into folded shape
@@ -377,7 +377,7 @@ class StreamingDataWidthConverter_Batch(HLSCustomOp):
                 )
             )
         # binary -> bipolar if needed
-        if self.get_output_datatype() == DataType.BIPOLAR:
+        if self.get_output_datatype() == DataType["BIPOLAR"]:
             out = context[node.output[0]]
             out = 2 * out - 1
             context[node.output[0]] = out
diff --git a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
index 96594d441345332bbe5873570156e07cacbb385d..968c9a6bad0364813c4e70829628da3d07152fbd 100644
--- a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
@@ -509,15 +509,15 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         ret = dict()
         inp_hls_str = self.get_input_datatype().get_hls_datatype_str()
         out_hls_str = self.get_output_datatype().get_hls_datatype_str()
-        inp_is_binary = self.get_input_datatype() == DataType.BINARY
-        # out_is_binary = self.get_output_datatype() == DataType.BINARY
-        wt_is_binary = self.get_weight_datatype() == DataType.BINARY
+        inp_is_binary = self.get_input_datatype() == DataType["BINARY"]
+        # out_is_binary = self.get_output_datatype() == DataType["BINARY"]
+        wt_is_binary = self.get_weight_datatype() == DataType["BINARY"]
         bin_xnor_mode = self.get_nodeattr("binaryXnorMode") == 1
         if (inp_is_binary or wt_is_binary) and (not bin_xnor_mode):
             raise Exception("True binary (non-bipolar) inputs not yet supported")
-        inp_is_bipolar = self.get_input_datatype() == DataType.BIPOLAR
-        # out_is_bipolar = self.get_output_datatype() == DataType.BIPOLAR
-        wt_is_bipolar = self.get_weight_datatype() == DataType.BIPOLAR
+        inp_is_bipolar = self.get_input_datatype() == DataType["BIPOLAR"]
+        # out_is_bipolar = self.get_output_datatype() == DataType["BIPOLAR"]
+        wt_is_bipolar = self.get_weight_datatype() == DataType["BIPOLAR"]
         # reinterpret inp/wt as bipolar if bin_xnor_mode is iset
         inp_is_bipolar = inp_is_bipolar or (inp_is_binary and bin_xnor_mode)
         wt_is_bipolar = wt_is_bipolar or (wt_is_binary and bin_xnor_mode)
@@ -567,7 +567,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         # ONNX uses (in_features, out_features) and matmul(x, W)
         # finn-hlslib uses (out_features, in_features) and matmul(W, x)
         ret = orig_weight_matrix.T
-        if self.get_weight_datatype() == DataType.BIPOLAR:
+        if self.get_weight_datatype() == DataType["BIPOLAR"]:
             # convert bipolar to binary
             ret = (ret + 1) / 2
         # interleave rows between PEs and reshape
@@ -658,11 +658,11 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         ), """Threshold matrix dimension is
         not as expected (2)."""
         n_thres_steps = orig_thres_matrix.shape[1]
-        inp_is_bipolar = self.get_input_datatype() == DataType.BIPOLAR
-        wt_is_bipolar = self.get_weight_datatype() == DataType.BIPOLAR
+        inp_is_bipolar = self.get_input_datatype() == DataType["BIPOLAR"]
+        wt_is_bipolar = self.get_weight_datatype() == DataType["BIPOLAR"]
         # reinterpret inp/wt as bipolar if bin_xnor_mode is iset
-        inp_is_binary = self.get_input_datatype() == DataType.BINARY
-        wt_is_binary = self.get_weight_datatype() == DataType.BINARY
+        inp_is_binary = self.get_input_datatype() == DataType["BINARY"]
+        wt_is_binary = self.get_weight_datatype() == DataType["BINARY"]
         bin_xnor_mode = self.get_nodeattr("binaryXnorMode") == 1
         inp_is_bipolar = inp_is_bipolar or (inp_is_binary and bin_xnor_mode)
         wt_is_bipolar = wt_is_bipolar or (wt_is_binary and bin_xnor_mode)
@@ -717,8 +717,8 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         export_wdt = self.get_weight_datatype()
         # we have converted bipolar weights to binary for export,
         # so use it as such for weight generation
-        if self.get_weight_datatype() == DataType.BIPOLAR:
-            export_wdt = DataType.BINARY
+        if self.get_weight_datatype() == DataType["BIPOLAR"]:
+            export_wdt = DataType["BINARY"]
         if weight_file_mode == "hls_header":
             weight_hls_code = numpy_to_hls_code(
                 weight_tensor, export_wdt, "weights", True, True
@@ -847,11 +847,11 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         if thresholds is not None:
             threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds)
             # use UINT32 threshold export for bipolar times bipolar
-            inp_is_bipolar = self.get_input_datatype() == DataType.BIPOLAR
-            wt_is_bipolar = self.get_weight_datatype() == DataType.BIPOLAR
+            inp_is_bipolar = self.get_input_datatype() == DataType["BIPOLAR"]
+            wt_is_bipolar = self.get_weight_datatype() == DataType["BIPOLAR"]
             # reinterpret inp/wt as bipolar if bin_xnor_mode is iset
-            inp_is_binary = self.get_input_datatype() == DataType.BINARY
-            wt_is_binary = self.get_weight_datatype() == DataType.BINARY
+            inp_is_binary = self.get_input_datatype() == DataType["BINARY"]
+            wt_is_binary = self.get_weight_datatype() == DataType["BINARY"]
             bin_xnor_mode = self.get_nodeattr("binaryXnorMode") == 1
             inp_is_bipolar = inp_is_bipolar or (inp_is_binary and bin_xnor_mode)
             wt_is_bipolar = wt_is_bipolar or (wt_is_binary and bin_xnor_mode)
@@ -872,8 +872,8 @@ class StreamingFCLayer_Batch(HLSCustomOp):
             tdt_hls = tdt.get_hls_datatype_str()
             # use binary to export bipolar activations
             export_odt = self.get_output_datatype()
-            if self.get_output_datatype() == DataType.BIPOLAR:
-                export_odt = DataType.BINARY
+            if self.get_output_datatype() == DataType["BIPOLAR"]:
+                export_odt = DataType["BINARY"]
             odt_hls = export_odt.get_hls_datatype_str()
             f_thresh.write(
                 "static ThresholdsActivation<{},{},{},{},{},{},{}> threshs \
@@ -921,10 +921,10 @@ class StreamingFCLayer_Batch(HLSCustomOp):
             not float32 as expected."""
             expected_inp_shape = self.get_folded_input_shape()
             reshaped_input = context[inputs].reshape(expected_inp_shape)
-            if self.get_input_datatype() == DataType.BIPOLAR:
+            if self.get_input_datatype() == DataType["BIPOLAR"]:
                 # store bipolar activations as binary
                 reshaped_input = (reshaped_input + 1) / 2
-                export_idt = DataType.BINARY
+                export_idt = DataType["BINARY"]
             else:
                export_idt = self.get_input_datatype()
             # make copy before saving the array
@@ -943,7 +943,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
             # load output npy file
             super().npy_to_dynamic_output(context)
             # reinterpret binary output as bipolar where needed
-            if self.get_output_datatype() == DataType.BIPOLAR:
+            if self.get_output_datatype() == DataType["BIPOLAR"]:
                 out = context[node.output[0]]
                 out = 2 * out - 1
                 context[node.output[0]] = out
@@ -966,8 +966,8 @@ class StreamingFCLayer_Batch(HLSCustomOp):
             export_wdt = self.get_weight_datatype()
             # we have converted bipolar weights to binary for export,
             # so use it as such for weight generation
-            if self.get_weight_datatype() == DataType.BIPOLAR:
-                export_wdt = DataType.BINARY
+            if self.get_weight_datatype() == DataType["BIPOLAR"]:
+                export_wdt = DataType["BINARY"]
             wei = npy_to_rtlsim_input(
                 "{}/weights.npy".format(code_gen_dir), export_wdt, wnbits
             )
@@ -1058,9 +1058,9 @@ class StreamingFCLayer_Batch(HLSCustomOp):
     def read_npy_data(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
         dtype = self.get_input_datatype()
-        if dtype == DataType.BIPOLAR:
+        if dtype == DataType["BIPOLAR"]:
             # use binary for bipolar storage
-            dtype = DataType.BINARY
+            dtype = DataType["BINARY"]
         elem_bits = dtype.bitwidth()
         packed_bits = self.get_instream_width()
         packed_hls_type = "ap_uint<%d>" % packed_bits
@@ -1134,8 +1134,8 @@ class StreamingFCLayer_Batch(HLSCustomOp):
             ]
         elif mem_mode == "decoupled" or mem_mode == "external":
             wdt = self.get_weight_datatype()
-            if wdt == DataType.BIPOLAR:
-                export_wdt = DataType.BINARY
+            if wdt == DataType["BIPOLAR"]:
+                export_wdt = DataType["BINARY"]
             else:
                 export_wdt = wdt
             wdtype_hls_str = export_wdt.get_hls_datatype_str()
@@ -1160,9 +1160,9 @@ class StreamingFCLayer_Batch(HLSCustomOp):
     def dataoutstrm(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
         dtype = self.get_output_datatype()
-        if dtype == DataType.BIPOLAR:
+        if dtype == DataType["BIPOLAR"]:
             # use binary for bipolar storage
-            dtype = DataType.BINARY
+            dtype = DataType["BINARY"]
         elem_bits = dtype.bitwidth()
         packed_bits = self.get_outstream_width()
         packed_hls_type = "ap_uint<%d>" % packed_bits
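
Note: StreamingFCLayer_Batch's binaryXnorMode, referenced in several hunks above, rests on an arithmetic identity: a dot product of bipolar vectors equals twice the number of matching bits minus the vector length once both operands are re-encoded as binary. A sketch of that identity (not FINN's actual xnorpopcountmatmul implementation):

    import numpy as np

    def xnorpopcount_ref(x_bin, w_bin):
        # popcount of XNOR = number of positions where both bits agree
        n = x_bin.shape[-1]
        matches = np.matmul(x_bin, w_bin) + np.matmul(1 - x_bin, 1 - w_bin)
        return 2 * matches - n

    x = np.array([[-1.0, 1.0, 1.0]])
    W = np.array([[1.0], [1.0], [-1.0]])
    assert (xnorpopcount_ref((x + 1) / 2, (W + 1) / 2) == np.matmul(x, W)).all()
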
diff --git a/src/finn/custom_op/fpgadataflow/streamingfifo.py b/src/finn/custom_op/fpgadataflow/streamingfifo.py
index 9653d698f54e2b97c66bff62c0b3c11057b36aad..c8ae83cc90e1199831da5286d5051fdb969e825a 100644
--- a/src/finn/custom_op/fpgadataflow/streamingfifo.py
+++ b/src/finn/custom_op/fpgadataflow/streamingfifo.py
@@ -261,10 +261,10 @@ class StreamingFIFO(HLSCustomOp):
         not float32 as expected."""
         expected_inp_shape = self.get_folded_input_shape()
         reshaped_input = inp.reshape(expected_inp_shape)
-        if DataType[self.get_nodeattr("dataType")] == DataType.BIPOLAR:
+        if DataType[self.get_nodeattr("dataType")] == DataType["BIPOLAR"]:
             # store bipolar activations as binary
             reshaped_input = (reshaped_input + 1) / 2
-            export_idt = DataType.BINARY
+            export_idt = DataType["BINARY"]
         else:
             export_idt = DataType[self.get_nodeattr("dataType")]
         # make copy before saving the array
diff --git a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
index 19a42fe2d6b53879d401ec8bd462ddd59623dc1e..87ecde8f9c7c50b7a22213db2b856d7439050421 100644
--- a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
@@ -205,9 +205,9 @@ class StreamingMaxPool_Batch(HLSCustomOp):
     def read_npy_data(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
         dtype = self.get_input_datatype()
-        if dtype == DataType.BIPOLAR:
+        if dtype == DataType["BIPOLAR"]:
             # use binary for bipolar storage
-            dtype = DataType.BINARY
+            dtype = DataType["BINARY"]
         elem_bits = dtype.bitwidth()
         packed_bits = self.get_instream_width()
         packed_hls_type = "ap_uint<%d>" % packed_bits
@@ -255,9 +255,9 @@ class StreamingMaxPool_Batch(HLSCustomOp):
     def dataoutstrm(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
         dtype = self.get_output_datatype()
-        if dtype == DataType.BIPOLAR:
+        if dtype == DataType["BIPOLAR"]:
             # use binary for bipolar storage
-            dtype = DataType.BINARY
+            dtype = DataType["BINARY"]
         elem_bits = dtype.bitwidth()
         packed_bits = self.get_outstream_width()
         packed_hls_type = "ap_uint<%d>" % packed_bits
@@ -323,10 +323,10 @@ class StreamingMaxPool_Batch(HLSCustomOp):
             inp.shape == exp_ishape
         ), """Input shape doesn't
         match expected shape (1, ifm_dim, ifm_dim, ifm_ch)."""
-        if self.get_input_datatype() == DataType.BIPOLAR:
+        if self.get_input_datatype() == DataType["BIPOLAR"]:
             # store bipolar activations as binary
             inp = (inp + 1) / 2
-            export_idt = DataType.BINARY
+            export_idt = DataType["BINARY"]
         else:
             export_idt = self.get_input_datatype()
         # no reshaping for input since assuming no folding on input
@@ -373,7 +373,7 @@ class StreamingMaxPool_Batch(HLSCustomOp):
                 )
             )
         # binary -> bipolar if needed
-        if self.get_output_datatype() == DataType.BIPOLAR:
+        if self.get_output_datatype() == DataType["BIPOLAR"]:
             out = context[node.output[0]]
             out = 2 * out - 1
             context[node.output[0]] = out
diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py
index 7fb7634dc22e1d00569e7bb755bf120d6de4f808..81ce7fe8c5730c774f80d2b1640d5241e02b0b85 100644
--- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py
+++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py
@@ -390,8 +390,8 @@ class Thresholding_Batch(HLSCustomOp):
         tdt_hls = tdt.get_hls_datatype_str()
         # use binary to export bipolar activations
         export_odt = self.get_output_datatype()
-        if self.get_output_datatype() == DataType.BIPOLAR:
-            export_odt = DataType.BINARY
+        if self.get_output_datatype() == DataType["BIPOLAR"]:
+            export_odt = DataType["BINARY"]
         odt_hls = export_odt.get_hls_datatype_str()
         f_thresh.write(
             "static ThresholdsActivation<{},{},{},{},{},{},{}> threshs \
@@ -515,10 +515,10 @@ class Thresholding_Batch(HLSCustomOp):
             not float32 as expected."""
             expected_inp_shape = self.get_folded_input_shape()
             reshaped_input = context[inputs].reshape(expected_inp_shape)
-            if self.get_input_datatype() == DataType.BIPOLAR:
+            if self.get_input_datatype() == DataType["BIPOLAR"]:
                 # store bipolar activations as binary
                 reshaped_input = (reshaped_input + 1) / 2
-                export_idt = DataType.BINARY
+                export_idt = DataType["BINARY"]
             else:
                 export_idt = self.get_input_datatype()
             # make copy before saving the array
@@ -537,7 +537,7 @@ class Thresholding_Batch(HLSCustomOp):
         # load output npy file
         super().npy_to_dynamic_output(context)
         # reinterpret binary output as bipolar where needed
-        if self.get_output_datatype() == DataType.BIPOLAR:
+        if self.get_output_datatype() == DataType["BIPOLAR"]:
             out = context[node.output[0]]
             out = 2 * out - 1
             context[node.output[0]] = out
@@ -711,9 +711,9 @@ class Thresholding_Batch(HLSCustomOp):
     def dataoutstrm(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
         dtype = self.get_output_datatype()
-        if dtype == DataType.BIPOLAR:
+        if dtype == DataType["BIPOLAR"]:
             # use binary for bipolar storage
-            dtype = DataType.BINARY
DataType["BINARY"] elem_bits = dtype.bitwidth() packed_bits = self.get_outstream_width() packed_hls_type = "ap_uint<%d>" % packed_bits diff --git a/src/finn/custom_op/fpgadataflow/upsampler.py b/src/finn/custom_op/fpgadataflow/upsampler.py index e8aa09b1c0754b68e37d01551afe90811f22e7cd..8331610dc11bb440e6bb56924bbf8efeed2653c2 100644 --- a/src/finn/custom_op/fpgadataflow/upsampler.py +++ b/src/finn/custom_op/fpgadataflow/upsampler.py @@ -147,9 +147,9 @@ class UpsampleNearestNeighbour_Batch(HLSCustomOp): def read_npy_data(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") dtype = self.get_input_datatype() - if dtype == DataType.BIPOLAR: + if dtype == DataType["BIPOLAR"]: # use binary for bipolar storage - dtype = DataType.BINARY + dtype = DataType["BINARY"] elem_bits = dtype.bitwidth() packed_bits = self.get_instream_width() packed_hls_type = "ap_uint<%d>" % packed_bits @@ -180,9 +180,9 @@ class UpsampleNearestNeighbour_Batch(HLSCustomOp): def dataoutstrm(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") dtype = self.get_output_datatype() - if dtype == DataType.BIPOLAR: + if dtype == DataType["BIPOLAR"]: # use binary for bipolar storage - dtype = DataType.BINARY + dtype = DataType["BINARY"] elem_bits = dtype.bitwidth() packed_bits = self.get_outstream_width() packed_hls_type = "ap_uint<%d>" % packed_bits diff --git a/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py b/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py index 9fc133b9bc09ca556df5779970d68d4b62131659..fa990f28087c0ec47a071e85ed1af6ff9328f70f 100644 --- a/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py +++ b/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py @@ -236,8 +236,8 @@ class Vector_Vector_Activate_Batch(HLSCustomOp): ret = dict() inp_hls_str = self.get_input_datatype().get_hls_datatype_str() out_hls_str = self.get_output_datatype().get_hls_datatype_str() - inp_is_bipolar = self.get_input_datatype() == DataType.BIPOLAR - wt_is_bipolar = self.get_weight_datatype() == DataType.BIPOLAR + inp_is_bipolar = self.get_input_datatype() == DataType["BIPOLAR"] + wt_is_bipolar = self.get_weight_datatype() == DataType["BIPOLAR"] # fill in TSrcI and TWeightI # TODO handle bipolar inputs if inp_is_bipolar or wt_is_bipolar: diff --git a/src/finn/qnn-data/templates/driver/driver_base.py b/src/finn/qnn-data/templates/driver/driver_base.py index df1ab15c7892d66a668d82040b0da93366942cb7..b6dd8350809a33ab5dad3e21b0f52f41cbe872ec 100644 --- a/src/finn/qnn-data/templates/driver/driver_base.py +++ b/src/finn/qnn-data/templates/driver/driver_base.py @@ -456,9 +456,9 @@ class FINNExampleOverlay(Overlay): # also benchmark driver-related overheads input_npy = gen_finn_dt_tensor(self.idt(), self.ishape_normal()) # provide as int8/uint8 to support fast packing path where possible - if self.idt() == DataType.UINT8: + if self.idt() == DataType["UINT8"]: input_npy = input_npy.astype(np.uint8) - elif self.idt() == DataType.INT8: + elif self.idt() == DataType["INT8"]: input_npy = input_npy.astype(np.int8) start = time.time() ibuf_folded = self.fold_input(input_npy) diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 3cb193055f3a455d95f7735ab38b2601809dbabd..4ab9c8fe27fce2a71cd9e21e6a94c1eb706cc47a 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -589,18 +589,18 @@ class 
@@ -589,18 +589,18 @@ class InferBinaryStreamingFCLayer(Transformation):
                 mm_output = n.output[0]
                 mm_in_shape = model.get_tensor_shape(mm_input)
                 mm_out_shape = model.get_tensor_shape(mm_output)
-                assert model.get_tensor_datatype(mm_input) == DataType.BINARY, (
+                assert model.get_tensor_datatype(mm_input) == DataType["BINARY"], (
                     n.name
                     + """: First
                 input for xnorpopcount is not set to FINN DataType BINARY."""
                 )
-                assert model.get_tensor_datatype(mm_weight) == DataType.BINARY, (
+                assert model.get_tensor_datatype(mm_weight) == DataType["BINARY"], (
                     n.name
                     + """: Second
                 input (weights) for xnorpopcount is not set to FINN DataType BINARY."""
                 )
-                idt = DataType.BINARY
-                wdt = DataType.BINARY
+                idt = DataType["BINARY"]
+                wdt = DataType["BINARY"]
                 mm_output = n.output[0]
                 W = model.get_initializer(mm_weight)
                 # extract weight shape, note that ONNX and finn-hlslib
@@ -766,7 +766,7 @@ class InferQuantizedStreamingFCLayer(Transformation):
                             + ": out_bias must be integer for HLS conversion."
                         )
                         actval = int(actval)
-                        odt_is_bipolar = odt == DataType.BIPOLAR
+                        odt_is_bipolar = odt == DataType["BIPOLAR"]
                        bipolar_ok = (
                             odt_is_bipolar and (scale == 2.0) and (actval == -1)
                         )
@@ -1254,7 +1254,7 @@ class InferChannelwiseLinearLayer(Transformation):
        for k in DataType.__members__:
             dt = DataType[k]
 
-            if dt in [DataType.BIPOLAR, DataType.TERNARY, DataType.FLOAT32]:
+            if dt in [DataType["BIPOLAR"], DataType["TERNARY"], DataType["FLOAT32"]]:
                 # not currently supported
                 continue
 
@@ -1270,9 +1270,9 @@ class InferChannelwiseLinearLayer(Transformation):
         )
 
         if (0 <= vals).all():
-            return DataType.UINT64
+            return DataType["UINT64"]
         else:
-            return DataType.INT64
+            return DataType["INT64"]
 
     def apply(self, model):
         graph = model.graph
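
Note: get_smallest_possible, duplicated here from channelwise_op_batch.py, walks DataType.__members__ in order and returns the first (narrowest) type whose range covers all values, skipping BIPOLAR/TERNARY/FLOAT32 and falling back to a 64-bit type. Expected behavior, sketched under the assumption that members are ordered by ascending bitwidth:

    assert get_smallest_possible([0, 1, 3]) == DataType["UINT2"]
    assert get_smallest_possible([-2, 1]) == DataType["INT2"]
    # anything no narrower member covers falls through to
    # DataType["UINT64"] (all values >= 0) or DataType["INT64"]
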
diff --git a/src/finn/transformation/streamline/absorb.py b/src/finn/transformation/streamline/absorb.py
index cf712b38054c78c6e414ad914ab67378daec5d12..cba9648187ffadcff048b045f0c85c81c770e44d 100644
--- a/src/finn/transformation/streamline/absorb.py
+++ b/src/finn/transformation/streamline/absorb.py
@@ -205,7 +205,7 @@ class FactorOutMulSignMagnitude(Transformation):
                 actual_ndims = len(tuple(filter(lambda x: x > 1, A.shape)))
                 is_1d = actual_ndims == 1
                 is_not_bipolar = (
-                    model.get_tensor_datatype(mul_weight_name) != DataType.BIPOLAR
+                    model.get_tensor_datatype(mul_weight_name) != DataType["BIPOLAR"]
                 )
                 is_signed = (A < 0).any()
                 if is_signed and (is_scalar or is_1d) and is_not_bipolar:
@@ -217,7 +217,7 @@ class FactorOutMulSignMagnitude(Transformation):
                     # create new mul node with sign(A) as the operand
                     sgn = np.sign(A)
                     model.set_initializer(sign_mul_param_name, sgn)
-                    model.set_tensor_datatype(sign_mul_param_name, DataType.BIPOLAR)
+                    model.set_tensor_datatype(sign_mul_param_name, DataType["BIPOLAR"])
                     # replace original mul weight by magnitudes
                     model.set_initializer(mul_weight_name, np.abs(A))
                     new_mul = oh.make_node(
@@ -457,7 +457,7 @@ class AbsorbScalarMulAddIntoTopK(Transformation):
                     graph.node.remove(prod)
                     n.input[0] = prod_input
                     # to avoid error the dataype is set to float32
-                    model.set_tensor_datatype(n.input[0], DataType.FLOAT32)
+                    model.set_tensor_datatype(n.input[0], DataType["FLOAT32"])
                     graph_modified = True
         if graph_modified:
             model = model.transform(InferShapes())
diff --git a/src/finn/transformation/streamline/collapse_repeated.py b/src/finn/transformation/streamline/collapse_repeated.py
index 50265046d94db1e7233a45b934fd68f08431a95d..92c48c84ffa1a161f623ef6b22caaeb92f4a8199 100644
--- a/src/finn/transformation/streamline/collapse_repeated.py
+++ b/src/finn/transformation/streamline/collapse_repeated.py
@@ -85,8 +85,8 @@ class CollapseRepeatedOp(Transformation):
                     # replace parameter value
                     model.set_initializer(new_node_param_name, new_param)
                     # be conservative with param/output DataTypes
-                    model.set_tensor_datatype(new_node_param_name, DataType.FLOAT32)
-                    model.set_tensor_datatype(end_name, DataType.FLOAT32)
+                    model.set_tensor_datatype(new_node_param_name, DataType["FLOAT32"])
+                    model.set_tensor_datatype(end_name, DataType["FLOAT32"])
                     # remove old nodes
                     graph.node.remove(n)
                     graph.node.remove(consumer)
diff --git a/src/finn/transformation/streamline/reorder.py b/src/finn/transformation/streamline/reorder.py
index cd44e115eed42d1c0529b86a49e8855ff7c492ce..416755ae21002716e1535094d58aa8593ce3221a 100644
--- a/src/finn/transformation/streamline/reorder.py
+++ b/src/finn/transformation/streamline/reorder.py
@@ -408,16 +408,16 @@ class MoveMulPastDWConv(Transformation):
                     # rewire mul input to be conv input
                     conv_node.input[0] = start_name
                     model.set_tensor_shape(start_name, conv_in_shape)
-                    model.set_tensor_datatype(start_name, DataType.FLOAT32)
+                    model.set_tensor_datatype(start_name, DataType["FLOAT32"])
                     # use old conv input tensor as conv output
                     conv_node.output[0] = conv_in_name
                     model.set_tensor_shape(conv_in_name, conv_out_shape)
-                    model.set_tensor_datatype(conv_in_name, DataType.FLOAT32)
+                    model.set_tensor_datatype(conv_in_name, DataType["FLOAT32"])
                     # use new conv output as new mul node input
                     mul_node.input[0] = conv_in_name
                     # use old conv output as new mul node output
                     mul_node.output[0] = conv_out_name
-                    model.set_tensor_datatype(conv_out_name, DataType.FLOAT32)
+                    model.set_tensor_datatype(conv_out_name, DataType["FLOAT32"])
                     # move mul node past conv node
                     graph.node.remove(mul_node)
                     graph.node.insert(node_ind, mul_node)
@@ -482,16 +482,16 @@ class MoveMulPastMaxPool(Transformation):
                     # rewire mul input to be maxpool input
                     maxpool_node.input[0] = start_name
                     model.set_tensor_shape(start_name, maxpool_in_shape)
-                    model.set_tensor_datatype(start_name, DataType.FLOAT32)
+                    model.set_tensor_datatype(start_name, DataType["FLOAT32"])
                     # use old maxpool input tensor as maxpool output
                     maxpool_node.output[0] = maxpool_in_name
                     model.set_tensor_shape(maxpool_in_name, maxpool_out_shape)
-                    model.set_tensor_datatype(maxpool_in_name, DataType.FLOAT32)
+                    model.set_tensor_datatype(maxpool_in_name, DataType["FLOAT32"])
                     # use new maxpool output as new mul node input
                     mul_node.input[0] = maxpool_in_name
                     # use old maxpool output as new mul node output
                     mul_node.output[0] = maxpool_out_name
-                    model.set_tensor_datatype(maxpool_out_name, DataType.FLOAT32)
+                    model.set_tensor_datatype(maxpool_out_name, DataType["FLOAT32"])
                     # move mul node past maxpool node
                     graph.node.remove(mul_node)
                     graph.node.insert(node_ind, mul_node)
@@ -638,7 +638,7 @@ class MoveScalarLinearPastInvariants(Transformation):
                 model.set_tensor_shape(n.output[0], out_shape)
                 model.set_tensor_shape(prod0.output[0], out_shape)
                 model.set_tensor_datatype(prod0.output[0], scalar_op_odt)
-                model.set_tensor_datatype(n.output[0], DataType.FLOAT32)
+                model.set_tensor_datatype(n.output[0], DataType["FLOAT32"])
                 graph.node.remove(prod0)
                 graph.node.insert(node_ind - 1, prod0)
                 graph_modified = True
diff --git a/src/finn/transformation/streamline/sign_to_thres.py b/src/finn/transformation/streamline/sign_to_thres.py
index 13f2e8524af7ce2d3457d0637f1c6d02733f504b..61d7eb35430262b1ee90dfa478076fb6f7556612 100644
--- a/src/finn/transformation/streamline/sign_to_thres.py
+++ b/src/finn/transformation/streamline/sign_to_thres.py
@@ -69,6 +69,6 @@ class ConvertSignToThres(Transformation):
             graph.node.insert(node_ind, mt_node)
             graph.node.remove(n)
             # add quantization annotations
-            model.set_tensor_datatype(sign_out_name, DataType.BIPOLAR)
+            model.set_tensor_datatype(sign_out_name, DataType["BIPOLAR"])
             graph_modified = True
         return (model, graph_modified)
diff --git a/src/finn/util/create.py b/src/finn/util/create.py
index d9c5d7b1b59916edfc8730992535f3ddb57c4d60..62229a69b68c26dd191b3e1d4a44f1bb8b19ed07 100644
--- a/src/finn/util/create.py
+++ b/src/finn/util/create.py
@@ -49,10 +49,10 @@ def hls_random_mlp_maker(layer_spec):
         # no activation, produce accumulators
         T = None
         tdt = None
-        if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
-            odt = DataType.UINT32
+        if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]:
+            odt = DataType["UINT32"]
         else:
-            odt = DataType.INT32
+            odt = DataType["INT32"]
     else:
         odt = act
         (min, max) = calculate_signed_dot_prod_range(idt, wdt, mw)
@@ -61,13 +61,13 @@ def hls_random_mlp_maker(layer_spec):
         # provide non-decreasing thresholds
         T = np.sort(T, axis=1)
         # generate thresholds for activation
-        if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
-            tdt = DataType.UINT32
+        if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]:
+            tdt = DataType["UINT32"]
            # bias thresholds to be positive
            T = np.ceil((T + mw) / 2)
            assert (T >= 0).all()
         else:
-            tdt = DataType.INT32
+            tdt = DataType["INT32"]
     lyr["T"] = T
     lyr["tdt"] = tdt
     lyr["odt"] = odt
@@ -120,11 +120,11 @@ def hls_mlp_maker(layer_spec):
     # StreamingFC:
     # - specify their datatypes as such
     # - specify their datatypes as BINARY as use binaryXnorMode
-    if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
+    if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]:
         # we'll internally convert weights/inputs to binary and specify the
         # datatypes as such, and also set the binaryXnorMode attribute to 1
-        export_wdt = DataType.BINARY
-        export_idt = DataType.BINARY
+        export_wdt = DataType["BINARY"]
+        export_idt = DataType["BINARY"]
         binary_xnor_mode = 1
     else:
         export_wdt = wdt
@@ -134,7 +134,7 @@ def hls_mlp_maker(layer_spec):
     if T is not None:
         no_act = 0
         node_inp_list = [current_in_name, current_W_name, current_T_name]
-        if odt == DataType.BIPOLAR:
+        if odt == DataType["BIPOLAR"]:
            actval = 0
         else:
             actval = odt.min()
diff --git a/tests/brevitas/test_brevitas_QConv2d.py b/tests/brevitas/test_brevitas_QConv2d.py
index c1f790946bfa5f53194b96b1fea9c1722797a4a0..9de2efbcee627384cb76d1e05d0495cf6b40b169 100644
--- a/tests/brevitas/test_brevitas_QConv2d.py
+++ b/tests/brevitas/test_brevitas_QConv2d.py
@@ -86,7 +86,7 @@ def test_brevitas_QConv2d(dw, bias, in_channels):
         weight_narrow_range=True,
         weight_scaling_min_val=2e-16,
     )
-    weight_tensor = gen_finn_dt_tensor(DataType.INT4, w_shape)
+    weight_tensor = gen_finn_dt_tensor(DataType["INT4"], w_shape)
     b_conv.weight = torch.nn.Parameter(torch.from_numpy(weight_tensor).float())
     b_conv.eval()
     bo.export_finn_onnx(b_conv, ishape, export_onnx_path)
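
Note: the tests below lean on gen_finn_dt_tensor, which draws a random tensor whose values are valid for the given FINN DataType. A usage sketch (the shape is made up; the import path matches the one visible in the test diffs below):

    from finn.util.basic import gen_finn_dt_tensor

    w = gen_finn_dt_tensor(DataType["INT4"], (4, 3))
    assert w.shape == (4, 3)
    # every entry is an integer inside the INT4 range [-8, 7]
    assert ((w >= -8) & (w <= 7)).all() and (w == w.round()).all()
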
diff --git a/tests/brevitas/test_brevitas_mobilenet.py b/tests/brevitas/test_brevitas_mobilenet.py
index eb642adada9bd9abb8a328518770899d3da96ada..108c97c2e83b7f3ca9dd6ead746b3ef8b4d10af5 100644
--- a/tests/brevitas/test_brevitas_mobilenet.py
+++ b/tests/brevitas/test_brevitas_mobilenet.py
@@ -78,7 +78,9 @@ def test_brevitas_mobilenet():
     bo.export_finn_onnx(preproc, (1, 3, 224, 224), preproc_onnx)
     preproc_model = ModelWrapper(preproc_onnx)
     # set input finn datatype to UINT8
-    preproc_model.set_tensor_datatype(preproc_model.graph.input[0].name, DataType.UINT8)
+    preproc_model.set_tensor_datatype(
+        preproc_model.graph.input[0].name, DataType["UINT8"]
+    )
     preproc_model = preproc_model.transform(InferShapes())
     preproc_model = preproc_model.transform(GiveUniqueNodeNames())
     preproc_model = preproc_model.transform(GiveUniqueParameterTensors())
diff --git a/tests/brevitas/test_brevitas_qlinear.py b/tests/brevitas/test_brevitas_qlinear.py
index 873866b37727730b7cedd035f5edd93f7c1afe32..67e4d04d066a2a1a6baf78429d91e724a9d80e7f 100644
--- a/tests/brevitas/test_brevitas_qlinear.py
+++ b/tests/brevitas/test_brevitas_qlinear.py
@@ -48,7 +48,7 @@ export_onnx_path = "test_brevitas_qlinear.onnx"
 @pytest.mark.parametrize("out_features", [4])
 @pytest.mark.parametrize("in_features", [3])
 @pytest.mark.parametrize("w_bits", [4])
-@pytest.mark.parametrize("i_dtype", [DataType.UINT4])
+@pytest.mark.parametrize("i_dtype", [DataType["UINT4"]])
 def test_brevitas_qlinear(bias, out_features, in_features, w_bits, i_dtype):
     i_shape = (1, in_features)
     w_shape = (out_features, in_features)
diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py
index 6790485ceab373cd539727aefc59fa8999b3b192..14e10da86ec5cec14dfabdd0b4f6c2b92dd42519 100644
--- a/tests/end2end/test_end2end_bnn_pynq.py
+++ b/tests/end2end/test_end2end_bnn_pynq.py
@@ -352,7 +352,7 @@ class TestEnd2End:
         model = model.transform(MergeONNXModels(pre_model))
         # add input quantization annotation: UINT8 for all BNN-PYNQ models
         global_inp_name = model.graph.input[0].name
-        model.set_tensor_datatype(global_inp_name, DataType.UINT8)
+        model.set_tensor_datatype(global_inp_name, DataType["UINT8"])
         # postprocessing: insert Top-1 node at the end
         model = model.transform(InsertTopK(k=1))
         chkpt_name = get_checkpoint_name(topology, wbits, abits, "pre_post")
diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py
index 7b4cebb52b3e4758746d4054827c6f96e8a4d681..23a5d23f1a91798b834797b3a8ccc35d07b2e61a 100644
--- a/tests/end2end/test_end2end_cybsec_mlp.py
+++ b/tests/end2end/test_end2end_cybsec_mlp.py
@@ -141,9 +141,11 @@ def test_end2end_cybsec_mlp_export():
     assert finn_model.graph.node[3].op_type == "MatMul"
     assert finn_model.graph.node[-1].op_type == "MultiThreshold"
     # verify datatypes on some tensors
-    assert finn_model.get_tensor_datatype(finnonnx_in_tensor_name) == DataType.BIPOLAR
+    assert (
+        finn_model.get_tensor_datatype(finnonnx_in_tensor_name) == DataType["BIPOLAR"]
+    )
     first_matmul_w_name = finn_model.graph.node[3].input[1]
-    assert finn_model.get_tensor_datatype(first_matmul_w_name) == DataType.INT2
+    assert finn_model.get_tensor_datatype(first_matmul_w_name) == DataType["INT2"]
 
 
 @pytest.mark.slow
diff --git a/tests/end2end/test_end2end_mobilenet_v1.py b/tests/end2end/test_end2end_mobilenet_v1.py
index 760c77ea406386ce4886b21bcb042808984dcb7f..0d639ae084af257db61c85ff0b5c0d5101539b71 100644
--- a/tests/end2end/test_end2end_mobilenet_v1.py
+++ b/tests/end2end/test_end2end_mobilenet_v1.py
@@ -97,7 +97,9 @@ def test_end2end_mobilenet_export():
     bo.export_finn_onnx(preproc, (1, 3, 224, 224), preproc_onnx)
     preproc_model = ModelWrapper(preproc_onnx)
     # set input finn datatype to UINT8
-    preproc_model.set_tensor_datatype(preproc_model.graph.input[0].name, DataType.UINT8)
+    preproc_model.set_tensor_datatype(
+        preproc_model.graph.input[0].name, DataType["UINT8"]
+    )
     preproc_model = preproc_model.transform(InferShapes())
     preproc_model = preproc_model.transform(FoldConstants())
     preproc_model = preproc_model.transform(GiveUniqueNodeNames())
diff --git a/tests/fpgadataflow/test_code_gen_trafo.py b/tests/fpgadataflow/test_code_gen_trafo.py
index 89fab37d6d5225383ccb13a748c83573d6ee4516..5ddff3d36f03d17833e17bc98649a64dabf31577 100644
--- a/tests/fpgadataflow/test_code_gen_trafo.py
+++ b/tests/fpgadataflow/test_code_gen_trafo.py
@@ -39,7 +39,7 @@ from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim
 
 @pytest.mark.vivado
 def test_code_gen_trafo():
-    idt = wdt = odt = DataType.BIPOLAR
+    idt = wdt = odt = DataType["BIPOLAR"]
     mw = 8
     mh = 8
     pe = 4
diff --git a/tests/fpgadataflow/test_compilation_trafo.py b/tests/fpgadataflow/test_compilation_trafo.py
index 6284748b9ccdc422b42bd9e301eb395d8dd1ad45..81e2ff9a7c5829982cdb6121378e9e9e3af81632 100644
--- a/tests/fpgadataflow/test_compilation_trafo.py
+++ b/tests/fpgadataflow/test_compilation_trafo.py
@@ -40,7 +40,7 @@ from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim
 
 @pytest.mark.vivado
 def test_compilation_trafo():
-    idt = wdt = odt = DataType.BIPOLAR
+    idt = wdt = odt = DataType["BIPOLAR"]
     mw = 8
     mh = 8
     pe = 4
diff --git a/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py
index 4e7030449c87b81d7a492b0e76dd05a047be3858..5cc5f8fa6c1ccd3e5a9e154b6fb2773caf4668a9 100644
--- a/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py
+++ b/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py
@@ -72,7 +72,7 @@ from finn.util.basic import gen_finn_dt_tensor
 def test_convert_to_hls_1d_conv_layer(conv_config, depthwise, exec_mode):
     pad, kernel_size, stride, dilation = conv_config
     np.random.seed(0)
-    idt = DataType.UINT4
+    idt = DataType["UINT4"]
 
     in_feature_dim_h, in_feature_dim_w = [10, 1]
     in_chn = 16
@@ -101,7 +101,7 @@ def test_convert_to_hls_1d_conv_layer(conv_config, depthwise, exec_mode):
     input_shape = [1, in_chn, in_feature_dim_h, in_feature_dim_w]
     output_shape = [1, out_chn, out_feature_dim_h, out_feature_dim_w]
 
-    conv_weight_dt = DataType.UINT4
+    conv_weight_dt = DataType["UINT4"]
 
     conv_config = {}
     conv_config["dilations"] = [dilation_h, dilation_w]
diff --git a/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py b/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py
index 8dd927fa7628d1500fe644b030278fbaa3f18810..bf690d1d68bc0f580663735c3596c1dfc0a651e8 100644
--- a/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py
+++ b/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py
@@ -76,9 +76,13 @@ def make_single_maxpool_modelwrapper(onnx_op_name, ishape, idt, pdt, pshape):
 
 
 # parameter datatype
-@pytest.mark.parametrize("pdt", [DataType.BIPOLAR, DataType.UINT4, DataType.INT2])
+@pytest.mark.parametrize(
+    "pdt", [DataType["BIPOLAR"], DataType["UINT4"], DataType["INT2"]]
+)
 # input datatype
-@pytest.mark.parametrize("idt", [DataType.INT32, DataType.UINT4, DataType.INT4])
+@pytest.mark.parametrize(
+    "idt", [DataType["INT32"], DataType["UINT4"], DataType["INT4"]]
+)
 # function
 @pytest.mark.parametrize("onnx_op_name", ["Add", "Mul"])
 # vector parameter or scalar parameter (broadcast)
@@ -103,10 +107,10 @@ def test_convert_to_hls_channelwise_layer(
 
     # Since the aren't Data types with a bit width of a non power of 2,
     # there are cases where the input won't use it full range.
-    if idt == DataType.INT32:
-        x = gen_finn_dt_tensor(DataType.INT16, (1, ifm_ch, ifm_dim, ifm_dim))
-    elif idt == DataType.UINT32:
-        x = gen_finn_dt_tensor(DataType.UINT16, (1, ifm_ch, ifm_dim, ifm_dim))
+    if idt == DataType["INT32"]:
+        x = gen_finn_dt_tensor(DataType["INT16"], (1, ifm_ch, ifm_dim, ifm_dim))
+    elif idt == DataType["UINT32"]:
+        x = gen_finn_dt_tensor(DataType["UINT16"], (1, ifm_ch, ifm_dim, ifm_dim))
     else:
         x = gen_finn_dt_tensor(idt, (1, ifm_ch, ifm_dim, ifm_dim))
 
diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py b/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py
index cf2903a5789d7d3892ac549338b274268c1661b3..9b0f3d68aed655f0b36857d50a085093ea94aecb 100755
--- a/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py
+++ b/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py
@@ -79,10 +79,10 @@ def get_multithreshold_rand_params(channels, num_of_thres, seed=None):
 @pytest.mark.slow
 def test_convert_to_hls_conv_fc_transition(conv_config, depthwise, use_reshape):
     np.random.seed(0)
-    idt = DataType.UINT4
-    odt = DataType.UINT4
-    conv_weight_dt = DataType.INT4
-    fc_weight_dt = DataType.INT4
+    idt = DataType["UINT4"]
+    odt = DataType["UINT4"]
+    conv_weight_dt = DataType["INT4"]
+    fc_weight_dt = DataType["INT4"]
 
     input_shape, kernel_shape, stride, pad = conv_config
     kernel_size_h, kernel_size_w = kernel_shape
@@ -186,8 +186,8 @@ def test_convert_to_hls_conv_fc_transition(conv_config, depthwise, use_reshape):
     model.set_tensor_datatype("global_out", odt)
     model.set_tensor_datatype("conv_param", conv_weight_dt)
     model.set_tensor_datatype("matmul_param", fc_weight_dt)
-    model.set_tensor_datatype("thres1_param", DataType.INT32)
-    model.set_tensor_datatype("thres2_param", DataType.INT32)
+    model.set_tensor_datatype("thres1_param", DataType["INT32"])
+    model.set_tensor_datatype("thres2_param", DataType["INT32"])
 
     model.set_initializer(
         "conv_param", gen_finn_dt_tensor(conv_weight_dt, conv_param_shape)
diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py
index deca7c96127fdf03d9feb7504d5a6daebb41a5d5..d96bc987567cdcfcd18a404986c954c7527c7354 100644
--- a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py
+++ b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py
@@ -63,7 +63,7 @@ from finn.util.basic import gen_finn_dt_tensor
 def test_convert_to_hls_conv_layer(conv_config, depthwise, exec_mode):
     kernel_size, stride, pad = conv_config
     np.random.seed(0)
-    idt = DataType.UINT4
+    idt = DataType["UINT4"]
 
     in_feature_dim = 7
     in_chn = 16
@@ -84,7 +84,7 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, exec_mode):
     input_shape = [1, in_chn, in_feature_dim, in_feature_dim]
     output_shape = [1, out_chn, out_feature_dim, out_feature_dim]
 
-    conv_weight_dt = DataType.UINT4
+    conv_weight_dt = DataType["UINT4"]
 
     conv_config = {}
     conv_config["dilations"] = [1, 1]
diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py b/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py
index b0780c073114351ba136fefe6973114bd1a8505b..6089901566cb412e63cd8acc7a8260081248ba52 100644
--- a/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py
+++ b/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py
@@ -138,7 +138,7 @@ def make_model(ch, ifmdim):
 
 
 # data types
-@pytest.mark.parametrize("idt", [DataType.UINT2])
+@pytest.mark.parametrize("idt", [DataType["UINT2"]])
 # channels
 @pytest.mark.parametrize("ch", [16])
 # ifmdim
diff --git a/tests/fpgadataflow/test_convert_to_hls_pool_batch.py b/tests/fpgadataflow/test_convert_to_hls_pool_batch.py
index 70716e88a4de827be37416b63a925b30d01c342a..3efafc040df07a7d56638bf5ce0b1ce01887343c 100644
--- a/tests/fpgadataflow/test_convert_to_hls_pool_batch.py
+++ b/tests/fpgadataflow/test_convert_to_hls_pool_batch.py
@@ -118,9 +118,9 @@ def prepare_inputs(input_tensor):
 
 
 # input datatype
-@pytest.mark.parametrize("idt", [DataType.UINT4, DataType.INT4, DataType.INT8])
+@pytest.mark.parametrize("idt", [DataType["UINT4"], DataType["INT4"], DataType["INT8"]])
 # output datatype
-@pytest.mark.parametrize("odt", [DataType.UINT4, DataType.INT4])
+@pytest.mark.parametrize("odt", [DataType["UINT4"], DataType["INT4"]])
 # pool configuration: ( k,stride, pad, ifm_dim )
 @pytest.mark.parametrize("pool_config", [(7, 7, 0, 7), (3, 2, 1, 5)])
 # input channels
diff --git a/tests/fpgadataflow/test_depthwise_convolution.py b/tests/fpgadataflow/test_depthwise_convolution.py
index 75ce055c0e9a093a5ddeab6b13af8d36d6152fb8..633db668d3bc5de815a313743c06cd74a7166c9c 100644
--- a/tests/fpgadataflow/test_depthwise_convolution.py
+++ b/tests/fpgadataflow/test_depthwise_convolution.py
@@ -60,14 +60,14 @@ def set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding):
     ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, total_pad=total_pad)
 
     if act is None:
-        odt = DataType.INT32
+        odt = DataType["INT32"]
     else:
         odt = act
         out_act = oh.make_tensor_value_info(
             "out_act", TensorProto.FLOAT, [1, ofm_dim, ofm_dim, ofm_ch]
         )
         T = oh.make_tensor_value_info("T", TensorProto.FLOAT, [ofm_ch, 15])
-        tdt = DataType.INT32
+        tdt = DataType["INT32"]
         thresh_node = oh.make_node(
             "MultiThreshold",
             domain="finn.custom_op.general",
@@ -161,7 +161,7 @@ def set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding):
 # PE
 @pytest.mark.parametrize("pe", [1, 2, 4])
 # Output activation
-@pytest.mark.parametrize("act", [None, DataType.UINT4])
+@pytest.mark.parametrize("act", [None, DataType["UINT4"]])
 # kernel size
 @pytest.mark.parametrize("k", [2, 4])
 # stride
@@ -171,7 +171,7 @@ def set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding):
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_depthwise_conv_hls_cppsim(act, pe, k, stride, padding):
-    idt = wdt = DataType.INT4
+    idt = wdt = DataType["INT4"]
     ifm_dim = 6
     ifm_ch = 4
 
@@ -203,7 +203,7 @@ def test_depthwise_conv_hls_cppsim(act, pe, k, stride, padding):
 # PE
 @pytest.mark.parametrize("pe", [1, 2, 4])
 # Output activation
-@pytest.mark.parametrize("act", [None, DataType.UINT4])
+@pytest.mark.parametrize("act", [None, DataType["UINT4"]])
 # kernel size
 @pytest.mark.parametrize("k", [2, 4])
 # stride
@@ -213,7 +213,7 @@ def test_depthwise_conv_hls_cppsim(act, pe, k, stride, padding):
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_depthwise_conv_hls_rtlsim(act, pe, k, stride, padding):
-    idt = wdt = DataType.INT4
+    idt = wdt = DataType["INT4"]
     ifm_dim = 6
     ifm_ch = 4
 
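
Note: the depthwise-convolution tests above build MultiThreshold nodes whose per-channel thresholds (tdt INT32) must be non-decreasing. The core semantics, sketched for a single channel (assuming the usual meets-or-exceeds comparison; FINN's multithreshold additionally handles channel-wise threshold matrices and output scale/bias):

    import numpy as np

    def multithreshold_1ch(val, thresholds):
        # output level = number of thresholds the value meets or exceeds
        return float(np.sum(val >= np.asarray(thresholds)))

    assert multithreshold_1ch(1.0, [-2.0, 0.0, 3.0]) == 2.0
    assert multithreshold_1ch(-5.0, [-2.0, 0.0, 3.0]) == 0.0
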
diff --git a/tests/fpgadataflow/test_fpgadataflow_addstreams.py b/tests/fpgadataflow/test_fpgadataflow_addstreams.py
index 021d58b4a382f2fe3d1a2c3c2a4ce8d7f3c87ae5..8cbf54ec188b12c67e02a33e3540718e9b08f382 100644
--- a/tests/fpgadataflow/test_fpgadataflow_addstreams.py
+++ b/tests/fpgadataflow/test_fpgadataflow_addstreams.py
@@ -82,7 +82,7 @@ def prepare_inputs(input1, input2):
 
 
 # data types
-@pytest.mark.parametrize("idt", [DataType.UINT4, DataType.UINT8])
+@pytest.mark.parametrize("idt", [DataType["UINT4"], DataType["UINT8"]])
 # channels
 @pytest.mark.parametrize("ch", [1, 64])
 # folding
diff --git a/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py b/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py
index 15bcd5fa8a937aa313f2c73f253f934f6bbd332b..949046d4ae313b852471e7d8a93e44fea48f7b0f 100644
--- a/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py
+++ b/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py
@@ -85,11 +85,11 @@ def make_modelwrapper(C, pe, idt, odt, pdt, func, vecs):
 
 
 # activation: None or DataType
-@pytest.mark.parametrize("act", [DataType.INT8])
+@pytest.mark.parametrize("act", [DataType["INT8"]])
 # input datatype
-@pytest.mark.parametrize("idt", [DataType.INT4])
+@pytest.mark.parametrize("idt", [DataType["INT4"]])
 # param datatype
-@pytest.mark.parametrize("pdt", [DataType.INT4])
+@pytest.mark.parametrize("pdt", [DataType["INT4"]])
 # folding, -1 is maximum possible
 @pytest.mark.parametrize("nf", [-1, 2])
 # number of input features
diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py
index 86622cf6d44dbda3af417283f5ceea1d1ebc3bf0..47cd7e7ba1df76cc793cd0946581239a6883874e 100644
--- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py
+++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py
@@ -131,7 +131,7 @@ def prepare_inputs(input_tensor):
 
 
 # input datatype
-@pytest.mark.parametrize("idt", [DataType.BIPOLAR, DataType.INT2])
+@pytest.mark.parametrize("idt", [DataType["BIPOLAR"], DataType["INT2"]])
 # kernel size
 @pytest.mark.parametrize("k", [2, 3])
 # input dimension
diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py
index b3d695469b7a4fa1f4235feee29e7fc3dece0df5..8440ac1fe46a0d1ea4db3d76489dfc4ce68ff642 100644
--- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py
+++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py
@@ -144,8 +144,8 @@ def prepare_inputs(input_tensor):
 
 
 # input datatype
-# @pytest.mark.parametrize("idt", [DataType.BIPOLAR, DataType.INT8])
-@pytest.mark.parametrize("idt", [DataType.INT8])
+# @pytest.mark.parametrize("idt", [DataType["BIPOLAR"], DataType["INT8"]])
+@pytest.mark.parametrize("idt", [DataType["INT8"]])
 # kernel size
 @pytest.mark.parametrize("k", [[4, 1]])
 # input dimension
diff --git a/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py b/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py
index 6b776e8827d8e76102bd069ae8567051ed0580ba..73bf1165afa9418be0c89f77797de538275fd220 100644
--- a/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py
+++ b/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py
@@ -85,7 +85,7 @@ def prepare_inputs(input_tensor, idt):
 
 
 # data type
-@pytest.mark.parametrize("idt", [DataType.INT4, DataType.UINT16])
+@pytest.mark.parametrize("idt", [DataType["INT4"], DataType["UINT16"]])
 # channels
 @pytest.mark.parametrize("ch", [64])
 # folding
diff --git a/tests/fpgadataflow/test_fpgadataflow_dwc.py b/tests/fpgadataflow/test_fpgadataflow_dwc.py
index b0af4382383d8935c69e362b1a43db536979c784..248b591eb48d7cfd6f121738a9bca525c38a45f8 100644
--- a/tests/fpgadataflow/test_fpgadataflow_dwc.py
+++ b/tests/fpgadataflow/test_fpgadataflow_dwc.py
@@ -82,7 +82,7 @@ def prepare_inputs(input_tensor, dt):
 # outWidth
 @pytest.mark.parametrize("OUTWidth", [2, 4])
 # finn_dtype
-@pytest.mark.parametrize("finn_dtype", [DataType.BIPOLAR, DataType.INT2])
+@pytest.mark.parametrize("finn_dtype", [DataType["BIPOLAR"], DataType["INT2"]])
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_fpgadataflow_dwc_rtlsim(Shape, INWidth, OUTWidth, finn_dtype):
diff --git a/tests/fpgadataflow/test_fpgadataflow_fclayer.py b/tests/fpgadataflow/test_fpgadataflow_fclayer.py
index 49c326d2a34e7262826505ae32f2509b42ae0a35..02c3a3dc9506152fe999873df0612e76a5c9cefd 100644
--- a/tests/fpgadataflow/test_fpgadataflow_fclayer.py
+++ b/tests/fpgadataflow/test_fpgadataflow_fclayer.py
@@ -59,11 +59,11 @@ def make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T=None, tdt=Non
     # StreamingFC:
     # - specify their datatypes as such
     # - specify their datatypes as BINARY as use binaryXnorMode
-    if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
+    if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]:
         # we'll internally convert weights/inputs to binary and specify the
         # datatypes as such, and also set the binaryXnorMode attribute to 1
-        export_wdt = DataType.BINARY
-        export_idt = DataType.BINARY
+        export_wdt = DataType["BINARY"]
+        export_idt = DataType["BINARY"]
         binary_xnor_mode = 1
     else:
         export_wdt = wdt
@@ -75,7 +75,7 @@ def make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T=None, tdt=Non
     if T is not None:
         no_act = 0
         node_inp_list = ["inp", "weights", "thresh"]
-        if odt == DataType.BIPOLAR:
+        if odt == DataType["BIPOLAR"]:
             actval = 0
         else:
             actval = odt.min()
@@ -123,7 +123,7 @@ def make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T=None, tdt=Non
 
 
 def prepare_inputs(input_tensor, idt, wdt):
-    if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
+    if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]:
         # convert bipolar to binary
         return {"inp": (input_tensor + 1) / 2}
     else:
@@ -133,11 +133,11 @@ def prepare_inputs(input_tensor, idt, wdt):
 # mem_mode: const or decoupled
 @pytest.mark.parametrize("mem_mode", ["const", "decoupled", "external"])
 # activation: None or DataType
-@pytest.mark.parametrize("act", [None, DataType.BIPOLAR, DataType.INT4])
+@pytest.mark.parametrize("act", [None, DataType["BIPOLAR"], DataType["INT4"]])
 # weight datatype
-@pytest.mark.parametrize("wdt", [DataType.BIPOLAR, DataType.INT4])
+@pytest.mark.parametrize("wdt", [DataType["BIPOLAR"], DataType["INT4"]])
 # input datatype
-@pytest.mark.parametrize("idt", [DataType.BIPOLAR, DataType.INT4])
+@pytest.mark.parametrize("idt", [DataType["BIPOLAR"], DataType["INT4"]])
 # neuron folding, -1 is maximum possible
 @pytest.mark.parametrize("nf", [-1, 2, 1])
 # synapse folding, -1 is maximum possible
@@ -165,10 +165,10 @@ def test_fpgadataflow_fclayer_cppsim(mem_mode, idt, wdt, act, nf, sf, mw, mh):
         # no activation, produce accumulators
         T = None
         tdt = None
-        if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
-            odt = DataType.UINT32
+        if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]:
+            odt = DataType["UINT32"]
         else:
-            odt = DataType.INT32
+            odt = DataType["INT32"]
     else:
         odt = act
         (min, max) = calculate_signed_dot_prod_range(idt, wdt, mw)
@@ -177,13 +177,13 @@ def test_fpgadataflow_fclayer_cppsim(mem_mode, idt, wdt, act, nf, sf, mw, mh):
         # provide non-decreasing thresholds
         T = np.sort(T, axis=1)
         # generate thresholds for activation
-        if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
-            tdt = DataType.UINT32
+        if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]:
+            tdt = DataType["UINT32"]
             # bias thresholds to be positive
             T = np.ceil((T + mw) / 2)
             assert (T >= 0).all()
         else:
-            tdt = DataType.INT32
+            tdt = DataType["INT32"]
     model = make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T, tdt)
     for node in model.graph.node:
         # lookup op_type in registry of CustomOps
CustomOps @@ -194,14 +194,14 @@ def test_fpgadataflow_fclayer_cppsim(mem_mode, idt, wdt, act, nf, sf, mw, mh): model = model.transform(CompileCppSim()) # prepare input data input_dict = prepare_inputs(x, idt, wdt) - if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR: + if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]: # convert inputs to binary and use xnorpopcountmatmul y = xp.xnorpopcountmatmul((x + 1) / 2, (W + 1) / 2) else: y = np.matmul(x, W) if T is not None: y = multithreshold(y, T) - if act == DataType.BIPOLAR: + if act == DataType["BIPOLAR"]: # binary to bipolar y = 2 * y - 1 else: @@ -220,11 +220,11 @@ def test_fpgadataflow_fclayer_cppsim(mem_mode, idt, wdt, act, nf, sf, mw, mh): # mem_mode: const or decoupled @pytest.mark.parametrize("mem_mode", ["const", "decoupled", "external"]) # activation: None or DataType -@pytest.mark.parametrize("act", [None, DataType.BIPOLAR, DataType.INT4]) +@pytest.mark.parametrize("act", [None, DataType["BIPOLAR"], DataType["INT4"]]) # weight datatype -@pytest.mark.parametrize("wdt", [DataType.BIPOLAR, DataType.INT4]) +@pytest.mark.parametrize("wdt", [DataType["BIPOLAR"], DataType["INT4"]]) # input datatype -@pytest.mark.parametrize("idt", [DataType.BIPOLAR, DataType.INT4]) +@pytest.mark.parametrize("idt", [DataType["BIPOLAR"], DataType["INT4"]]) # neuron folding, -1 is maximum possible @pytest.mark.parametrize("nf", [-1, 2, 1]) # synapse folding, -1 is maximum possible @@ -252,10 +252,10 @@ def test_fpgadataflow_fclayer_rtlsim(mem_mode, idt, wdt, act, nf, sf, mw, mh): # no activation, produce accumulators T = None tdt = None - if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR: - odt = DataType.UINT32 + if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]: + odt = DataType["UINT32"] else: - odt = DataType.INT32 + odt = DataType["INT32"] else: odt = act (min, max) = calculate_signed_dot_prod_range(idt, wdt, mw) @@ -264,13 +264,13 @@ def test_fpgadataflow_fclayer_rtlsim(mem_mode, idt, wdt, act, nf, sf, mw, mh): # provide non-decreasing thresholds T = np.sort(T, axis=1) # generate thresholds for activation - if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR: - tdt = DataType.UINT32 + if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]: + tdt = DataType["UINT32"] # bias thresholds to be positive T = np.ceil((T + mw) / 2) assert (T >= 0).all() else: - tdt = DataType.INT32 + tdt = DataType["INT32"] model = make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T, tdt) for node in model.graph.node: # lookup op_type in registry of CustomOps @@ -279,14 +279,14 @@ def test_fpgadataflow_fclayer_rtlsim(mem_mode, idt, wdt, act, nf, sf, mw, mh): # prepare input data input_dict = prepare_inputs(x, idt, wdt) - if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR: + if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]: # convert inputs to binary and use xnorpopcountmatmul y = xp.xnorpopcountmatmul((x + 1) / 2, (W + 1) / 2) else: y = np.matmul(x, W) if T is not None: y = multithreshold(y, T) - if act == DataType.BIPOLAR: + if act == DataType["BIPOLAR"]: # binary to bipolar y = 2 * y - 1 else: @@ -319,11 +319,11 @@ def test_fpgadataflow_fclayer_rtlsim(mem_mode, idt, wdt, act, nf, sf, mw, mh): # mem_mode: const or decoupled @pytest.mark.parametrize("mem_mode", ["decoupled"]) # activation: None or DataType -@pytest.mark.parametrize("act", [DataType.INT4]) +@pytest.mark.parametrize("act", [DataType["INT4"]]) # weight datatype -@pytest.mark.parametrize("wdt", [DataType.INT4]) +@pytest.mark.parametrize("wdt", 
[DataType["INT4"]]) # input datatype -@pytest.mark.parametrize("idt", [DataType.INT4]) +@pytest.mark.parametrize("idt", [DataType["INT4"]]) # neuron folding, -1 is maximum possible @pytest.mark.parametrize("nf", [-1]) # synapse folding, -1 is maximum possible @@ -352,10 +352,10 @@ def test_fpgadataflow_fclayer_large_depth_decoupled_mode_rtlsim( # no activation, produce accumulators T = None tdt = None - if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR: - odt = DataType.UINT32 + if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]: + odt = DataType["UINT32"] else: - odt = DataType.INT32 + odt = DataType["INT32"] else: odt = act (min, max) = calculate_signed_dot_prod_range(idt, wdt, mw) @@ -364,13 +364,13 @@ def test_fpgadataflow_fclayer_large_depth_decoupled_mode_rtlsim( # provide non-decreasing thresholds T = np.sort(T, axis=1) # generate thresholds for activation - if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR: - tdt = DataType.UINT32 + if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]: + tdt = DataType["UINT32"] # bias thresholds to be positive T = np.ceil((T + mw) / 2) assert (T >= 0).all() else: - tdt = DataType.INT32 + tdt = DataType["INT32"] model = make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T, tdt) for node in model.graph.node: # lookup op_type in registry of CustomOps @@ -379,14 +379,14 @@ def test_fpgadataflow_fclayer_large_depth_decoupled_mode_rtlsim( # prepare input data input_dict = prepare_inputs(x, idt, wdt) - if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR: + if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]: # convert inputs to binary and use xnorpopcountmatmul y = xp.xnorpopcountmatmul((x + 1) / 2, (W + 1) / 2) else: y = np.matmul(x, W) if T is not None: y = multithreshold(y, T) - if act == DataType.BIPOLAR: + if act == DataType["BIPOLAR"]: # binary to bipolar y = 2 * y - 1 else: diff --git a/tests/fpgadataflow/test_fpgadataflow_fifo.py b/tests/fpgadataflow/test_fpgadataflow_fifo.py index 81f66c42ca76d42fe8ee50576d72007f6ca6c12f..4d3074fe14617df4386f060b6a476734931fb4ca 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fifo.py +++ b/tests/fpgadataflow/test_fpgadataflow_fifo.py @@ -86,7 +86,7 @@ def prepare_inputs(input_tensor, dt): # outWidth @pytest.mark.parametrize("depth", [16]) # finn_dtype -@pytest.mark.parametrize("finn_dtype", [DataType.BIPOLAR]) # , DataType.INT2]) +@pytest.mark.parametrize("finn_dtype", [DataType["BIPOLAR"]]) # , DataType["INT2"]]) @pytest.mark.slow @pytest.mark.vivado def test_fpgadataflow_fifo_rtlsim(Shape, folded_shape, depth, finn_dtype): diff --git a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py index 5db12ee22828e43e276ed85f04f985653fe0a2dd..b564273c0927938859dc438dce619e7067a7ad74 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py +++ b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py @@ -108,7 +108,7 @@ def make_single_fmpadding_modelwrapper(idim, padding, num_ch, simd, idt, pad_sty # PaddingStyle: selects behavior when (odim-idim)%2 != 0 @pytest.mark.parametrize("pad_style", [2]) # FINN input datatype -@pytest.mark.parametrize("idt", [DataType.INT2, DataType.INT4]) +@pytest.mark.parametrize("idt", [DataType["INT2"], DataType["INT4"]]) # execution mode @pytest.mark.parametrize("mode", ["cppsim", "rtlsim"]) @pytest.mark.slow diff --git a/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py b/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py index 
f1373123a69f4c3d02b191c0f0560b59d2c9a7b2..2299cc6e8f397df718d2fd65be8a562c2457e42d 100644 --- a/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py +++ b/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py @@ -78,7 +78,7 @@ def prepare_inputs(input_tensor, idt): # data type -@pytest.mark.parametrize("idt", [DataType.UINT4, DataType.UINT16]) +@pytest.mark.parametrize("idt", [DataType["UINT4"], DataType["UINT16"]]) # channels @pytest.mark.parametrize("ch", [64]) # folding @@ -127,7 +127,7 @@ def test_fpgadataflow_globalaccpool(idt, ch, fold, imdim, exec_mode): exp_cycles_dict = model.analysis(exp_cycles_per_layer) exp_cycles = exp_cycles_dict[node.name] # commented out, needs performance debug: - # test_fpgadataflow_globalaccpool[rtlsim-7-1-64-DataType.UINT4] + # test_fpgadataflow_globalaccpool[rtlsim-7-1-64-DataType["UINT4"]] # assert False where False = # <function isclose at 0x7eff26d5ca60>(50, 103, atol=(0.1 * 103)) # assert np.isclose(exp_cycles, cycles_rtlsim, atol=0.1 * cycles_rtlsim) diff --git a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py index 9a6050a55dd86ca5064b293f87304cbb1365edea..933da667ecb54d471d9e6d48ac4462421addad7e 100644 --- a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py +++ b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py @@ -69,9 +69,9 @@ ip_stitch_model_dir = os.environ["FINN_BUILD_DIR"] def create_one_fc_model(mem_mode="const"): # create a model with a StreamingFCLayer instance with no activation # the wider range of the full accumulator makes debugging a bit easier - wdt = DataType.INT2 - idt = DataType.INT32 - odt = DataType.INT32 + wdt = DataType["INT2"] + idt = DataType["INT32"] + odt = DataType["INT32"] m = 4 no_act = 1 binary_xnor_mode = 0 @@ -122,9 +122,9 @@ def create_one_fc_model(mem_mode="const"): def create_two_fc_model(mem_mode="decoupled"): # create a model with two StreamingFCLayer instances - wdt = DataType.INT2 - idt = DataType.INT32 - odt = DataType.INT32 + wdt = DataType["INT2"] + idt = DataType["INT32"] + odt = DataType["INT32"] m = 4 actval = 0 no_act = 1 diff --git a/tests/fpgadataflow/test_fpgadataflow_labelselect.py b/tests/fpgadataflow/test_fpgadataflow_labelselect.py index 8997208a648fa79439a882de23865496ba527858..8ed06c8bdf1c0dbfab2f8141bf724132f4a24705 100644 --- a/tests/fpgadataflow/test_fpgadataflow_labelselect.py +++ b/tests/fpgadataflow/test_fpgadataflow_labelselect.py @@ -81,7 +81,9 @@ def prepare_inputs(input_tensor, idt): return {"inp": input_tensor} -@pytest.mark.parametrize("idt", [DataType.UINT8, DataType.UINT16, DataType.INT16]) +@pytest.mark.parametrize( + "idt", [DataType["UINT8"], DataType["UINT16"], DataType["INT16"]] +) # labels @pytest.mark.parametrize("labels", [10, 100]) # folding diff --git a/tests/fpgadataflow/test_fpgadataflow_res_estimate.py b/tests/fpgadataflow/test_fpgadataflow_res_estimate.py index 9def746c1c872a8b99b5bab48e8d0bd20798cedd..fe52a73fc07df8551442e975c5eb378c132a56d7 100644 --- a/tests/fpgadataflow/test_fpgadataflow_res_estimate.py +++ b/tests/fpgadataflow/test_fpgadataflow_res_estimate.py @@ -54,9 +54,9 @@ def test_res_estimate(): mw = mh = 4 simd = 1 pe = 1 - idt = DataType.INT2 - wdt = DataType.INT2 - odt = DataType.INT2 + idt = DataType["INT2"] + wdt = DataType["INT2"] + odt = DataType["INT2"] actval = odt.min() inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, mw]) diff --git a/tests/fpgadataflow/test_fpgadataflow_thresholding.py b/tests/fpgadataflow/test_fpgadataflow_thresholding.py index 
012bc3e2e140c3fa63729584629613e3046f8838..341bd3f37041c9b5a1526e99b2c4bad4d3dd3029 100644 --- a/tests/fpgadataflow/test_fpgadataflow_thresholding.py +++ b/tests/fpgadataflow/test_fpgadataflow_thresholding.py @@ -98,9 +98,9 @@ def make_single_thresholding_modelwrapper(T, pe, idt, odt, actval, mem_mode): # activation: None or DataType -@pytest.mark.parametrize("act", [DataType.INT4, DataType.BIPOLAR]) +@pytest.mark.parametrize("act", [DataType["INT4"], DataType["BIPOLAR"]]) # input datatype -@pytest.mark.parametrize("idt", [DataType.INT16, DataType.UINT16]) +@pytest.mark.parametrize("idt", [DataType["INT16"], DataType["UINT16"]]) # folding, -1 is maximum possible @pytest.mark.parametrize("nf", [-1, 2, 1]) # number of input features @@ -125,12 +125,12 @@ def test_fpgadataflow_thresholding(idt, act, nf, ich, exec_mode, mem_mode): T = np.random.randint(idt.min(), idt.max() + 1, (ich, n_steps)).astype(np.float32) # make the vivado_hls threshold bug appear (incorrect rtlsim result when first # threshold of first channel is zero, while using BIPOLAR output) - if act == DataType.BIPOLAR: + if act == DataType["BIPOLAR"]: T[0][0] = 0 # provide non-decreasing thresholds T = np.sort(T, axis=1) - if odt == DataType.BIPOLAR: + if odt == DataType["BIPOLAR"]: actval = 0 else: actval = odt.min() @@ -154,7 +154,7 @@ def test_fpgadataflow_thresholding(idt, act, nf, ich, exec_mode, mem_mode): input_dict = {"inp": x} y = multithreshold(x, T) - if act == DataType.BIPOLAR: + if act == DataType["BIPOLAR"]: # binary to bipolar y = 2 * y - 1 else: @@ -186,8 +186,8 @@ def test_fpgadataflow_thresholding(idt, act, nf, ich, exec_mode, mem_mode): @pytest.mark.vivado def test_runtime_thresholds_single_layer(): mem_mode = "decoupled" - act = DataType.INT4 - idt = DataType.INT16 + act = DataType["INT4"] + idt = DataType["INT16"] nf = 8 ich = 16 pe = ich // nf @@ -202,7 +202,7 @@ def test_runtime_thresholds_single_layer(): # provide non-decreasing thresholds T = np.sort(T, axis=1) - if odt == DataType.BIPOLAR: + if odt == DataType["BIPOLAR"]: actval = 0 else: actval = odt.min() @@ -245,7 +245,7 @@ def test_runtime_thresholds_single_layer(): # old weights (see above) y = exec_ctx["outp"][1] expected = multithreshold(in_tensor, T)[1] - if act == DataType.BIPOLAR: + if act == DataType["BIPOLAR"]: # binary to bipolar expected = 2 * expected - 1 else: @@ -274,7 +274,7 @@ def test_runtime_thresholds_single_layer(): rtlsim_exec(model, exec_ctx, pre_hook=write_weights) y = exec_ctx["outp"][1] expected = multithreshold(in_tensor, new_weights)[1] - if act == DataType.BIPOLAR: + if act == DataType["BIPOLAR"]: # binary to bipolar expected = 2 * expected - 1 else: diff --git a/tests/fpgadataflow/test_fpgadataflow_upsampler.py b/tests/fpgadataflow/test_fpgadataflow_upsampler.py index f7e06adb816cfa664187f39a9567d4f742e4043b..1709cfe32904a5ed369f8399150a8a1d05f4b781 100644 --- a/tests/fpgadataflow/test_fpgadataflow_upsampler.py +++ b/tests/fpgadataflow/test_fpgadataflow_upsampler.py @@ -60,7 +60,7 @@ class ForceDataTypeForTensors(Transformation): Forces a certain datatype for all tensors in a model. 
""" - def __init__(self, dType=DataType.INT8): + def __init__(self, dType=DataType["INT8"]): super().__init__() self._dType = dType @@ -116,7 +116,7 @@ class PyTorchTestModel(nn.Module): # param datatype -@pytest.mark.parametrize("dt", [DataType.INT8]) +@pytest.mark.parametrize("dt", [DataType["INT8"]]) # Width/height of square input feature map @pytest.mark.parametrize("IFMDim", [3, 5]) # upscaling factor diff --git a/tests/fpgadataflow/test_fpgadataflow_vvau.py b/tests/fpgadataflow/test_fpgadataflow_vvau.py index 36b844deab4e28ff35290a170f713a64be839e8a..6f39994bf27594a063a1e66c5bba7867eaabef6e 100644 --- a/tests/fpgadataflow/test_fpgadataflow_vvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_vvau.py @@ -141,11 +141,11 @@ def prepare_inputs(input_tensor): # mem_mode: const or decoupled -@pytest.mark.parametrize("idt", [DataType.UINT4, DataType.UINT8]) +@pytest.mark.parametrize("idt", [DataType["UINT4"], DataType["UINT8"]]) # weight datatype -@pytest.mark.parametrize("wdt", [DataType.INT4]) +@pytest.mark.parametrize("wdt", [DataType["INT4"]]) # activation: None or DataType -@pytest.mark.parametrize("act", [DataType.UINT4, None]) +@pytest.mark.parametrize("act", [DataType["UINT4"], None]) # PE @pytest.mark.parametrize("pe", [1, "channels"]) # Input image shape @@ -187,14 +187,14 @@ def test_fpgadataflow_vvau( if act is None: T = None tdt = None - odt = DataType.INT32 + odt = DataType["INT32"] else: odt = act (min_v, max_v) = _calculate_dot_prod_range(idt, wdt, k_h * k_w * channels) n_steps = act.get_num_possible_values() - 1 T = np.random.randint(min_v, max_v - 1, (channels, n_steps)).astype(np.float32) T = np.sort(T, axis=1) - tdt = DataType.INT32 + tdt = DataType["INT32"] model = _make_single_vvau_modelwrapper( W, pe, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, T, tdt diff --git a/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py b/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py index 556e15f13607caa556daff079026f0b2bacb1b2b..236eb2a0342a2782f106761f4cd356888a2f8630 100644 --- a/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py +++ b/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py @@ -121,7 +121,7 @@ def prepare_inputs(input_tensor): # input datatype -@pytest.mark.parametrize("idt", [DataType.BIPOLAR, DataType.INT4]) +@pytest.mark.parametrize("idt", [DataType["BIPOLAR"], DataType["INT4"]]) # 1d maxpool @pytest.mark.parametrize("dim_1d", [False, True]) # kernel size @@ -151,7 +151,7 @@ def test_fpgadataflow_streamingmaxpool(idt, dim_1d, k, ifm_dim, ifm_ch, exec_mod ofm_dim_h = int(((ifm_dim_h - k_h) / stride_h) + 1) ofm_dim_w = int(((ifm_dim_w - k_w) / stride_w) + 1) ofm_dim = (ofm_dim_h, ofm_dim_w) - if idt == DataType.BIPOLAR and dim_1d: + if idt == DataType["BIPOLAR"] and dim_1d: pytest.skip("Skipping binary StreamingMaxPool_1d (not implemented)") if ifm_dim_h % k_h != 0 or ifm_dim_w % k_w != 0: pytest.skip("Skipping StreamingMaxPool test w/ ImgDim % PoolDim != 0") diff --git a/tests/fpgadataflow/test_runtime_weights.py b/tests/fpgadataflow/test_runtime_weights.py index 706d11114f2f08df700efd40afb8dea218efbf42..0196a78d5c4254d7cb116641f946bcccb9e1ebc9 100644 --- a/tests/fpgadataflow/test_runtime_weights.py +++ b/tests/fpgadataflow/test_runtime_weights.py @@ -49,8 +49,8 @@ target_clk_ns = 5 @pytest.mark.vivado def test_runtime_weights_single_layer(): - idt = DataType.UINT32 - wdt = DataType.UINT4 + idt = DataType["UINT32"] + wdt = DataType["UINT4"] act = None mw = 64 mh = 32 diff --git a/tests/fpgadataflow/test_set_folding.py 
b/tests/fpgadataflow/test_set_folding.py index 8f4d57d3f84dd5c5a167b0e35b775def8ed27c5d..66fd5b43a1b8b8c8986bf9c9b9d0e9efd7a744a6 100644 --- a/tests/fpgadataflow/test_set_folding.py +++ b/tests/fpgadataflow/test_set_folding.py @@ -115,7 +115,7 @@ def make_multi_fclayer_model(ch, wdt, adt, tdt, nnodes): def test_set_folding(target_fps, platform): model = make_multi_fclayer_model( - 128, DataType.INT4, DataType.INT2, DataType.INT16, 5 + 128, DataType["INT4"], DataType["INT2"], DataType["INT16"], 5 ) model = model.transform(GiveUniqueNodeNames()) diff --git a/tests/transformation/streamline/test_move_flatten_past_affine.py b/tests/transformation/streamline/test_move_flatten_past_affine.py index 1971ecfaa181d6ee799a9191b63d2482629b1e1c..ef01436dc9435676b562e2b635a8cf12e901046b 100644 --- a/tests/transformation/streamline/test_move_flatten_past_affine.py +++ b/tests/transformation/streamline/test_move_flatten_past_affine.py @@ -77,14 +77,14 @@ def test_move_flatten_past_affine(data_layout, batch_size): model = ModelWrapper(model) # initialize values - a0_values = gen_finn_dt_tensor(DataType.TERNARY, [1024, 1000]) + a0_values = gen_finn_dt_tensor(DataType["TERNARY"], [1024, 1000]) model.set_initializer("a0", a0_values) a1_values = np.random.uniform(low=0.1, high=0.99, size=(1)).astype(np.float32) model.set_initializer("a1", a1_values) a2_values = np.random.uniform(low=-1, high=1, size=(1000)).astype(np.float32) model.set_initializer("a2", a2_values) - model.set_tensor_datatype("inp", DataType.INT2) + model.set_tensor_datatype("inp", DataType["INT2"]) model.set_tensor_layout("inp", data_layout) model = model.transform(InferShapes()) model = model.transform(InferDataTypes()) @@ -93,7 +93,7 @@ def test_move_flatten_past_affine(data_layout, batch_size): model = model.transform(GiveReadableTensorNames()) # compare execution before and after transformation - inp_values = gen_finn_dt_tensor(DataType.INT2, ishape) + inp_values = gen_finn_dt_tensor(DataType["INT2"], ishape) idict = {model.graph.input[0].name: inp_values} model_transformed = model.transform(MoveFlattenPastAffine()) assert oxe.compare_execution(model, model_transformed, idict) diff --git a/tests/transformation/streamline/test_move_flatten_past_topk.py b/tests/transformation/streamline/test_move_flatten_past_topk.py index 5e0211ad8857653ce75af2f5a7de0c6439770108..6086f7804eda4447de8f5948f521f0b003f65020 100644 --- a/tests/transformation/streamline/test_move_flatten_past_topk.py +++ b/tests/transformation/streamline/test_move_flatten_past_topk.py @@ -69,7 +69,7 @@ def test_move_flatten_past_affine(data_layout, batch_size): model = helper.make_model(graph, producer_name="move_flatten_model") model = ModelWrapper(model) - model.set_tensor_datatype("inp", DataType.INT2) + model.set_tensor_datatype("inp", DataType["INT2"]) model.set_tensor_layout("inp", data_layout) model = model.transform(InsertTopK()) model = model.transform(InferShapes()) @@ -79,7 +79,7 @@ def test_move_flatten_past_affine(data_layout, batch_size): model = model.transform(GiveReadableTensorNames()) # compare execution before and after transformation - inp_values = gen_finn_dt_tensor(DataType.INT2, ishape) + inp_values = gen_finn_dt_tensor(DataType["INT2"], ishape) idict = {model.graph.input[0].name: inp_values} model_transformed = model.transform(MoveFlattenPastTopK()) assert oxe.compare_execution(model, model_transformed, idict) diff --git a/tests/transformation/streamline/test_move_mul_past_dw_conv.py b/tests/transformation/streamline/test_move_mul_past_dw_conv.py index 
cb9beed713eb448b49015a7de601a4d15edc035b..e9e956d845ef8e56d2078bcd738ad3bb0ff72bfa 100644 --- a/tests/transformation/streamline/test_move_mul_past_dw_conv.py +++ b/tests/transformation/streamline/test_move_mul_past_dw_conv.py @@ -68,9 +68,9 @@ def test_move_mul_past_dw_conv(ifm_dim, ifm_ch, k, stride, pad_amt, dw): model = helper.make_model(graph, producer_name="mulpastconv-model") model = ModelWrapper(model) - inp_values = gen_finn_dt_tensor(DataType.INT2, [1, ifm_ch, ifm_dim, ifm_dim]) - mul_values = gen_finn_dt_tensor(DataType.INT2, [1, ifm_ch, 1, 1]) - W_values = gen_finn_dt_tensor(DataType.INT2, W_shape) + inp_values = gen_finn_dt_tensor(DataType["INT2"], [1, ifm_ch, ifm_dim, ifm_dim]) + mul_values = gen_finn_dt_tensor(DataType["INT2"], [1, ifm_ch, 1, 1]) + W_values = gen_finn_dt_tensor(DataType["INT2"], W_shape) model.set_initializer("W", W_values) model.set_initializer("mul", mul_values) model = model.transform(InferShapes()) diff --git a/tests/transformation/streamline/test_move_mul_past_maxpool.py b/tests/transformation/streamline/test_move_mul_past_maxpool.py index 81f18842ed8ba2b5230f3a853076244d0a0ab8d9..2c51aaf36a79591fd0fd0cea368d5e23da0d07c3 100755 --- a/tests/transformation/streamline/test_move_mul_past_maxpool.py +++ b/tests/transformation/streamline/test_move_mul_past_maxpool.py @@ -66,7 +66,7 @@ def test_move_mul_past_maxpool(ifm_dim, ifm_ch, k, stride, pad, cw, negative): model = helper.make_model(graph, producer_name="mulpastmaxpool-model") model = ModelWrapper(model) - inp_values = gen_finn_dt_tensor(DataType.INT2, [1, ifm_ch, ifm_dim, ifm_dim]) + inp_values = gen_finn_dt_tensor(DataType["INT2"], [1, ifm_ch, ifm_dim, ifm_dim]) mul_values = np.random.random_sample(mul_shape).astype(np.float32) if negative == 1: mul_values = mul_values * (-1) diff --git a/tests/transformation/streamline/test_remove_identity_ops.py b/tests/transformation/streamline/test_remove_identity_ops.py index ad7c20fb51902f22c20896bdfb3321dc74d0572d..ee4e42fc8417017184594b6e754f3e8270a46ee1 100644 --- a/tests/transformation/streamline/test_remove_identity_ops.py +++ b/tests/transformation/streamline/test_remove_identity_ops.py @@ -69,11 +69,11 @@ def test_remove_identity_ops(op, as_first_node, approx): model = helper.make_model(graph, producer_name="mulpastconv-model") model = ModelWrapper(model) - inp_values = gen_finn_dt_tensor(DataType.INT2, [1, 4, 1, 1]) + inp_values = gen_finn_dt_tensor(DataType["INT2"], [1, 4, 1, 1]) mul_values = np.random.uniform(low=0.1, high=0.99, size=(1)).astype(np.float32) shape_values = np.asarray([1, -1], dtype=np.int64) div_values = np.random.uniform(low=0.1, high=0.99, size=(1)).astype(np.float32) - matmul_values = gen_finn_dt_tensor(DataType.INT2, [4, 2]) + matmul_values = gen_finn_dt_tensor(DataType["INT2"], [4, 2]) model.set_initializer("mul", mul_values) model.set_initializer("shape", shape_values) model.set_initializer("div", div_values) diff --git a/tests/transformation/streamline/test_round_thresholds.py b/tests/transformation/streamline/test_round_thresholds.py index f9259908a2b4e4d716e3fb9ae7ec28cd9ec85d03..2e57f1c85f6ac197ca7a4cf15e595c34cc0fb564 100644 --- a/tests/transformation/streamline/test_round_thresholds.py +++ b/tests/transformation/streamline/test_round_thresholds.py @@ -47,17 +47,17 @@ def test_round_thresholds(): model = ModelWrapper(model_def) threshold_val = np.asarray([[-1.1], [0.7], [2.3], [5.1]], dtype=np.float32) model.set_initializer("thresholds", threshold_val) - model.set_tensor_datatype("v", DataType.INT8) + 
model.set_tensor_datatype("v", DataType["INT8"]) inp_dict_f = {"v": np.floor(threshold_val).T} inp_dict_n = {"v": np.round(threshold_val).T} inp_dict_c = {"v": np.ceil(threshold_val).T} orig_f = oxe.execute_onnx(model, inp_dict_f)["out"] orig_n = oxe.execute_onnx(model, inp_dict_n)["out"] orig_c = oxe.execute_onnx(model, inp_dict_c)["out"] - assert model.get_tensor_datatype("thresholds") == DataType.FLOAT32 + assert model.get_tensor_datatype("thresholds") == DataType["FLOAT32"] new_model = model.transform(RoundAndClipThresholds()) # rounded up thresholds should have same dtype as input - assert new_model.get_tensor_datatype("thresholds") == DataType.INT8 + assert new_model.get_tensor_datatype("thresholds") == DataType["INT8"] new_f = oxe.execute_onnx(new_model, inp_dict_f)["out"] new_n = oxe.execute_onnx(new_model, inp_dict_n)["out"] new_c = oxe.execute_onnx(new_model, inp_dict_c)["out"] diff --git a/tests/transformation/test_infer_datatypes_lfc.py b/tests/transformation/test_infer_datatypes_lfc.py index 00715e3e3ca3626e1b76bf3b23bae4dc1d65b053..8883dac7a54eafaaa768c8ae991b2030e385b318 100644 --- a/tests/transformation/test_infer_datatypes_lfc.py +++ b/tests/transformation/test_infer_datatypes_lfc.py @@ -49,12 +49,12 @@ def test_infer_datatypes_lfc(): model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) model = model.transform(InferDataTypes()) - assert model.get_tensor_datatype("MatMul_0_out0") == DataType.INT32 - assert model.get_tensor_datatype("MatMul_1_out0") == DataType.INT32 - assert model.get_tensor_datatype("MatMul_2_out0") == DataType.INT32 - assert model.get_tensor_datatype("MatMul_3_out0") == DataType.INT32 - assert model.get_tensor_datatype("MultiThreshold_0_out0") == DataType.BIPOLAR - assert model.get_tensor_datatype("MultiThreshold_1_out0") == DataType.BIPOLAR - assert model.get_tensor_datatype("MultiThreshold_2_out0") == DataType.BIPOLAR - assert model.get_tensor_datatype("MultiThreshold_3_out0") == DataType.BIPOLAR + assert model.get_tensor_datatype("MatMul_0_out0") == DataType["INT32"] + assert model.get_tensor_datatype("MatMul_1_out0") == DataType["INT32"] + assert model.get_tensor_datatype("MatMul_2_out0") == DataType["INT32"] + assert model.get_tensor_datatype("MatMul_3_out0") == DataType["INT32"] + assert model.get_tensor_datatype("MultiThreshold_0_out0") == DataType["BIPOLAR"] + assert model.get_tensor_datatype("MultiThreshold_1_out0") == DataType["BIPOLAR"] + assert model.get_tensor_datatype("MultiThreshold_2_out0") == DataType["BIPOLAR"] + assert model.get_tensor_datatype("MultiThreshold_3_out0") == DataType["BIPOLAR"] os.remove(export_onnx_path) diff --git a/tests/util/test_create.py b/tests/util/test_create.py index 42a288b74ecda9746296519b1b86563c75b2752e..c11e60175ea3ac94b6686ec5f8401a7c134fe53e 100644 --- a/tests/util/test_create.py +++ b/tests/util/test_create.py @@ -32,7 +32,9 @@ import finn.util.create as create from finn.core.datatype import DataType -@pytest.mark.parametrize("bitwidth", [DataType.BIPOLAR, DataType.INT2, DataType.INT4]) +@pytest.mark.parametrize( + "bitwidth", [DataType["BIPOLAR"], DataType["INT2"], DataType["INT4"]] +) def test_hls_random_mlp_maker(bitwidth): w = bitwidth a = bitwidth @@ -42,7 +44,7 @@ def test_hls_random_mlp_maker(bitwidth): "mh": 100, "simd": 185, "pe": 100, - "idt": DataType.BIPOLAR, + "idt": DataType["BIPOLAR"], "wdt": w, "act": a, }, @@ -56,7 +58,7 @@ def test_hls_random_mlp_maker(bitwidth): "pe": 1, "idt": a, "wdt": w, - "act": DataType.BIPOLAR, + "act": 
DataType["BIPOLAR"], }, ] diff --git a/tests/util/test_data_packing_hls.py b/tests/util/test_data_packing_hls.py index 3221eda34c85ed9d65b258b6489699cda8400517..9c47bb293e7640a30d6741fbc9ae4b9801f8bd7a 100644 --- a/tests/util/test_data_packing_hls.py +++ b/tests/util/test_data_packing_hls.py @@ -38,7 +38,9 @@ from finn.core.datatype import DataType from finn.util.data_packing import numpy_to_hls_code -@pytest.mark.parametrize("dtype", [DataType.BINARY, DataType.INT2, DataType.INT32]) +@pytest.mark.parametrize( + "dtype", [DataType["BINARY"], DataType["INT2"], DataType["INT32"]] +) @pytest.mark.parametrize("test_shape", [(1, 2, 4), (1, 1, 64), (2, 64)]) @pytest.mark.vivado def test_npy2apintstream(test_shape, dtype): @@ -119,17 +121,17 @@ def test_numpy_to_hls_code(): return "".join(s.split()) A = [[1, 1, 1, 0], [0, 1, 1, 0]] - ret = numpy_to_hls_code(A, DataType.BINARY, "test", True) + ret = numpy_to_hls_code(A, DataType["BINARY"], "test", True) eA = """ap_uint<4> test[2] = {ap_uint<4>("0xe", 16), ap_uint<4>("0x6", 16)};""" assert remove_all_whitespace(ret) == remove_all_whitespace(eA) B = [[[3, 3], [3, 3]], [[1, 3], [3, 1]]] - ret = numpy_to_hls_code(B, DataType.UINT2, "test", True) + ret = numpy_to_hls_code(B, DataType["UINT2"], "test", True) eB = """ap_uint<4> test[2][2] = {{ap_uint<4>("0xf", 16), ap_uint<4>("0xf", 16)}, {ap_uint<4>("0x7", 16), ap_uint<4>("0xd", 16)}};""" assert remove_all_whitespace(ret) == remove_all_whitespace(eB) - ret = numpy_to_hls_code(B, DataType.UINT2, "test", True, True) + ret = numpy_to_hls_code(B, DataType["UINT2"], "test", True, True) eB = """{{ap_uint<4>("0xf", 16), ap_uint<4>("0xf", 16)}, {ap_uint<4>("0x7", 16), ap_uint<4>("0xd", 16)}};""" assert remove_all_whitespace(ret) == remove_all_whitespace(eB)