Commit 9c5f53fa authored by Yaman Umuroglu

[StreamingFC] change const mem_mode param generation

use const instead of static, local instead of global
parent b273b562
@@ -463,7 +463,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         if export_wdt.bitwidth() != 1:
             f_weights.write(
-                "static FixedPointWeights<{},{},{},{}> weights = ".format(
+                "const FixedPointWeights<{},{},{},{}> weights = ".format(
                     self.get_nodeattr("SIMD"),
                     export_wdt.get_hls_datatype_str(),
                     self.get_nodeattr("PE"),
@@ -472,7 +472,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
             )
         else:
             f_weights.write(
-                "static BinaryWeights<{},{},{}> weights = ".format(
+                "const BinaryWeights<{},{},{}> weights = ".format(
                     self.get_nodeattr("SIMD"),
                     self.get_nodeattr("PE"),
                     self.calc_wmem(),
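For context, a minimal sketch of what this change does to the emitted weight declaration. The SIMD/PE/WMEM values and the ap_int<2> datatype string below are hypothetical, chosen only for illustration, not taken from the commit:

# Hypothetical parameter values, for illustration only
simd, pe, wmem = 32, 16, 64
wdt_hls = "ap_int<2>"  # assumed HLS datatype string for the export weights

# Before: "static" at file scope gave the weight array internal linkage
# and static storage duration in the generated params.h
old_decl = "static FixedPointWeights<{},{},{},{}> weights = ".format(
    simd, wdt_hls, pe, wmem
)

# After: "const" lets the same array be declared wherever params.h is
# included, e.g. inside the generated top-level function, as a ROM
new_decl = "const FixedPointWeights<{},{},{},{}> weights = ".format(
    simd, wdt_hls, pe, wmem
)
print(old_decl)  # static FixedPointWeights<32,ap_int<2>,16,64> weights =
print(new_decl)  # const FixedPointWeights<32,ap_int<2>,16,64> weights =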
@@ -709,7 +709,8 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         mem_mode = self.get_nodeattr("mem_mode")
         if mem_mode == "const":
-            self.code_gen_dict["$GLOBALS$"] += ['#include "params.h"']
+            # self.code_gen_dict["$GLOBALS$"] += ['#include "params.h"']
+            pass
         elif mem_mode == "decoupled":
             self.code_gen_dict["$GLOBALS$"] += ['#include "mvau.hpp"']
         else:
@@ -928,6 +929,7 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         )
         if mem_mode == "const":
+            self.code_gen_dict["$PRAGMAS$"].append('#include "params.h"')
             # the weight tensor is ap_uint<simd*prec> [PE][WMEM]
             # partition for parallel access along the PE dimension (dim 1)
             self.code_gen_dict["$PRAGMAS$"].append(
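The second half of the change relies on where the two codegen sections land in the generated C++ file: $GLOBALS$ expands at file scope, while $PRAGMAS$ expands inside the top-level function body. A hedged sketch of that assembly follows; the template string is an illustrative mock-up, not FINN's actual template, but the scoping relationship is the point:

# Mock-up of how the two sections compose into generated C++ code;
# section names $GLOBALS$/$PRAGMAS$ come from the diff above
code_gen_dict = {"$GLOBALS$": [], "$PRAGMAS$": []}

mem_mode = "const"
if mem_mode == "const":
    # After this commit the include goes into $PRAGMAS$, so the const
    # weights array is declared inside the function (local scope)
    code_gen_dict["$PRAGMAS$"].append('#include "params.h"')

generated = (
    "\n".join(code_gen_dict["$GLOBALS$"])  # file scope: empty in const mode
    + "\nvoid StreamingFCLayer_Batch(/* stream args */) {\n    "
    + "\n    ".join(code_gen_dict["$PRAGMAS$"])  # function scope
    + "\n}\n"
)
print(generated)

Taken together the two hunks realize the commit message: the weights become const instead of static, and are declared locally inside the generated function instead of globally at file scope.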