diff --git a/docs/finn/_build/doctrees/environment.pickle b/docs/finn/_build/doctrees/environment.pickle
index fea633144c18d76692cb8554887fae3ca177c98c..514b0d9424d1f5a1bd10354404412a83a8bba92d 100644
Binary files a/docs/finn/_build/doctrees/environment.pickle and b/docs/finn/_build/doctrees/environment.pickle differ
diff --git a/docs/finn/_build/html/end_to_end_flow.html b/docs/finn/_build/html/end_to_end_flow.html
index c9bdcf888e7eafbba036db71bc768dc6adbaecca..ae7c61e881b1466aa054842d1fc32a4d46275bd6 100644
--- a/docs/finn/_build/html/end_to_end_flow.html
+++ b/docs/finn/_build/html/end_to_end_flow.html
@@ -35,8 +35,8 @@
   <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
     <link rel="index" title="Index" href="genindex.html" />
     <link rel="search" title="Search" href="search.html" />
-    <link rel="next" title="Getting Started" href="getting_started.html" />
-    <link rel="prev" title="FINN" href="index.html" /> 
+    <link rel="next" title="finn package" href="source_code/finn.html" />
+    <link rel="prev" title="Getting Started" href="getting_started.html" /> 
 </head>
 
 <body class="wy-body-for-nav">
@@ -81,8 +81,8 @@
             
             
               <ul class="current">
-<li class="toctree-l1 current"><a class="current reference internal" href="#">FINN - End-to-End Flow</a></li>
 <li class="toctree-l1"><a class="reference internal" href="getting_started.html">Getting Started</a></li>
+<li class="toctree-l1 current"><a class="current reference internal" href="#">FINN - End-to-End Flow</a></li>
 <li class="toctree-l1"><a class="reference internal" href="source_code/finn.html">finn package</a></li>
 </ul>
 
@@ -161,10 +161,10 @@
   
     <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
       
-        <a href="getting_started.html" class="btn btn-neutral float-right" title="Getting Started" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right"></span></a>
+        <a href="source_code/finn.html" class="btn btn-neutral float-right" title="finn package" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right"></span></a>
       
       
-        <a href="index.html" class="btn btn-neutral float-left" title="FINN" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> Previous</a>
+        <a href="getting_started.html" class="btn btn-neutral float-left" title="Getting Started" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> Previous</a>
       
     </div>
   
diff --git a/src/finn/analysis/fpgadataflow/hls_synth_res_estimation.py b/src/finn/analysis/fpgadataflow/hls_synth_res_estimation.py
index fe07b92a19683c035db6d710c8e58b6796555935..89f0c170ae354700ff23f0c1fd529482bb38f892 100644
--- a/src/finn/analysis/fpgadataflow/hls_synth_res_estimation.py
+++ b/src/finn/analysis/fpgadataflow/hls_synth_res_estimation.py
@@ -7,6 +7,7 @@ import finn.util.basic as util
 
 def hls_synth_res_estimation(model):
     """Extracts the results from the vivado synthesis.
+
     Returns {node name : resource estimation}."""
 
     res_dict = {}
diff --git a/src/finn/analysis/fpgadataflow/res_estimation.py b/src/finn/analysis/fpgadataflow/res_estimation.py
index 27ad91b857316c533b35621cfd51a8262d61f94b..8b9e3df9472f6218edfc4ab3ef2bd6a63fbd1bae 100644
--- a/src/finn/analysis/fpgadataflow/res_estimation.py
+++ b/src/finn/analysis/fpgadataflow/res_estimation.py
@@ -4,6 +4,7 @@ import finn.util.basic as util
 
 def res_estimation(model):
     """Estimates the resources needed for the given model.
+
     Returns {node name : resource estimation}."""
 
     res_dict = {}
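
Both resource-estimation passes above follow the analysis-function convention their docstrings describe: a plain function that takes a model and returns a dict. A minimal sketch of invoking one via ModelWrapper.analysis(), assuming a working FINN install and a hypothetical model file containing fpgadataflow nodes:

    from finn.core.modelwrapper import ModelWrapper
    from finn.analysis.fpgadataflow.res_estimation import res_estimation

    # "dataflow_model.onnx" is a hypothetical path to a model whose
    # nodes are fpgadataflow custom ops
    model = ModelWrapper("dataflow_model.onnx")

    # ModelWrapper.analysis() simply calls the given function on the model
    # and returns its result: here a {node name : resource estimation} dict
    res = model.analysis(res_estimation)
    for node_name, estimate in res.items():
        print(node_name, estimate)
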
diff --git a/src/finn/analysis/topology.py b/src/finn/analysis/topology.py
index 8f8d426dc4e1cbbae40c1976605c32f71599ef5f..27cb111f572030b6194f73084790b484ba23a83c 100644
--- a/src/finn/analysis/topology.py
+++ b/src/finn/analysis/topology.py
@@ -4,7 +4,9 @@ import numpy as np
 def is_linear(model):
     """Checks whether the given model graph is linear. This is done by looking
     at the fan-out of each tensor. All tensors have a fan-out <= 1 in a linear
-    graph. Returns {"is_linear": Bool}."""
+    graph.
+
+    Returns {"is_linear": Bool}."""
     per_tensor_fanouts = get_per_tensor_fanouts(model)
     # check for tensors that have fanout > 1
     multi_fanouts = list(filter(lambda x: x[1] > 1, per_tensor_fanouts.items()))
@@ -23,7 +25,9 @@ def get_per_tensor_fanouts(model):
 
 def all_tensors_f32(model):
     """Checks whether all tensors have a float32 dtype, extra quantization
-    annotations notwithstanding. Returns {"all_tensors_f32": Bool}."""
+    annotations notwithstanding.
+
+    Returns {"all_tensors_f32": Bool}."""
     all_tensors = model.make_empty_exec_context().items()
     non_f32_tensors = filter(lambda x: x[1].dtype != np.float32, all_tensors)
     return {"all_tensors_f32": len(list(non_f32_tensors)) == 0}
@@ -33,8 +37,9 @@ def node_inputs_in_expected_order(model):
     """Verifies that the node inputs are ordered in the way that FINN expects
     them. When a node has a mixture of static (= constant, initialized) inputs
     and dynamic inputs, the dynamic input should come first, followed by the
-    static one. Only verifiable for a small subset of op_types for now. Returns 
-    {"node_inputs_in_expected_order": Bool}."""
+    static one. Only verifiable for a small subset of op_types for now.
+
+    Returns {"node_inputs_in_expected_order": Bool}."""
     op_types = ["MatMul", "Conv", "Add", "Mul"]
     nodes = filter(lambda x: x.op_type in op_types, model.graph.node)
     all_OK = True
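
The topology checks above use the same convention, each returning a single-entry dict keyed by the check name. A short sketch, again with a hypothetical model path:

    from finn.core.modelwrapper import ModelWrapper
    from finn.analysis.topology import is_linear, all_tensors_f32

    model = ModelWrapper("model.onnx")  # hypothetical path

    # each pass returns {"<check name>": Bool}, as the docstrings now state
    assert model.analysis(is_linear)["is_linear"]
    assert model.analysis(all_tensors_f32)["all_tensors_f32"]
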
diff --git a/src/finn/analysis/verify_custom_nodes.py b/src/finn/analysis/verify_custom_nodes.py
index e49a0e77495a2d2daffbfb1eac010e656d183a8f..b7a2573e5e5a66011c4a9aef5d1fea5f95e6662d 100644
--- a/src/finn/analysis/verify_custom_nodes.py
+++ b/src/finn/analysis/verify_custom_nodes.py
@@ -3,7 +3,9 @@ import finn.custom_op.registry as registry
 
 def verify_nodes(model):
     """Checks if custom ops in graph are correctly built, with all attributes
-    and inputs. Returns {node op_type : info_messages}
+    and inputs.
+
+    Returns {node op_type : info_messages}
     
     * info_messages: is list of strings about the result of the verification."""
 
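
verify_nodes fits the same analysis pattern, but its result dict is keyed by op_type and carries the verification messages described above. A minimal sketch with a hypothetical model path:

    from finn.core.modelwrapper import ModelWrapper
    from finn.analysis.verify_custom_nodes import verify_nodes

    model = ModelWrapper("model.onnx")  # hypothetical path
    result = model.analysis(verify_nodes)
    # {node op_type : info_messages}, one message list per custom op type
    for op_type, info_messages in result.items():
        print(op_type, ":", "; ".join(info_messages))
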
diff --git a/src/finn/core/datatype.py b/src/finn/core/datatype.py
index 15fb661d63262aa6eeaeb82ea63fc036febcb96f..21648caa510d10cd8c06c60315e39e773fac00c2 100644
--- a/src/finn/core/datatype.py
+++ b/src/finn/core/datatype.py
@@ -32,9 +32,16 @@ import numpy as np
 class DataType(Enum):
     """Enum class that contains FINN data types to set the quantization annotation. 
     ONNX does not support data types smaller than 8-bit integers, whereas in FINN we are
-    interested in smaller integers down to ternary and bipolar."""
-    # important to maintain ordering here: unsigned to signed, fewer to more
-    # bits. The get_smallest_possible() member function is dependent on this.
+    interested in smaller integers down to ternary and bipolar.
+
+    Assignment of DataTypes to indices is based on the following ordering:
+
+    * unsigned to signed
+
+    * fewer to more bits
+
+    Currently supported DataTypes: """
+    # important: the get_smallest_possible() member function depends on this ordering.
     BINARY = auto()
     UINT2 = auto()
     UINT3 = auto()
@@ -105,7 +112,7 @@ class DataType(Enum):
     def allowed(self, value):
         """Check whether given value is allowed for this DataType.
 
-    value (float32): value to be checked"""
+        * value (float32): value to be checked"""
 
         if "FLOAT" in self.name:
             return True
@@ -125,7 +132,7 @@ class DataType(Enum):
             raise Exception("Unrecognized data type: %s" % self.name)
 
     def get_num_possible_values(self):
-        """Return the number of possible values this DataType can take. Only
+        """Returns the number of possible values this DataType can take. Only
         implemented for integer types for now."""
         assert self.is_integer()
         if "INT" in self.name:
@@ -136,7 +143,7 @@ class DataType(Enum):
             return 3
 
     def get_smallest_possible(value):
-        """Return smallest (fewest bits) possible DataType that can represent
+        """Returns smallest (fewest bits) possible DataType that can represent
       value. Prefers unsigned integers where possible."""
         if not int(value) == value:
             return DataType["FLOAT32"]
@@ -146,16 +153,16 @@ class DataType(Enum):
                 return dt
 
     def signed(self):
-        """Return whether this DataType can represent negative numbers."""
+        """Returns whether this DataType can represent negative numbers."""
         return self.min() < 0
 
     def is_integer(self):
-        """Return whether this DataType represents integer values only."""
+        """Returns whether this DataType represents integer values only."""
         # only FLOAT32 is noninteger for now
         return self != DataType.FLOAT32
 
     def get_hls_datatype_str(self):
-        """Return the corresponding Vivado HLS datatype name."""
+        """Returns the corresponding Vivado HLS datatype name."""
         if self.is_integer():
             if self.signed():
                 return "ap_int<%d>" % self.bitwidth()
diff --git a/src/finn/core/modelwrapper.py b/src/finn/core/modelwrapper.py
index b01c82399adfabb289e484057d22808deac31175..cf5026ebf0fe02f3c658ea00b7e9daa5d2596ece 100644
--- a/src/finn/core/modelwrapper.py
+++ b/src/finn/core/modelwrapper.py
@@ -52,16 +52,17 @@ class ModelWrapper:
         self._model_proto = value
 
     def save(self, filename):
-        """Save the wrapper ONNX ModelProto into a file with given name."""
+        """Saves the wrapper ONNX ModelProto into a file with given name."""
         onnx.save(self._model_proto, filename)
 
     def analysis(self, analysis_fxn):
-        """Run given anaylsis_fxn on this model and return resulting dict."""
+        """Runs given anaylsis_fxn on this model and return resulting dict."""
         return analysis_fxn(self)
 
     def transform(self, transformation, make_deepcopy=True):
         """Applies given Transformation repeatedly until no more changes can be made
         and returns a transformed ModelWrapper instance.
+
         If make_deepcopy is specified, operates on a new (deep)copy of model.
         """
         transformed_model = self
@@ -76,8 +77,11 @@ class ModelWrapper:
 
     def check_compatibility(self):
         """Checks this model for FINN compatibility:
+
         * no embedded subgraphs
+
         * all tensor shapes are specified, including activations
+
         * all constants are initializers
         """
         # TODO check for no embedded subgraphs
@@ -147,7 +151,7 @@ class ModelWrapper:
             return None
 
     def set_tensor_shape(self, tensor_name, tensor_shape, dtype=TensorProto.FLOAT):
-        """Assign shape in ValueInfoProto for tensor with given name."""
+        """Assigns shape in ValueInfoProto for tensor with given name."""
         new_vi = oh.make_tensor_value_info(tensor_name, dtype, tensor_shape)
         # find what container tis tensor's ValueInfo lives in
         # if not found anywhere, we assume it's a new value_info
@@ -161,7 +165,7 @@ class ModelWrapper:
         target_container.append(new_vi)
 
     def set_initializer(self, tensor_name, tensor_value):
-        """Set the initializer value for tensor with given name."""
+        """Sets the initializer value for tensor with given name."""
         graph = self._model_proto.graph
         # convert tensor_value (numpy array) into TensorProto w/ correct name
         tensor_init_proto = np_helper.from_array(tensor_value)
@@ -181,7 +185,7 @@ class ModelWrapper:
         self.set_tensor_shape(tensor_name, list(tensor_value.shape), dtype)
 
     def rename_tensor(self, old_name, new_name):
-        """Rename a tensor from old_name to new_name."""
+        """Renames a tensor from old_name to new_name."""
         graph = self.graph
         # sweep over inputs
         if util.get_by_name(graph.input, old_name) is not None:
@@ -211,7 +215,7 @@ class ModelWrapper:
                 n.output[list(n.output).index(old_name)] = new_name
 
     def get_initializer(self, tensor_name):
-        """Get the initializer value for tensor with given name, if any."""
+        """Gets the initializer value for tensor with given name, if any."""
         graph = self._model_proto.graph
         init_names = [x.name for x in graph.initializer]
         try:
@@ -221,7 +225,7 @@ class ModelWrapper:
             return None
 
     def find_producer(self, tensor_name):
-        """Find and return the node that produces the tensor with given name.
+        """Finds and returns the node that produces the tensor with given name.
         Currently only works for linear graphs."""
         all_outputs = [x.output[0] for x in self._model_proto.graph.node]
         try:
@@ -231,7 +235,7 @@ class ModelWrapper:
             return None
 
     def find_consumer(self, tensor_name):
-        """Find and return the node that consumes the tensor with given name.
+        """Finds and returns the node that consumes the tensor with given name.
         Currently only works for linear graphs."""
         all_inputs = [x.input[0] for x in self._model_proto.graph.node]
         try:
@@ -241,7 +245,7 @@ class ModelWrapper:
             return None
 
     def get_all_tensor_names(self):
-        """Return a list of all (input, output and value_info) tensor names
+        """Returns a list of all (input, output and value_info) tensor names
         in the graph."""
         graph = self.graph
         names = [x.name for x in graph.value_info]
@@ -259,6 +263,7 @@ class ModelWrapper:
 
     def make_empty_exec_context(self):
         """Creates an empty execution context for this model.
+
         The execution context is a dictionary of all tensors used for the
         inference computation. Any initializer values will be taken into
         account, all other tensors will be zero."""
@@ -294,7 +299,7 @@ class ModelWrapper:
         return ret
 
     def get_tensor_fanout(self, tensor_name):
-        """Return the number of nodes for which the tensor with given name is
+        """Returns the number of nodes for which the tensor with given name is
         as input."""
         graph = self.graph
         fanout = 0
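
The renamed ModelWrapper docstrings above describe a small, consistent API; a usage sketch tying several of them together (tensor names and the model path are hypothetical):

    import numpy as np
    from finn.core.modelwrapper import ModelWrapper

    model = ModelWrapper("model.onnx")  # hypothetical path

    # set/get an initializer (a constant tensor) by name
    model.set_initializer("weights_0", np.zeros((4, 4), dtype=np.float32))
    w = model.get_initializer("weights_0")

    # producer/consumer lookup; per the docstrings, linear graphs only
    producer = model.find_producer("some_tensor")
    consumer = model.find_consumer("some_tensor")

    # a graph is linear exactly when every tensor has fanout <= 1
    fanout = model.get_tensor_fanout("some_tensor")

    model.save("model_out.onnx")
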
diff --git a/src/finn/core/onnx_exec.py b/src/finn/core/onnx_exec.py
index 5ed45339cc2fe5f77339e33c4e7a8f6c556b704f..ebb5ee6d4470757127972297aec00f2a92fe4023 100644
--- a/src/finn/core/onnx_exec.py
+++ b/src/finn/core/onnx_exec.py
@@ -38,9 +38,10 @@ from finn.custom_op.registry import getCustomOp
 
 
 def execute_node(node, context, graph):
-    """Execute a single node by using onnxruntime, with custom function or
+    """Executes a single node by using onnxruntime, with custom function or
     if dataflow partition by using remote execution or rtlsim.
-    Input/output provided via context."""
+
+    * Input/output provided via context."""
 
     if node.op_type == "StreamingDataflowPartition":
         sdp_node = getCustomOp(node)
@@ -94,10 +95,12 @@ def execute_node(node, context, graph):
 
 
 def execute_onnx(model, input_dict, return_full_exec_context=False):
-    """Execute given ONNX ModelWrapper with given named inputs.
-    If return_full_exec_context is False, a dict of named outputs is returned
+    """Executes given ONNX ModelWrapper with given named inputs.
+
+    * If return_full_exec_context is False, a dict of named outputs is returned
     as indicated by the model.graph.output.
-    If return return_full_exec_context is True, the full set of tensors used by
+
+    * If return_full_exec_context is True, the full set of tensors used by
     the execution (including inputs, weights, activations and final outputs)
     will be returned as a dict."""
 
@@ -164,7 +167,7 @@ def execute_onnx(model, input_dict, return_full_exec_context=False):
 
 
 def execute_onnx_and_make_model(model, input_dict):
-    """Execute given ONNX ModelWrapper with given named inputs and return a new
+    """Executes given ONNX ModelWrapper with given named inputs and return a new
     ModelWrapper where an initializer is provided for each tensor as taken from
     the execution. This new model is useful for debugging, since it contains
     all the intermediate activation values."""
@@ -187,8 +190,9 @@ def compare_execution(
     input_dict,
     compare_fxn=lambda x, y: np.isclose(x, y, atol=1e-3).all(),
 ):
-    """Execute two ONNX models and compare their outputs using given function.
-    compare_fxn should take in two tensors and return a Boolean"""
+    """Executes two ONNX models and compare their outputs using given function.
+    
+    * compare_fxn should take in two tensors and return a Boolean"""
     # compare values from first output tensors produced
     res_a = list(execute_onnx(model_a, input_dict).items())[0][1]
     res_b = list(execute_onnx(model_b, input_dict).items())[0][1]
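
Finally, a sketch of the execution entry points whose docstrings this hunk reworks; the input tensor name and model path are placeholders:

    import numpy as np
    from finn.core.modelwrapper import ModelWrapper
    import finn.core.onnx_exec as oxe

    model = ModelWrapper("model.onnx")        # hypothetical path
    input_dict = {"global_in": np.random.rand(1, 64).astype(np.float32)}

    # default: dict of named outputs, as indicated by model.graph.output
    outputs = oxe.execute_onnx(model, input_dict)

    # full execution context: every tensor used during inference,
    # including inputs, weights, activations and final outputs
    full_ctx = oxe.execute_onnx(model, input_dict, return_full_exec_context=True)
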