diff --git a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb
index c31c5f9ec41b98faec67a01be4dfbd9090c463cb..3792c5704bcff3600407522b530327ef48d53f6b 100644
--- a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb
+++ b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb
@@ -405,7 +405,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "Training loss = 0.132480 test accuracy = 0.797989: 100%|██████████| 10/10 [00:57<00:00,  5.74s/it]\n"
+      "Training loss = 0.132480 test accuracy = 0.797989: 100%|██████████| 10/10 [00:57<00:00,  5.79s/it]\n"
      ]
     }
    ],
@@ -784,6 +784,49 @@
     "print(\"Model saved to %s\" % export_onnx_path)"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## One final fix: input datatype\n",
+    "\n",
+    "There's one more thing we'll do: we will mark the input tensor datatype as `DataType.BIPOLAR`, which will be used by the compiler later on. To do this, we'll utilize the `ModelWrapper` component from FINN, which lets us examine and manipulate the ONNX graph in an easier way.\n",
+    "\n",
+    "*In the near future it will be possible to add this information to the model [while exporting](https://github.com/Xilinx/brevitas/issues/232), instead of having to add it manually.*"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 26,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Input tensor name: 0\n",
+      "Input tensor shape: [1, 600]\n",
+      "Input tensor datatype: DataType.BIPOLAR\n"
+     ]
+    }
+   ],
+   "source": [
+    "from finn.core.modelwrapper import ModelWrapper\n",
+    "from finn.core.datatype import DataType\n",
+    "\n",
+    "finn_model = ModelWrapper(export_onnx_path)\n",
+    "\n",
+    "finnonnx_in_tensor_name = finn_model.graph.input[0].name\n",
+    "finnonnx_model_in_shape = finn_model.get_tensor_shape(finnonnx_in_tensor_name)\n",
+    "finn_model.set_tensor_datatype(finnonnx_in_tensor_name, DataType.BIPOLAR)\n",
+    "print(\"Input tensor name: %s\" % finnonnx_in_tensor_name)\n",
+    "print(\"Input tensor shape: %s\" % str(finnonnx_model_in_shape))\n",
+    "print(\"Input tensor datatype: %s\" % str(finn_model.get_tensor_datatype(finnonnx_in_tensor_name)))\n",
+    "\n",
+    "ready_model_filename = \"cybsec-mlp-ready.onnx\"\n",
+    "finn_model.save(ready_model_filename)"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -792,23 +835,24 @@
     "\n",
     "Let's examine the exported ONNX model with Netron. Particular things of note:\n",
     "\n",
-    "* The input preprocessing (x + 1) / 2 is exported as part of the network (initial Add and Div layers)\n",
+    "* The input tensor \"0\" is annotated with `quantization: finn_datatype: BIPOLAR`\n",
+    "* The input preprocessing (x + 1) / 2 is exported as part of the network (initial `Add` and `Div` layers)\n",
     "* We've exported the padded version; shape of the first MatMul node's weight parameter is 600x64\n",
-    "* The weight parameters (second inputs) for MatMul nodes are annotated with `quantization: finn_datatype:INT2`\n",
+    "* The weight parameters (second inputs) for MatMul nodes are annotated with `quantization: finn_datatype: INT2`\n",
     "* The quantized activations are exported as `MultiThreshold` nodes with `domain=finn.custom_op.general`\n",
     "* There's a final `MultiThreshold` node with threshold=0 to produce the final bipolar output (this is the `qnt_output` from `CybSecMLPForExport`"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 26,
+   "execution_count": 27,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Serving 'cybsec-mlp.onnx' at http://0.0.0.0:8081\n"
+      "Serving 'cybsec-mlp-ready.onnx' at http://0.0.0.0:8081\n"
      ]
     },
     {
@@ -825,10 +869,10 @@
        "        "
       ],
       "text/plain": [
-       "<IPython.lib.display.IFrame at 0x7fa9a3c044e0>"
+       "<IPython.lib.display.IFrame at 0x7f808a61f438>"
       ]
      },
-     "execution_count": 26,
+     "execution_count": 27,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -836,7 +880,7 @@
    "source": [
     "from finn.util.visualization import showInNetron\n",
     "\n",
-    "showInNetron(export_onnx_path)"
+    "showInNetron(ready_model_filename)"
    ]
   },
   {
diff --git a/notebooks/end2end_example/cybersecurity/2-export-to-finn-and-verify.ipynb b/notebooks/end2end_example/cybersecurity/2-export-to-finn-and-verify.ipynb
index 0f69019a314f6cca81dbafb186a0054f94d89939..fd40f5c58c7cbf49e9b4542dc3ebece93cf1e645 100644
--- a/notebooks/end2end_example/cybersecurity/2-export-to-finn-and-verify.ipynb
+++ b/notebooks/end2end_example/cybersecurity/2-export-to-finn-and-verify.ipynb
@@ -6,6 +6,8 @@
    "source": [
     "# Verify Exported ONNX Model in FINN\n",
     "\n",
+    "<font color=\"red\">**FPGA'21 tutorial:** We will skip this notebook during the tutorial due to time constraints. You are encouraged to go through it on your own after the hands-on session to verify the exported MLP in FINN.</font>\n",
+    "\n",
     "**Important: This notebook depends on the 1-train-mlp-with-brevitas notebook, because we are using the ONNX model that was exported there. So please make sure the needed .onnx file is generated before you run this notebook.**\n",
     "\n",
     "**Also remember to 'close and halt' any other FINN notebooks, since Netron visualizations use the same port.**\n",
diff --git a/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb b/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb
index dec9b7b40a0ce1cbd072ab59d0f5b1ed63b39958..6433b8cbe14ab7562fe8983ebaf7db47b03c6706 100644
--- a/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb
+++ b/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb
@@ -8,7 +8,7 @@
     "\n",
     "<font color=\"red\">**FPGA'21 tutorial:** We recommend clicking Cell -> Run All when you start reading this notebook for \"latency hiding\".</font>\n",
     "\n",
-    "**Important: This notebook depends on the 2-cybersecurity-finn-verification notebook because we are using models that were created by these notebooks. So please make sure the needed .onnx files are generated prior to running this notebook.**\n",
+    "**Important: This notebook depends on the 1-train-mlp-with-brevitas notebook because we are using models that were created by that notebook. So please make sure the needed .onnx files are generated prior to running this notebook.**\n",
     "\n",
     "<img align=\"left\" src=\"finn-example.png\" alt=\"drawing\" style=\"margin-right: 20px\" width=\"250\"/>\n",
     "\n",
@@ -112,8 +112,8 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Building dataflow accelerator from cybsec-mlp-verified.onnx\n",
-      "Intermediate outputs will be generated in /tmp/finn_dev_ubuntu\n",
+      "Building dataflow accelerator from cybsec-mlp-ready.onnx\n",
+      "Intermediate outputs will be generated in /tmp/finn_dev_maltanar\n",
       "Final outputs will be generated in output_estimates_only\n",
       "Build log is at output_estimates_only/build_dataflow.log\n",
       "Running step: step_tidy_up [1/7]\n",
@@ -141,7 +141,7 @@
     "import finn.builder.build_dataflow as build\n",
     "import finn.builder.build_dataflow_config as build_cfg\n",
     "\n",
-    "model_file = \"cybsec-mlp-verified.onnx\"\n",
+    "model_file = \"cybsec-mlp-ready.onnx\"\n",
     "\n",
     "estimates_output_dir = \"output_estimates_only\"\n",
     "\n",
@@ -345,11 +345,11 @@
     "\n",
     "Once we have a configuration that gives satisfactory estimates, we can move on to generating the accelerator. We can do this in different ways depending on how we want to integrate the accelerator into a larger system. For instance, if we have a larger streaming system built in Vivado or if we'd like to re-use this generated accelerator as an IP component in other projects, the `STITCHED_IP` output product is a good choice. We can also use the `OOC_SYNTH` output product to get post-synthesis resource and clock frequency numbers for our accelerator.\n",
     "\n",
-    "<font color=\"red\">**FPGA'21 tutorial:** These next builds will take about 10 minutes to complete since multiple calls to Vivado and a call to RTL simulation are involved. \n",
-    "    \n",
-    "However, once the `step_create_stitched_ip [11/16]` below is completed, you can view the generated stitched IP in Vivado (over noVNC) while waiting for the rest of the steps to finish. \n",
+    "<font color=\"red\">**FPGA'21 tutorial:** These next builds will take about 10 minutes to complete since multiple calls to Vivado and a call to RTL simulation are involved. While this is running, you can examine the generated files with noVNC, which is running at (your AWS URL):6080/vnc.html:\n",
+    "\n",
+    "* Once the `step_hls_codegen [8/16]` below is completed, you can view the generated HLS code for each layer under its own folder: `/tmp/finn_dev_ubuntu/code_gen_ipgen_StreamingFCLayer_Batch_XXXXXX`\n",
     "    \n",
-    "noVNC is running on (your AWS URL):6080/vnc.html\n",
+    "* Once the `step_create_stitched_ip [11/16]` below is completed, you can view the generated stitched IP in Vivado under `/home/ubuntu/sandbox/finn/notebooks/end2end_example/cybersecurity/output_ipstitch_ooc_rtlsim/stitched_ip`\n",
     "</font> "
    ]
   },
@@ -362,9 +362,8 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Previous run results deleted!\n",
-      "Building dataflow accelerator from cybsec-mlp-verified.onnx\n",
-      "Intermediate outputs will be generated in /tmp/finn_dev_ubuntu\n",
+      "Building dataflow accelerator from cybsec-mlp-ready.onnx\n",
+      "Intermediate outputs will be generated in /tmp/finn_dev_maltanar\n",
       "Final outputs will be generated in output_ipstitch_ooc_rtlsim\n",
       "Build log is at output_ipstitch_ooc_rtlsim/build_dataflow.log\n",
       "Running step: step_tidy_up [1/16]\n",
@@ -403,7 +402,7 @@
     "import os\n",
     "import shutil\n",
     "\n",
-    "model_file = \"cybsec-mlp-verified.onnx\"\n",
+    "model_file = \"cybsec-mlp-ready.onnx\"\n",
     "\n",
     "rtlsim_output_dir = \"output_ipstitch_ooc_rtlsim\"\n",
     "\n",
@@ -498,15 +497,15 @@
      "output_type": "stream",
      "text": [
       "{\r\n",
-      "  \"vivado_proj_folder\": \"/tmp/finn_dev_ubuntu/synth_out_of_context_8u74_j7t/results_finn_design_wrapper\",\r\n",
-      "  \"LUT\": 7931.0,\r\n",
-      "  \"FF\": 7319.0,\r\n",
+      "  \"vivado_proj_folder\": \"/tmp/finn_dev_maltanar/synth_out_of_context_ex08r7hd/results_finn_design_wrapper\",\r\n",
+      "  \"LUT\": 7920.0,\r\n",
+      "  \"FF\": 7327.0,\r\n",
       "  \"DSP\": 0.0,\r\n",
       "  \"BRAM\": 18.0,\r\n",
-      "  \"WNS\": 1.562,\r\n",
+      "  \"WNS\": 1.565,\r\n",
       "  \"\": 0,\r\n",
-      "  \"fmax_mhz\": 118.51149561507465,\r\n",
-      "  \"estimated_throughput_fps\": 1481393.6951884332\r\n",
+      "  \"fmax_mhz\": 118.55364552459987,\r\n",
+      "  \"estimated_throughput_fps\": 1481920.5690574984\r\n",
       "}"
      ]
     }
@@ -626,27 +625,19 @@
    "source": [
     "## (Optional) Launch a Build: PYNQ Bitfile and Driver <a id=\"build_bitfile_driver\"></a>\n",
     "\n",
-    "<font color=\"red\">**FPGA'21 tutorial:** This section is not included in the hands-on tutorial due to the bitfile synthesis time (15-20 min). We encourage you to uncomment the cells below to try it out on your own after the tutorial.</font>"
+    "<font color=\"red\">**FPGA'21 tutorial:** This section is not included in the hands-on tutorial due to the bitfile synthesis time (15-20 min). If you own a PYNQ board, we encourage you to uncomment the cells below to try it out on your own after the tutorial.</font>"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 14,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Previous run results deleted!\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import finn.builder.build_dataflow as build\n",
     "import finn.builder.build_dataflow_config as build_cfg\n",
     "\n",
-    "model_file = \"cybsec-mlp-verified.onnx\"\n",
+    "model_file = \"cybsec-mlp-ready.onnx\"\n",
     "\n",
     "final_output_dir = \"output_final\"\n",
     "\n",