diff --git a/deepof/pose_utils.py b/deepof/pose_utils.py
index 385510593f0766b193829206d289d5ae7c633f72..0caf31a230c44b56e4e6b6fa9d63587dee069c28 100644
--- a/deepof/pose_utils.py
+++ b/deepof/pose_utils.py
@@ -390,10 +390,7 @@ def rule_based_tagging(
     videos: List,
     coordinates: Coordinates,
     vid_index: int,
-    frame_limit: float = np.inf,
     recog_limit: int = 1,
-    mode: str = None,
-    fps: float = 0.0,
     path: str = os.path.join("."),
     hparams: dict = {},
 ) -> pd.DataFrame:
@@ -405,11 +402,7 @@ def rule_based_tagging(
         - videos (list): list of videos to load, in the same order as tracks
         - coordinates (deepof.preprocessing.coordinates): coordinates object containing the project information
         - vid_index (int): index in videos of the experiment to annotate
-        - mode (str): if show, enables the display of the annotated video in a separate window, saves to mp4 file
-        if save
-        - fps (float): frames per second of the analysed video. Same as input by default
         - path (str): directory in which the experimental data is stored
-        - frame_limit (float): limit the number of frames to output. Generates all annotated frames by default
         - recog_limit (int): number of frames to use for arena recognition (1 by default)
         - hparams (dict): dictionary to overwrite the default values of the hyperparameters of the functions
         that the rule-based pose estimation utilizes. Values can be:
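    A minimal sketch of overriding a couple of the hyperparameters listed above when calling the slimmed-down tagger; the override values and the project objects are hypothetical, only the keyword names come from the docstring:

        # Sketch only: `tracks`, `videos` and `coordinates` are assumed to come from an
        # existing deepof project; the override values and path below are made up.
        hparams = {"huddle_speed": 1, "follow_frames": 10}

        tag_df = rule_based_tagging(
            tracks,
            videos,
            coordinates,
            vid_index=0,
            path="./Videos",   # hypothetical directory with the experimental videos
            hparams=hparams,
        )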
@@ -429,6 +422,7 @@ def rule_based_tagging(
 
     hparams = get_hparameters(hparams)
     animal_ids = coordinates._animal_ids
+    undercond = "_" if len(animal_ids) > 1 else ""
 
     vid_name = re.findall("(.*?)_", tracks[vid_index])[0]
 
@@ -474,7 +468,7 @@ def rule_based_tagging(
             )
         )
 
-    if animal_ids:
+    if len(animal_ids) == 2:
         # Define behaviours that can be computed on the fly from the distance matrix
         tag_dict["nose2nose"] = onebyone_contact(bparts=["_Nose"])
 
@@ -497,40 +491,21 @@ def rule_based_tagging(
                     tol=hparams["follow_tol"],
                 )
             )
-            tag_dict[_id + "_climbing"] = deepof.utils.smooth_boolean_array(
-                pd.Series(
-                    (
-                        spatial.distance.cdist(
-                            np.array(coords[_id + "_Nose"]), np.zeros([1, 2])
-                        )
-                        > (w / 200 + arena[2])
-                    ).reshape(coords.shape[0]),
-                    index=coords.index,
-                ).astype(bool)
-            )
-            tag_dict[_id + "_speed"] = speeds[_id + "_speed"]
-            tag_dict[_id + "_huddle"] = deepof.utils.smooth_boolean_array(
-                huddle(
-                    coords,
-                    speeds,
-                    hparams["huddle_forward"],
-                    hparams["huddle_spine"],
-                    hparams["huddle_speed"],
-                )
-            )
 
-    else:
-        tag_dict["climbing"] = deepof.utils.smooth_boolean_array(
+    for _id in animal_ids:
+        tag_dict[_id + undercond + "climbing"] = deepof.utils.smooth_boolean_array(
             pd.Series(
                 (
-                    spatial.distance.cdist(np.array(coords["Nose"]), np.zeros([1, 2]))
+                    spatial.distance.cdist(
+                        np.array(coords[_id + undercond + "Nose"]), np.zeros([1, 2])
+                    )
                     > (w / 200 + arena[2])
                 ).reshape(coords.shape[0]),
                 index=coords.index,
             ).astype(bool)
         )
-        tag_dict["speed"] = speeds["Center"]
-        tag_dict["huddle"] = deepof.utils.smooth_boolean_array(
+        tag_dict[_id + undercond + "speed"] = speeds[_id + undercond + "Center"]
+        tag_dict[_id + undercond + "huddle"] = deepof.utils.smooth_boolean_array(
             huddle(
                 coords,
                 speeds,
@@ -540,6 +515,65 @@ def rule_based_tagging(
             )
         )
 
+    tag_df = pd.DataFrame(tag_dict)
+
+    return tag_df
+
+
+def rule_based_video(
+    coordinates,
+    tracks,
+    videos,
+    vid_index,
+    tag_dict,
+    mode,
+    path,
+    fps,
+    frame_limit,
+    recog_limit,
+    hparams,
+):
+    """Renders a version of the input video with all rule-based taggings in place.
+
+    Parameters:
+        - tracks (list): list containing experiment IDs as strings
+        - videos (list): list of videos to load, in the same order as tracks
+        - coordinates (deepof.preprocessing.coordinates): coordinates object containing the project information
+        - vid_index (int): index in videos of the experiment to annotate
+        - tag_dict (dict): rule-based tags to render onto the video
+        - mode (str): if "show", displays the annotated video in a separate window; if "save", writes it to an mp4 file
+        - fps (float): frames per second of the analysed video. Same as input by default
+        - path (str): directory in which the experimental data is stored
+        - frame_limit (float): limit the number of frames to output. Generates all annotated frames by default
+        - recog_limit (int): number of frames to use for arena recognition (1 by default)
+        - hparams (dict): dictionary to overwrite the default values of the hyperparameters of the functions
+        that the rule-based pose estimation utilizes. Values can be:
+            - speed_pause (int): size of the rolling window to use when computing speeds
+            - close_contact_tol (int): maximum distance between single body parts below which close contact is reported
+            - side_contact_tol (int): maximum distance between single body parts below which side contact is reported
+            - follow_frames (int): number of frames during which the following trait is tracked
+            - follow_tol (int): maximum distance between the follower and the followed animal's path over the last
+            follow_frames for a detection to be reported
+            - huddle_forward (int): maximum distance between ears and forward limbs to report a huddle detection
+            - huddle_spine (int): maximum average distance between spine body parts to report a huddle detection
+            - huddle_speed (int): maximum speed to report a huddle detection
+
+    Returns:
+        True
+
+    """
+
+    animal_ids = coordinates._animal_ids
+    # undercond = "_" if len(animal_ids) > 1 else ""
+
+    vid_name = re.findall("(.*?)_", tracks[vid_index])[0]
+
+    coords = coordinates.get_coords()[vid_name]
+    speeds = coordinates.get_coords(speed=1)[vid_name]
+    arena, h, w = deepof.utils.recognize_arena(
+        videos, vid_index, path, recog_limit, coordinates._arena
+    )
+
     if mode in ["show", "save"]:
 
         cap = cv2.VideoCapture(os.path.join(path, videos[vid_index]))
@@ -732,7 +766,3 @@ def rule_based_tagging(
 
         cap.release()
         cv2.destroyAllWindows()
-
-    tag_df = pd.DataFrame(tag_dict)
-
-    return tag_df
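    With the video rendering split out of rule_based_tagging, the intended workflow appears to be: tag first, then pass the result to the renderer. A hedged sketch, assuming `coordinates`, `tracks` and `videos` come from an existing deepof project, with made-up values for path, fps and frame_limit:

        # Sketch of the split workflow; argument values are hypothetical.
        tag_df = rule_based_tagging(tracks, videos, coordinates, vid_index=0, path=video_path)

        rule_based_video(
            coordinates,
            tracks,
            videos,
            vid_index=0,
            tag_dict=tag_df,
            mode="save",                  # "show" would display instead of writing an mp4
            path=video_path,
            fps=0.0,                      # keep the input video's frame rate (the old default)
            frame_limit=100,              # hypothetical cap on the number of rendered frames
            recog_limit=1,
            hparams=get_hparameters({}),  # assumption: the renderer expects the filled-in dict
        )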
diff --git a/examples/main.ipynb b/examples/main.ipynb
index 8719c91d1ea9ba9b680a71f61b249ec4ad001baa..814554d73e15f977293380493c66ac98f7a336b1 100644
--- a/examples/main.ipynb
+++ b/examples/main.ipynb
@@ -1,87 +1,5 @@
 {
  "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%load_ext autoreload\n",
-    "%autoreload 2\n",
-    "\n",
-    "import warnings\n",
-    "warnings.filterwarnings(\"ignore\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "os.chdir(\"../\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#from source.utils import *\n",
-    "from deepof.preprocess import *\n",
-    "from deepof.model_utils import *\n",
-    "import pickle\n",
-    "import matplotlib.pyplot as plt\n",
-    "import pandas as pd\n",
-    "from collections import defaultdict\n",
-    "from tqdm import tqdm_notebook as tqdm"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 26,
-   "metadata": {
-    "tags": [
-     "parameters"
-    ]
-   },
-   "outputs": [],
-   "source": [
-    "path = \"../../PycharmProjects/deepof/tests/test_examples\"\n",
-    "path2 = \"../../Desktop/DLC_social_2/\""
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Set up and design the project"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 27,
-   "metadata": {},
-   "outputs": [
-    {
-     "ename": "FileNotFoundError",
-     "evalue": "[Errno 2] No such file or directory: '../../PycharmProjects/deepof/tests/test_examplesDLC_social_1_exp_conditions.pickle'",
-     "output_type": "error",
-     "traceback": [
-      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
-      "\u001b[0;31mFileNotFoundError\u001b[0m                         Traceback (most recent call last)",
-      "\u001b[0;32m<ipython-input-27-2f7d5a25baed>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'{}DLC_social_1_exp_conditions.pickle'\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'rb'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      2\u001b[0m     \u001b[0mTreatment_dict\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpickle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
-      "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '../../PycharmProjects/deepof/tests/test_examplesDLC_social_1_exp_conditions.pickle'"
-     ]
-    }
-   ],
-   "source": [
-    "with open('{}DLC_social_1_exp_conditions.pickle'.format(path), 'rb') as handle:\n",
-    "    Treatment_dict = pickle.load(handle)"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": 28,
diff --git a/tests/test_pose_utils.py b/tests/test_pose_utils.py
index cc798a4445ca8076c7c5f11b6445a4af80a05a6e..390c8d6ab357b2895514e98b4e1e9eecafd2a6db 100644
--- a/tests/test_pose_utils.py
+++ b/tests/test_pose_utils.py
@@ -347,7 +347,7 @@ def test_rule_based_tagging():
         arena_dims=tuple([380]),
         video_format=".mp4",
         table_format=".h5",
-        animal_ids=None,
+        animal_ids=[""],
     ).run(verbose=True)
 
     hardcoded_tags = rule_based_tagging(
@@ -356,8 +356,6 @@ def test_rule_based_tagging():
         prun,
         vid_index=0,
         path=os.path.join(".", "tests", "test_examples", "Videos"),
-        mode="save",
-        frame_limit=100,
     )
 
     assert type(hardcoded_tags) == pd.DataFrame
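    Since the project is now built with animal_ids=[""] instead of None, the test exercises the unified loop with an empty prefix (undercond == ""), so the tag columns keep their un-prefixed names. A sketch of an extra check the assertion could make, with column names inferred from the refactored tagging loop above:

        # Sketch only; the column names are inferred from the tagging loop in pose_utils.py.
        assert {"climbing", "speed", "huddle"}.issubset(hardcoded_tags.columns)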