diff --git a/deepof/data.py b/deepof/data.py
index a0721256785720bfd372ad5447cbbdf993295d71..6c0f8642400736cb91d662e2012f33a2f088467d 100644
--- a/deepof/data.py
+++ b/deepof/data.py
@@ -17,13 +17,11 @@ Contains methods for generating training and test sets ready for model training.
 import os
 import warnings
 from collections import defaultdict
-from multiprocessing import cpu_count
 from typing import Dict, List, Tuple, Union
 
 import matplotlib.pyplot as plt
 import numpy as np
 import pandas as pd
-import seaborn as sns
 import tensorflow as tf
 from joblib import delayed, Parallel, parallel_backend
 from pkg_resources import resource_filename
@@ -913,7 +911,6 @@ class coordinates:
         save_weights: bool = True,
         reg_cat_clusters: bool = False,
         reg_cluster_variance: bool = False,
-        entropy_samples: int = 10000,
         entropy_knn: int = 100,
         input_type: str = False,
         run: int = 0,
@@ -979,7 +976,6 @@ class coordinates:
             save_weights=save_weights,
             reg_cat_clusters=reg_cat_clusters,
             reg_cluster_variance=reg_cluster_variance,
-            entropy_samples=entropy_samples,
             entropy_knn=entropy_knn,
             input_type=input_type,
             run=run,
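
The data.py hunks above drop the `entropy_samples` knob and keep `entropy_knn`. For orientation, here is a self-contained sketch of what a k-nearest-neighbour entropy over an embedding looks like; it illustrates the shape of the metric only, it is not deepof's implementation, and every name in it is hypothetical:

    import numpy as np
    from sklearn.neighbors import NearestNeighbors

    def neighborhood_entropy(embeddings, labels, k=100):
        # For each point, look at its k nearest neighbours and compute the
        # entropy of the cluster labels found in that neighbourhood.
        idx = NearestNeighbors(n_neighbors=k).fit(embeddings).kneighbors(embeddings)[1]
        entropies = []
        for neigh in idx:
            _, counts = np.unique(labels[neigh], return_counts=True)
            p = counts / counts.sum()
            entropies.append(-(p * np.log(p)).sum())
        return float(np.mean(entropies))

    emb = np.random.normal(size=(500, 8))  # synthetic embeddings
    lab = np.random.randint(0, 10, 500)    # synthetic cluster labels
    print(neighborhood_entropy(emb, lab, k=100))  # higher = more mixed clusters
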
diff --git a/deepof/model_utils.py b/deepof/model_utils.py
index 5667c0e89a31216828f5f546246a83322cef91da..82c5e3e7fbaf3a7cc195a7acda93a0784e0e34f0 100644
--- a/deepof/model_utils.py
+++ b/deepof/model_utils.py
@@ -8,16 +8,13 @@ Functions and general utilities for the deepof tensorflow models. See documentat
 
 """
 
-from itertools import combinations
+from functools import partial
 from typing import Any, Tuple
 
 import matplotlib.pyplot as plt
-import numpy as np
 import tensorflow as tf
 import tensorflow_probability as tfp
-from functools import partial
 from tensorflow.keras import backend as K
-from tensorflow.keras.constraints import Constraint
 from tensorflow.keras.layers import Layer
 
 tfd = tfp.distributions
diff --git a/deepof/models.py b/deepof/models.py
index 2c27d987fdd9a7293f2cce920febf89c82a78bfb..8e08820c037f6e8466c4407f1eb801d88544c8db 100644
--- a/deepof/models.py
+++ b/deepof/models.py
@@ -8,18 +8,16 @@ deep autoencoder models for unsupervised pose detection
 
 """
 
-from typing import Any, Dict, Tuple
+from typing import Dict, Tuple
 
 import tensorflow as tf
 import tensorflow_probability as tfp
 from tensorflow.keras import Input, Model, Sequential
 from tensorflow.keras.activations import softplus
-from tensorflow.keras.constraints import UnitNorm
 from tensorflow.keras.initializers import he_uniform, random_uniform
 from tensorflow.keras.layers import BatchNormalization, Bidirectional
 from tensorflow.keras.layers import Dense, Dropout, GRU
-from tensorflow.keras.layers import RepeatVector, Reshape, TimeDistributed
-from tensorflow.keras.losses import Huber
+from tensorflow.keras.layers import RepeatVector, Reshape
 from tensorflow.keras.optimizers import Nadam
 
 import deepof.model_utils
@@ -28,6 +26,7 @@ tfb = tfp.bijectors
 tfd = tfp.distributions
 tfpl = tfp.layers
 
+
 # noinspection PyDefaultArgument
 class GMVAE:
     """  Gaussian Mixture Variational Autoencoder for pose motif elucidation.  """
@@ -204,8 +203,8 @@ class GMVAE:
             for _ in range(self.dense_layers_per_branch)
         ]
         Model_E4 = []
-        for l in seq_E:
-            Model_E4.append(l)
+        for layer in seq_E:
+            Model_E4.append(layer)
             Model_E4.append(BatchNormalization())
 
         # Decoder layers
@@ -219,8 +218,8 @@ class GMVAE:
             for _ in range(self.dense_layers_per_branch)
         ]
         Model_D1 = []
-        for l in seq_D:
-            Model_D1.append(l)
+        for layer in seq_D:
+            Model_D1.append(layer)
             Model_D1.append(BatchNormalization())
 
         Model_D2 = Dense(
@@ -501,7 +500,7 @@ class GMVAE:
         x_decoded_var = tf.keras.activations.softplus(
             Dense(tfpl.IndependentNormal.params_size(input_shape[2:]) // 2)(generator)
         )
-        x_decoded_var = tf.keras.layers.Lambda(lambda x: 1e-3 + x)(x_decoded_var)
+        x_decoded_var = tf.keras.layers.Lambda(lambda v: 1e-3 + v)(x_decoded_var)
         x_decoded = tf.keras.layers.concatenate(
             [x_decoded_mean, x_decoded_var], axis=-1
         )
@@ -549,7 +548,7 @@ class GMVAE:
                     predictor
                 )
             )
-            x_predicted_var = tf.keras.layers.Lambda(lambda x: 1e-3 + x)(
+            x_predicted_var = tf.keras.layers.Lambda(lambda v: 1e-3 + v)(
                 x_predicted_var
             )
             x_decoded = tf.keras.layers.concatenate(
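
The lambda argument renames above (x to v) only stop the argument from shadowing the surrounding tensor names; the underlying idiom is a variance head kept strictly positive. A minimal, runnable sketch of that pattern with synthetic shapes (not the repo's model):

    import tensorflow as tf
    from tensorflow.keras.layers import Dense, Input, Lambda

    inputs = Input(shape=(16,))
    hidden = Dense(32, activation="relu")(inputs)
    mean = Dense(4)(hidden)
    # softplus makes the raw output non-negative; adding 1e-3 floors it away
    # from zero, so downstream Gaussian likelihoods never see zero variance
    var = tf.keras.activations.softplus(Dense(4)(hidden))
    var = Lambda(lambda v: 1e-3 + v)(var)
    model = tf.keras.Model(inputs, tf.keras.layers.concatenate([mean, var], axis=-1))
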
diff --git a/deepof/pose_utils.py b/deepof/pose_utils.py
index b8c2c8e890557987dbe870a91eae721e81f80dd0..9201dbd7d963fa0a064686342bb2d481cb549742 100644
--- a/deepof/pose_utils.py
+++ b/deepof/pose_utils.py
@@ -19,7 +19,6 @@ import numpy as np
 import pandas as pd
 import regex as re
 import seaborn as sns
-import tensorflow as tf
 from scipy import stats
 
 import deepof.utils
@@ -205,7 +204,7 @@ def sniff_object(
     tol_speed: float,
     nose: str,
     centered_data: bool = False,
-    object: str = "arena",
+    s_object: str = "arena",
     animal_id: str = "",
 ):
     """Returns True if the specified mouse is sniffing an object
@@ -232,7 +231,7 @@ def sniff_object(
     if animal_id != "":
         animal_id += "_"
 
-    if object == "arena":
+    if s_object == "arena":
         if arena_type == "circular":
             center = np.zeros(2) if centered_data else np.array(arena[0])
             axes = arena[1]
@@ -256,7 +255,7 @@ def sniff_object(
             )
             nosing = nosing_min & (~nosing_max)
 
-    elif object == "partner":
+    elif s_object == "partner":
         raise NotImplementedError
 
     else:
@@ -425,6 +424,7 @@ def following_path(
         < distance_dframe[tuple(sorted([follower + "_Nose", followed + "_Nose"]))]
     )
 
+    # noinspection PyArgumentList
     follow = np.all(
         np.array([(dist_df.min(axis=1) < tol), right_orient1, right_orient2]),
         axis=0,
@@ -778,7 +778,7 @@ def rule_based_tagging(
                 params["climb_tol"],
                 params["huddle_speed"],
                 _id + undercond + "Nose",
-                object="arena",
+                s_object="arena",
                 animal_id=_id,
             )
         )
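
The `object` to `s_object` rename matters because a parameter named `object` shadows the Python builtin inside the function body. A tiny standalone illustration (not deepof code):

    def shadowed(object):
        # inside this body, `object` is the string argument, not the builtin
        return type(object).__name__

    def renamed(s_object):
        # with the rename, the builtin stays available for checks like isinstance
        return isinstance(s_object, object)

    print(shadowed("arena"))  # -> 'str'
    print(renamed("arena"))   # -> True
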
diff --git a/deepof/train_model.py b/deepof/train_model.py
index eb4d872eb9185a3e798b10f38da395bfcbf455dc..056c680a8f981954b0742c9febd29216eff180bf 100644
--- a/deepof/train_model.py
+++ b/deepof/train_model.py
@@ -11,6 +11,7 @@ usage: python -m examples.model_training -h
 
 import argparse
 import os
+
 import deepof.data
 import deepof.train_utils
 import deepof.utils
@@ -422,13 +423,12 @@ else:
         loss=loss,
         loss_warmup=kl_wu,
         warmup_mode=kl_annealing_mode,
-        X_val=(X_val if X_val.shape != (0,) else None),
         input_type=input_type,
         cp=False,
-        entropy_samples=entropy_samples,
         entropy_knn=entropy_knn,
         logparam=logparam,
         outpath=output_path,
+        overlap_loss=overlap_loss,
         run=run,
     )
 
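With `X_val` and `entropy_samples` removed here, `get_callbacks` no longer takes a validation dataset. As background, a generic sketch of the kind of TensorBoard-logging callback such a helper assembles; this is illustrative only, and `EpochLogger` is a hypothetical name, not deepof's callback:

    import tensorflow as tf

    class EpochLogger(tf.keras.callbacks.Callback):
        """Writes every metric in `logs` to TensorBoard at each epoch end."""

        def __init__(self, log_dir):
            super().__init__()
            self.writer = tf.summary.create_file_writer(log_dir)

        def on_epoch_end(self, epoch, logs=None):
            with self.writer.as_default():
                for name, value in (logs or {}).items():
                    tf.summary.scalar(name, value, step=epoch)
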
diff --git a/deepof/train_utils.py b/deepof/train_utils.py
index bb2d79992f06877d9e4f112df7aa59ed0530224e..72cf9dddf03205f925d40cba291c10871523ea78 100644
--- a/deepof/train_utils.py
+++ b/deepof/train_utils.py
@@ -17,7 +17,6 @@ import numpy as np
 import tensorflow as tf
 from kerastuner import BayesianOptimization, Hyperband, Objective
 from kerastuner_tensorboard_logger import TensorBoardLogger
-from sklearn.metrics import roc_auc_score
 from tensorboard.plugins.hparams import api as hp
 
 import deepof.hypermodels
@@ -75,12 +74,10 @@ def get_callbacks(
     loss: str,
     loss_warmup: int = 0,
     warmup_mode: str = "none",
-    X_val: np.array = None,
     input_type: str = False,
     cp: bool = False,
     reg_cat_clusters: bool = False,
     reg_cluster_variance: bool = False,
-    entropy_samples: int = 15000,
     entropy_knn: int = 100,
     logparam: dict = None,
     outpath: str = ".",
@@ -294,7 +291,6 @@ def autoencoder_fitting(
     save_weights: bool,
     reg_cat_clusters: bool,
     reg_cluster_variance: bool,
-    entropy_samples: int,
     entropy_knn: int,
     input_type: str,
     run: int = 0,
@@ -314,13 +310,6 @@ def autoencoder_fitting(
         tf.data.experimental.AutoShardPolicy.DATA
     )
 
-    # Generate validation dataset for callback usage
-    X_val_dataset = (
-        tf.data.Dataset.from_tensor_slices(X_val)
-        .with_options(options)
-        .batch(batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
-    )
-
     # Defines what to log on tensorboard (useful for trying out different models)
     logparam = {
         "encoding": encoding_size,
@@ -342,11 +331,9 @@ def autoencoder_fitting(
         overlap_loss=overlap_loss,
         warmup_mode=kl_annealing_mode,
         input_type=input_type,
-        X_val=(X_val_dataset if X_val.shape != (0,) else None),
         cp=save_checkpoints,
         reg_cat_clusters=reg_cat_clusters,
         reg_cluster_variance=reg_cluster_variance,
-        entropy_samples=entropy_samples,
         entropy_knn=entropy_knn,
         logparam=logparam,
         outpath=output_path,
@@ -613,9 +600,8 @@ def tune_search(
         .batch(batch_size, drop_remainder=True)
         .shuffle(buffer_size=X_train.shape[0])
     )
-    val_dataset = (
-        tf.data.Dataset.from_tensor_slices((Xvals, tuple(yvals)))
-        .batch(batch_size, drop_remainder=True)
+    val_dataset = tf.data.Dataset.from_tensor_slices((Xvals, tuple(yvals))).batch(
+        batch_size, drop_remainder=True
     )
 
     # Convert data to tf.data.Dataset objects
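
The reflowed `val_dataset` expression above is behaviour-preserving (black-style line wrapping). The surrounding tf.data pattern, reduced to a self-contained sketch with synthetic shapes and an assumed batch size of 32:

    import numpy as np
    import tensorflow as tf

    X_train = np.random.uniform(-1, 1, (256, 5, 6)).astype("float32")
    X_val = np.random.uniform(-1, 1, (64, 5, 6)).astype("float32")

    options = tf.data.Options()
    options.experimental_distribute.auto_shard_policy = (
        tf.data.experimental.AutoShardPolicy.DATA  # shard by data across replicas
    )

    train_dataset = (
        tf.data.Dataset.from_tensor_slices(X_train)
        .with_options(options)
        .batch(32, drop_remainder=True)  # static batch shapes for the model
        .shuffle(buffer_size=X_train.shape[0])
    )
    val_dataset = tf.data.Dataset.from_tensor_slices(X_val).batch(
        32, drop_remainder=True
    )
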
diff --git a/tests/test_train_utils.py b/tests/test_train_utils.py
index 8438aaea3b70b3eac010b37140e5d8215e3749d9..c8e282b261b3d3adca8a7eced1a616c2daed08d5 100644
--- a/tests/test_train_utils.py
+++ b/tests/test_train_utils.py
@@ -8,17 +8,19 @@ Testing module for deepof.train_utils
 
 """
 
-from hypothesis import given, reproduce_failure
+import os
+
+import numpy as np
+import tensorflow as tf
 from hypothesis import HealthCheck
+from hypothesis import given
 from hypothesis import settings
 from hypothesis import strategies as st
 from hypothesis.extra.numpy import arrays
+
 import deepof.data
 import deepof.model_utils
 import deepof.train_utils
-import numpy as np
-import os
-import tensorflow as tf
 
 
 def test_load_treatments():
@@ -93,7 +95,6 @@ def test_autoencoder_fitting(
     phenotype_prediction,
     rule_based_prediction,
 ):
-
     X_train = np.random.uniform(-1, 1, [20, 5, 6])
     y_train = np.round(np.random.uniform(0, 1, [20, 1]))
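
The reordered test imports above pull in hypothesis' numpy strategies. A minimal sketch of the property-based pattern the suite relies on (a standalone example, not a test from the suite):

    import numpy as np
    from hypothesis import HealthCheck, given, settings
    from hypothesis import strategies as st
    from hypothesis.extra.numpy import arrays

    @settings(max_examples=10, suppress_health_check=[HealthCheck.too_slow])
    @given(
        arrays(dtype=np.float32, shape=(20, 5, 6), elements=st.floats(-1, 1, width=32))
    )
    def test_inputs_stay_bounded(x):
        # hypothesis generates many float32 arrays within the stated bounds
        assert np.all(np.abs(x) <= 1)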