diff --git a/deepof_experiments.smk b/deepof_experiments.smk
index 6de1cf60c8ba3c0b35a9b114f83ef118dfcee57f..2829bb95a8d58ad6747ad25d1f9a67d73d40987c 100644
--- a/deepof_experiments.smk
+++ b/deepof_experiments.smk
@@ -14,8 +14,8 @@ Plot rule graph: snakemake --snakefile deepof_experiments.smk --forceall --ruleg
 import os
 
 outpath = "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/"
-losses = ["ELBO"]  # , "MMD", "ELBO+MMD"]
-encodings = [6]  # [2, 4, 6, 8, 10, 12, 14, 16]
+losses = ["ELBO" , "MMD", "ELBO+MMD"]
+encodings = [2, 4, 6, 8, 10, 12, 14, 16]
 cluster_numbers = [25]  # [1, 5, 10, 15, 20, 25]
 latent_reg = ["none", "categorical", "variance", "categorical+variance"]
 entropy_knn = [20, 50, 80, 100]
@@ -24,7 +24,7 @@ pheno_weights = [0.01, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 10.0, 100.0]
 
 rule deepof_experiments:
     input:
-        "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/supplementary_notebooks/recognise_elliptical_arena.ipynb",
+        # "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/supplementary_notebooks/recognise_elliptical_arena.ipynb",
         # expand(
         #     os.path.join(
         #         outpath,
@@ -34,15 +34,15 @@ rule deepof_experiments:
         #     k=cluster_numbers,
         #     enc=encodings,
         # ),
-        # expand(
-        #     "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/latent_regularization_experiments/trained_weights/"
-        #     "GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_entropyknn={entknn}_final_weights.h5",
-        #     loss=losses,
-        #     encs=encodings,
-        #     k=cluster_numbers,
-        #     latreg=latent_reg,
-        #     entknn=entropy_knn,
-        # ),
+        expand(
+            "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/latent_regularization_experiments/trained_weights/"
+            "GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_entropyknn={entknn}_final_weights.h5",
+            loss=losses,
+            encs=encodings,
+            k=cluster_numbers,
+            latreg=latent_reg,
+            entknn=entropy_knn,
+        ),
         # expand(
         #     "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/pheno_classification_experiments/trained_weights/"
         #     "GMVAE_loss={loss}_encoding={encs}_k={k}_pheno={phenos}_run_1_final_weights.h5",