From 8d39e5e0e073797ccc85f7b9e61c729d843e5d1d Mon Sep 17 00:00:00 2001
From: lucas_miranda <lucasmiranda42@gmail.com>
Date: Wed, 17 Mar 2021 11:48:39 +0100
Subject: [PATCH] Fixed a bug in model_utils.py that yielded NaN entropy values
 when there were no neighbors within the radius and the selected cluster was 0

---
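Note for reviewers: the model_utils.py fix named in the subject is not part of
this diff, which only touches deepof_experiments.smk. For context, below is a
minimal sketch of the failure mode and the kind of guard it calls for; the
function name, signature, and NumPy-based implementation are hypothetical
illustrations, not deepof's actual code.

    import numpy as np

    def neighbor_entropy(neighbor_clusters, n_clusters):
        # Shannon entropy of cluster labels among the neighbors found within
        # a radius. HYPOTHETICAL sketch, not the model_utils.py code.
        if neighbor_clusters.size == 0:
            # With no neighbors every bin is empty: the selected cluster's
            # probability becomes 0/0, and 0 * log(0) propagates as NaN.
            # Define the entropy of an empty neighborhood as 0 instead.
            return 0.0
        probs = np.bincount(neighbor_clusters, minlength=n_clusters)
        probs = probs / neighbor_clusters.size
        probs = probs[probs > 0]  # drop empty bins so log(0) never appears
        return float(-(probs * np.log(probs)).sum())

    # E.g. neighbor_entropy(np.array([], dtype=int), 25) -> 0.0, not NaN.
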
 deepof_experiments.smk | 43 ++++++++++++++++++++++--------------------
 1 file changed, 23 insertions(+), 20 deletions(-)

diff --git a/deepof_experiments.smk b/deepof_experiments.smk
index 8d901f2c..ee337bd7 100644
--- a/deepof_experiments.smk
+++ b/deepof_experiments.smk
@@ -18,7 +18,7 @@ losses = ["ELBO"]  # , "MMD", "ELBO+MMD"]
 encodings = [6]  # [2, 4, 6, 8, 10, 12, 14, 16]
 cluster_numbers = [25]  # [1, 5, 10, 15, 20, 25]
 latent_reg = ["none", "categorical", "variance", "categorical+variance"]
-entropy_min_n = [2,5,8,12]
+entropy_min_n = [2, 5, 8, 12]
 pheno_weights = [0.01, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 10.0, 100.0]
 
 
@@ -34,22 +34,22 @@ rule deepof_experiments:
         #     enc=encodings,
         # ),
         expand(
-             "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/latent_regularization_experiments/trained_weights/"
-             "GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_entropymink={entmin}_final_weights.h5",
-             loss=losses,
-             encs=encodings,
-             k=cluster_numbers,
-             latreg=latent_reg,
-             entmin=entropy_min_n,
-         )
-         # expand(
-         #     "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/pheno_classification_experiments/trained_weights/"
-         #     "GMVAE_loss={loss}_encoding={encs}_k={k}_pheno={phenos}_run_1_final_weights.h5",
-         #     loss=losses,
-         #     encs=encodings,
-         #     k=cluster_numbers,
-         #     phenos=pheno_weights,
-         # ),
+            "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/latent_regularization_experiments/trained_weights/"
+            "GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_entropymink={entmin}_final_weights.h5",
+            loss=losses,
+            encs=encodings,
+            k=cluster_numbers,
+            latreg=latent_reg,
+            entmin=entropy_min_n,
+        ),
+        # expand(
+        #     "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/pheno_classification_experiments/trained_weights/"
+        #     "GMVAE_loss={loss}_encoding={encs}_k={k}_pheno={phenos}_run_1_final_weights.h5",
+        #     loss=losses,
+        #     encs=encodings,
+        #     k=cluster_numbers,
+        #     phenos=pheno_weights,
+        # ),
 
 
 rule coarse_hyperparameter_tuning:
@@ -83,11 +83,13 @@ rule coarse_hyperparameter_tuning:
 
 rule latent_regularization_experiments:
     input:
-        data_path=ancient("/psycl/g/mpsstatgen/lucas/DLC/DLC_models/deepof_single_topview/"),
+        data_path=ancient(
+            "/psycl/g/mpsstatgen/lucas/DLC/DLC_models/deepof_single_topview/"
+        ),
     output:
         trained_models=os.path.join(
             outpath,
-            "latent_regularization_experiments/trained_weights/GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_final_weights.h5",
+            "latent_regularization_experiments/trained_weights/GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_entropymink={entmin}_final_weights.h5",
         ),
     shell:
         "pipenv run python -m deepof.train_model "
@@ -104,10 +106,11 @@ rule latent_regularization_experiments:
         "--mmd-warmup 20 "
         "--montecarlo-kl 10 "
         "--encoding-size {wildcards.encs} "
+        "--entropy-min-n {wildcards.entmin} "
         "--batch-size 256 "
         "--window-size 24 "
         "--window-step 12 "
-        "--exclude-bodyparts Tail_base,Tail_1,Tail_2,Tail_tip "
+        # "--exclude-bodyparts Tail_base,Tail_1,Tail_2,Tail_tip "
         "--output-path {outpath}latent_regularization_experiments"
 
 
-- 
GitLab