Commit 8d39e5e0 authored by Lucas Miranda

Fixed a bug in model_utils.py that yielded nan entropy values when there were no neighbors in the radius and the selected cluster was 0
parent 27e9cc02
Pipeline #95923 passed
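
The entropy fix itself lives in model_utils.py and is not part of the Snakefile diff shown below. As a rough, standalone illustration only (the function name and signature here are hypothetical, not DeepOF's actual code), the failure mode it addresses is the usual empty-neighborhood / 0 * log(0) case:

```python
import numpy as np

def neighbor_cluster_entropy(cluster_labels, n_clusters):
    """Entropy of the cluster assignments among a point's neighbors.

    Hypothetical sketch: returns 0.0 when the neighborhood is empty,
    instead of propagating the nan that 0/0 and 0 * log(0) would produce.
    """
    if len(cluster_labels) == 0:
        # No neighbors within the radius: entropy is undefined; report 0.
        return 0.0

    counts = np.bincount(cluster_labels, minlength=n_clusters)
    probs = counts / counts.sum()
    nonzero = probs > 0  # drop empty clusters so log() never sees 0
    return float(-np.sum(probs[nonzero] * np.log(probs[nonzero])))


# A point with no neighbors now yields 0.0 rather than nan
print(neighbor_cluster_entropy(np.array([], dtype=int), n_clusters=25))  # 0.0
print(neighbor_cluster_entropy(np.array([0, 0, 3, 3]), n_clusters=25))   # ~0.693
```

Guarding the empty case (and masking zero-probability clusters) keeps the reported entropy finite instead of letting nan propagate into the training logs.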
@@ -18,7 +18,7 @@ losses = ["ELBO"] # , "MMD", "ELBO+MMD"]
 encodings = [6] # [2, 4, 6, 8, 10, 12, 14, 16]
 cluster_numbers = [25] # [1, 5, 10, 15, 20, 25]
 latent_reg = ["none", "categorical", "variance", "categorical+variance"]
-entropy_min_n = [2,5,8,12]
+entropy_min_n = [2, 5, 8, 12]
 pheno_weights = [0.01, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 10.0, 100.0]
@@ -34,22 +34,22 @@ rule deepof_experiments:
         #     enc=encodings,
         # ),
         expand(
-            "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/latent_regularization_experiments/trained_weights/"
-            "GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_entropymink={entmin}_final_weights.h5",
-            loss=losses,
-            encs=encodings,
-            k=cluster_numbers,
-            latreg=latent_reg,
-            entmin=entropy_min_n,
-        )
-        # expand(
-        #     "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/pheno_classification_experiments/trained_weights/"
-        #     "GMVAE_loss={loss}_encoding={encs}_k={k}_pheno={phenos}_run_1_final_weights.h5",
-        #     loss=losses,
-        #     encs=encodings,
-        #     k=cluster_numbers,
-        #     phenos=pheno_weights,
-        # ),
+            "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/latent_regularization_experiments/trained_weights/"
+            "GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_entropymink={entmin}_final_weights.h5",
+            loss=losses,
+            encs=encodings,
+            k=cluster_numbers,
+            latreg=latent_reg,
+            entmin=entropy_min_n,
+        ),
+        # expand(
+        #     "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/pheno_classification_experiments/trained_weights/"
+        #     "GMVAE_loss={loss}_encoding={encs}_k={k}_pheno={phenos}_run_1_final_weights.h5",
+        #     loss=losses,
+        #     encs=encodings,
+        #     k=cluster_numbers,
+        #     phenos=pheno_weights,
+        # ),

 rule coarse_hyperparameter_tuning:
@@ -83,11 +83,13 @@ rule coarse_hyperparameter_tuning:
 rule latent_regularization_experiments:
     input:
-        data_path=ancient("/psycl/g/mpsstatgen/lucas/DLC/DLC_models/deepof_single_topview/"),
+        data_path=ancient(
+            "/psycl/g/mpsstatgen/lucas/DLC/DLC_models/deepof_single_topview/"
+        ),
     output:
         trained_models=os.path.join(
             outpath,
-            "latent_regularization_experiments/trained_weights/GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_final_weights.h5",
+            "latent_regularization_experiments/trained_weights/GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_entropymink={entmin}_final_weights.h5",
         ),
     shell:
         "pipenv run python -m deepof.train_model "
@@ -104,10 +104,11 @@ rule latent_regularization_experiments:
         "--mmd-warmup 20 "
         "--montecarlo-kl 10 "
         "--encoding-size {wildcards.encs} "
+        "--entropy-min-n {wildcards.entmin} "
         "--batch-size 256 "
         "--window-size 24 "
         "--window-step 12 "
-        "--exclude-bodyparts Tail_base,Tail_1,Tail_2,Tail_tip "
+        # "--exclude-bodyparts Tail_base,Tail_1,Tail_2,Tail_tip "
         "--output-path {outpath}latent_regularization_experiments"
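
For context on what the new `entmin` wildcard adds: Snakemake's `expand()` builds one target path per combination of the parameter lists, and because `entropymink={entmin}` now also appears in the rule's output pattern, Snakemake recovers the value for each target and exposes it to the shell command as `{wildcards.entmin}`, which feeds `--entropy-min-n`. A minimal sketch of the combinatorics, mimicking `expand()` with `itertools.product` rather than calling the Snakemake API itself:

```python
from itertools import product

# Parameter grids copied from the Snakefile above
losses, encodings, cluster_numbers = ["ELBO"], [6], [25]
latent_reg = ["none", "categorical", "variance", "categorical+variance"]
entropy_min_n = [2, 5, 8, 12]

pattern = (
    "GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}"
    "_entropymink={entmin}_final_weights.h5"
)

# One trained-weights target per combination, like expand() would generate
targets = [
    pattern.format(loss=l, encs=e, k=k, latreg=r, entmin=n)
    for l, e, k, r, n in product(
        losses, encodings, cluster_numbers, latent_reg, entropy_min_n
    )
]
print(len(targets))   # 1 * 1 * 1 * 4 * 4 = 16 weight files
print(targets[0])     # GMVAE_loss=ELBO_encoding=6_k=25_latreg=none_entropymink=2_...
```

With the lists above, the grid gains a factor of four (the four entropy_min_n values) on top of the four latent regularization modes, i.e. 16 weight files per run.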