diff --git a/deepof_experiments.smk b/deepof_experiments.smk
index e33f8c548fc95a3616c2cef14c7834270316caa7..0dcd5035cfae55d9e965b61769fe66014c33cbb6 100644
--- a/deepof_experiments.smk
+++ b/deepof_experiments.smk
@@ -18,7 +18,7 @@ losses = ["ELBO"] # , "MMD", "ELBO+MMD"]
 encodings = [6] # [2, 4, 6, 8, 10, 12, 14, 16]
 cluster_numbers = [25] # [1, 5, 10, 15, 20, 25]
 latent_reg = ["none", "categorical", "variance", "categorical+variance"]
-entropy_min_n = [2, 5, 8, 12]
+entropy_knn = [20, 50, 80, 100]
 pheno_weights = [0.01, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 10.0, 100.0]
 
 
@@ -35,12 +35,12 @@ rule deepof_experiments:
         # ),
         expand(
             "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/latent_regularization_experiments/trained_weights/"
-            "GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_entropymink={entmin}_final_weights.h5",
+            "GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_entropyknn={entknn}_final_weights.h5",
             loss=losses,
             encs=encodings,
             k=cluster_numbers,
             latreg=latent_reg,
-            entmin=entropy_min_n,
+            entknn=entropy_knn,
         ),
         # expand(
         #     "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/pheno_classification_experiments/trained_weights/"
@@ -89,7 +89,7 @@ rule latent_regularization_experiments:
     output:
         trained_models=os.path.join(
             outpath,
-            "latent_regularization_experiments/trained_weights/GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_entropymink={entmin}_final_weights.h5",
+            "latent_regularization_experiments/trained_weights/GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_entropyknn={entknn}_final_weights.h5",
         ),
     shell:
         "pipenv run python -m deepof.train_model "
@@ -106,7 +106,7 @@
         "--mmd-warmup 20 "
         "--montecarlo-kl 10 "
         "--encoding-size {wildcards.encs} "
-        "--entropy-min-n {wildcards.entmin} "
+        "--entropy-knn {wildcards.entknn} "
         "--batch-size 256 "
         "--window-size 24 "
         "--window-step 12 "
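
Note on the target expansion: Snakemake's expand() takes the cartesian product of the wildcard lists, so after this change the rule requests 1 loss x 1 encoding x 1 k x 4 latreg x 4 entropy_knn = 16 trained-weight files, one per hyperparameter combination. A minimal standalone sketch of the equivalent expansion, using plain itertools.product rather than Snakemake itself (the pattern and value lists are copied from the diff above; the sketch is illustrative, not part of the workflow):

    # Sketch: reproduce what expand() resolves to for this rule.
    from itertools import product

    losses = ["ELBO"]
    encodings = [6]
    cluster_numbers = [25]
    latent_reg = ["none", "categorical", "variance", "categorical+variance"]
    entropy_knn = [20, 50, 80, 100]

    pattern = (
        "GMVAE_loss={loss}_encoding={encs}_k={k}"
        "_latreg={latreg}_entropyknn={entknn}_final_weights.h5"
    )

    # Cartesian product over all wildcard lists, as expand() does internally.
    targets = [
        pattern.format(loss=l, encs=e, k=k, latreg=r, entknn=n)
        for l, e, k, r, n in product(
            losses, encodings, cluster_numbers, latent_reg, entropy_knn
        )
    ]

    print(len(targets))  # 16
    print(targets[0])
    # GMVAE_loss=ELBO_encoding=6_k=25_latreg=none_entropyknn=20_final_weights.h5

Each of these file names is matched by the output of rule latent_regularization_experiments, whose shell command forwards the entknn wildcard to the trainer via the renamed --entropy-knn flag.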