Commit 2acacb00 authored by lucas_miranda's avatar lucas_miranda
Browse files

Added nose2body to rule_based_annotation()

parent 6e2cd7a9
Pipeline #95388 passed with stages
in 26 minutes and 27 seconds
......@@ -559,8 +559,8 @@ class SEQ_2_SEQ_GMVAE:
encoder = Model_E3(encoder)
encoder = BatchNormalization()(encoder)
encoder = Dropout(self.DROPOUT_RATE)(encoder)
encoder = Sequential(Model_E4)(encoder)
encoder = BatchNormalization()(encoder)
# encoder = Sequential(Model_E4)(encoder)
# encoder = BatchNormalization()(encoder)
# encoding_shuffle = deepof.model_utils.MCDropout(self.DROPOUT_RATE)(encoder)
z_cat = Dense(
......@@ -670,9 +670,9 @@ class SEQ_2_SEQ_GMVAE:
# Define and instantiate generator
g = Input(shape=self.ENCODING)
generator = Sequential(Model_D1)(g)
generator = Model_B1(generator)
generator = Model_D2(generator)
# generator = Sequential(Model_D1)(g)
# generator = Model_B1(generator)
generator = Model_D2(g)
generator = Model_B2(generator)
generator = Model_D3(generator)
generator = Model_D4(generator)
......
......@@ -23,22 +23,23 @@ pheno_weights = [0.01, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 10.0, 100.0]
# Top-level aggregation rule: its `input:` lists every trained-weights file the
# workflow should produce, so running this rule drives all experiment rules below.
# NOTE(review): this span is corrupted diff residue — the scrape merged the
# removed and the added versions of the `input:` section without +/- markers
# (e.g. the `), # expand(` fusion below, and an `expand(` opener that only
# survives inside a commented line). Do not treat this text as valid Snakemake;
# recover the real rule body from the repository before editing.
rule deepof_experiments:
input:
# expand(
# os.path.join(
# outpath,
# "coarse_hyperparameter_tuning/trained_weights/GMVAE_loss={loss}_k={k}_encoding={enc}_final_weights.h5",
# ),
# loss=losses,
# k=cluster_numbers,
# enc=encodings,
# ),
# First target set: coarse hyperparameter-tuning weight files, one per
# (loss, k, encoding) combination.
expand(
os.path.join(
outpath,
"coarse_hyperparameter_tuning/trained_weights/GMVAE_loss={loss}_k={k}_encoding={enc}_final_weights.h5",
),
loss=losses,
k=cluster_numbers,
enc=encodings,
), # expand(
# "/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/latent_regularization_experiments/trained_weights/"
# "GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_final_weights.h5",
# loss=losses,
# encs=encodings,
# k=cluster_numbers,
# latreg=latent_reg,
# )
# Second target set: latent-regularization experiment weight files, one per
# (loss, encoding, k, latreg) combination.
# NOTE(review): the `expand(` call opening these lines was lost in the diff
# merge above — these argument lines are dangling as shown.
"/psycl/g/mpsstatgen/lucas/DLC/DLC_autoencoders/DeepOF/deepof/logs/latent_regularization_experiments/trained_weights/"
"GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_final_weights.h5",
loss=losses,
encs=encodings,
k=cluster_numbers,
latreg=latent_reg,
)
......@@ -78,60 +79,60 @@ rule coarse_hyperparameter_tuning:
"--hpt-trials 1"
# rule latent_regularization_experiments:
# input:
# data_path=ancient("/psycl/g/mpsstatgen/lucas/DLC/DLC_models/deepof_single_topview/"),
# output:
# trained_models=os.path.join(
# outpath,
# "latent_regularization_experiments/trained_weights/GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_final_weights.h5",
# ),
# shell:
# "pipenv run python -m deepof.train_model "
# "--train-path {input.data_path} "
# "--val-num 5 "
# "--components {wildcards.k} "
# "--input-type coords "
# "--predictor 0 "
# "--phenotype-classifier 0 "
# "--variational True "
# "--latent-reg {wildcards.latreg} "
# "--loss {wildcards.loss} "
# "--kl-warmup 20 "
# "--mmd-warmup 20 "
# "--montecarlo-kl 10 "
# "--encoding-size {wildcards.encs} "
# "--batch-size 256 "
# "--window-size 24 "
# "--window-step 12 "
# "--exclude-bodyparts Tail_base,Tail_1,Tail_2,Tail_tip "
# "--output-path {outpath}latent_regularization_experiments"
#
#
# rule explore_phenotype_classification:
# input:
# data_path="/psycl/g/mpsstatgen/lucas/DLC/DLC_models/deepof_single_topview/",
# output:
# trained_models=os.path.join(
# outpath,
# "pheno_classification_experiments/trained_weights/GMVAE_loss={loss}_encoding={encs}_k={k}_pheno={phenos}_run_1_final_weights.h5",
# ),
# shell:
# "pipenv run python -m deepof.train_model "
# "--train-path {input.data_path} "
# "--val-num 15 "
# "--components {wildcards.k} "
# "--input-type coords "
# "--predictor 0 "
# "--phenotype-classifier {wildcards.phenos} "
# "--variational True "
# "--loss {wildcards.loss} "
# "--kl-warmup 20 "
# "--mmd-warmup 20 "
# "--montecarlo-kl 10 "
# "--encoding-size {wildcards.encs} "
# "--batch-size 256 "
# "--window-size 11 "
# "--window-step 11 "
# "--stability-check 3 "
# "--output-path {outpath}pheno_classification_experiments"
# Train one GMVAE model per (loss, encoding-size, k, latent-regularization)
# wildcard combination to study the effect of latent-space regularization.
# Invokes deepof.train_model under pipenv; the output path encodes all four
# wildcards so Snakemake can match targets to parameter combinations.
# NOTE(review): section indentation appears to have been stripped by this
# scrape — Snakemake requires `input:`/`output:`/`shell:` bodies to be
# indented; confirm against the original Snakefile before reuse.
rule latent_regularization_experiments:
input:
# ancient() suppresses re-runs when only the input's mtime changes.
data_path=ancient("/psycl/g/mpsstatgen/lucas/DLC/DLC_models/deepof_single_topview/"),
output:
trained_models=os.path.join(
outpath,
"latent_regularization_experiments/trained_weights/GMVAE_loss={loss}_encoding={encs}_k={k}_latreg={latreg}_final_weights.h5",
),
shell:
"pipenv run python -m deepof.train_model "
"--train-path {input.data_path} "
"--val-num 5 "
"--components {wildcards.k} "
"--input-type coords "
"--predictor 0 "
# Phenotype classification is disabled for this experiment set.
"--phenotype-classifier 0 "
"--variational True "
"--latent-reg {wildcards.latreg} "
"--loss {wildcards.loss} "
"--kl-warmup 20 "
"--mmd-warmup 20 "
"--montecarlo-kl 10 "
"--encoding-size {wildcards.encs} "
"--batch-size 256 "
"--window-size 24 "
"--window-step 12 "
# Tail body parts are excluded from the input coordinates for this run.
"--exclude-bodyparts Tail_base,Tail_1,Tail_2,Tail_tip "
"--output-path {outpath}latent_regularization_experiments"
# Train one GMVAE model per (loss, encoding-size, k, phenotype-classifier
# weight) wildcard combination to explore phenotype classification from the
# latent space. Unlike the latent-regularization rule above, this run enables
# the phenotype classifier, keeps all body parts, uses smaller non-overlapping
# windows (11/11 vs 24/12), a larger validation split (15 vs 5), and repeats
# training 3 times via --stability-check.
# NOTE(review): section indentation appears to have been stripped by this
# scrape — confirm against the original Snakefile before reuse.
rule explore_phenotype_classification:
input:
data_path="/psycl/g/mpsstatgen/lucas/DLC/DLC_models/deepof_single_topview/",
output:
trained_models=os.path.join(
outpath,
"pheno_classification_experiments/trained_weights/GMVAE_loss={loss}_encoding={encs}_k={k}_pheno={phenos}_run_1_final_weights.h5",
),
shell:
"pipenv run python -m deepof.train_model "
"--train-path {input.data_path} "
"--val-num 15 "
"--components {wildcards.k} "
"--input-type coords "
"--predictor 0 "
# Phenotype-classifier loss weight is swept via the {phenos} wildcard.
"--phenotype-classifier {wildcards.phenos} "
"--variational True "
"--loss {wildcards.loss} "
"--kl-warmup 20 "
"--mmd-warmup 20 "
"--montecarlo-kl 10 "
"--encoding-size {wildcards.encs} "
"--batch-size 256 "
"--window-size 11 "
"--window-step 11 "
# Train 3 independent runs to assess result stability.
"--stability-check 3 "
"--output-path {outpath}pheno_classification_experiments"
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment