Commit 9096578a authored by lucas_miranda
Browse files

Refactored train_utils.py

parent cb1bc384
Pipeline #93151 passed with stage
in 28 minutes and 36 seconds
......@@ -22,13 +22,18 @@ pheno_weights = [0.01, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 10.0, 100.0]
rule deepof_experiments:
input:
expand(
"/u/lucasmir/DLC/DLC_autoencoders/DeepOF/deepof/logs/dimension_and_loss_experiments/trained_weights/"
"GMVAE_loss={loss}_encoding={encs}_k={k}_run_1_final_weights.h5",
loss=losses,
encs=encodings,
k=cluster_numbers,
),
expand( "/u/lucasmir/DLC/DLC_autoencoders/DeepOF/deepof/logs/hyperparameter_tuning/trained_weights/"
"GMVAE_loss={loss}_encoding=2_k=15_run_1_final_weights.h5",
loss=losses,
)
# expand(
# "/u/lucasmir/DLC/DLC_autoencoders/DeepOF/deepof/logs/dimension_and_loss_experiments/trained_weights/"
# "GMVAE_loss={loss}_encoding={encs}_k={k}_run_1_final_weights.h5",
# loss=losses,
# encs=encodings,
# k=cluster_numbers,
# ),
# expand(
# "/u/lucasmir/DLC/DLC_autoencoders/DeepOF/deepof/logs/pheno_classification_experiments/trained_weights/"
# "GMVAE_loss={loss}_encoding={encs}_k={k}_pheno={phenos}_run_1_final_weights.h5",
......@@ -39,91 +44,92 @@ rule deepof_experiments:
# ),
# rule coarse_hyperparameter_tuning:
# input:
# data_path="/u/lucasmir/DLC/DLC_models/deepof_single_topview/",
# output:
# trained_models=os.path.join(
# outpath,
# "coarse_hyperparameter_tuning/trained_weights/GMVAE_loss={loss}_encoding={encs}_run_1_final_weights.h5",
# ),
# shell:
# "pipenv run python -m deepof.train_model "
# "--train-path {input.data_path} "
# "--val-num 25 "
# "--components 10 "
# "--input-type coords "
# "--predictor 0 "
# "--variational True "
# "--loss {wildcards.loss} "
# "--kl-warmup 20 "
# "--mmd-warmup 20 "
# "--encoding-size 2 "
# "--batch-size 256 "
# "--window-size 11 "
# "--window-step 11 "
# "--output-path {outpath}coarse_hyperparameter_tuning "
# "--hyperparameter-tuning hyperband "
# "--hpt-trials 3"
rule explore_encoding_dimension_and_loss_function:
rule coarse_hyperparameter_tuning:
input:
data_path=ancient("/u/lucasmir/DLC/DLC_models/deepof_single_topview/"),
data_path="/u/lucasmir/DLC/DLC_models/deepof_single_topview/",
output:
trained_models=os.path.join(
outpath,
"dimension_and_loss_experiments/trained_weights/GMVAE_loss={loss}_encoding={encs}_k={k}_run_1_final_weights.h5",
"hyperparameter_tuning/trained_weights/GMVAE_loss={loss}_encoding=2_run_1_final_weights.h5",
),
shell:
"pipenv run python -m deepof.train_model "
"--train-path {input.data_path} "
"--val-num 5 "
"--components {wildcards.k} "
"--val-num 25 "
"--components 15 "
"--input-type coords "
"--predictor 0 "
"--phenotype-classifier 0 "
"--variational True "
"--loss {wildcards.loss} "
"--kl-warmup 20 "
"--mmd-warmup 20 "
"--montecarlo-kl 10 "
"--encoding-size {wildcards.encs} "
"--mmd-warmup 0 "
"--encoding-size 2 "
"--batch-size 256 "
"--window-size 24 "
"--window-step 6 "
"--exclude-bodyparts Tail_base,Tail_1,Tail_2,Tail_tip "
"--stability-check 3 "
"--output-path {outpath}dimension_and_loss_experiments"
"--window-step 12 "
"--output-path {outpath}coarse_hyperparameter_tuning "
"--hyperparameter-tuning hyperband "
"--hpt-trials 3"
rule explore_phenotype_classification:
input:
data_path="/u/lucasmir/DLC/DLC_models/deepof_single_topview/",
output:
trained_models=os.path.join(
outpath,
"pheno_classification_experiments/trained_weights/GMVAE_loss={loss}_encoding={encs}_k={k}_pheno={phenos}_run_1_final_weights.h5",
),
shell:
"pipenv run python -m deepof.train_model "
"--train-path {input.data_path} "
"--val-num 15 "
"--components {wildcards.k} "
"--input-type coords "
"--predictor 0 "
"--phenotype-classifier {wildcards.phenos} "
"--variational True "
"--loss {wildcards.loss} "
"--kl-warmup 20 "
"--mmd-warmup 20 "
"--montecarlo-kl 10 "
"--encoding-size {wildcards.encs} "
"--batch-size 256 "
"--window-size 11 "
"--window-step 11 "
"--stability-check 3 "
"--output-path {outpath}pheno_classification_experiments"
# rule explore_encoding_dimension_and_loss_function:
# input:
# data_path=ancient("/u/lucasmir/DLC/DLC_models/deepof_single_topview/"),
# output:
# trained_models=os.path.join(
# outpath,
# "dimension_and_loss_experiments/trained_weights/GMVAE_loss={loss}_encoding={encs}_k={k}_run_1_final_weights.h5",
# ),
# shell:
# "pipenv run python -m deepof.train_model "
# "--train-path {input.data_path} "
# "--val-num 5 "
# "--components {wildcards.k} "
# "--input-type coords "
# "--predictor 0 "
# "--phenotype-classifier 0 "
# "--variational True "
# "--loss {wildcards.loss} "
# "--kl-warmup 20 "
# "--mmd-warmup 20 "
# "--montecarlo-kl 10 "
# "--encoding-size {wildcards.encs} "
# "--batch-size 256 "
# "--window-size 24 "
# "--window-step 6 "
# "--exclude-bodyparts Tail_base,Tail_1,Tail_2,Tail_tip "
# "--stability-check 3 "
# "--output-path {outpath}dimension_and_loss_experiments"
#
#
# rule explore_phenotype_classification:
# input:
# data_path="/u/lucasmir/DLC/DLC_models/deepof_single_topview/",
# output:
# trained_models=os.path.join(
# outpath,
# "pheno_classification_experiments/trained_weights/GMVAE_loss={loss}_encoding={encs}_k={k}_pheno={phenos}_run_1_final_weights.h5",
# ),
# shell:
# "pipenv run python -m deepof.train_model "
# "--train-path {input.data_path} "
# "--val-num 15 "
# "--components {wildcards.k} "
# "--input-type coords "
# "--predictor 0 "
# "--phenotype-classifier {wildcards.phenos} "
# "--variational True "
# "--loss {wildcards.loss} "
# "--kl-warmup 20 "
# "--mmd-warmup 20 "
# "--montecarlo-kl 10 "
# "--encoding-size {wildcards.encs} "
# "--batch-size 256 "
# "--window-size 11 "
# "--window-step 11 "
# "--stability-check 3 "
# "--output-path {outpath}pheno_classification_experiments"
......
......@@ -9,7 +9,7 @@ Testing module for deepof.train_utils
"""
from hypothesis import given
from hypothesis import HealthCheck
from hypothesis import HealthCheck, reproduce_failure
from hypothesis import settings
from hypothesis import strategies as st
from hypothesis.extra.numpy import arrays
......@@ -93,16 +93,16 @@ def test_get_callbacks(
st.integers(min_value=2, max_value=10),
),
elements=st.floats(
min_value=0.0,
min_value=-1,
max_value=1,
),
),
batch_size=st.integers(min_value=128, max_value=512),
encoding_size=st.integers(min_value=1, max_value=16),
encoding_size=st.integers(min_value=2, max_value=16),
k=st.integers(min_value=1, max_value=10),
loss=st.one_of(st.just("ELBO"), st.just("MMD")),
pheno_class=st.floats(min_value=0.0, max_value=1.0),
predictor=st.floats(min_value=0.0, max_value=1.0),
loss=st.one_of(st.just("ELBO"), st.just("MMD"), st.just("ELBO+MMD")),
pheno_class=st.one_of(st.just(0.0), st.just(1.0)),
predictor=st.one_of(st.just(0.0), st.just(1.0)),
variational=st.booleans(),
)
def test_autoencoder_fitting(
......@@ -116,6 +116,7 @@ def test_autoencoder_fitting(
variational,
):
y_train = np.round(np.random.uniform(0, 1, X_train.shape[0]))
preprocessed_data = (X_train, y_train, X_train, y_train)
prun = deepof.data.project(
......@@ -130,8 +131,10 @@ def test_autoencoder_fitting(
batch_size=batch_size,
encoding_size=encoding_size,
epochs=1,
kl_warmup=10,
log_history=True,
log_hparams=True,
mmd_warmup=10,
n_components=k,
loss=loss,
phenotype_class=pheno_class,
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment