Commit 4a54a438 authored by lucas_miranda's avatar lucas_miranda
Browse files

Refactored train_utils.py

parent b51981dd
Pipeline #93127 failed with stage
in 73 minutes and 59 seconds
...@@ -12,6 +12,7 @@ from datetime import date, datetime ...@@ -12,6 +12,7 @@ from datetime import date, datetime
from kerastuner import BayesianOptimization, Hyperband from kerastuner import BayesianOptimization, Hyperband
from kerastuner import HyperParameters from kerastuner import HyperParameters
from kerastuner_tensorboard_logger import TensorBoardLogger from kerastuner_tensorboard_logger import TensorBoardLogger
from sklearn.metrics import roc_auc_score
from tensorboard.plugins.hparams import api as hp from tensorboard.plugins.hparams import api as hp
from typing import Tuple, Union, Any, List from typing import Tuple, Union, Any, List
import deepof.hypermodels import deepof.hypermodels
...@@ -139,7 +140,7 @@ def get_callbacks( ...@@ -139,7 +140,7 @@ def get_callbacks(
return callbacks return callbacks
def log_hyperparameters(phenotype_class): def log_hyperparameters(phenotype_class: float, rec: str):
"""Blueprint for hyperparameter and metric logging in tensorboard during hyperparameter tuning""" """Blueprint for hyperparameter and metric logging in tensorboard during hyperparameter tuning"""
logparams = [ logparams = [
...@@ -163,7 +164,6 @@ def log_hyperparameters(phenotype_class): ...@@ -163,7 +164,6 @@ def log_hyperparameters(phenotype_class):
), ),
] ]
rec = "reconstruction_" if phenotype_class else ""
metrics = [ metrics = [
hp.Metric("val_{}mae".format(rec), display_name="val_{}mae".format(rec)), hp.Metric("val_{}mae".format(rec), display_name="val_{}mae".format(rec)),
hp.Metric("val_{}mse".format(rec), display_name="val_{}mse".format(rec)), hp.Metric("val_{}mse".format(rec), display_name="val_{}mse".format(rec)),
...@@ -192,7 +192,16 @@ def log_hyperparameters(phenotype_class): ...@@ -192,7 +192,16 @@ def log_hyperparameters(phenotype_class):
# noinspection PyUnboundLocalVariable # noinspection PyUnboundLocalVariable
def tensorboard_metric_logging(run_dir: str, hpms: Any): def tensorboard_metric_logging(
run_dir: str,
hpms: Any,
ae: Any,
X_val: np.ndarray,
y_val: np.ndarray,
phenotype_class: float,
predictor: float,
rec: str,
):
"""Autoencoder metric logging in tensorboard""" """Autoencoder metric logging in tensorboard"""
output = ae.predict(X_val) output = ae.predict(X_val)
...@@ -262,7 +271,6 @@ def autoencoder_fitting( ...@@ -262,7 +271,6 @@ def autoencoder_fitting(
tf.keras.backend.clear_session() tf.keras.backend.clear_session()
# Defines what to log on tensorboard (useful for trying out different models) # Defines what to log on tensorboard (useful for trying out different models)
logparam = { logparam = {
"encoding": encoding_size, "encoding": encoding_size,
"k": n_components, "k": n_components,
...@@ -287,8 +295,9 @@ def autoencoder_fitting( ...@@ -287,8 +295,9 @@ def autoencoder_fitting(
cbacks = cbacks[1:] cbacks = cbacks[1:]
# Logs hyperparameters to tensorboard # Logs hyperparameters to tensorboard
rec = "reconstruction_" if phenotype_class else ""
if log_hparams: if log_hparams:
logparams, metrics = log_hyperparameters(phenotype_class) logparams, metrics = log_hyperparameters(phenotype_class, rec)
with tf.summary.create_file_writer( with tf.summary.create_file_writer(
os.path.join(output_path, "hparams", run_ID) os.path.join(output_path, "hparams", run_ID)
...@@ -411,6 +420,12 @@ def autoencoder_fitting( ...@@ -411,6 +420,12 @@ def autoencoder_fitting(
tensorboard_metric_logging( tensorboard_metric_logging(
os.path.join(output_path, "hparams", run_ID), os.path.join(output_path, "hparams", run_ID),
logparam, logparam,
ae,
X_val,
y_val,
phenotype_class,
predictor,
rec,
) )
return return_list return return_list
......
...@@ -9,12 +9,13 @@ Testing module for deepof.train_utils ...@@ -9,12 +9,13 @@ Testing module for deepof.train_utils
""" """
from hypothesis import given from hypothesis import given
from hypothesis import settings, reproduce_failure from hypothesis import settings
from hypothesis import strategies as st from hypothesis import strategies as st
from hypothesis.extra.numpy import arrays from hypothesis.extra.numpy import arrays
import deepof.data import deepof.data
import deepof.model_utils import deepof.model_utils
import deepof.train_utils import deepof.train_utils
import numpy as np
import os import os
import tensorflow as tf import tensorflow as tf
...@@ -113,7 +114,8 @@ def test_autoencoder_fitting( ...@@ -113,7 +114,8 @@ def test_autoencoder_fitting(
predictor, predictor,
variational, variational,
): ):
preprocessed_data = (X_train, [], X_train, []) y_train = np.round(np.random.uniform(0, 1, X_train.shape[0]))
preprocessed_data = (X_train, y_train, X_train, y_train)
prun = deepof.data.project( prun = deepof.data.project(
path=os.path.join(".", "tests", "test_examples", "test_single_topview"), path=os.path.join(".", "tests", "test_examples", "test_single_topview"),
...@@ -131,7 +133,7 @@ def test_autoencoder_fitting( ...@@ -131,7 +133,7 @@ def test_autoencoder_fitting(
log_hparams=True, log_hparams=True,
n_components=k, n_components=k,
loss=loss, loss=loss,
phenotype_class=0, phenotype_class=pheno_class,
predictor=predictor, predictor=predictor,
variational=variational, variational=variational,
) )
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment