Commit a2278c5f authored by lucas_miranda's avatar lucas_miranda

Implemented tests for autoencoder fitting

parent 03c57ea9
Pipeline #93076 passed in 25 minutes and 42 seconds
......@@ -784,6 +784,7 @@ class coordinates:
preprocessed_object: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray],
batch_size: int = 256,
encoding_size: int = 4,
epochs: int = 35,
hparams: dict = None,
kl_warmup: int = 0,
log_history: bool = True,
......@@ -808,6 +809,7 @@ class coordinates:
- preprocessed_object (Tuple[np.ndarray]): tuple containing a preprocessed object (X_train,
y_train, X_test, y_test)
- encoding_size (int): number of dimensions in the latent space of the autoencoder
- epochs (int): number of epochs to train the models for
- batch_size (int): training batch size
- save_checkpoints (bool): if True, training checkpoints are saved to disk. Useful for debugging,
but can make training significantly slower
......@@ -840,6 +842,7 @@ class coordinates:
preprocessed_object=preprocessed_object,
batch_size=batch_size,
encoding_size=encoding_size,
epochs=epochs,
hparams=hparams,
kl_warmup=kl_warmup,
log_history=log_history,
......
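With this change, the number of training epochs is configurable from the public deep_unsupervised_embedding call instead of being hard-coded inside autoencoder_fitting. A minimal usage sketch, mirroring the call pattern of the test added below; the array shape, file path, and parameter values are illustrative assumptions, not part of this commit:

import os
import numpy as np
import deepof.data

# Illustrative preprocessed object: (X_train, y_train, X_test, y_test) with empty label sets
X_train = np.random.uniform(0.0, 1.0, size=(100, 10, 6))
preprocessed_data = (X_train, [], X_train, [])

coords = deepof.data.project(
    path=os.path.join(".", "tests", "test_examples", "test_single_topview"),  # assumed example dataset
    arena="circular",
    arena_dims=tuple([380]),
    video_format=".mp4",
).run()

# epochs is now an explicit keyword argument (previously fixed at 35 inside the fitting routine)
models = coords.deep_unsupervised_embedding(
    preprocessed_data,
    batch_size=256,
    encoding_size=4,
    epochs=10,  # illustrative value
    variational=True,
    loss="ELBO",
)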
......@@ -143,6 +143,7 @@ def autoencoder_fitting(
preprocessed_object: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray],
batch_size: int,
encoding_size: int,
epochs: int,
hparams: dict,
kl_warmup: int,
log_history: bool,
......@@ -293,7 +294,7 @@ def autoencoder_fitting(
ae.fit(
x=X_train,
y=X_train,
epochs=35,
epochs=epochs,
batch_size=batch_size,
verbose=1,
validation_data=(X_val, X_val),
......@@ -343,7 +344,7 @@ def autoencoder_fitting(
ae.fit(
x=Xs,
y=ys,
epochs=2,
epochs=epochs,
batch_size=batch_size,
verbose=1,
validation_data=(
......
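Inside autoencoder_fitting, the previously hard-coded epoch counts (35 for the main fit, 2 for the second one) are replaced by the new argument, which is forwarded directly to tf.keras.Model.fit. A minimal sketch of that pattern with hypothetical model and data names, not the actual deepof implementation:

import numpy as np
import tensorflow as tf

def fit_reconstruction(
    ae: tf.keras.Model,
    X_train: np.ndarray,
    X_val: np.ndarray,
    batch_size: int,
    epochs: int,  # forwarded from the caller instead of a literal constant
) -> tf.keras.callbacks.History:
    """Hypothetical helper illustrating how the epochs argument reaches Keras."""
    return ae.fit(
        x=X_train,
        y=X_train,  # the autoencoder reconstructs its own input
        epochs=epochs,  # was epochs=35 before this commit
        batch_size=batch_size,
        verbose=1,
        validation_data=(X_val, X_val),
    )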
......@@ -12,6 +12,7 @@ from hypothesis import given
from hypothesis import settings
from hypothesis import strategies as st
from hypothesis.extra.numpy import arrays
import deepof.data
import deepof.model_utils
import deepof.train_utils
import os
......@@ -80,6 +81,61 @@ def test_get_callbacks(
assert type(cycle1c) == deepof.model_utils.one_cycle_scheduler
@settings(max_examples=2, deadline=None)
@given(
X_train=arrays(
dtype=float,
shape=st.tuples(
st.integers(min_value=10, max_value=100),
st.integers(min_value=2, max_value=15),
st.integers(min_value=2, max_value=10),
),
elements=st.floats(
min_value=0.0,
max_value=1,
),
),
batch_size=st.integers(min_value=128, max_value=512),
encoding_size=st.integers(min_value=1, max_value=16),
k=st.integers(min_value=1, max_value=10),
loss=st.one_of(st.just("ELBO"), st.just("MMD")),
pheno_class=st.floats(min_value=0.0, max_value=1.0),
predictor=st.floats(min_value=0.0, max_value=1.0),
variational=st.booleans(),
)
def test_autoencoder_fitting(
X_train,
batch_size,
encoding_size,
k,
loss,
pheno_class,
predictor,
variational,
):
preprocessed_data = (X_train, [], X_train, [])
prun = deepof.data.project(
path=os.path.join(".", "tests", "test_examples", "test_single_topview"),
arena="circular",
arena_dims=tuple([380]),
video_format=".mp4",
).run()
models = prun.deep_unsupervised_embedding(
preprocessed_data,
batch_size=batch_size,
encoding_size=encoding_size,
epochs=1,
n_components=k,
loss=loss,
phenotype_class=pheno_class,
predictor=predictor,
variational=variational,
)
print(models)
@settings(max_examples=1, deadline=None)
@given(
X_train=arrays(
......