Commit b3941ccf authored by lucas_miranda
Browse files

Changed full model for diagonal model in all variational implementations in models.py

parent 41b67856
%% Cell type:code id: tags:
``` python
%load_ext autoreload
%autoreload 2
import warnings
warnings.filterwarnings("ignore")
```
%% Cell type:code id: tags:
``` python
#from source.utils import *
from source.preprocess import *
import pickle
import matplotlib.pyplot as plt
import pandas as pd
from collections import defaultdict
from tqdm import tqdm_notebook as tqdm
```
%% Cell type:code id: tags:parameters
``` python
# Root directory of the DLC_social_1 dataset (papermill "parameters" cell).
path = "../../Desktop/DLC_social_1/"
```
%% Cell type:markdown id: tags:
# Set up and design the project
%% Cell type:code id: tags:
``` python
# Load the experimental-condition labels (video id -> treatment) for the dataset.
# Fix: the `with` body lost its indentation in the paste, which is a syntax error.
# NOTE(review): pickle.load is unsafe on untrusted files; fine for local data.
with open('{}DLC_social_1_exp_conditions.pickle'.format(path), 'rb') as handle:
    Treatment_dict = pickle.load(handle)
```
%% Cell type:code id: tags:
``` python
# Body-part adjacency map: for every part, the neighbouring parts against
# which angles are computed downstream (passed as `connectivity` to project).
bp_dict = {
    'B_Nose':        ['B_Left_ear', 'B_Right_ear'],
    'B_Left_ear':    ['B_Nose', 'B_Right_ear', 'B_Center', 'B_Left_flank'],
    'B_Right_ear':   ['B_Nose', 'B_Left_ear', 'B_Center', 'B_Right_flank'],
    'B_Center':      ['B_Left_ear', 'B_Right_ear', 'B_Left_flank',
                      'B_Right_flank', 'B_Tail_base'],
    'B_Left_flank':  ['B_Left_ear', 'B_Center', 'B_Tail_base'],
    'B_Right_flank': ['B_Right_ear', 'B_Center', 'B_Tail_base'],
    'B_Tail_base':   ['B_Center', 'B_Left_flank', 'B_Right_flank'],
}
```
%% Cell type:code id: tags:
``` python
%%time
# Build the project descriptor for the DLC social-interaction experiment.
DLC_social_1 = project(
    path=path,                      # directory containing the required files
    smooth_alpha=0.85,              # alpha for exponentially weighted smoothing
    distances=[
        'B_Center', 'B_Nose', 'B_Left_ear', 'B_Right_ear',
        'B_Left_flank', 'B_Right_flank', 'B_Tail_base',
    ],
    ego=False,
    angles=True,
    connectivity=bp_dict,           # adjacency map defined above
    arena='circular',               # type of arena used in the experiments
    arena_dims=[380],               # a single dimension suffices for a circle
    video_format='.mp4',
    table_format='.h5',
    exp_conditions=Treatment_dict,
)
```
%% Cell type:markdown id: tags:
# Run project
%% Cell type:code id: tags:
``` python
%%time
# Execute the project pipeline; `run` presumably loads/smooths all tables and
# returns a coordinates container — confirm against source.preprocess.
DLC_social_1_coords = DLC_social_1.run(verbose=True)
print(DLC_social_1_coords)
type(DLC_social_1_coords)  # echoed as the cell output for inspection
```
%% Cell type:markdown id: tags:
# Generate coords
%% Cell type:code id: tags:
``` python
%%time
# Cartesian coordinates (polar=False), centered, no speed derivative (speed=0),
# truncated to the first 10 minutes per video.
ptest = DLC_social_1_coords.get_coords(center=True, polar=False, speed=0, length='00:10:00')
ptest._type  # internal type tag of the returned table — shown as cell output
```
%% Cell type:code id: tags:
``` python
%%time
# Pairwise body-part distances for the same 10-minute window (no derivatives).
dtest = DLC_social_1_coords.get_distances(speed=0, length='00:10:00')
dtest._type  # internal type tag — shown as cell output
```
%% Cell type:code id: tags:
``` python
%%time
# Angles between connected body parts (see bp_dict), in degrees, 10-minute window.
atest = DLC_social_1_coords.get_angles(degrees=True, speed=0, length='00:10:00')
atest._type  # internal type tag — shown as cell output
```
%% Cell type:markdown id: tags:
# Visualization playground
%% Cell type:code id: tags:
``` python
#ptest.plot_heatmaps(['B_Center', 'W_Center'], i=1)
```
%% Cell type:code id: tags:
``` python
#Plot animation of trajectory over time with different smoothings
#plt.plot(ptest['Day2Test13DLC']['B_Center'].iloc[:5000]['x'],
# ptest['Day2Test13DLC']['B_Center'].iloc[:5000]['y'], label='alpha=0.85')
#plt.xlabel('x')
#plt.ylabel('y')
#plt.title('Mouse Center Trajectory using different exponential smoothings')
#plt.legend()
#plt.show()
```
%% Cell type:markdown id: tags:
# Dimensionality reduction playground
%% Cell type:code id: tags:
``` python
#pca = ptest.pca(4, 1000)
```
%% Cell type:code id: tags:
``` python
#plt.scatter(*pca[0].T)
#plt.show()
```
%% Cell type:markdown id: tags:
# Preprocessing playground
%% Cell type:code id: tags:
``` python
# Merge feature tables into one training table. Currently only the polar,
# centered coordinates are used; distances and angles are commented out below
# (left here to re-enable richer feature sets).
mtest = merge_tables(DLC_social_1_coords.get_coords(center=True, polar=True, length='00:10:00'))#,
# DLC_social_1_coords.get_distances(speed=0, length='00:10:00'),
# DLC_social_1_coords.get_angles(degrees=True, speed=0, length='00:10:00'))
```
%% Cell type:code id: tags:
``` python
#pptest = mtest.preprocess(window_size=51, filter='gaussian', sigma=10, shift=20)
```
%% Cell type:code id: tags:
``` python
# Slice into windows of 11 frames with stride 6, no smoothing filter, and
# standard-scale the features; result is presumably (n_windows, 11, n_features)
# — confirm against mtest.preprocess.
pttest = mtest.preprocess(window_size=11, window_step=6, filter=None, standard_scaler=True)
pttest.shape
```
%% Cell type:code id: tags:
``` python
#plt.plot(pttest[2,:,2], label='normal')
#plt.plot(pptest[2,:,2], label='gaussian')
#plt.legend()
#plt.show()
```
%% Cell type:markdown id: tags:
# Trained models playground
%% Cell type:markdown id: tags:
### Seq 2 seq Variational Auto Encoder
%% Cell type:code id: tags:
``` python
from datetime import datetime
import tensorflow.keras as k
import tensorflow as tf
```
%% Cell type:code id: tags:
``` python
# Run identifier used to tag this experiment's TensorBoard logs.
NAME = 'Baseline_VAE_short_512_10=warmup_begin_tfp'
# NOTE(review): `os` is not imported in any visible cell — it presumably comes
# in via `from source.preprocess import *`; confirm.
log_dir = os.path.abspath(
"logs/fit/{}_{}".format(NAME, datetime.now().strftime("%Y%m%d-%H%M%S"))
)
# histogram_freq=1: log weight/activation histograms every epoch.
tensorboard_callback = k.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
```
%% Cell type:code id: tags:
``` python
from source.models import SEQ_2_SEQ_AE, SEQ_2_SEQ_VAE, SEQ_2_SEQ_VAEP, SEQ_2_SEQ_MMVAEP
```
%% Cell type:code id: tags:
``` python
# Instantiate the plain (non-variational) seq2seq autoencoder from the window
# shape; .build() returns the encoder, decoder and end-to-end model.
encoder, decoder, ae = SEQ_2_SEQ_AE(pttest.shape).build()
ae.build(pttest.shape)
```
%% Cell type:code id: tags:
``` python
# Print the autoencoder's layer-by-layer architecture and parameter counts.
ae.summary()
```
%% Cell type:code id: tags:
``` python
# Build the variational autoencoder with a combined ELBO+MMD loss; both the KL
# and MMD terms are annealed in over the first 10 epochs via the returned
# warm-up callbacks.
encoder, generator, vae, kl_warmup_callback, mmd_warmup_callback = SEQ_2_SEQ_VAE(pttest.shape,
loss='ELBO+MMD',
kl_warmup_epochs=10,
mmd_warmup_epochs=10).build()
# vae.build(pttest.shape)
```
%% Cell type:code id: tags:
``` python
# vae.summary()
```
%% Cell type:code id: tags:
``` python
# encoder, generator, vaep, kl_warmup_callback, mmd_warmup_callback = SEQ_2_SEQ_VAEP(pttest.shape,
# loss='ELBO+MMD',
# kl_warmup_epochs=10,
# mmd_warmup_epochs=10).build()
# vaep.build(pttest.shape)
```
%% Cell type:code id: tags:
``` python
# vaep.summary()
# NOTE(review): the cell that builds `vaep` (SEQ_2_SEQ_VAEP, a few cells up) is
# commented out, so an un-commented vaep.summary() raises NameError. Keep this
# call disabled until the VAEP model is rebuilt.
```
%% Cell type:code id: tags:
``` python
# encoder, generator, gmvaep, kl_warmup_callback, mmd_warmup_callback = SEQ_2_SEQ_MMVAEP(pttest.shape,
# loss='ELBO+MMD',
# number_of_components=2,
# kl_warmup_epochs=10,
# mmd_warmup_epochs=10).build()
# gmvaep.build(pttest.shape)
```
%% Cell type:code id: tags:
``` python
# Shuffling intentionally disabled so windows keep their temporal order.
# np.random.shuffle(pttest)
n_holdout = 15000  # number of trailing windows reserved for validation
pttrain = pttest[:-n_holdout]
pttest = pttest[-n_holdout:]
```
%% Cell type:code id: tags:
``` python
#lr_schedule = tf.keras.callbacks.LearningRateScheduler(
# lambda epoch: 1e-3 * 10**(epoch / 20))
```
%% Cell type:code id: tags:
``` python
# tf.config.experimental_run_functions_eagerly(False)
# Train the VAE as an autoencoder (inputs reconstruct themselves). The [:-1]
# slice drops the last window so the array shapes match the prediction-style
# calls below; warm-up callbacks anneal the KL and MMD loss weights.
history = vae.fit(x=pttrain[:-1], y=pttrain[:-1], epochs=100, batch_size=512, verbose=1,
validation_data=(pttest[:-1], pttest[:-1]),
callbacks=[tensorboard_callback, kl_warmup_callback, mmd_warmup_callback])
```
%% Cell type:code id: tags:
``` python
# tf.config.experimental_run_functions_eagerly(False)
# history = vaep.fit(x=pttrain[:-1], y=[pttrain[:-1],pttrain[1:]], epochs=100, batch_size=512, verbose=1,
# validation_data=(pttest[:-1], [pttest[:-1],pttest[1:]]),
# callbacks=[tensorboard_callback, kl_warmup_callback, mmd_warmup_callback])
```
%% Cell type:code id: tags:
``` python
# tf.config.experimental_run_functions_eagerly(False)
# history = gmvaep.fit(x=pttrain[:-1], y=[pttrain[:-1],pttrain[1:]], epochs=2, batch_size=512, verbose=1,
# validation_data=(pttest[:-1], [pttest[:-1],pttest[1:]]),
# callbacks=[tensorboard_callback, kl_warmup_callback, mmd_warmup_callback])
```
%% Cell type:markdown id: tags:
## Probability playground
%% Cell type:code id: tags:
``` python
#I need to find a way of using DistributionLambda in my settings,
#to build a gaussian mixture likelihood with the proper categorical prior for clustering
```
%% Cell type:code id: tags:
``` python
```
%% Cell type:code id: tags:
``` python
```
......
......@@ -109,13 +109,37 @@ class UncorrelatedFeaturesConstraint(Constraint):
return self.weightage * self.uncorrelated_feature(x)
class GaussianMixtureLayer(Layer):
    """Placeholder layer for a Gaussian-mixture likelihood (work in progress)."""

    def __init__(self, *args, **kwargs):
        # Bug fix: the original spelled this `__init` (missing the trailing
        # underscores), so Python never called it as the initializer and
        # `is_placeholder` was never set.
        self.is_placeholder = True
        super(GaussianMixtureLayer, self).__init__(*args, **kwargs)
class MultivariateNormalDiag(tfpl.DistributionLambda):
    """DistributionLambda layer emitting a diagonal multivariate normal.

    The incoming tensor is split along the last axis: the first `event_size`
    entries are the location and the remaining `event_size` entries the
    diagonal scale.
    """

    def __init__(
        self,
        event_size,
        convert_to_tensor_fn=tfd.Distribution.sample,
        validate_args=False,
        **kwargs
    ):
        # event_size: dimensionality of the distribution's event space.
        # convert_to_tensor_fn: how the distribution collapses to a tensor
        # when one is required downstream (a sample, by default).
        super(MultivariateNormalDiag, self).__init__(
            lambda t: MultivariateNormalDiag.new(t, event_size, validate_args),
            convert_to_tensor_fn,
            **kwargs
        )

    def call(self, inputs, **kwargs):
        # NOTE(review): this override returns None, which would normally break
        # a Keras layer's forward pass; tfpl.DistributionLambda drives the
        # distribution construction itself, so this looks like dead code —
        # confirm and consider removing.
        pass

    @staticmethod
    def new(params, event_size, validate_args=False, name=None):
        """Create the distribution instance from a `params` vector."""
        with tf.name_scope(name or "MultivariateNormalDiag"):
            params = tf.convert_to_tensor(params, name="params")
            # Use the public tfd.MultivariateNormalDiag alias instead of
            # reaching into the private `tfd.mvn_diag` submodule (same class).
            # NOTE(review): scale_diag receives raw network output and can be
            # negative or zero; a softplus transform is the usual safeguard —
            # confirm against the encoder's final activation.
            return tfd.MultivariateNormalDiag(
                loc=params[..., :event_size],
                scale_diag=params[..., event_size:],
                validate_args=validate_args,
            )

    @staticmethod
    def params_size(event_size, name=None):
        """The number of `params` needed to create a single distribution."""
        with tf.name_scope(name or "MultivariateNormalDiag_params_size"):
            return 2 * event_size
class KLDivergenceLayer(tfpl.KLDivergenceAddLoss):
......
......@@ -282,7 +282,7 @@ class SEQ_2_SEQ_VAE:
encoder = Model_E5(encoder)
encoder = Dense(
tfpl.MultivariateNormalTriL.params_size(self.ENCODING), activation=None
MultivariateNormalDiag.params_size(self.ENCODING), activation=None
)(encoder)
# Define and control custom loss functions
......@@ -299,7 +299,7 @@ class SEQ_2_SEQ_VAE:
)
)
z = tfpl.MultivariateNormalTriL(self.ENCODING)(encoder)
z = MultivariateNormalDiag(self.ENCODING)(encoder)
if "ELBO" in self.loss:
z = KLDivergenceLayer(self.prior, weight=kl_beta)(z)
......@@ -495,7 +495,7 @@ class SEQ_2_SEQ_VAEP:
encoder = Model_E5(encoder)
encoder = Dense(
tfpl.MultivariateNormalTriL.params_size(self.ENCODING), activation=None
MultivariateNormalDiag.params_size(self.ENCODING), activation=None
)(encoder)
# Define and control custom loss functions
......@@ -511,7 +511,7 @@ class SEQ_2_SEQ_VAEP:
)
)
z = tfpl.MultivariateNormalTriL(self.ENCODING)(encoder)
z = MultivariateNormalDiag(self.ENCODING)(encoder)
if "ELBO" in self.loss:
z = KLDivergenceLayer(self.prior, weight=kl_beta)(z)
......@@ -585,7 +585,7 @@ class SEQ_2_SEQ_VAEP:
# end-to-end autoencoder
encoder = Model(x, z, name="SEQ_2_SEQ_VEncoder")
vaep = Model(
inputs=x, outputs=[x_decoded_mean, x_predicted_mean], name="SEQ_2_SEQ_VAE"
inputs=x, outputs=[x_decoded_mean, x_predicted_mean], name="SEQ_2_SEQ_VAEP"
)
# Build generator as a separate entity
......@@ -883,7 +883,6 @@ class SEQ_2_SEQ_MMVAEP:
# TODO:
# - Try sample, mean and mode for MMDiscrepancyLayer
# - Gaussian Mixture + Categorical priors -> Deep Clustering
# - prior of equal gaussians
# - prior of equal gaussians + gaussian noise on the means (not exactly the same init)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment