Commit 9da3260c authored by lucas_miranda

Added nose2body to rule_based_annotation()

parent 80fad0ed
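
For context, the commit message refers to deepof's rule_based_annotation() supervised pipeline. The snippet below is only a hypothetical illustration of what a nose-to-body proximity rule could look like; the function name, parameters, and threshold are assumptions, not the actual contents of this diff.

import numpy as np

def nose2body_rule(nose_xy: np.ndarray, body_xy: np.ndarray, tol: float = 15.0) -> np.ndarray:
    """Hypothetical helper: True for frames where the nose of one animal
    lies within `tol` pixels of the body centre of another."""
    # Per-frame Euclidean distance between two (n_frames, 2) coordinate arrays
    distances = np.linalg.norm(nose_xy - body_xy, axis=1)
    return distances < tol

# Example call on random coordinates (shapes only; the values are meaningless)
mask = nose2body_rule(np.random.rand(100, 2), np.random.rand(100, 2))
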
@@ -584,7 +584,7 @@ class SEQ_2_SEQ_GMVAE:
)
// 2,
activation=None,
initializer=Orthogonal(),
initializer=Orthogonal(), # An alternative is a constant initializer with a matrix of values computed from the labels
)(encoder)
z_gauss_var = Dense(
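
The comment introduced in the hunk above hints at seeding the kernel with label-derived values instead of an orthogonal matrix. A minimal sketch of that idea follows; the shapes, the per-class-mean construction, and the helper name are purely illustrative and not part of the commit.

import numpy as np
import tensorflow as tf

# Illustrative label-derived kernel: per-class feature means from labelled data.
# Shapes are arbitrary here; in the model they would match the encoder output
# dimensionality and the layer's number of units.
features = np.random.rand(100, 64).astype("float32")   # 100 samples, 64 features
labels = np.random.randint(0, 4, size=100)              # 4 hypothetical classes
label_means = np.stack(
    [features[labels == k].mean(axis=0) for k in range(4)], axis=1
)  # shape (64, 4), i.e. (input_dim, units)

def label_kernel_init(shape, dtype=None):
    # Returns the precomputed matrix as the layer's kernel in place of Orthogonal();
    # `shape` is expected to equal label_means.shape when the layer is built.
    return tf.constant(label_means, dtype=dtype)

dense = tf.keras.layers.Dense(4, activation=None, kernel_initializer=label_kernel_init)
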
@@ -781,346 +781,6 @@ class SEQ_2_SEQ_GMVAE:
self._prior = value
# noinspection PyDefaultArgument
# class SEQ_2_SEQ_CONV_GMVAE:
# """ Gaussian Mixture Variational Autoencoder for pose motif elucidation. """
#
# def __init__(
# self,
# architecture_hparams: dict = {},
# batch_size: int = 256,
# compile_model: bool = True,
# encoding: int = 16,
# entropy_reg_weight: float = 0.0,
# initialiser_iters: int = int(1),
# kl_warmup_epochs: int = 20,
# loss: str = "ELBO",
# mmd_warmup_epochs: int = 20,
# montecarlo_kl: int = 1,
# neuron_control: bool = False,
# number_of_components: int = 1,
# overlap_loss: float = False,
# phenotype_prediction: float = 0.0,
# predictor: float = 0.0,
# reg_cat_clusters: bool = False,
# reg_cluster_variance: bool = False,
# ):
# self.hparams = self.get_hparams(architecture_hparams)
# self.batch_size = batch_size
# self.bidirectional_merge = self.hparams["bidirectional_merge"]
# self.CONV_filters = self.hparams["units_conv"]
# self.DENSE_1 = int(self.hparams["units_lstm"] / 2)
# self.DENSE_2 = self.hparams["units_dense2"]
# self.DROPOUT_RATE = self.hparams["dropout_rate"]
# self.ENCODING = encoding
# self.LSTM_units_1 = self.hparams["units_lstm"]
# self.LSTM_units_2 = int(self.hparams["units_lstm"] / 2)
# self.clipvalue = self.hparams["clipvalue"]
# self.dense_activation = self.hparams["dense_activation"]
# self.dense_layers_per_branch = self.hparams["dense_layers_per_branch"]
# self.learn_rate = self.hparams["learning_rate"]
# self.lstm_unroll = True
# self.compile = compile_model
# self.entropy_reg_weight = entropy_reg_weight
# self.initialiser_iters = initialiser_iters
# self.kl_warmup = kl_warmup_epochs
# self.loss = loss
# self.mc_kl = montecarlo_kl
# self.mmd_warmup = mmd_warmup_epochs
# self.neuron_control = neuron_control
# self.number_of_components = number_of_components
# self.overlap_loss = overlap_loss
# self.phenotype_prediction = phenotype_prediction
# self.predictor = predictor
# self.prior = "standard_normal"
# self.reg_cat_clusters = reg_cat_clusters
# self.reg_cluster_variance = reg_cluster_variance
#
# assert (
# "ELBO" in self.loss or "MMD" in self.loss
# ), "loss must be one of ELBO, MMD or ELBO+MMD (default)"
#
# @property
# def prior(self):
# """Property to set the value of the prior
#         once the class is instantiated"""
#
# return self._prior
#
# def get_prior(self):
# """Sets the Variational Autoencoder prior distribution"""
#
# if self.prior == "standard_normal":
# # init_means = deepof.model_utils.far_away_uniform_initialiser(
# # shape=(self.number_of_components, self.ENCODING),
# # minval=0,
# # maxval=5,
# # iters=self.initialiser_iters,
# # )
#
# self.prior = tfd.MixtureSameFamily(
# mixture_distribution=tfd.categorical.Categorical(
# probs=tf.ones(self.number_of_components) / self.number_of_components
# ),
# components_distribution=tfd.MultivariateNormalDiag(
# loc=tf.Variable(
# tf.random.normal(
# [self.number_of_components, self.ENCODING],
# name="prior_means",
# )
# ),
# scale_diag=tfp.util.TransformedVariable(
# tf.ones([self.number_of_components, self.ENCODING]),
# tfb.Softplus(),
# name="prior_scales",
# ),
# ),
# )
#
# else: # pragma: no cover
# raise NotImplementedError(
# "Gaussian Mixtures are currently the only supported prior"
# )
#
# @staticmethod
# def get_hparams(params: Dict) -> Dict:
# """Sets the default parameters for the model. Overwritable with a dictionary"""
#
# defaults = {
# "bidirectional_merge": "ave",
# "clipvalue": 1.0,
# "dense_activation": "relu",
# "dense_layers_per_branch": 1,
# "dropout_rate": 1e-3,
# "learning_rate": 1e-3,
# "units_conv": 160,
# "units_dense2": 120,
# "units_conv2": 300,
# }
#
# for k, v in params.items():
# defaults[k] = v
#
# return defaults
#
# def get_layers(self, input_shape):
# """Instanciate all layers in the model"""
#
# # Encoder Layers
#
# # Decoder layers
#
# # Predictor layers
#
# # Phenotype classification layers
#
# pass
#
# def build(self, input_shape: Tuple):
# """Builds the tf.keras model"""
#
#         # Instantiate prior
# self.get_prior()
#
# # Get model layers
# () = self.get_layers(input_shape)
#
# # Define and instantiate encoder
# x = Input(shape=input_shape[1:])
# encoder = 0
#
# # encoding_shuffle = deepof.model_utils.MCDropout(self.DROPOUT_RATE)(encoder)
# z_cat = Dense(
# self.number_of_components,
# activation="softmax",
# kernel_regularizer=(
# tf.keras.regularizers.l1_l2(l1=0.01, l2=0.01)
# if self.reg_cat_clusters
# else None
# ),
# )(encoder)
#
# if self.entropy_reg_weight > 0:
# z_cat = deepof.model_utils.Entropy_regulariser(self.entropy_reg_weight)(
# z_cat
# )
#
# z_gauss_mean = Dense(
# tfpl.IndependentNormal.params_size(
# self.ENCODING * self.number_of_components
# )
# // 2,
# activation=None,
# )(encoder)
#
# z_gauss_var = Dense(
# tfpl.IndependentNormal.params_size(
# self.ENCODING * self.number_of_components
# )
# // 2,
# activation=None,
# activity_regularizer=(
# tf.keras.regularizers.l2(0.01) if self.reg_cluster_variance else None
# ),
# )(encoder)
#
# z_gauss = tf.keras.layers.concatenate([z_gauss_mean, z_gauss_var], axis=1)
#
# z_gauss = Reshape([2 * self.ENCODING, self.number_of_components])(z_gauss)
#
# # Identity layer controlling for dead neurons in the Gaussian Mixture posterior
# if self.neuron_control:
# z_gauss = deepof.model_utils.Dead_neuron_control()(z_gauss)
#
# if self.overlap_loss:
# z_gauss = deepof.model_utils.Gaussian_mixture_overlap(
# self.ENCODING,
# self.number_of_components,
# loss=self.overlap_loss,
# )(z_gauss)
#
# z = tfpl.DistributionLambda(
# lambda gauss: tfd.mixture.Mixture(
# cat=tfd.categorical.Categorical(
# probs=gauss[0],
# ),
# components=[
# tfd.Independent(
# tfd.Normal(
# loc=gauss[1][..., : self.ENCODING, k],
# scale=softplus(gauss[1][..., self.ENCODING :, k]) + 1e-5,
# ),
# reinterpreted_batch_ndims=1,
# )
# for k in range(self.number_of_components)
# ],
# ),
# convert_to_tensor_fn="sample",
# )([z_cat, z_gauss])
#
# # Define and control custom loss functions
# kl_warmup_callback = False
# if "ELBO" in self.loss:
#
# kl_beta = deepof.model_utils.K.variable(1.0, name="kl_beta")
# kl_beta._trainable = False
# if self.kl_warmup:
# kl_warmup_callback = LambdaCallback(
# on_epoch_begin=lambda epoch, logs: deepof.model_utils.K.set_value(
# kl_beta, deepof.model_utils.K.min([epoch / self.kl_warmup, 1])
# )
# )
#
# # noinspection PyCallingNonCallable
# z = deepof.model_utils.KLDivergenceLayer(
# self.prior,
# test_points_fn=lambda q: q.sample(self.mc_kl),
# test_points_reduce_axis=0,
# weight=kl_beta,
# )(z)
#
# mmd_warmup_callback = False
# if "MMD" in self.loss:
#
# mmd_beta = deepof.model_utils.K.variable(1.0, name="mmd_beta")
# mmd_beta._trainable = False
# if self.mmd_warmup:
# mmd_warmup_callback = LambdaCallback(
# on_epoch_begin=lambda epoch, logs: deepof.model_utils.K.set_value(
# mmd_beta, deepof.model_utils.K.min([epoch / self.mmd_warmup, 1])
# )
# )
#
# z = deepof.model_utils.MMDiscrepancyLayer(
# batch_size=self.batch_size, prior=self.prior, beta=mmd_beta
# )(z)
#
# # Define and instantiate generator
# g = Input(shape=self.ENCODING)
# generator = 0
# x_decoded_mean = tfpl.IndependentNormal(
# event_shape=input_shape[2:],
# convert_to_tensor_fn=tfp.distributions.Distribution.mean,
# name="vae_reconstruction",
# )(generator)
#
# # define individual branches as models
# encoder = Model(x, z, name="SEQ_2_SEQ_CONV_VEncoder")
# generator = Model(g, x_decoded_mean, name="vae_reconstruction")
#
# def log_loss(x_true, p_x_q_given_z):
# """Computes the negative log likelihood of the data given
# the output distribution"""
# return -tf.reduce_sum(p_x_q_given_z.log_prob(x_true))
#
# model_outs = [generator(encoder.outputs)]
# model_losses = [log_loss]
# model_metrics = {"vae_reconstruction": ["mae", "mse"]}
# loss_weights = [1.0]
#
# if self.predictor > 0:
# # Define and instantiate predictor
# predictor = 0
# x_predicted_mean = tfpl.IndependentNormal(
# event_shape=input_shape[2:],
# convert_to_tensor_fn=tfp.distributions.Distribution.mean,
# name="vae_prediction",
# )(predictor)
#
# model_outs.append(x_predicted_mean)
# model_losses.append(log_loss)
# model_metrics["vae_prediction"] = ["mae", "mse"]
# loss_weights.append(self.predictor)
#
# if self.phenotype_prediction > 0:
# pheno_pred = Model_PC1(z)
# pheno_pred = Dense(tfpl.IndependentBernoulli.params_size(1))(pheno_pred)
# pheno_pred = tfpl.IndependentBernoulli(
# event_shape=1,
# convert_to_tensor_fn=tfp.distributions.Distribution.mean,
# name="phenotype_prediction",
# )(pheno_pred)
#
# model_outs.append(pheno_pred)
# model_losses.append(log_loss)
# model_metrics["phenotype_prediction"] = ["AUC", "accuracy"]
# loss_weights.append(self.phenotype_prediction)
#
# # define grouper and end-to-end autoencoder model
# grouper = Model(encoder.inputs, z_cat, name="Deep_Gaussian_Mixture_clustering")
# gmvaep = Model(
# inputs=encoder.inputs,
# outputs=model_outs,
# name="SEQ_2_SEQ_CONV_GMVAE",
# )
#
# if self.compile:
# gmvaep.compile(
# loss=model_losses,
# optimizer=Nadam(
# lr=self.learn_rate,
# clipvalue=self.clipvalue,
# ),
# metrics=model_metrics,
# loss_weights=loss_weights,
# )
#
# gmvaep.build(input_shape)
#
# return (
# encoder,
# generator,
# grouper,
# gmvaep,
# kl_warmup_callback,
# mmd_warmup_callback,
# )
#
# @prior.setter
# def prior(self, value):
# self._prior = value
# TODO:
# - Check usefulness of stateful sequential layers! (stateful=True in the LSTMs)
# - Investigate full covariance matrix approximation for the latent space! (details on tfp course) :)
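
Regarding the second TODO item, a full-covariance posterior over the latent space can be parameterised with tensorflow_probability's MultivariateNormalTriL layer instead of independent normals. The sketch below is a self-contained illustration under assumed placeholder sizes, not the model's actual architecture.

import tensorflow as tf
import tensorflow_probability as tfp

tfpl = tfp.layers
ENCODING = 16  # placeholder latent dimensionality

# Dense head producing the mean and lower-triangular scale entries, followed by
# a latent distribution with a full covariance matrix.
full_cov_head = tf.keras.Sequential([
    tf.keras.layers.Dense(
        tfpl.MultivariateNormalTriL.params_size(ENCODING), activation=None
    ),
    tfpl.MultivariateNormalTriL(ENCODING),
])

dummy_encoder_output = tf.zeros([1, 64])          # stand-in for the encoder output
z = full_cov_head(dummy_encoder_output)           # a MultivariateNormalTriL distribution

As for the first TODO item, passing stateful=True to the LSTM layers carries hidden state across batches, which is only meaningful when consecutive batches contain consecutive time windows of the same sequences.
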