Commit 79dca42a authored by lucas_miranda's avatar lucas_miranda
Browse files

Modified GMVAEP - GRUs instead of LSTMs, stricter clipping, less deep, l1...

Modified GMVAEP - GRUs instead of LSTMs, stricter clipping, less deep, l1 regularization in cluster means, uniform initializer of variances
parent bcc49658
Pipeline #102886 canceled with stages
in 16 minutes and 8 seconds
......@@ -155,8 +155,8 @@ class GMVAE:
Model_E0 = tf.keras.layers.Conv1D(
filters=self.CONV_filters,
kernel_size=5,
strides=2, # Increased strides to yield shorter sequences
padding="same",
strides=1, # Increased strides to yield shorter sequences
padding="valid",
activation=self.dense_activation,
kernel_initializer=he_uniform(),
use_bias=True,
......
......@@ -640,7 +640,7 @@
},
{
"cell_type": "code",
"execution_count": 37,
"execution_count": 62,
"metadata": {},
"outputs": [],
"source": [
......@@ -668,7 +668,7 @@
},
{
"cell_type": "code",
"execution_count": 38,
"execution_count": 63,
"metadata": {
"scrolled": false
},
......@@ -681,50 +681,50 @@
"__________________________________________________________________________________________________\n",
"Layer (type) Output Shape Param # Connected to \n",
"==================================================================================================\n",
"input_19 (InputLayer) [(None, 22, 26)] 0 \n",
"input_31 (InputLayer) [(None, 22, 26)] 0 \n",
"__________________________________________________________________________________________________\n",
"conv1d_33 (Conv1D) (None, 11, 64) 8384 input_19[0][0] \n",
"conv1d_52 (Conv1D) (None, 18, 64) 8384 input_31[0][0] \n",
"__________________________________________________________________________________________________\n",
"batch_normalization_118 (BatchN (None, 11, 64) 256 conv1d_33[0][0] \n",
"batch_normalization_178 (BatchN (None, 18, 64) 256 conv1d_52[0][0] \n",
"__________________________________________________________________________________________________\n",
"bidirectional_60 (Bidirectional (None, 11, 256) 197632 batch_normalization_118[0][0] \n",
"bidirectional_96 (Bidirectional (None, 18, 256) 148992 batch_normalization_178[0][0] \n",
"__________________________________________________________________________________________________\n",
"batch_normalization_119 (BatchN (None, 11, 256) 1024 bidirectional_60[0][0] \n",
"batch_normalization_179 (BatchN (None, 18, 256) 1024 bidirectional_96[0][0] \n",
"__________________________________________________________________________________________________\n",
"bidirectional_61 (Bidirectional (None, 128) 164352 batch_normalization_119[0][0] \n",
"bidirectional_97 (Bidirectional (None, 128) 123648 batch_normalization_179[0][0] \n",
"__________________________________________________________________________________________________\n",
"batch_normalization_120 (BatchN (None, 128) 512 bidirectional_61[0][0] \n",
"batch_normalization_180 (BatchN (None, 128) 512 bidirectional_97[0][0] \n",
"__________________________________________________________________________________________________\n",
"dense_109 (Dense) (None, 64) 8256 batch_normalization_120[0][0] \n",
"dense_163 (Dense) (None, 64) 8256 batch_normalization_180[0][0] \n",
"__________________________________________________________________________________________________\n",
"batch_normalization_121 (BatchN (None, 64) 256 dense_109[0][0] \n",
"batch_normalization_181 (BatchN (None, 64) 256 dense_163[0][0] \n",
"__________________________________________________________________________________________________\n",
"dropout_10 (Dropout) (None, 64) 0 batch_normalization_121[0][0] \n",
"dropout_16 (Dropout) (None, 64) 0 batch_normalization_181[0][0] \n",
"__________________________________________________________________________________________________\n",
"sequential_18 (Sequential) (None, 32) 2208 dropout_10[0][0] \n",
"sequential_30 (Sequential) (None, 32) 2208 dropout_16[0][0] \n",
"__________________________________________________________________________________________________\n",
"cluster_means (Dense) (None, 90) 2970 sequential_18[0][0] \n",
"cluster_means (Dense) (None, 90) 2970 sequential_30[0][0] \n",
"__________________________________________________________________________________________________\n",
"cluster_variances (Dense) (None, 90) 2970 sequential_18[0][0] \n",
"cluster_variances (Dense) (None, 90) 2970 sequential_30[0][0] \n",
"__________________________________________________________________________________________________\n",
"concatenate_19 (Concatenate) (None, 180) 0 cluster_means[0][0] \n",
"concatenate_31 (Concatenate) (None, 180) 0 cluster_means[0][0] \n",
" cluster_variances[0][0] \n",
"__________________________________________________________________________________________________\n",
"cluster_assignment (Dense) (None, 15) 495 sequential_18[0][0] \n",
"cluster_assignment (Dense) (None, 15) 495 sequential_30[0][0] \n",
"__________________________________________________________________________________________________\n",
"reshape_10 (Reshape) (None, 12, 15) 0 concatenate_19[0][0] \n",
"reshape_16 (Reshape) (None, 12, 15) 0 concatenate_31[0][0] \n",
"__________________________________________________________________________________________________\n",
"encoding_distribution (Distribu multiple 0 cluster_assignment[0][0] \n",
" reshape_10[0][0] \n",
" reshape_16[0][0] \n",
"__________________________________________________________________________________________________\n",
"kl_divergence_layer_8 (KLDiverg multiple 181 encoding_distribution[0][0] \n",
"kl_divergence_layer_14 (KLDiver multiple 181 encoding_distribution[0][0] \n",
"__________________________________________________________________________________________________\n",
"latent_distribution (Lambda) multiple 0 kl_divergence_layer_8[0][0] \n",
"latent_distribution (Lambda) multiple 0 kl_divergence_layer_14[0][0] \n",
"__________________________________________________________________________________________________\n",
"vae_reconstruction (Functional) multiple 419092 latent_distribution[0][0] \n",
"vae_reconstruction (Functional) multiple 337940 latent_distribution[0][0] \n",
"==================================================================================================\n",
"Total params: 808,588\n",
"Trainable params: 806,411\n",
"Total params: 638,092\n",
"Trainable params: 635,915\n",
"Non-trainable params: 2,177\n",
"__________________________________________________________________________________________________\n"
]
......@@ -740,7 +740,7 @@
},
{
"cell_type": "code",
"execution_count": 46,
"execution_count": 52,
"metadata": {},
"outputs": [],
"source": [
......@@ -748,10 +748,10 @@
"def plot_model(model, name):\n",
" tf.keras.utils.plot_model(\n",
" model,\n",
" to_file=os.path.join(\n",
" path,\n",
" \"deepof_{}_{}.png\".format(name, datetime.now().strftime(\"%Y%m%d-%H%M%S\")),\n",
" ),\n",
" #to_fileos.path.join(\n",
" # path,\n",
" # \"deepof_{}_{}.png\".format(name, datetime.now().strftime(\"%Y%m%d-%H%M%S\")),\n",
" #),\n",
" show_shapes=True,\n",
" show_dtype=False,\n",
" show_layer_names=True,\n",
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment