Commit 718a7f6f authored by lucas_miranda's avatar lucas_miranda
Browse files

Changed prior initialization

parent 95c7dba5
Pipeline #102737 canceled with stages
in 12 minutes and 5 seconds
......@@ -500,62 +500,60 @@ class ClusterOverlap(Layer):
encodings, categorical = inputs[0], inputs[1]
if tf.test.is_gpu_available():
hard_groups = tf.math.argmax(categorical, axis=1)
max_groups = tf.reduce_max(categorical, axis=1)
get_local_neighbourhood_entropy = partial(
get_neighbourhood_entropy,
tensor=encodings,
clusters=hard_groups,
k=self.k,
)
hard_groups = tf.math.argmax(categorical, axis=1)
max_groups = tf.reduce_max(categorical, axis=1)
get_local_neighbourhood_entropy = partial(
get_neighbourhood_entropy,
tensor=encodings,
clusters=hard_groups,
k=self.k,
)
purity_vector = tf.map_fn(
get_local_neighbourhood_entropy,
tf.constant(list(range(self.batch_size))),
dtype=tf.dtypes.float32,
)
purity_vector = tf.map_fn(
get_local_neighbourhood_entropy,
tf.constant(list(range(self.batch_size))),
dtype=tf.dtypes.float32,
)
### CANDIDATE FOR REMOVAL. EXPLORE HOW USEFUL THIS REALLY IS ###
neighbourhood_entropy = purity_vector * max_groups
number_of_clusters = tf.cast(
tf.shape(
tf.unique(
tf.reshape(
tf.gather(
tf.cast(hard_groups, tf.dtypes.float32),
tf.where(max_groups >= self.min_confidence),
batch_dims=0,
),
[-1],
### CANDIDATE FOR REMOVAL. EXPLORE HOW USEFUL THIS REALLY IS ###
neighbourhood_entropy = purity_vector * max_groups
number_of_clusters = tf.cast(
tf.shape(
tf.unique(
tf.reshape(
tf.gather(
tf.cast(hard_groups, tf.dtypes.float32),
tf.where(max_groups >= self.min_confidence),
batch_dims=0,
),
)[0],
[-1],
),
)[0],
tf.dtypes.float32,
)
)[0],
tf.dtypes.float32,
)
self.add_metric(
number_of_clusters,
name="number_of_populated_clusters",
)
self.add_metric(
number_of_clusters,
name="number_of_populated_clusters",
)
self.add_metric(
max_groups,
aggregation="mean",
name="average_confidence_in_selected_cluster",
)
self.add_metric(
max_groups,
aggregation="mean",
name="average_confidence_in_selected_cluster",
)
self.add_metric(
neighbourhood_entropy, aggregation="mean", name="neighbourhood_entropy"
)
self.add_metric(
neighbourhood_entropy, aggregation="mean", name="neighbourhood_entropy"
)
if self.loss_weight:
# minimize local entropy
self.add_loss(self.loss_weight * tf.reduce_mean(neighbourhood_entropy))
# maximize number of clusters
# self.add_loss(-self.loss_weight * tf.reduce_mean(number_of_clusters))
if self.loss_weight:
# minimize local entropy
self.add_loss(self.loss_weight * tf.reduce_mean(neighbourhood_entropy))
# maximize number of clusters
# self.add_loss(-self.loss_weight * tf.reduce_mean(number_of_clusters))
return encodings
......@@ -473,7 +473,7 @@ class GMVAE:
# Dummy layer with no parameters, to retrieve the previous tensor
z = tf.keras.layers.Lambda(lambda t: t, name="latent_distribution")(z)
if self.number_of_components > 1:
if self.number_of_components > 1 and self.overlap_loss:
z = deepof.model_utils.ClusterOverlap(
batch_size=self.batch_size,
encoding_dim=self.ENCODING,
......
......@@ -54,7 +54,7 @@
},
{
"cell_type": "code",
"execution_count": 449,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
......@@ -92,7 +92,7 @@
},
{
"cell_type": "code",
"execution_count": 185,
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
......@@ -105,15 +105,15 @@
},
{
"cell_type": "code",
"execution_count": 77,
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 41.9 s, sys: 3.25 s, total: 45.2 s\n",
"Wall time: 37.5 s\n"
"CPU times: user 43.6 s, sys: 2.99 s, total: 46.6 s\n",
"Wall time: 38.6 s\n"
]
}
],
......@@ -126,7 +126,7 @@
},
{
"cell_type": "code",
"execution_count": 78,
"execution_count": 7,
"metadata": {
"scrolled": true
},
......@@ -143,8 +143,8 @@
"Computing angles...\n",
"Done!\n",
"deepof analysis of 166 videos\n",
"CPU times: user 9min 14s, sys: 11.8 s, total: 9min 26s\n",
"Wall time: 2min 3s\n"
"CPU times: user 9min, sys: 11.8 s, total: 9min 11s\n",
"Wall time: 2min 1s\n"
]
}
],
......@@ -163,7 +163,7 @@
},
{
"cell_type": "code",
"execution_count": 79,
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
......@@ -175,7 +175,7 @@
},
{
"cell_type": "code",
"execution_count": 80,
"execution_count": 9,
"metadata": {},
"outputs": [
{
......@@ -583,7 +583,7 @@
" 'GMVAE_input_type=coords_window_size=22_NextSeqPred=0.15_PhenoPred=0.0_RuleBasedPred=0.15_loss=ELBO_loss_warmup=10_warmup_mode=sigmoid_encoding=6_k=15_latreg=variance_entknn=100_run=4_final_weights.h5']"
]
},
"execution_count": 80,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
......@@ -594,7 +594,7 @@
},
{
"cell_type": "code",
"execution_count": 81,
"execution_count": 10,
"metadata": {},
"outputs": [
{
......@@ -603,7 +603,7 @@
"'GMVAE_input_type=coords_window_size=22_NextSeqPred=0.0_PhenoPred=0.0_RuleBasedPred=0.0_loss=ELBO_loss_warmup=25_warmup_mode=linear_encoding=6_k=15_latreg=variance_entknn=100_run=6_final_weights.h5'"
]
},
"execution_count": 81,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
......@@ -615,7 +615,7 @@
},
{
"cell_type": "code",
"execution_count": 82,
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
......@@ -630,7 +630,7 @@
},
{
"cell_type": "code",
"execution_count": 195,
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
......@@ -656,6 +656,33 @@
"gmvaep.load_weights(os.path.join(trained_network, deepof_weights))"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"ename": "InvalidArgumentError",
"evalue": " slice index 15 of dimension 0 out of bounds.\n\t [[{{node SEQ_2_SEQ_GMVAE/cluster_overlap_1/map/while/body/_10/SEQ_2_SEQ_GMVAE/cluster_overlap_1/map/while/PartitionedCall/PartitionedCall/strided_slice}}]] [Op:__inference_predict_function_37314]\n\nFunction call stack:\npredict_function\n",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-16-9722ae5ef3e8>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mgmvaep\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata_prep\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;36m10\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m~/opt/anaconda3/envs/Machine_Learning/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py\u001b[0m in \u001b[0;36mpredict\u001b[0;34m(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)\u001b[0m\n\u001b[1;32m 1627\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mstep\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata_handler\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msteps\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1628\u001b[0m \u001b[0mcallbacks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mon_predict_batch_begin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1629\u001b[0;31m \u001b[0mtmp_batch_outputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miterator\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1630\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mdata_handler\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshould_sync\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1631\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0masync_wait\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/opt/anaconda3/envs/Machine_Learning/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m 826\u001b[0m \u001b[0mtracing_count\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperimental_get_tracing_count\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 827\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtrace\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTrace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_name\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtm\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 828\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 829\u001b[0m \u001b[0mcompiler\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"xla\"\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_experimental_compile\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;34m\"nonXla\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 830\u001b[0m \u001b[0mnew_tracing_count\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperimental_get_tracing_count\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/opt/anaconda3/envs/Machine_Learning/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py\u001b[0m in \u001b[0;36m_call\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m 893\u001b[0m \u001b[0;31m# If we did not create any variables the trace we have is good enough.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 894\u001b[0m return self._concrete_stateful_fn._call_flat(\n\u001b[0;32m--> 895\u001b[0;31m filtered_flat_args, self._concrete_stateful_fn.captured_inputs) # pylint: disable=protected-access\n\u001b[0m\u001b[1;32m 896\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 897\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mfn_with_cond\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minner_args\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minner_kwds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minner_filtered_flat_args\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/opt/anaconda3/envs/Machine_Learning/lib/python3.6/site-packages/tensorflow/python/eager/function.py\u001b[0m in \u001b[0;36m_call_flat\u001b[0;34m(self, args, captured_inputs, cancellation_manager)\u001b[0m\n\u001b[1;32m 1917\u001b[0m \u001b[0;31m# No tape is watching; skip to running the function.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1918\u001b[0m return self._build_call_outputs(self._inference_function.call(\n\u001b[0;32m-> 1919\u001b[0;31m ctx, args, cancellation_manager=cancellation_manager))\n\u001b[0m\u001b[1;32m 1920\u001b[0m forward_backward = self._select_forward_and_backward_functions(\n\u001b[1;32m 1921\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/opt/anaconda3/envs/Machine_Learning/lib/python3.6/site-packages/tensorflow/python/eager/function.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, ctx, args, cancellation_manager)\u001b[0m\n\u001b[1;32m 558\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 559\u001b[0m \u001b[0mattrs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mattrs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 560\u001b[0;31m ctx=ctx)\n\u001b[0m\u001b[1;32m 561\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 562\u001b[0m outputs = execute.execute_with_cancellation(\n",
"\u001b[0;32m~/opt/anaconda3/envs/Machine_Learning/lib/python3.6/site-packages/tensorflow/python/eager/execute.py\u001b[0m in \u001b[0;36mquick_execute\u001b[0;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mensure_initialized\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 59\u001b[0m tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,\n\u001b[0;32m---> 60\u001b[0;31m inputs, attrs, num_outputs)\n\u001b[0m\u001b[1;32m 61\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_NotOkStatusException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mname\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mInvalidArgumentError\u001b[0m: slice index 15 of dimension 0 out of bounds.\n\t [[{{node SEQ_2_SEQ_GMVAE/cluster_overlap_1/map/while/body/_10/SEQ_2_SEQ_GMVAE/cluster_overlap_1/map/while/PartitionedCall/PartitionedCall/strided_slice}}]] [Op:__inference_predict_function_37314]\n\nFunction call stack:\npredict_function\n"
]
}
],
"source": [
"gmvaep.predict(data_prep[:10])"
]
},
{
"cell_type": "code",
"execution_count": 84,
Markdown is supported
Attach a file by drag &amp; drop or click to upload (0%).
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment