diff --git a/source/models.py b/source/models.py
index cd4c68e1411f154e1981bc44c183a3a9f7bd0a77..65d349049c89e60d093dc90b7f6fb640b2dc4835 100644
--- a/source/models.py
+++ b/source/models.py
@@ -6,7 +6,7 @@ from tensorflow.keras.initializers import he_uniform, Orthogonal
 from tensorflow.keras.layers import BatchNormalization, Bidirectional, Dense
 from tensorflow.keras.layers import Dropout, Lambda, LSTM
 from tensorflow.keras.layers import RepeatVector, TimeDistributed
-from tensorflow.keras.losses import BinaryCrossentropy, Huber
+from tensorflow.keras.losses import Huber
 from tensorflow.keras.optimizers import Adam
 from source.model_utils import *
 import tensorflow as tf
@@ -137,7 +137,7 @@ class SEQ_2_SEQ_AE:
         model = Sequential([encoder, decoder], name="SEQ_2_SEQ_AE")
 
         model.compile(
-            loss="binary_crossentropy",#Huber(reduction="sum", delta=100.0),
+            loss=Huber(reduction="sum", delta=100.0),
             optimizer=Adam(lr=self.learn_rate, clipvalue=0.5,),
             metrics=["mae"],
         )
@@ -309,7 +309,7 @@ class SEQ_2_SEQ_VAE:
             return self.input_shape[1:] * huber(x_, x_decoded_mean_)
 
         vae.compile(
-            loss="binary_crossentropy",#huber_loss,
+            loss=huber_loss,
             optimizer=Adam(lr=self.learn_rate,),
             metrics=["mae"],
             experimental_run_tf_function=False,
@@ -518,7 +518,7 @@ class SEQ_2_SEQ_VAEP:
             return self.input_shape[1:] * huber(x_, x_decoded_mean_)
 
         vaep.compile(
-            loss="binary_crossentropy",
+            loss=huber_loss,
             optimizer=Adam(lr=self.learn_rate,),
             metrics=["mae"],
             experimental_run_tf_function=False,
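
Note on the huber_loss referenced in the SEQ_2_SEQ_VAE and SEQ_2_SEQ_VAEP hunks: it is a closure defined above the shown context, of which only the return statement appears in the diff. The sketch below is not the repository's exact definition; it is a minimal stand-in that assumes the same reduction="sum" and delta=100.0 seen in the SEQ_2_SEQ_AE hunk, and the factory name make_huber_loss is illustrative.

from tensorflow.keras.losses import Huber

def make_huber_loss(input_shape):
    # Hypothetical factory mirroring the closure above the VAE/VAEP hunks:
    # a summed Huber loss scaled by the per-sample dimensions, as in the
    # context line "return self.input_shape[1:] * huber(x_, x_decoded_mean_)".
    # reduction="sum" and delta=100.0 are assumed from the SEQ_2_SEQ_AE hunk.
    huber = Huber(reduction="sum", delta=100.0)

    def huber_loss(x_, x_decoded_mean_):
        return input_shape[1:] * huber(x_, x_decoded_mean_)

    return huber_loss

With a factory like this, the compile calls could equally pass loss=make_huber_loss(self.input_shape) rather than closing over self, but the diff keeps the existing closure and simply switches the loss back from "binary_crossentropy" to it.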