diff --git a/source/hypermodels.py b/source/hypermodels.py
index 6ff6daf768c3b05bcbe5f2a68c0d95ab77ebd454..23f4f482b713fa6835ef568e67460ac2162e67e5 100644
--- a/source/hypermodels.py
+++ b/source/hypermodels.py
@@ -72,9 +72,9 @@ class SEQ_2_SEQ_AE(HyperModel):
         )
 
         # Decoder layers
-        Model_D0 = DenseTranspose(Model_E5, activation="relu", input_shape=(ENCODING,), output_dim=64)
-        Model_D1 = DenseTranspose(Model_E4, activation="relu", output_dim=128)
-        Model_D2 = DenseTranspose(Model_E3, activation="relu", output_dim=256)
+        Model_D0 = DenseTranspose(Model_E5, activation="relu", output_dim=ENCODING)
+        Model_D1 = DenseTranspose(Model_E4, activation="relu", output_dim=DENSE_2)
+        Model_D2 = DenseTranspose(Model_E3, activation="relu", output_dim=DENSE_1)
         Model_D3 = RepeatVector(self.input_shape[1])
         Model_D4 = Bidirectional(
             LSTM(
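Note (not part of the patch): both files assume a `DenseTranspose` layer that ties a decoder layer's kernel to an encoder `Dense` layer. Below is a minimal, hypothetical sketch of such a layer, following the common tied-weights pattern and the `output_dim` keyword seen in the call sites above; the repository's actual implementation (presumably defined alongside these models, e.g. in a utils module) may differ.

```python
# Hypothetical sketch of the DenseTranspose layer assumed by this patch
# (the real definition lives elsewhere in the repo and may differ).
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras.layers import Layer


class DenseTranspose(Layer):
    """Dense layer whose kernel is the transpose of a tied Dense layer's kernel."""

    def __init__(self, dense, output_dim, activation=None, **kwargs):
        self.dense = dense            # encoder Dense layer to share weights with
        self.output_dim = output_dim  # stored for inspection/config only
        self.activation = activations.get(activation)
        super().__init__(**kwargs)

    def build(self, batch_input_shape):
        # Only the bias is a new trainable weight; the kernel is borrowed
        # (transposed) from the tied layer, which must already be built
        # (e.g. because it appears earlier in the same Sequential model).
        self.biases = self.add_weight(
            name="bias",
            shape=[self.dense.input_shape[-1]],
            initializer="zeros",
        )
        super().build(batch_input_shape)

    def call(self, inputs):
        # y = activation(x · Wᵀ + b), with W taken from the tied Dense layer.
        z = tf.matmul(inputs, self.dense.weights[0], transpose_b=True)
        return self.activation(z + self.biases)
```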
diff --git a/source/models.py b/source/models.py
index 572445aa659304e38dcfd43274b8bb9f181e9d66..49e196ba350b844c33d31184c2a693b54b1204e9 100644
--- a/source/models.py
+++ b/source/models.py
@@ -72,9 +72,9 @@ class SEQ_2_SEQ_AE:
         )
 
         # Decoder layers
-        Model_D0 = Dense(self.DENSE_2, activation="relu")
-        Model_D1 = Dense(self.DENSE_1, activation="relu")
-        Model_D2 = Dense(self.DENSE_1, activation="relu")
+        Model_D0 = DenseTranspose(Model_E5, activation="relu", output_dim=self.ENCODING)
+        Model_D1 = DenseTranspose(Model_E4, activation="relu", output_dim=self.DENSE_2)
+        Model_D2 = DenseTranspose(Model_E3, activation="relu", output_dim=self.DENSE_1)
         Model_D3 = RepeatVector(self.input_shape[1])
         Model_D4 = Bidirectional(
             LSTM(
@@ -189,9 +189,9 @@ class SEQ_2_SEQ_VAE:
 
         # Decoder layers
 
-        Model_D0 = Dense(self.DENSE_2, activation="relu")
-        Model_D1 = Dense(self.DENSE_1, activation="relu")
-        Model_D2 = Dense(self.DENSE_1, activation="relu")
+        Model_D0 = DenseTranspose(Model_E5, activation="relu", output_dim=self.ENCODING)
+        Model_D1 = DenseTranspose(Model_E4, activation="relu", output_dim=self.DENSE_2)
+        Model_D2 = DenseTranspose(Model_E3, activation="relu", output_dim=self.DENSE_1)
         Model_D3 = RepeatVector(self.input_shape[1])
         Model_D4 = Bidirectional(
             LSTM(
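If `DenseTranspose` follows the sketch above, the net effect of these hunks is that the decoder's dense stack reuses the transposed kernels of `Model_E3`–`Model_E5` instead of learning independent weights: the dense parameter count roughly halves, and the decoder widths stay in sync with the `DENSE_1`/`DENSE_2`/`ENCODING` settings (or the tuner's choices in `hypermodels.py`) rather than being hard-coded. A small usage illustration, with assumed names and widths that are not the repo's code:

```python
# Illustrative wiring only; 64/16 are placeholder widths.
encoder_dense = tf.keras.layers.Dense(16, activation="relu")  # plays the role of Model_E5
decoder_dense = DenseTranspose(encoder_dense, activation="relu", output_dim=16)

x = tf.random.normal([2, 64])  # batch of 2 samples, 64 features
code = encoder_dense(x)        # -> shape (2, 16)
recon = decoder_dense(code)    # -> shape (2, 64), via the transposed encoder kernel
```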