From e74353e44b49397628c8175ac3d0a86458763d2f Mon Sep 17 00:00:00 2001
From: Dawith
Date: Thu, 23 Oct 2025 20:41:45 -0400
Subject: [PATCH] decoder and encoder have the same TFmer blocks

---
 model/transformer.py | 21 ---------------------
 1 file changed, 21 deletions(-)

diff --git a/model/transformer.py b/model/transformer.py
index 917f859..5a03188 100644
--- a/model/transformer.py
+++ b/model/transformer.py
@@ -74,27 +74,6 @@ def build_transformerblock(self, inputs, head_size, num_heads,
 
         return outputs
 
-    def build_decoderblock(self, inputs, head_size, num_heads, ff_dim,
-                           dropout):
-        """
-        Constructs the decoder block. This consists of masked multi-head
-        attention, dropout, layer normalization, a residual connection,
-        a feedforward neural network, and another residual connection, but in
-        the reverse order as the encoder block.
-        """
-
-        x = LayerNormalization(epsilon=1e-6)(inputs)
-        x = Conv1D(filters=ff_dim, kernel_size=1, activation="relu")(inputs)
-        x = Dropout(dropout)(x)
-        x = Conv1D(filters=inputs.shape[-1], kernel_size=1)(x)
-        x = Dropout(dropout)(x)
-        res = x + inputs
-        outputs = MultiHeadAttention(
-            key_dim=head_size, num_heads=num_heads,
-            dropout=dropout)(res, res, use_causal_mask=True)
-
-        return outputs
-
     def call(self, inputs):
         """
         Calls the TimeSeriesTransformer model on a batch of inputs.
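
Note: with build_decoderblock deleted, the decoder presumably reuses
build_transformerblock for both stacks. The sketch below is a minimal
guess at what such a shared block could look like in the Keras
functional style used above; build_transformerblock's body is not part
of this hunk, so the pre-norm ordering and the `causal` flag here are
assumptions, not code from the repository.

    # Hypothetical shared block; only build_transformerblock is named in
    # the patch. Every other identifier here is illustrative.
    from tensorflow.keras.layers import (
        Conv1D, Dropout, LayerNormalization, MultiHeadAttention)

    def shared_transformer_block(inputs, head_size, num_heads, ff_dim,
                                 dropout, causal=False):
        # Self-attention sub-layer with a residual connection; a decoder
        # stack would pass causal=True to mask future timesteps, matching
        # the use_causal_mask=True call in the deleted method.
        x = LayerNormalization(epsilon=1e-6)(inputs)
        x = MultiHeadAttention(
            key_dim=head_size, num_heads=num_heads,
            dropout=dropout)(x, x, use_causal_mask=causal)
        x = Dropout(dropout)(x)
        res = x + inputs

        # Position-wise feedforward sub-layer (1x1 convolutions) with a
        # second residual connection. The deleted method fed `inputs`
        # rather than the normalized tensor into the first Conv1D,
        # discarding the LayerNormalization output; chaining through `x`
        # avoids that.
        x = LayerNormalization(epsilon=1e-6)(res)
        x = Conv1D(filters=ff_dim, kernel_size=1, activation="relu")(x)
        x = Dropout(dropout)(x)
        x = Conv1D(filters=inputs.shape[-1], kernel_size=1)(x)
        x = Dropout(dropout)(x)
        return x + res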