Two-category classification
Dawith committed Apr 18, 2025
1 parent 540757d · commit 044c3fa
Showing 1 changed file with 5 additions and 4 deletions.
model/model.py
@@ -4,7 +4,7 @@
"""

from keras import Input, Model
from keras.layers import BatchNormalization, Conv1D, Dense, Dropout, \
from keras.layers import BatchNormalization, Conv1D, Dense, Dropout, Reshape, \
GlobalAveragePooling1D, LayerNormalization, Masking, \
MultiHeadAttention

@@ -57,7 +57,7 @@ def _modelstack(self, input_shape, head_size, num_heads, ff_dim,
             num_Transformer_blocks: int, the number of Transformer blocks.
             mlp_units: list of ints, the number of neurons in each layer of
                 the MLP.
-            n_classes: int, the number of output classes.
+            n_classes: list of ints, the number of output classes.
             dropout: float, dropout rate.
             mlp_dropout: float, dropout rate in the MLP.
@@ -75,9 +75,10 @@ def _modelstack(self, input_shape, head_size, num_heads, ff_dim,
         for dim in mlp_units:
             x = Dense(dim, activation="relu")(x)
             x = Dropout(mlp_dropout)(x)
-        outputs = Dense(n_classes, activation="softmax")(x)
+        y = Dense(n_classes[0], activation="softmax")(x)
+        z = Dense(n_classes[1], activation="softmax")(x)
 
-        return Model(inputs, outputs)
+        return Model(inputs, [y, z])
 
     def _transformerblocks(self, inputs, head_size, num_heads,
                            ff_dim, dropout):
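
Because the model now ends in two softmax heads, Keras expects one loss and one label array per output at compile/fit time. Below is a minimal standalone sketch of that pattern; the input shape, class counts, optimizer, and random data are illustrative assumptions, not values from this repository.

# Sketch of compiling and training a two-head classifier like the one
# this commit builds; shapes, class counts, and losses are assumptions.
import numpy as np
from keras import Input, Model
from keras.layers import Dense, GlobalAveragePooling1D

n_classes = [3, 5]                      # assumed: classes per output head
inputs = Input(shape=(128, 8))          # assumed (timesteps, channels)
x = GlobalAveragePooling1D()(inputs)
x = Dense(64, activation="relu")(x)
y = Dense(n_classes[0], activation="softmax")(x)  # first category head
z = Dense(n_classes[1], activation="softmax")(x)  # second category head
model = Model(inputs, [y, z])

# One loss per output; integer labels pair with sparse crossentropy.
model.compile(optimizer="adam",
              loss=["sparse_categorical_crossentropy"] * 2,
              metrics=["accuracy"])

X = np.random.rand(16, 128, 8).astype("float32")
labels_a = np.random.randint(0, n_classes[0], size=16)
labels_b = np.random.randint(0, n_classes[1], size=16)
model.fit(X, [labels_a, labels_b], epochs=1, verbose=0)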
