autoencoders.py
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Embedding, LSTM, Dense, RepeatVector, TimeDistributed

# Model dimensions (placeholder values; set these for your own data)
input_dim = 10000         # vocabulary size; set to your tokenizer's vocab size
max_sequence_length = 50  # length of the padded input sequences
latent_dim = 256          # dimension of the latent space representation

# Encoder: token ids -> embeddings -> a single latent vector
inputs = Input(shape=(max_sequence_length,))
embedding = Embedding(input_dim, 100)(inputs)  # adjust embedding size as needed
lstm = LSTM(latent_dim)(embedding)             # final hidden state summarizes the sequence
encoder_output = Dense(latent_dim, activation='relu')(lstm)

# Decoder: repeat the latent vector across timesteps, then decode back to
# per-timestep token probabilities
repeated = RepeatVector(max_sequence_length)(encoder_output)
decoded = LSTM(latent_dim, return_sequences=True)(repeated)
decoder_output = TimeDistributed(Dense(input_dim, activation='softmax'))(decoded)

# Autoencoder model: learns to reconstruct its own input sequence
autoencoder = Model(inputs, decoder_output)
autoencoder.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
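# --- Hypothetical smoke-test data (assumption, not part of the original) ---
# Random token ids stand in for real tokenized text so the script runs end to
# end; replace with your own padded sequences (e.g., Tokenizer + pad_sequences).
import numpy as np
train_data = np.random.randint(0, input_dim, size=(1000, max_sequence_length))
val_data = np.random.randint(0, input_dim, size=(200, max_sequence_length))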
# Train the autoencoder to reproduce its input: inputs and targets are the
# same integer arrays of shape (num_samples, max_sequence_length)
autoencoder.fit(train_data, train_data, epochs=10, batch_size=32,
                validation_data=(val_data, val_data))
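
# --- Usage sketch (assumption): extract the learned latent codes ---
# A standalone encoder model reuses the trained layers and maps each input
# sequence to its latent-space representation.
encoder = Model(inputs, encoder_output)
latent_codes = encoder.predict(val_data)  # shape: (num_samples, latent_dim)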