There are multiple problems in your code. For simplicity, I will just give you one well-defined model instead. The following code builds an LSTM autoencoder that reconstructs inputs of shape (batch_size, timesteps, number_of_features_at_each_timestep):
import torch
from torch import nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class Encoder(nn.Module):
    def __init__(self, seq_len, n_features, embedding_dim=64):
        super(Encoder, self).__init__()
        self.seq_len, self.n_features = seq_len, n_features
        self.embedding_dim, self.hidden_dim = embedding_dim, 2 * embedding_dim
        self.rnn1 = nn.LSTM(
            input_size=n_features,
            hidden_size=self.hidden_dim,
            num_layers=1,
            batch_first=True
        )
        self.rnn2 = nn.LSTM(
            input_size=self.hidden_dim,
            hidden_size=self.embedding_dim,
            num_layers=1,
            batch_first=True
        )

    def forward(self, x):
        # x: (batch_size, seq_len, n_features)
        x, (_, _) = self.rnn1(x)
        x, (hidden_n, _) = self.rnn2(x)
        # hidden_n is the last hidden state: (1, batch_size, embedding_dim)
        return hidden_n
class Decoder(nn.Module):
    def __init__(self, seq_len, input_dim=64, n_features=1):
        super(Decoder, self).__init__()
        self.seq_len, self.input_dim = seq_len, input_dim
        self.hidden_dim, self.n_features = 2 * input_dim, n_features
        self.rnn1 = nn.LSTM(
            input_size=input_dim,
            hidden_size=input_dim,
            num_layers=1,
            batch_first=True
        )
        self.rnn2 = nn.LSTM(
            input_size=input_dim,
            hidden_size=self.hidden_dim,
            num_layers=1,
            batch_first=True
        )
        self.output_layer = nn.Linear(self.hidden_dim, n_features)

    def forward(self, x):
        # x: (1, batch_size, input_dim); repeat the embedding once per timestep
        x = x.repeat(self.seq_len, 1, 1)       # (seq_len, batch_size, input_dim)
        x = x.permute(1, 0, 2)                 # (batch_size, seq_len, input_dim)
        x, (hidden_n, cell_n) = self.rnn1(x)
        x, (hidden_n, cell_n) = self.rnn2(x)   # (batch_size, seq_len, hidden_dim)
        return self.output_layer(x)            # (batch_size, seq_len, n_features)
class RecurrentAutoencoder(nn.Module):
    def __init__(self, seq_len, n_features, embedding_dim=64):
        super(RecurrentAutoencoder, self).__init__()
        self.encoder = Encoder(seq_len, n_features, embedding_dim).to(device)
        self.decoder = Decoder(seq_len, embedding_dim, n_features).to(device)

    def forward(self, x):
        print("Inputs size:", x.size())
        x = self.encoder(x)
        print("Representation size: ", x.size())
        x = self.decoder(x)
        print("Outputs size: ", x.size())
        return x
batch_n = 5
seq_len = 10
n_features = 3
inputs = torch.randn(batch_n, seq_len, n_features).to(device)
model = RecurrentAutoencoder(seq_len, n_features).to(device)
y = model(inputs)
Outputs:
Inputs size: torch.Size([5, 10, 3])
Representation size: torch.Size([1, 5, 64])
Outputs size: torch.Size([5, 10, 3])
Beware that the representation (i.e. the output of the encoder) has shape (1, batch_size, embedding_dim), because it is the last hidden state of the second LSTM layer.
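If you want a flat (batch_size, embedding_dim) representation for downstream use, you can drop that leading dimension with squeeze; a small sketch:

# Flatten the encoder output from (1, batch_size, embedding_dim)
# to (batch_size, embedding_dim):
embedding = model.encoder(inputs).squeeze(0)  # torch.Size([5, 64])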
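And if you want to actually train the autoencoder on the reconstruction objective, a minimal sketch could look like the following. The MSE loss, Adam optimizer, learning rate, and epoch count here are my assumptions, not part of the model above, and you would want to remove the print statements from forward before running it in a loop:

# Minimal training sketch (assumed hyperparameters, adjust to your task):
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(100):
    optimizer.zero_grad()
    reconstruction = model(inputs)            # (batch_size, seq_len, n_features)
    loss = criterion(reconstruction, inputs)  # reconstruction error vs. the input
    loss.backward()
    optimizer.step()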