import torch


class LSTMModel(torch.nn.Module):
    """Stacked LSTM followed by a linear head.

    Consumes batch-first sequences and returns one prediction per sequence,
    computed from the final layer's hidden state at the last time step.
    """

    def __init__(
        self,
        inputSize,
        output_size,
        num_layers: int,
        hidden_size: int,
        dropout: float = 0.2,
    ):
        """
        Args:
            inputSize: shape-like sequence whose last entry is the number of
                input features per time step (e.g. a tensor ``.shape``).
            output_size: number of output features of the linear head.
            num_layers: number of stacked LSTM layers.
            hidden_size: hidden-state size of each LSTM layer.
            dropout: dropout applied between stacked layers; PyTorch only
                uses it when ``num_layers > 1``.
        """
        super(LSTMModel, self).__init__()
        self.inputSize = inputSize
        self.output_size = output_size

        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.dropout = dropout

        self.lstm = torch.nn.LSTM(
            input_size=inputSize[-1],
            hidden_size=hidden_size,
            num_layers=num_layers,
            dropout=dropout,
            batch_first=True,
        )
        self.linear = torch.nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Map ``x`` of shape (batch, seq, features) to (batch, output_size)."""
        # Promote an unbatched (seq, features) tensor to a batch of one,
        # mirroring the sibling GRUModel's input handling.
        if x.dim() == 2:
            x = x.unsqueeze(0)

        # Forward pass through the LSTM layers; keep only the hidden state.
        _, (hidden_state, _) = self.lstm(x)

        # hidden_state[-1] is the final layer's last-time-step state, which
        # for a unidirectional LSTM equals output[:, -1, :].
        output = self.linear(hidden_state[-1])

        return output


class GRUModel(torch.nn.Module):
    """Stacked GRU followed by a linear head.

    Consumes batch-first sequences and returns one prediction per sequence,
    taken from the last time step of the top GRU layer's output.
    """

    def __init__(
        self,
        inputSize,
        output_size,
        num_layers: int,
        hidden_size: int,
        dropout: float = 0.2,
    ):
        """
        Args:
            inputSize: shape-like sequence whose last entry is the number of
                input features per time step (e.g. a tensor ``.shape``).
            output_size: number of output features of the linear head.
            num_layers: number of stacked GRU layers.
            hidden_size: hidden-state size of each GRU layer.
            dropout: dropout applied between stacked layers; PyTorch only
                uses it when ``num_layers > 1``.
        """
        super(GRUModel, self).__init__()

        # Record the configuration for later inspection.
        self.inputSize = inputSize
        self.output_size = output_size
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.dropout = dropout

        self.gru = torch.nn.GRU(
            input_size=inputSize[-1],
            hidden_size=hidden_size,
            num_layers=num_layers,
            dropout=dropout,
            batch_first=True,
        )
        self.linear = torch.nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Map ``x`` of shape (batch, seq, features) to (batch, output_size)."""
        # Promote an unbatched (seq, features) tensor to a batch of one.
        batched = x.unsqueeze(0) if x.dim() == 2 else x

        # Run the stacked GRU over the full sequence.
        gru_out, _ = self.gru(batched)

        # Keep only the top layer's output at the final time step and
        # project it to the requested output size.
        last_step = gru_out[:, -1, :]
        return self.linear(last_step)