Non-autoregressive GRU model load

This commit is contained in:
2024-05-06 16:11:15 +02:00
parent 19ab597ae6
commit d7f4c1849b
7 changed files with 55 additions and 22 deletions

View File

@@ -30,15 +30,15 @@ Multiple experiments are conducted to find which hyperparameters and input featu
\toprule
Features & Layers & Hidden Size & \multicolumn{2}{c}{MSE} & \multicolumn{2}{c}{MAE} & \multicolumn{2}{c}{CRPS} \\
\cmidrule(lr){4-5} \cmidrule(lr){6-7} \cmidrule(lr){8-9}
& & & Train & Test & Train & Test & Train & Test \\
& & & AR & NAR & AR & NAR & AR & NAR \\
\midrule
NRV & & & & & & & & \\
& 2 & 256 & 34942.89 & 39838.35 & 142.43 & 150.81 & 81.34 & 85.04 \\
& 4 & 256 & 34705.61 & 39506.55 & 141.74 & 149.81 & 81.89 & 85.46 \\
& 8 & 256 & 32885.71 & 37747.11 & 138.16 & 146.67 & 79.99 & 83.67 \\
& 2 & 512 & 35362.66 & 39955.79 & 143.19 & 150.77 & 84.37 & 87.88 \\
& 4 & 512 & 38253.89 & 43301.13 & 148.33 & 156.73 & 85.98 & 89.78 \\
& 8 & 512 & 33131.93 & 37681.71 & 138.93 & 146.62 & 79.64 & 83.08 \\
& 2 & 256 & 39838.35 & 40097.62 & 150.81 & 150.37 & 85.04 & 76.12 \\
& 4 & 256 & 39506.55 & 39968.96 & 149.81 & 150.04 & 85.46 & 76.07 \\
& 8 & 256 & 37747.11 & 40400.37 & 146.67 & 151.03 & 83.67 & 76.59 \\
& 2 & 512 & 39955.79 & 40917.24 & 150.77 & 152.04 & 87.88 & 76.06 \\
& 4 & 512 & 43301.13 & 39954.62 & 156.73 & 150.14 & 89.78 & 76.25 \\
& 8 & 512 & 37681.71 & 40379.14 & 146.62 & 151.05 & 83.08 & 76.42 \\
\midrule
NRV + Load & & & & & & & & & \\
& 2 & 256 & 33202.80 & 38427.91 & 138.02 & 147.27 & 79.62 & 84.17 \\

View File

@@ -1,4 +1,4 @@
This is pdfTeX, Version 3.141592653-2.6-1.40.25 (TeX Live 2023) (preloaded format=pdflatex 2023.9.17) 6 MAY 2024 13:49
This is pdfTeX, Version 3.141592653-2.6-1.40.25 (TeX Live 2023) (preloaded format=pdflatex 2023.9.17) 6 MAY 2024 16:04
entering extended mode
restricted \write18 enabled.
file:line:error style messages enabled.
@@ -1456,7 +1456,7 @@ Here is how much of TeX's memory you used:
1141 hyphenation exceptions out of 8191
83i,16n,131p,2100b,5180s stack positions out of 10000i,1000n,20000p,200000b,200000s
</Users/victormylle/Library/texlive/2023/texmf-var/fonts/pk/ljfour/public/bbm/bbm12.600pk></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertinust1math/LibertinusT1Math.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinBiolinumT.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinBiolinumTB.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinLibertineT.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinLibertineTB.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinLibertineTI.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/stix/stix-mathcal.pfb>
Output written on verslag.pdf (37 pages, 6373443 bytes).
Output written on verslag.pdf (37 pages, 6373408 bytes).
PDF statistics:
542 PDF objects out of 1000 (max. 8388607)
412 compressed objects within 5 object streams

Binary file not shown.

Binary file not shown.

View File

@@ -1,7 +1,15 @@
import torch
class LSTMModel(torch.nn.Module):
def __init__(self, inputSize, output_size, num_layers: int, hidden_size: int, dropout: float = 0.2):
def __init__(
self,
inputSize,
output_size,
num_layers: int,
hidden_size: int,
dropout: float = 0.2,
):
super(LSTMModel, self).__init__()
self.inputSize = inputSize
self.output_size = output_size
@@ -10,20 +18,34 @@ class LSTMModel(torch.nn.Module):
self.hidden_size = hidden_size
self.dropout = dropout
self.lstm = torch.nn.LSTM(input_size=inputSize[-1], hidden_size=hidden_size, num_layers=num_layers, dropout=dropout, batch_first=True)
self.lstm = torch.nn.LSTM(
input_size=inputSize[-1],
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout,
batch_first=True,
)
self.linear = torch.nn.Linear(hidden_size, output_size)
def forward(self, x):
# Forward pass through the LSTM layers
_, (hidden_state, _) = self.lstm(x)
# Use the hidden state from the last time step for the output
output = self.linear(hidden_state[-1])
return output
class GRUModel(torch.nn.Module):
def __init__(self, inputSize, output_size, num_layers: int, hidden_size: int, dropout: float = 0.2):
def __init__(
self,
inputSize,
output_size,
num_layers: int,
hidden_size: int,
dropout: float = 0.2,
):
super(GRUModel, self).__init__()
self.inputSize = inputSize
self.output_size = output_size
@@ -32,14 +54,24 @@ class GRUModel(torch.nn.Module):
self.hidden_size = hidden_size
self.dropout = dropout
self.gru = torch.nn.GRU(input_size=inputSize[-1], hidden_size=hidden_size, num_layers=num_layers, dropout=dropout, batch_first=True)
self.gru = torch.nn.GRU(
input_size=inputSize[-1],
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout,
batch_first=True,
)
self.linear = torch.nn.Linear(hidden_size, output_size)
def forward(self, x):
# if dimension is 2, add batch dimension to 1
if x.dim() == 2:
x = x.unsqueeze(0)
# Forward pass through the GRU layers
x, _ = self.gru(x)
x = x[:, -1, :]
# Use the hidden state from the last time step for the output
output = self.linear(x)
return output

View File

@@ -633,6 +633,7 @@ class NonAutoRegressiveQuantileRegression(Trainer):
def debug_plots(self, task, train: bool, data_loader, sample_indices, epoch):
for actual_idx, idx in sample_indices.items():
features, target, _ = data_loader.dataset[idx]
print(features.shape, target.shape)
features = features.to(self.device)
target = target.to(self.device)

View File

@@ -2,7 +2,7 @@ from src.utils.clearml import ClearMLHelper
#### ClearML ####
clearml_helper = ClearMLHelper(project_name="Thesis/NAQR: GRU")
task = clearml_helper.get_task(task_name="NAQR: GRU (2 - 256)")
task = clearml_helper.get_task(task_name="NAQR: GRU (2 - 256) + Load")
task.execute_remotely(queue_name="default", exit_process=True)
from src.policies.PolicyEvaluator import PolicyEvaluator
@@ -27,8 +27,8 @@ from src.models.time_embedding_layer import TimeEmbedding
data_config = DataConfig()
data_config.NRV_HISTORY = True
data_config.LOAD_HISTORY = False
data_config.LOAD_FORECAST = False
data_config.LOAD_HISTORY = True
data_config.LOAD_FORECAST = True
data_config.WIND_FORECAST = False
data_config.WIND_HISTORY = False
@@ -64,8 +64,8 @@ else:
model_parameters = {
"learning_rate": 0.0001,
"hidden_size": 256,
"num_layers": 2,
"hidden_size": 512,
"num_layers": 8,
"dropout": 0.2,
}
@@ -83,7 +83,7 @@ model_parameters = task.connect(model_parameters, name="model_parameters")
lstm_model = GRUModel(
inputDim,
len(quantiles),
len(quantiles) * 96,
hidden_size=model_parameters["hidden_size"],
num_layers=model_parameters["num_layers"],
dropout=model_parameters["dropout"],