Some changes
This commit is contained in:
@@ -1,63 +0,0 @@
|
|||||||
"""Training entry point: fit a non-linear quantile-regression model on NRV data.

Configures the data pipeline, builds the model/optimizer pair, and launches an
auto-regressive quantile training run tracked through ClearML.
"""

from src.data import DataProcessor, DataConfig
from src.trainers.quantile_trainer import AutoRegressiveQuantileTrainer, NonAutoRegressiveQuantileRegression
from src.trainers.probabilistic_baseline import ProbabilisticBaselineTrainer
from src.trainers.autoregressive_trainer import AutoRegressiveTrainer
from src.trainers.trainer import Trainer
from src.utils.clearml import ClearMLHelper
from src.models import *
from src.losses import *
import torch
import numpy as np
from torch.nn import MSELoss, L1Loss
from datetime import datetime
import pytz
import torch.nn as nn

#### ClearML ####
# Experiment tracking helper; also passed into the trainer below so metrics
# are reported to the same ClearML project.
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")

#### Data Processor ####
# Feature selection: only the NRV history signal is enabled; load and wind
# features are explicitly switched off for this run.
data_config = DataConfig()
data_config.NRV_HISTORY = True
data_config.LOAD_HISTORY = False
data_config.LOAD_FORECAST = False

data_config.WIND_FORECAST = False
data_config.WIND_HISTORY = False

data_processor = DataProcessor(data_config)
data_processor.set_batch_size(1024)
data_processor.set_full_day_skip(False)

#### Hyperparameters ####
data_processor.set_output_size(1)
inputDim = data_processor.get_input_size()  # model input width follows the enabled features
learningRate = 0.0001
epochs = 100

# quantiles = torch.tensor([0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99]).to("cuda")
# Target quantile levels — one model output per level.
quantiles = torch.tensor(
    [0.01, 0.05, 0.1, 0.15, 0.3, 0.4, 0.5, 0.6, 0.7, 0.85, 0.9, 0.95, 0.99]
).to("cuda")

# model = LinearRegression(inputDim, len(quantiles))
model = NonLinearRegression(inputDim, len(quantiles), hiddenSize=1024, numLayers=5)
optimizer = torch.optim.Adam(model.parameters(), lr=learningRate)

#### Trainer ####
# NOTE(review): the model is not moved to "cuda" here while `quantiles` is —
# presumably AutoRegressiveQuantileTrainer handles device placement via the
# "cuda" argument; confirm against the trainer implementation.
trainer = AutoRegressiveQuantileTrainer(
    model,
    optimizer,
    data_processor,
    quantiles,
    "cuda",
    debug=True,
    clearml_helper=clearml_helper,
)
trainer.add_metrics_to_track(
    [PinballLoss(quantiles), MSELoss(), L1Loss(), CRPSLoss(quantiles)]
)
trainer.early_stopping(patience=10)
trainer.plot_every(5)

trainer.train(epochs=epochs, remotely=True)
|
|
||||||
@@ -5,7 +5,6 @@ class ClearMLHelper:
|
|||||||
self.project_name = project_name
|
self.project_name = project_name
|
||||||
|
|
||||||
def get_task(self, task_name: str = "Model Training"):
|
def get_task(self, task_name: str = "Model Training"):
|
||||||
Task.add_requirements("requirements.txt")
|
|
||||||
Task.ignore_requirements("torch")
|
Task.ignore_requirements("torch")
|
||||||
Task.ignore_requirements("torchvision")
|
Task.ignore_requirements("torchvision")
|
||||||
Task.ignore_requirements("tensorboard")
|
Task.ignore_requirements("tensorboard")
|
||||||
|
|||||||
Reference in New Issue
Block a user