# Thesis/src/training_scripts/non_autoregressive_quantiles.py
from src.utils.clearml import ClearMLHelper
#### ClearML ####
clearml_helper = ClearMLHelper(project_name="Thesis/NAQR: Non-Linear")
task = clearml_helper.get_task(
    task_name="NAQR: Non-Linear (8 - 512) + NRV + LOAD + PV + WIND + NP"
)
task.execute_remotely(queue_name="default", exit_process=True)
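# execute_remotely(exit_process=True) enqueues the task and terminates the local
# process; everything below only runs on the remote agent.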
from src.policies.PolicyEvaluator import PolicyEvaluator
from src.policies.simple_baseline import BaselinePolicy, Battery
from src.models.lstm_model import GRUModel
from src.data import DataProcessor, DataConfig
from src.trainers.quantile_trainer import (
    AutoRegressiveQuantileTrainer,
    NonAutoRegressiveQuantileRegression,
)
from src.trainers.trainer import Trainer
import ast  # safe parsing of the quantiles parameter string
from src.models import *
from src.losses import *
import torch
from torch.nn import MSELoss, L1Loss
import torch.nn as nn
from src.models.time_embedding_layer import TimeEmbedding
#### Data Processor ####
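# Feature toggles for this run; they match the feature set in the task name
# (NRV + LOAD + PV + WIND + NP).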
data_config = DataConfig()
data_config.NRV_HISTORY = True
data_config.LOAD_HISTORY = True
data_config.LOAD_FORECAST = True
data_config.WIND_FORECAST = True
data_config.WIND_HISTORY = True
data_config.PV_FORECAST = True
data_config.PV_HISTORY = True
data_config.NOMINAL_NET_POSITION = True
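# connect() registers the config with the task so individual flags can be
# overridden from the ClearML UI or by cloned runs.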
data_config = task.connect(data_config, name="data_features")
data_processor = DataProcessor(data_config, path="", lstm=False)
data_processor.set_batch_size(64)
data_processor.set_full_day_skip(True)
#### Hyperparameters ####
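# 96 output steps per sample (presumably one full day at 15-minute resolution).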
data_processor.set_output_size(96)
inputDim = data_processor.get_input_size()
epochs = 300
# Pull the quantile levels from ClearML, or register the defaults on first run
quantiles = task.get_parameter("general/quantiles", cast=True)
if quantiles is None:
    quantiles = [0.01, 0.05, 0.1, 0.15, 0.3, 0.4, 0.5, 0.6, 0.7, 0.85, 0.9, 0.95, 0.99]
    task.set_parameter("general/quantiles", quantiles)
elif isinstance(quantiles, str):
    # ClearML may hand the list back as its string repr,
    # e.g. "[0.01, 0.05, 0.1, ...]"; parse it safely rather than eval()-ing it
    quantiles = ast.literal_eval(quantiles)
model_parameters = {
"learning_rate": 0.0001,
"hidden_size": 512,
"num_layers": 8,
"dropout": 0.2,
}
model_parameters = task.connect(model_parameters, name="model_parameters")
# linear_model = LinearRegression(inputDim, len(quantiles) * 96)
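# The network emits one value per (quantile, time step) pair: len(quantiles) * 96 outputs.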
non_linear_model = NonLinearRegression(
    inputDim,
    len(quantiles) * 96,
    hiddenSize=model_parameters["hidden_size"],
    numLayers=model_parameters["num_layers"],
    dropout=model_parameters["dropout"],
)
model = non_linear_model
model.output_size = 96
optimizer = torch.optim.Adam(model.parameters(), lr=model_parameters["learning_rate"])
#### Policy Evaluator ####
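# Battery(2, 1): positional size parameters of the simulated battery
# (presumably capacity and power rating; see the Battery constructor).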
battery = Battery(2, 1)
baseline_policy = BaselinePolicy(battery, data_path="")
policy_evaluator = PolicyEvaluator(baseline_policy, task)
#### Trainer ####
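# Note: the trainer receives policy_evaluator=None, so no policy evaluation
# runs during training; the commented-out block at the end shows how it would
# be invoked.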
trainer = NonAutoRegressiveQuantileRegression(
    model,
    inputDim,
    optimizer,
    data_processor,
    quantiles,
    "cuda",
    policy_evaluator=None,
    debug=False,
)
trainer.add_metrics_to_track(
    [PinballLoss(quantiles), MSELoss(), L1Loss(), CRPSLoss(quantiles)]
)
trainer.early_stopping(patience=5)
trainer.plot_every(20)
trainer.train(task=task, epochs=epochs, remotely=True)
#### Policy Evaluation ####
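# Kept commented out: offline evaluation of the baseline policy on the test
# set, including tuning the penalty to hit a target number of charge cycles.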
# idx_samples = trainer.test_set_samples
# _, test_loader = trainer.data_processor.get_dataloaders(
#     predict_sequence_length=trainer.model.output_size, full_day_skip=False
# )
# policy_evaluator.evaluate_test_set(idx_samples, test_loader)
# policy_evaluator.plot_profits_table()
# policy_evaluator.plot_thresholds_per_day()
# optimal_penalty, profit, charge_cycles = (
#     policy_evaluator.optimize_penalty_for_target_charge_cycles(
#         idx_samples=idx_samples,
#         test_loader=test_loader,
#         initial_penalty=1000,
#         target_charge_cycles=283,
#         learning_rate=15,
#         max_iterations=150,
#         tolerance=1,
#     )
# )
# print(
#     f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}"
# )
# task.get_logger().report_single_value(name="Optimal Penalty", value=optimal_penalty)
# task.get_logger().report_single_value(name="Optimal Profit", value=profit)
# task.get_logger().report_single_value(name="Optimal Charge Cycles", value=charge_cycles)
task.close()