from src.utils.clearml import ClearMLHelper

#### ClearML ####
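# Register the task with ClearML, then hand execution to an agent from the
# "default" queue; with exit_process=True the local process exits as soon as
# the task has been enqueued.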
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
task = clearml_helper.get_task(task_name="AQR: Linear + Load + Wind + PV + QE + NP")
task.execute_remotely(queue_name="default", exit_process=True)

from src.policies.PolicyEvaluator import PolicyEvaluator
from src.policies.simple_baseline import BaselinePolicy, Battery
from src.models.lstm_model import GRUModel
from src.data import DataProcessor, DataConfig
from src.trainers.quantile_trainer import (
    AutoRegressiveQuantileTrainer,
    NonAutoRegressiveQuantileRegression,
)
from src.trainers.trainer import Trainer
from src.models import *  # provides LinearRegression (and NonLinearRegression)
from src.losses import *  # provides PinballLoss and CRPSLoss
from src.models.time_embedding_layer import TimeEmbedding, TrigonometricTimeEmbedding

import ast

import torch
import torch.nn as nn
from torch.nn import MSELoss, L1Loss

#### Data Processor ####
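# Feature selection: each flag below toggles an input series (history and/or
# day-ahead forecast) for the model; DAY_OF_WEEK is disabled for this run.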
data_config = DataConfig()

data_config.NRV_HISTORY = True

data_config.LOAD_HISTORY = True
data_config.LOAD_FORECAST = True

data_config.WIND_FORECAST = True
data_config.WIND_HISTORY = True

data_config.PV_FORECAST = True
data_config.PV_HISTORY = True

data_config.QUARTER = True
data_config.DAY_OF_WEEK = False

data_config.NOMINAL_NET_POSITION = True

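# Expose the feature flags in the ClearML UI so cloned tasks can override them.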
data_config = task.connect(data_config, name="data_features")

data_processor = DataProcessor(data_config, path="", lstm=False)
data_processor.set_batch_size(512)
data_processor.set_full_day_skip(False)

#### Hyperparameters ####
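# One-step-ahead output; the input width follows from the selected features.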
data_processor.set_output_size(1)
inputDim = data_processor.get_input_size()
epochs = 16

# Read the quantile grid from the task parameters so a cloned run can override it.
quantiles = task.get_parameter("general/quantiles", cast=True)
if quantiles is None:
    quantiles = [0.01, 0.05, 0.1, 0.15, 0.3, 0.4, 0.5, 0.6, 0.7, 0.85, 0.9, 0.95, 0.99]
    task.set_parameter("general/quantiles", quantiles)
elif isinstance(quantiles, str):
    # ClearML may hand the list back as its string representation,
    # e.g. "[0.01, 0.05, ...]"; parse it safely instead of using eval().
    quantiles = ast.literal_eval(quantiles)

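# Model hyperparameters; hidden_size, num_layers and dropout only apply to the
# commented-out GRU / non-linear variants below, the linear head ignores them.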
model_parameters = {
    "learning_rate": 0.0001,
    "hidden_size": 256,
    "num_layers": 16,
    "dropout": 0.2,
    "time_feature_embedding": 2,
}
model_parameters = task.connect(model_parameters, name="model_parameters")

time_embedding = TimeEmbedding(
    data_processor.get_time_feature_size(), model_parameters["time_feature_embedding"]
)
# time_embedding = TrigonometricTimeEmbedding(data_processor.get_time_feature_size())

# Alternative model variants, kept for reference:
# lstm_model = GRUModel(
#     time_embedding.output_dim(inputDim),
#     len(quantiles),
#     hidden_size=model_parameters["hidden_size"],
#     num_layers=model_parameters["num_layers"],
#     dropout=model_parameters["dropout"],
# )

# non_linear_model = NonLinearRegression(
#     time_embedding.output_dim(inputDim),
#     len(quantiles),
#     hiddenSize=model_parameters["hidden_size"],
#     numLayers=model_parameters["num_layers"],
#     dropout=model_parameters["dropout"],
# )

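# Final model: learned time embedding feeding a linear head with one output
# per quantile. output_size = 1 marks a single-step prediction horizon; it is
# read back below as the predict_sequence_length of the test loader.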
linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
model = nn.Sequential(time_embedding, linear_model)
model.output_size = 1

optimizer = torch.optim.Adam(model.parameters(), lr=model_parameters["learning_rate"])

### Policy Evaluator ###
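# Battery(2, 1): the positional arguments are assumed to be the battery's
# capacity and maximum (dis)charge power.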
battery = Battery(2, 1)
baseline_policy = BaselinePolicy(battery, data_path="")
policy_evaluator = PolicyEvaluator(baseline_policy, task)

#### Trainer ####
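# Autoregressive quantile trainer over the configured quantile grid; "cuda"
# selects GPU training.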
trainer = AutoRegressiveQuantileTrainer(
    model,
    inputDim,
    optimizer,
    data_processor,
    quantiles,
    "cuda",
    policy_evaluator=None,  # policy evaluation runs separately after training
    debug=False,
)

trainer.add_metrics_to_track(
    [PinballLoss(quantiles), MSELoss(), L1Loss(), CRPSLoss(quantiles)]
)
trainer.early_stopping(patience=25)
trainer.plot_every(15)
trainer.train(task=task, epochs=epochs, remotely=True)

### Policy Evaluation ###
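# Re-create the test loader with the model's prediction horizon and replay the
# baseline battery policy on the held-out test samples.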
idx_samples = trainer.test_set_samples
_, test_loader = trainer.data_processor.get_dataloaders(
    predict_sequence_length=trainer.model.output_size, full_day_skip=False
)

policy_evaluator.evaluate_test_set(idx_samples, test_loader)
policy_evaluator.plot_profits_table()
policy_evaluator.plot_thresholds_per_day()

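# Search for the cycling penalty that steers the policy to the target number
# of charge cycles, tracking the resulting profit.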
optimal_penalty, profit, charge_cycles = (
    policy_evaluator.optimize_penalty_for_target_charge_cycles(
        idx_samples=idx_samples,
        test_loader=test_loader,
        initial_penalty=1000,
        target_charge_cycles=283,
        initial_learning_rate=3,
        max_iterations=150,
        tolerance=1,
    )
)

print(
    f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}"
)
task.get_logger().report_single_value(name="Optimal Penalty", value=optimal_penalty)
task.get_logger().report_single_value(name="Optimal Profit", value=profit)
task.get_logger().report_single_value(name="Optimal Charge Cycles", value=charge_cycles)

task.close()