Added policy executor file for remote execution

Victor Mylle
2024-01-15 21:19:33 +00:00
parent 67cc6d4bb9
commit 428e3d9e4b
7 changed files with 323 additions and 110 deletions
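
Example invocation of the new script (the file name policy_executor.py is an assumption; the task id placeholder is illustrative, and the queue name comes from the code below):

    python policy_executor.py --task_id <clearml-task-id> --model_type quantile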


@@ -1,17 +1,42 @@
import argparse
from clearml import Task, Model
from src.policies.simple_baseline import BaselinePolicy, Battery
from src.data import DataProcessor, DataConfig
import torch
import numpy as np
import pandas as pd
import datetime
from tqdm import tqdm
from src.utils.imbalance_price_calculator import ImbalancePriceCalculator
import time
### import functions ###
from src.trainers.quantile_trainer import auto_regressive as quantile_auto_regressive
from src.trainers.diffusion_trainer import sample_diffusion
from src.utils.clearml import ClearMLHelper
# argparse to parse task id and model type
parser = argparse.ArgumentParser()
-parser.add_argument('--task_id', type=int, default=None)
+parser.add_argument('--task_id', type=str, default=None)
parser.add_argument('--model_type', type=str, default=None)
args = parser.parse_args()
assert args.task_id is not None, "Please specify task id"
assert args.model_type is not None, "Please specify model type"
battery = Battery(2, 1)
baseline_policy = BaselinePolicy(battery, data_path="")
### Load Imbalance Prices ###
imbalance_prices = pd.read_csv('data/imbalance_prices.csv', sep=';')
imbalance_prices["DateTime"] = pd.to_datetime(imbalance_prices['DateTime'], utc=True)
imbalance_prices = imbalance_prices.sort_values(by=['DateTime'])
def get_imbalance_prices(date):
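    # prices for one calendar day: 96 quarter-hourly values, matching the
    # 96-step prediction horizon used by the models below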
imbalance_prices_day = imbalance_prices[imbalance_prices["DateTime"].dt.date == date]
return imbalance_prices_day['Positive imbalance price'].values
def load_model(task_id: str):
"""
Load model from task id
@@ -31,7 +56,7 @@ def load_model(task_id: str):
data_config.DAY_OF_WEEK = False
### Data Processor ###
-    data_processor = DataProcessor(data_config, path="../../", lstm=False)
+    data_processor = DataProcessor(data_config, path="", lstm=False)
data_processor.set_batch_size(8192)
data_processor.set_full_day_skip(True)
@@ -50,4 +75,155 @@ def load_model(task_id: str):
predict_sequence_length=96
)
-    return configuration, model, test_loader
+    return configuration, model, data_processor, test_loader
def quantile_auto_regressive_predicted_NRV(model, date, data_processor, test_loader):
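    # draw 500 autoregressive sample trajectories for the given date, then map
    # the normalized model outputs back to the original NRV scale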
idx = test_loader.dataset.get_idx_for_date(date.date())
initial, _, samples, target = quantile_auto_regressive(test_loader.dataset, model, [idx]*500, 96)
samples = samples.cpu().numpy()
target = target.cpu().numpy()
# inverse using data_processor
samples = data_processor.inverse_transform(samples)
target = data_processor.inverse_transform(target)
return initial.cpu().numpy()[0][-1], samples, target
def diffusion_predicted_NRV(model, date, _, test_loader):
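    # draw 1000 diffusion samples conditioned on the previous day's features;
    # the data_processor argument is unused here, hence the underscore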
device = next(model.parameters()).device
idx = test_loader.dataset.get_idx_for_date(date.date())
prev_features, targets = test_loader.dataset.get_batch([idx])
    if prev_features.ndim == 2:
initial_sequence = prev_features[:, :96]
else:
initial_sequence = prev_features[:, :, 0]
prev_features = prev_features.to(device)
targets = targets.to(device)
samples = sample_diffusion(model, 1000, prev_features)
return initial_sequence.cpu().numpy()[0][-1], samples.cpu().numpy(), targets.cpu().numpy()
def get_next_day_profits_for_date(model, data_processor, test_loader, date, ipc, predict_NRV: callable, penalties: list):
charge_thresholds = np.arange(-100, 250, 25)
discharge_thresholds = np.arange(-100, 250, 25)
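    # grid of candidate price thresholds; get_optimal_thresholds only evaluates
    # pairs with the charge threshold below the discharge threshold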
predicted_nrv_profits_cycles = {i: [0, 0] for i in penalties}
baseline_profits_cycles = {i: [0, 0] for i in penalties}
initial, nrvs, target = predict_NRV(model, date, data_processor, test_loader)
initial = np.repeat(initial, nrvs.shape[0])
combined = np.concatenate((initial.reshape(-1, 1), nrvs), axis=1)
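    # prepend the last observed value to each sampled NRV trajectory and map
    # the result to the imbalance prices it implies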
reconstructed_imbalance_prices = ipc.get_imbalance_prices_2023_for_date_vectorized(date, combined)
reconstructed_imbalance_prices = torch.tensor(reconstructed_imbalance_prices, device="cuda")
yesterday_imbalance_prices = get_imbalance_prices(date.date() - datetime.timedelta(days=1))
yesterday_imbalance_prices = torch.tensor(np.array([yesterday_imbalance_prices]), device="cpu")
real_imbalance_prices = get_imbalance_prices(date.date())
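    # for each cycle penalty, compare two strategies on the realized prices:
    # thresholds optimized on the predicted imbalance prices vs. thresholds
    # optimized on yesterday's realized prices (the baseline)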
for penalty in penalties:
found_charge_thresholds, found_discharge_thresholds = baseline_policy.get_optimal_thresholds(reconstructed_imbalance_prices, charge_thresholds, discharge_thresholds, penalty)
next_day_charge_threshold = found_charge_thresholds.mean(axis=0)
next_day_discharge_threshold = found_discharge_thresholds.mean(axis=0)
yesterday_charge_thresholds, yesterday_discharge_thresholds = baseline_policy.get_optimal_thresholds(yesterday_imbalance_prices, charge_thresholds, discharge_thresholds, penalty)
next_day_profit, next_day_charge_cycles = baseline_policy.simulate(torch.tensor([[real_imbalance_prices]]), torch.tensor([next_day_charge_threshold]), torch.tensor([next_day_discharge_threshold]))
yesterday_profit, yesterday_charge_cycles = baseline_policy.simulate(torch.tensor([[real_imbalance_prices]]), torch.tensor([yesterday_charge_thresholds.mean(axis=0)]), torch.tensor([yesterday_discharge_thresholds.mean(axis=0)]))
predicted_nrv_profits_cycles[penalty][0] += next_day_profit.item()
predicted_nrv_profits_cycles[penalty][1] += next_day_charge_cycles.item()
baseline_profits_cycles[penalty][0] += yesterday_profit.item()
baseline_profits_cycles[penalty][1] += yesterday_charge_cycles.item()
return predicted_nrv_profits_cycles, baseline_profits_cycles
def next_day_test_set(model, data_processor, test_loader, ipc, predict_NRV: callable):
penalties = [0, 10, 50, 150, 250, 350, 500]
predicted_nrv_profits_cycles = {i: [0, 0] for i in penalties}
baseline_profits_cycles = {i: [0, 0] for i in penalties}
# get all dates in test set
dates = baseline_policy.test_data["DateTime"].dt.date.unique()
    # convert the unique dates back to pandas Timestamps
dates = pd.to_datetime(dates)
for date in tqdm(dates):
try:
new_predicted_nrv_profits_cycles, new_baseline_profits_cycles = get_next_day_profits_for_date(model, data_processor, test_loader, date, ipc, predict_NRV, penalties)
for penalty in penalties:
predicted_nrv_profits_cycles[penalty][0] += new_predicted_nrv_profits_cycles[penalty][0]
predicted_nrv_profits_cycles[penalty][1] += new_predicted_nrv_profits_cycles[penalty][1]
baseline_profits_cycles[penalty][0] += new_baseline_profits_cycles[penalty][0]
baseline_profits_cycles[penalty][1] += new_baseline_profits_cycles[penalty][1]
        except Exception:
            # skip dates that cannot be evaluated (e.g. missing price data)
            continue
return predicted_nrv_profits_cycles, baseline_profits_cycles
def main():
    clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
    task = clearml_helper.get_task(task_name="Policy Test")
    task.connect(args, name="Arguments")
    # enqueue first: exit_process=True ends the local run here, so the
    # expensive model load below happens only on the remote worker
    task.execute_remotely(queue_name="default", exit_process=True)
    configuration, model, data_processor, test_loader = load_model(args.task_id)
if args.model_type == "quantile":
predict_NRV = quantile_auto_regressive_predicted_NRV
task.add_tags(["quantile"])
elif args.model_type == "diffusion":
predict_NRV = diffusion_predicted_NRV
task.add_tags(["diffusion"])
    else:
        raise ValueError(f"Unknown model type: {args.model_type}")
ipc = ImbalancePriceCalculator(data_path="")
predicted_nrv_profits_cycles, baseline_profits_cycles = next_day_test_set(model, data_processor, test_loader, ipc, predict_NRV)
    # build a results table with columns "name", "penalty", "profit", "cycles";
    # collecting the rows first and concatenating once avoids repeated pd.concat calls
    rows = []
    for penalty in predicted_nrv_profits_cycles.keys():
        rows.append(pd.DataFrame({
            "name": [args.model_type, "baseline"],
            "penalty": [penalty, penalty],
            "profit": [predicted_nrv_profits_cycles[penalty][0], baseline_profits_cycles[penalty][0]],
            "cycles": [predicted_nrv_profits_cycles[penalty][1], baseline_profits_cycles[penalty][1]]
        }))
    df = pd.concat(rows, ignore_index=True)
# sort by name, penalty ascending
df = df.sort_values(by=["name", "penalty"])
task.get_logger().report_table(
"Policy Results",
"Policy Results",
iteration=0,
table_plot=df
)
# close task
task.close()
if __name__ == "__main__":
main()


@@ -127,7 +127,7 @@ class BaselinePolicy():
return df, df_test
-    def get_optimal_thresholds(self, imbalance_prices, charge_thresholds, discharge_thresholds):
+    def get_optimal_thresholds(self, imbalance_prices, charge_thresholds, discharge_thresholds, charge_cycles_penalty: float = 0):
threshold_pairs = itertools.product(charge_thresholds, discharge_thresholds)
threshold_pairs = filter(lambda x: x[0] < x[1], threshold_pairs)
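        # only keep pairs where the charge threshold is below the discharge threshold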
@@ -136,12 +136,16 @@ class BaselinePolicy():
charge_thresholds = torch.tensor([x[0] for x in threshold_pairs])
discharge_thresholds = torch.tensor([x[1] for x in threshold_pairs])
# set device to imbalance_prices device
charge_thresholds = charge_thresholds.to(imbalance_prices.device)
discharge_thresholds = discharge_thresholds.to(imbalance_prices.device)
next_day_charge_thresholds, next_day_discharge_thresholds = [], []
# imbalance_prices: (1000, 96) -> (1000, threshold_pairs, 96)
-        imbalance_prices = imbalance_prices.unsqueeze(1).repeat(1, charge_thresholds.shape[0], 1)
+        imbalance_prices = imbalance_prices.unsqueeze(1).expand(-1, len(threshold_pairs), -1)
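        # expand() returns a broadcast view instead of copying like repeat(),
        # so the (batch, num_pairs, 96) tensor is never materialized in memory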
-        profits, charge_cycles = self.simulate(imbalance_prices, charge_thresholds, discharge_thresholds)
+        profits, charge_cycles = self.simulate(imbalance_prices, charge_thresholds, discharge_thresholds, charge_cycles_penalty=charge_cycles_penalty)
# get the index of the best threshold pair for each day (1000, 96) -> (1000)
best_threshold_indices = torch.argmax(profits, dim=1)
@@ -155,7 +159,11 @@ class BaselinePolicy():
return next_day_charge_thresholds, next_day_discharge_thresholds
-    def simulate(self, price_matrix, charge_thresholds: torch.tensor, discharge_thresholds: torch.tensor, charge_cycles_penalty: float = 250):
+    def simulate(self, price_matrix, charge_thresholds: torch.tensor, discharge_thresholds: torch.tensor, charge_cycles_penalty: float = 0):
# make sure all on the same device
charge_thresholds = charge_thresholds.to(price_matrix.device)
discharge_thresholds = discharge_thresholds.to(price_matrix.device)
batch_size, num_thresholds, num_time_steps = price_matrix.shape
# Reshape thresholds for broadcasting
@@ -171,6 +179,10 @@ class BaselinePolicy():
profits = torch.zeros_like(battery_states)
charge_cycles = torch.zeros_like(battery_states)
battery_states = battery_states.to(price_matrix.device)
profits = profits.to(price_matrix.device)
charge_cycles = charge_cycles.to(price_matrix.device)
for i in range(num_time_steps):
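        # a mask entry is False where the action is infeasible: discharging an
        # empty battery or charging one already at full capacity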
discharge_mask = ~((charge_matrix[:, :, i] == -1) & (battery_states == 0))
charge_mask = ~((charge_matrix[:, :, i] == 1) & (battery_states == self.battery.capacity))