from src.utils.clearml import ClearMLHelper

#### ClearML ####

# Create (or attach to) the ClearML task for this experiment run.
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
task = clearml_helper.get_task(task_name="Perfect Baseline")

# Hand execution off to a ClearML agent: with exit_process=True the local
# process stops here and everything below runs on the "default" queue.
task.execute_remotely(queue_name="default", exit_process=True)
# NOTE(review): these imports sit below task.execute_remotely() — possibly
# deliberate so they are only resolved on the remote worker; confirm before
# hoisting them to the top of the file.
from src.policies.simple_baseline import BaselinePolicy, Battery
from src.data import DataProcessor, DataConfig

# TODO(review): inconsistent import root — every other local import uses the
# "src." prefix; confirm whether this should be
# src.policies.baselines.PerfectBaseline.
from policies.baselines.PerfectBaseline import PerfectBaseline
### Data Processor ###

# Feature flags: select which inputs the processor feeds the baseline.
data_config = DataConfig()

# NRV / load features.
data_config.NRV_HISTORY = True
data_config.LOAD_HISTORY = True
data_config.LOAD_FORECAST = True

# Wind features.
data_config.WIND_FORECAST = True
data_config.WIND_HISTORY = True

# Calendar features are disabled for this baseline.
data_config.QUARTER = False
data_config.DAY_OF_WEEK = False

data_config.NOMINAL_NET_POSITION = True

# Non-LSTM processor; empty path presumably resolves to the default data
# location — TODO confirm.
data_processor = DataProcessor(data_config, path="", lstm=False)
data_processor.set_batch_size(64)
data_processor.set_full_day_skip(True)
### Policy Evaluator ###

# Battery(2, 1) — presumably capacity and power limit; confirm units against
# the Battery definition.
battery = Battery(2, 1)
baseline_policy = BaselinePolicy(battery, data_path="")
policy_evaluator = PerfectBaseline(baseline_policy, task)

# Iteratively search for the penalty value whose resulting policy hits the
# target of 283 charge cycles (within a tolerance of 1 cycle).
penalty, profit, charge_cycles = (
    policy_evaluator.optimize_penalty_for_target_charge_cycles(
        data_processor=data_processor,
        initial_penalty=0,
        target_charge_cycles=283,
        learning_rate=2,
        max_iterations=100,
        tolerance=1,
    )
)
# Print the optimization outcome to stdout for the worker log.
print()
print("Test Set Results")
print(f"Penalty: {penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}")

# Persist the same values as ClearML single-value scalars, then close the task.
task.get_logger().report_single_value(name="Optimal Penalty", value=penalty)
task.get_logger().report_single_value(name="Optimal Profit", value=profit)
task.get_logger().report_single_value(name="Optimal Charge Cycles", value=charge_cycles)

task.close()