Trying out more linear baselines

This commit is contained in:
2024-04-17 12:55:46 +02:00
parent 0edcc91e65
commit 6b02c9aab8
5 changed files with 42 additions and 28 deletions

View File

@@ -159,11 +159,23 @@ where:
\begin{itemize}
\item \( Q_{\tau}(y | \mathbf{x}) \) is the \( \tau \)-th quantile of the conditional distribution of the NRV
\item \( \mathbf{X} \) is the input features (e.g. the 96 previous NRV values)
\item \(\beta_{0,\tau}, \beta_{1,\tau}, \beta_{2,\tau}, \ldots, \beta_{n,\tau} \) are the coefficients
\item \(\beta_{0,\tau}, \beta_{1,\tau}, \beta_{2,\tau}, \ldots, \beta_{n,\tau} \) are the coefficients including the bias
\end{itemize}
The linear model outputs the values for the chosen quantiles. The total amount of parameters depends on the input features and the number of chosen quantiles. Assuming the input features are the 96 previous NRV values and 13 quantiles are chosen, the total amount of parameters is 96 * 13 + 13 = 1261.
The linear model outputs the values for the chosen quantiles. The total number of parameters depends on the input features and the number of chosen quantiles. Assuming the input features are the 96 previous NRV values and 13 quantiles are chosen, the total number of parameters is $96 \times 13 + 13 = 1261$.
TODO: add results for this model
More input features can be used as well.
\begin{table}[ht]
\centering
\begin{tabular}{@{}lccccc@{}} % Corrected to six columns
\toprule
& \multicolumn{2}{c}{MSE} & \multicolumn{2}{c}{MAE} & CRPS \\
\cmidrule(lr){2-3} \cmidrule(lr){4-5} \cmidrule(lr){6-6}
& Train & Test & Train & Test & Test \\
\midrule
Model 1 & 0.012 & 0.015 & 0.085 & 0.090 & 0.070 \\
Model 2 & 0.010 & 0.013 & 0.080 & 0.085 & 0.065 \\
Model 3 & 0.008 & 0.011 & 0.075 & 0.080 & 0.060 \\
\bottomrule
\end{tabular}
\caption{Performance Metrics of the Models}
\end{table}

Binary file not shown.

Binary file not shown.

View File

@@ -22,6 +22,8 @@
\usepackage{multirow}
\usepackage{float}
\usepackage{bbm}
\usepackage{booktabs}
\newcolumntype{C}{>{\centering\arraybackslash}X}

View File

@@ -27,8 +27,8 @@ from src.models.time_embedding_layer import TimeEmbedding
data_config = DataConfig()
data_config.NRV_HISTORY = True
data_config.LOAD_HISTORY = False
data_config.LOAD_FORECAST = False
data_config.LOAD_HISTORY = True
data_config.LOAD_FORECAST = True
data_config.WIND_FORECAST = False
data_config.WIND_HISTORY = False
@@ -125,32 +125,32 @@ trainer.plot_every(2)
trainer.train(task=task, epochs=epochs, remotely=True)
### Policy Evaluation ###
idx_samples = trainer.test_set_samples
_, test_loader = trainer.data_processor.get_dataloaders(
predict_sequence_length=trainer.model.output_size, full_day_skip=False
)
# idx_samples = trainer.test_set_samples
# _, test_loader = trainer.data_processor.get_dataloaders(
# predict_sequence_length=trainer.model.output_size, full_day_skip=False
# )
# policy_evaluator.evaluate_test_set(idx_samples, test_loader)
# policy_evaluator.plot_profits_table()
# policy_evaluator.plot_thresholds_per_day()
optimal_penalty, profit, charge_cycles = (
policy_evaluator.optimize_penalty_for_target_charge_cycles(
idx_samples=idx_samples,
test_loader=test_loader,
initial_penalty=1000,
target_charge_cycles=283,
initial_learning_rate=3,
max_iterations=150,
tolerance=1,
)
)
# optimal_penalty, profit, charge_cycles = (
# policy_evaluator.optimize_penalty_for_target_charge_cycles(
# idx_samples=idx_samples,
# test_loader=test_loader,
# initial_penalty=1000,
# target_charge_cycles=283,
# initial_learning_rate=3,
# max_iterations=150,
# tolerance=1,
# )
# )
print(
f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}"
)
task.get_logger().report_single_value(name="Optimal Penalty", value=optimal_penalty)
task.get_logger().report_single_value(name="Optimal Profit", value=profit)
task.get_logger().report_single_value(name="Optimal Charge Cycles", value=charge_cycles)
# print(
# f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}"
# )
# task.get_logger().report_single_value(name="Optimal Penalty", value=optimal_penalty)
# task.get_logger().report_single_value(name="Optimal Profit", value=profit)
# task.get_logger().report_single_value(name="Optimal Charge Cycles", value=charge_cycles)
task.close()