diff --git a/Reports/Thesis/sections/nrv_prediction.tex b/Reports/Thesis/sections/nrv_prediction.tex index a8464d6..113a6f3 100644 --- a/Reports/Thesis/sections/nrv_prediction.tex +++ b/Reports/Thesis/sections/nrv_prediction.tex @@ -159,11 +159,23 @@ where: \begin{itemize} \item \( Q_{\tau}(y | \mathbf{x}) \) is the \( \tau \)-th quantile of the conditional distribution of the NRV \item \( \mathbf{X} \) is the input features (e.g. the 96 previous NRV values) - \item \(\beta_{0,\tau}, \beta_{1,\tau}, \beta_{2,\tau}, \ldots, \beta_{n,\tau} \) are the coefficients + \item \(\beta_{0,\tau}, \beta_{1,\tau}, \beta_{2,\tau}, \ldots, \beta_{n,\tau} \) are the coefficients, including the bias \end{itemize} -The linear model outputs the values for the chosen quantiles. The total amount of parameters depends on the input features and the number of chosen quantiles. Assuming the input features are the 96 previous NRV values and 13 quantiles are chosen, the total amount of parameters is 96 * 13 + 13 = 1261. +The linear model outputs the values for the chosen quantiles. The total number of parameters depends on the input features and the number of chosen quantiles. Assuming the input features are the 96 previous NRV values and 13 quantiles are chosen, the total number of parameters is $96 \times 13 + 13 = 1261$. -TODO: add results for this model - -More input features can be used as well. 
+\begin{table}[ht] +\centering +\begin{tabular}{@{}lccccc@{}} % Corrected to six columns +\toprule + & \multicolumn{2}{c}{MSE} & \multicolumn{2}{c}{MAE} & CRPS \\ + \cmidrule(lr){2-3} \cmidrule(lr){4-5} \cmidrule(lr){6-6} + & Train & Test & Train & Test & Test \\ +\midrule +Model 1 & 0.012 & 0.015 & 0.085 & 0.090 & 0.070 \\ +Model 2 & 0.010 & 0.013 & 0.080 & 0.085 & 0.065 \\ +Model 3 & 0.008 & 0.011 & 0.075 & 0.080 & 0.060 \\ +\bottomrule +\end{tabular} +\caption{Performance Metrics of the Model} +\end{table} \ No newline at end of file diff --git a/Reports/Thesis/verslag.pdf b/Reports/Thesis/verslag.pdf index 968d43c..6fabd14 100644 Binary files a/Reports/Thesis/verslag.pdf and b/Reports/Thesis/verslag.pdf differ diff --git a/Reports/Thesis/verslag.synctex.gz b/Reports/Thesis/verslag.synctex.gz index 401bedf..dbe1b03 100644 Binary files a/Reports/Thesis/verslag.synctex.gz and b/Reports/Thesis/verslag.synctex.gz differ diff --git a/Reports/Thesis/verslag.tex b/Reports/Thesis/verslag.tex index 15b2c63..578b735 100644 --- a/Reports/Thesis/verslag.tex +++ b/Reports/Thesis/verslag.tex @@ -22,6 +22,8 @@ \usepackage{multirow} \usepackage{float} \usepackage{bbm} +\usepackage{booktabs} + \newcolumntype{C}{>{\centering\arraybackslash}X} diff --git a/src/training_scripts/autoregressive_quantiles.py b/src/training_scripts/autoregressive_quantiles.py index 425e267..6ca5b69 100644 --- a/src/training_scripts/autoregressive_quantiles.py +++ b/src/training_scripts/autoregressive_quantiles.py @@ -27,8 +27,8 @@ from src.models.time_embedding_layer import TimeEmbedding data_config = DataConfig() data_config.NRV_HISTORY = True -data_config.LOAD_HISTORY = False -data_config.LOAD_FORECAST = False +data_config.LOAD_HISTORY = True +data_config.LOAD_FORECAST = True data_config.WIND_FORECAST = False data_config.WIND_HISTORY = False @@ -125,32 +125,32 @@ trainer.plot_every(2) trainer.train(task=task, epochs=epochs, remotely=True) ### Policy Evaluation ### -idx_samples = 
trainer.test_set_samples -_, test_loader = trainer.data_processor.get_dataloaders( - predict_sequence_length=trainer.model.output_size, full_day_skip=False -) +# idx_samples = trainer.test_set_samples +# _, test_loader = trainer.data_processor.get_dataloaders( +# predict_sequence_length=trainer.model.output_size, full_day_skip=False +# ) # policy_evaluator.evaluate_test_set(idx_samples, test_loader) # policy_evaluator.plot_profits_table() # policy_evaluator.plot_thresholds_per_day() -optimal_penalty, profit, charge_cycles = ( - policy_evaluator.optimize_penalty_for_target_charge_cycles( - idx_samples=idx_samples, - test_loader=test_loader, - initial_penalty=1000, - target_charge_cycles=283, - initial_learning_rate=3, - max_iterations=150, - tolerance=1, - ) -) +# optimal_penalty, profit, charge_cycles = ( +# policy_evaluator.optimize_penalty_for_target_charge_cycles( +# idx_samples=idx_samples, +# test_loader=test_loader, +# initial_penalty=1000, +# target_charge_cycles=283, +# initial_learning_rate=3, +# max_iterations=150, +# tolerance=1, +# ) +# ) -print( - f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}" -) -task.get_logger().report_single_value(name="Optimal Penalty", value=optimal_penalty) -task.get_logger().report_single_value(name="Optimal Profit", value=profit) -task.get_logger().report_single_value(name="Optimal Charge Cycles", value=charge_cycles) +# print( +# f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}" +# ) +# task.get_logger().report_single_value(name="Optimal Penalty", value=optimal_penalty) +# task.get_logger().report_single_value(name="Optimal Profit", value=profit) +# task.get_logger().report_single_value(name="Optimal Charge Cycles", value=charge_cycles) task.close()