Updated thesis
This commit is contained in:
BIN
Reports/November/verslag 2.synctex.gz
Normal file
BIN
Reports/November/verslag 2.synctex.gz
Normal file
Binary file not shown.
20
Reports/November/verslag.bbl
Normal file
20
Reports/November/verslag.bbl
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
% $ biblatex auxiliary file $
|
||||||
|
% $ biblatex bbl format version 3.2 $
|
||||||
|
% Do not modify the above lines!
|
||||||
|
%
|
||||||
|
% This is an auxiliary file used by the 'biblatex' package.
|
||||||
|
% This file may safely be deleted. It will be recreated by
|
||||||
|
% biber as required.
|
||||||
|
%
|
||||||
|
\begingroup
|
||||||
|
\makeatletter
|
||||||
|
\@ifundefined{ver@biblatex.sty}
|
||||||
|
{\@latex@error
|
||||||
|
{Missing 'biblatex' package}
|
||||||
|
{The bibliography requires the 'biblatex' package.}
|
||||||
|
\aftergroup\endinput}
|
||||||
|
{}
|
||||||
|
\endgroup
|
||||||
|
|
||||||
|
\endinput
|
||||||
|
|
||||||
Binary file not shown.
@@ -41,7 +41,7 @@
|
|||||||
>
|
>
|
||||||
]>
|
]>
|
||||||
<requests version="1.0">
|
<requests version="1.0">
|
||||||
<internal package="biblatex" priority="9" active="1">
|
<internal package="biblatex" priority="9" active="0">
|
||||||
<generic>latex</generic>
|
<generic>latex</generic>
|
||||||
<provides type="dynamic">
|
<provides type="dynamic">
|
||||||
<file>verslag.bcf</file>
|
<file>verslag.bcf</file>
|
||||||
@@ -64,7 +64,7 @@
|
|||||||
<file>english-apa.lbx</file>
|
<file>english-apa.lbx</file>
|
||||||
</requires>
|
</requires>
|
||||||
</internal>
|
</internal>
|
||||||
<external package="biblatex" priority="5" active="1">
|
<external package="biblatex" priority="5" active="0">
|
||||||
<generic>biber</generic>
|
<generic>biber</generic>
|
||||||
<cmdline>
|
<cmdline>
|
||||||
<binary>biber</binary>
|
<binary>biber</binary>
|
||||||
|
|||||||
Binary file not shown.
Binary file not shown.
|
After Width: | Height: | Size: 1.0 MiB |
Binary file not shown.
|
After Width: | Height: | Size: 103 KiB |
781
Reports/Thesis/references.bib
Normal file
781
Reports/Thesis/references.bib
Normal file
@@ -0,0 +1,781 @@
|
|||||||
|
|
||||||
|
@online{noauthor_zotero_nodate,
|
||||||
|
title = {Zotero {\textbar} Connectors},
|
||||||
|
url = {https://www.zotero.org/download/connectors},
|
||||||
|
urldate = {2022-10-12},
|
||||||
|
file = {Zotero | Connectors:/Users/victormylle/Zotero/storage/EPF3ZZRA/connectors.html:text/html},
|
||||||
|
}
|
||||||
|
|
||||||
|
@online{noauthor_elia_nodate,
|
||||||
|
title = {Elia: de electriciteitsmarkt en -systeem},
|
||||||
|
url = {https://www.elia.be/nl/elektriciteitsmarkt-en-systeem},
|
||||||
|
shorttitle = {Elia},
|
||||||
|
abstract = {Elia deelt de Europese ambitie om een geïntegreerde elektriciteitsmarkt tot stand te brengen en verschillende marktspelers aan te moedigen tot het aanbieden van systeemdiensten.},
|
||||||
|
urldate = {2023-06-23},
|
||||||
|
langid = {dutch},
|
||||||
|
file = {Snapshot:/Users/victormylle/Zotero/storage/7QY94WTW/elektriciteitsmarkt-en-systeem.html:text/html},
|
||||||
|
}
|
||||||
|
|
||||||
|
@misc{gao_easy--hard_2023,
|
||||||
|
title = {Easy-to-Hard Learning for Information Extraction},
|
||||||
|
url = {http://arxiv.org/abs/2305.09193},
|
||||||
|
abstract = {Information extraction ({IE}) systems aim to automatically extract structured information, such as named entities, relations between entities, and events, from unstructured texts. While most existing work addresses a particular {IE} task, universally modeling various {IE} tasks with one model has achieved great success recently. Despite their success, they employ a one-stage learning strategy, i.e., directly learning to extract the target structure given the input text, which contradicts the human learning process. In this paper, we propose a unified easy-to-hard learning framework consisting of three stages, i.e., the easy stage, the hard stage, and the main stage, for {IE} by mimicking the human learning process. By breaking down the learning process into multiple stages, our framework facilitates the model to acquire general {IE} task knowledge and improve its generalization ability. Extensive experiments across four {IE} tasks demonstrate the effectiveness of our framework. We achieve new state-of-the-art results on 13 out of 17 datasets. Our code is available at {\textbackslash}url\{https://github.com/{DAMO}-{NLP}-{SG}/{IE}-E2H\}.},
|
||||||
|
number = {{arXiv}:2305.09193},
|
||||||
|
publisher = {{arXiv}},
|
||||||
|
author = {Gao, Chang and Zhang, Wenxuan and Lam, Wai and Bing, Lidong},
|
||||||
|
urldate = {2023-07-10},
|
||||||
|
date = {2023-05-19},
|
||||||
|
eprinttype = {arxiv},
|
||||||
|
eprint = {2305.09193 [cs]},
|
||||||
|
keywords = {Computer Science - Computation and Language},
|
||||||
|
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/5YBG5XYS/2305.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/D8LIDUE8/Gao et al. - 2023 - Easy-to-Hard Learning for Information Extraction.pdf:application/pdf},
|
||||||
|
}
|
||||||
|
|
||||||
|
@article{gaur_semi-supervised_2021,
|
||||||
|
title = {Semi-supervised deep learning based named entity recognition model to parse education section of resumes},
|
||||||
|
volume = {33},
|
||||||
|
issn = {1433-3058},
|
||||||
|
url = {https://doi.org/10.1007/s00521-020-05351-2},
|
||||||
|
doi = {10.1007/s00521-020-05351-2},
|
||||||
|
abstract = {A job seeker’s resume contains several sections, including educational qualifications. Educational qualifications capture the knowledge and skills relevant to the job. Machine processing of the education sections of resumes has been a difficult task. In this paper, we attempt to identify educational institutions’ names and degrees from a resume’s education section. Usually, a significant amount of annotated data is required for neural network-based named entity recognition techniques. A semi-supervised approach is used to overcome the lack of large annotated data. We trained a deep neural network model on an initial (seed) set of resume education sections. This model is used to predict entities of unlabeled education sections and is rectified using a correction module. The education sections containing the rectified entities are augmented to the seed set. The updated seed set is used for retraining, leading to better accuracy than the previously trained model. This way, it can provide a high overall accuracy without the need of large annotated data. Our model has achieved an accuracy of 92.06\% on the named entity recognition task.},
|
||||||
|
pages = {5705--5718},
|
||||||
|
number = {11},
|
||||||
|
journaltitle = {Neural Computing and Applications},
|
||||||
|
shortjournal = {Neural Comput \& Applic},
|
||||||
|
author = {Gaur, Bodhvi and Saluja, Gurpreet Singh and Sivakumar, Hamsa Bharathi and Singh, Sanjay},
|
||||||
|
urldate = {2023-07-10},
|
||||||
|
date = {2021-06-01},
|
||||||
|
langid = {english},
|
||||||
|
keywords = {Deep learning models, Named entity recognition ({NER}), Natural language processing, Resume information extraction, Semi-supervised learning},
|
||||||
|
file = {Full Text PDF:/Users/victormylle/Zotero/storage/4NK6IXHZ/Gaur et al. - 2021 - Semi-supervised deep learning based named entity r.pdf:application/pdf},
|
||||||
|
}
|
||||||
|
|
||||||
|
@article{landolsi_information_2023,
|
||||||
|
title = {Information extraction from electronic medical documents: state of the art and future research directions},
|
||||||
|
volume = {65},
|
||||||
|
issn = {0219-3116},
|
||||||
|
url = {https://doi.org/10.1007/s10115-022-01779-1},
|
||||||
|
doi = {10.1007/s10115-022-01779-1},
|
||||||
|
shorttitle = {Information extraction from electronic medical documents},
|
||||||
|
abstract = {In the medical field, a doctor must have a comprehensive knowledge by reading and writing narrative documents, and he is responsible for every decision he takes for patients. Unfortunately, it is very tiring to read all necessary information about drugs, diseases and patients due to the large amount of documents that are increasing every day. Consequently, so many medical errors can happen and even kill people. Likewise, there is such an important field that can handle this problem, which is the information extraction. There are several important tasks in this field to extract the important and desired information from unstructured text written in natural language. The main principal tasks are named entity recognition and relation extraction since they can structure the text by extracting the relevant information. However, in order to treat the narrative text we should use natural language processing techniques to extract useful information and features. In our paper, we introduce and discuss the several techniques and solutions used in these tasks. Furthermore, we outline the challenges in information extraction from medical documents. In our knowledge, this is the most comprehensive survey in the literature with an experimental analysis and a suggestion for some uncovered directions.},
|
||||||
|
pages = {463--516},
|
||||||
|
number = {2},
|
||||||
|
journaltitle = {Knowledge and Information Systems},
|
||||||
|
shortjournal = {Knowl Inf Syst},
|
||||||
|
author = {Landolsi, Mohamed Yassine and Hlaoua, Lobna and Ben Romdhane, Lotfi},
|
||||||
|
urldate = {2023-07-10},
|
||||||
|
date = {2023-02-01},
|
||||||
|
langid = {english},
|
||||||
|
keywords = {Electronic medical records, Information extraction, Medical named entities recognition, Medical relation extraction, Section detection},
|
||||||
|
file = {Full Text PDF:/Users/victormylle/Zotero/storage/KRTKZW3M/Landolsi et al. - 2023 - Information extraction from electronic medical doc.pdf:application/pdf},
|
||||||
|
}
|
||||||
|
|
||||||
|
@inproceedings{fu_spanner_2021,
|
||||||
|
location = {Online},
|
||||||
|
title = {{SpanNER}: Named Entity Re-/Recognition as Span Prediction},
|
||||||
|
url = {https://aclanthology.org/2021.acl-long.558},
|
||||||
|
doi = {10.18653/v1/2021.acl-long.558},
|
||||||
|
shorttitle = {{SpanNER}},
|
||||||
|
abstract = {Recent years have seen the paradigm shift of Named Entity Recognition ({NER}) systems from sequence labeling to span prediction. Despite its preliminary effectiveness, the span prediction model's architectural bias has not been fully understood. In this paper, we first investigate the strengths and weaknesses when the span prediction model is used for named entity recognition compared with the sequence labeling framework and how to further improve it, which motivates us to make complementary advantages of systems based on different paradigms. We then reveal that span prediction, simultaneously, can serve as a system combiner to re-recognize named entities from different systems' outputs. We experimentally implement 154 systems on 11 datasets, covering three languages, comprehensive results show the effectiveness of span prediction models that both serve as base {NER} systems and system combiners. We make all codes and datasets available: https://github.com/neulab/spanner, as well as an online system demo: http://spanner.sh. Our model also has been deployed into the {ExplainaBoard} platform, which allows users to flexibly perform a system combination of top-scoring systems in an interactive way: http://explainaboard.nlpedia.ai/leaderboard/task-ner/.},
|
||||||
|
eventtitle = {{ACL}-{IJCNLP} 2021},
|
||||||
|
pages = {7183--7195},
|
||||||
|
booktitle = {Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)},
|
||||||
|
publisher = {Association for Computational Linguistics},
|
||||||
|
author = {Fu, Jinlan and Huang, Xuanjing and Liu, Pengfei},
|
||||||
|
urldate = {2023-07-10},
|
||||||
|
date = {2021-08},
|
||||||
|
file = {Full Text PDF:/Users/victormylle/Zotero/storage/6JU4DR5Y/Fu et al. - 2021 - SpanNER Named Entity Re-Recognition as Span Pred.pdf:application/pdf},
|
||||||
|
}
|
||||||
|
|
||||||
|
@inproceedings{li_unified_2020,
|
||||||
|
location = {Online},
|
||||||
|
title = {A Unified {MRC} Framework for Named Entity Recognition},
|
||||||
|
url = {https://aclanthology.org/2020.acl-main.519},
|
||||||
|
doi = {10.18653/v1/2020.acl-main.519},
|
||||||
|
abstract = {The task of named entity recognition ({NER}) is normally divided into nested {NER} and flat {NER} depending on whether named entities are nested or not.Models are usually separately developed for the two tasks, since sequence labeling models, the most widely used backbone for flat {NER}, are only able to assign a single label to a particular token, which is unsuitable for nested {NER} where a token may be assigned several labels. In this paper, we propose a unified framework that is capable of handling both flat and nested {NER} tasks. Instead of treating the task of {NER} as a sequence labeling problem, we propose to formulate it as a machine reading comprehension ({MRC}) task. For example, extracting entities with the per label is formalized as extracting answer spans to the question “which person is mentioned in the text”.This formulation naturally tackles the entity overlapping issue in nested {NER}: the extraction of two overlapping entities with different categories requires answering two independent questions. Additionally, since the query encodes informative prior knowledge, this strategy facilitates the process of entity extraction, leading to better performances for not only nested {NER}, but flat {NER}. We conduct experiments on both nested and flat {NER} datasets.Experiment results demonstrate the effectiveness of the proposed formulation. We are able to achieve a vast amount of performance boost over current {SOTA} models on nested {NER} datasets, i.e., +1.28, +2.55, +5.44, +6.37,respectively on {ACE}04, {ACE}05, {GENIA} and {KBP}17, along with {SOTA} results on flat {NER} datasets, i.e., +0.24, +1.95, +0.21, +1.49 respectively on English {CoNLL} 2003, English {OntoNotes} 5.0, Chinese {MSRA} and Chinese {OntoNotes} 4.0.},
|
||||||
|
eventtitle = {{ACL} 2020},
|
||||||
|
pages = {5849--5859},
|
||||||
|
booktitle = {Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
|
||||||
|
publisher = {Association for Computational Linguistics},
|
||||||
|
author = {Li, Xiaoya and Feng, Jingrong and Meng, Yuxian and Han, Qinghong and Wu, Fei and Li, Jiwei},
|
||||||
|
urldate = {2023-07-10},
|
||||||
|
date = {2020-07},
|
||||||
|
file = {Full Text PDF:/Users/victormylle/Zotero/storage/TIVIKNGN/Li et al. - 2020 - A Unified MRC Framework for Named Entity Recogniti.pdf:application/pdf},
|
||||||
|
}
|
||||||
|
|
||||||
|
@misc{decorte_jobbert_2021,
|
||||||
|
title = {{JobBERT}: Understanding Job Titles through Skills},
|
||||||
|
url = {http://arxiv.org/abs/2109.09605},
|
||||||
|
shorttitle = {{JobBERT}},
|
||||||
|
abstract = {Job titles form a cornerstone of today's human resources ({HR}) processes. Within online recruitment, they allow candidates to understand the contents of a vacancy at a glance, while internal {HR} departments use them to organize and structure many of their processes. As job titles are a compact, convenient, and readily available data source, modeling them with high accuracy can greatly benefit many {HR} tech applications. In this paper, we propose a neural representation model for job titles, by augmenting a pre-trained language model with co-occurrence information from skill labels extracted from vacancies. Our {JobBERT} method leads to considerable improvements compared to using generic sentence encoders, for the task of job title normalization, for which we release a new evaluation benchmark.},
|
||||||
|
number = {{arXiv}:2109.09605},
|
||||||
|
publisher = {{arXiv}},
|
||||||
|
author = {Decorte, Jens-Joris and Van Hautte, Jeroen and Demeester, Thomas and Develder, Chris},
|
||||||
|
urldate = {2023-07-20},
|
||||||
|
date = {2021-09-20},
|
||||||
|
eprinttype = {arxiv},
|
||||||
|
eprint = {2109.09605 [cs]},
|
||||||
|
keywords = {Computer Science - Computation and Language},
|
||||||
|
}
|
||||||
|
|
||||||
|
@misc{sun_retentive_2023,
|
||||||
|
title = {Retentive Network: A Successor to Transformer for Large Language Models},
|
||||||
|
url = {http://arxiv.org/abs/2307.08621},
|
||||||
|
shorttitle = {Retentive Network},
|
||||||
|
abstract = {In this work, we propose Retentive Network ({RetNet}) as a foundation architecture for large language models, simultaneously achieving training parallelism, low-cost inference, and good performance. We theoretically derive the connection between recurrence and attention. Then we propose the retention mechanism for sequence modeling, which supports three computation paradigms, i.e., parallel, recurrent, and chunkwise recurrent. Specifically, the parallel representation allows for training parallelism. The recurrent representation enables low-cost \$O(1)\$ inference, which improves decoding throughput, latency, and {GPU} memory without sacrificing performance. The chunkwise recurrent representation facilitates efficient long-sequence modeling with linear complexity, where each chunk is encoded parallelly while recurrently summarizing the chunks. Experimental results on language modeling show that {RetNet} achieves favorable scaling results, parallel training, low-cost deployment, and efficient inference. The intriguing properties make {RetNet} a strong successor to Transformer for large language models. Code will be available at https://aka.ms/retnet.},
|
||||||
|
number = {{arXiv}:2307.08621},
|
||||||
|
publisher = {{arXiv}},
|
||||||
|
author = {Sun, Yutao and Dong, Li and Huang, Shaohan and Ma, Shuming and Xia, Yuqing and Xue, Jilong and Wang, Jianyong and Wei, Furu},
|
||||||
|
urldate = {2023-07-25},
|
||||||
|
date = {2023-07-19},
|
||||||
|
eprinttype = {arxiv},
|
||||||
|
eprint = {2307.08621 [cs]},
|
||||||
|
keywords = {Computer Science - Computation and Language, Computer Science - Machine Learning},
|
||||||
|
}
|
||||||
|
|
||||||
|
@misc{zhang_generation-driven_2023,
|
||||||
|
title = {Generation-driven Contrastive Self-training for Zero-shot Text Classification with Instruction-tuned {GPT}},
|
||||||
|
url = {http://arxiv.org/abs/2304.11872},
|
||||||
|
abstract = {Moreover, {GPT}-based zero-shot classification models tend to make independent predictions over test instances, which can be sub-optimal as the instance correlations and the decision boundaries in the target space are ignored. To address these difficulties and limitations, we propose a new approach to zero-shot text classification, namely {\textbackslash}ourmodelshort, which leverages the strong generative power of {GPT} to assist in training a smaller, more adaptable, and efficient sentence encoder classifier with contrastive self-training. Specifically, {GenCo} applies {GPT} in two ways: firstly, it generates multiple augmented texts for each input instance to enhance the semantic embedding of the instance and improve the mapping to relevant labels; secondly, it generates augmented texts conditioned on the predicted label during self-training, which makes the generative process tailored to the decision boundaries in the target space. In our experiments, {GenCo} outperforms previous state-of-the-art methods on multiple benchmark datasets, even when only limited in-domain text data is available.},
|
||||||
|
number = {{arXiv}:2304.11872},
|
||||||
|
publisher = {{arXiv}},
|
||||||
|
author = {Zhang, Ruohong and Wang, Yau-Shian and Yang, Yiming},
|
||||||
|
urldate = {2023-08-01},
|
||||||
|
date = {2023-04-24},
|
||||||
|
eprinttype = {arxiv},
|
||||||
|
eprint = {2304.11872 [cs]},
|
||||||
|
keywords = {Computer Science - Computation and Language, Computer Science - Artificial Intelligence, interesting},
|
||||||
|
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/2ULMRMN5/2304.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/D98MRNHP/Zhang et al. - 2023 - Generation-driven Contrastive Self-training for Ze.pdf:application/pdf},
|
||||||
|
}
|
||||||
|
|
||||||
|
@misc{zhang_clusterllm_2023,
|
||||||
|
title = {{ClusterLLM}: Large Language Models as a Guide for Text Clustering},
|
||||||
|
url = {http://arxiv.org/abs/2305.14871},
|
||||||
|
shorttitle = {{ClusterLLM}},
|
||||||
|
abstract = {We introduce {ClusterLLM}, a novel text clustering framework that leverages feedback from an instruction-tuned large language model, such as {ChatGPT}. Compared with traditional unsupervised methods that builds upon "small" embedders, {ClusterLLM} exhibits two intriguing advantages: (1) it enjoys the emergent capability of {LLM} even if its embeddings are inaccessible; and (2) it understands the user's preference on clustering through textual instruction and/or a few annotated data. First, we prompt {ChatGPT} for insights on clustering perspective by constructing hard triplet questions {\textless}does A better correspond to B than C{\textgreater}, where A, B and C are similar data points that belong to different clusters according to small embedder. We empirically show that this strategy is both effective for fine-tuning small embedder and cost-efficient to query {ChatGPT}. Second, we prompt {ChatGPT} for helps on clustering granularity by carefully designed pairwise questions {\textless}do A and B belong to the same category{\textgreater}, and tune the granularity from cluster hierarchies that is the most consistent with the {ChatGPT} answers. Extensive experiments on 14 datasets show that {ClusterLLM} consistently improves clustering quality, at an average cost of {\textasciitilde}\$0.6 per dataset.},
|
||||||
|
number = {{arXiv}:2305.14871},
|
||||||
|
publisher = {{arXiv}},
|
||||||
|
author = {Zhang, Yuwei and Wang, Zihan and Shang, Jingbo},
|
||||||
|
urldate = {2023-08-08},
|
||||||
|
date = {2023-05-24},
|
||||||
|
eprinttype = {arxiv},
|
||||||
|
eprint = {2305.14871 [cs]},
|
||||||
|
keywords = {Computer Science - Computation and Language},
|
||||||
|
}
|
||||||
|
|
||||||
|
@misc{zhang_clusterllm_2023-1,
|
||||||
|
title = {{ClusterLLM}: Large Language Models as a Guide for Text Clustering},
|
||||||
|
url = {http://arxiv.org/abs/2305.14871},
|
||||||
|
shorttitle = {{ClusterLLM}},
|
||||||
|
abstract = {We introduce {ClusterLLM}, a novel text clustering framework that leverages feedback from an instruction-tuned large language model, such as {ChatGPT}. Compared with traditional unsupervised methods that builds upon "small" embedders, {ClusterLLM} exhibits two intriguing advantages: (1) it enjoys the emergent capability of {LLM} even if its embeddings are inaccessible; and (2) it understands the user's preference on clustering through textual instruction and/or a few annotated data. First, we prompt {ChatGPT} for insights on clustering perspective by constructing hard triplet questions {\textless}does A better correspond to B than C{\textgreater}, where A, B and C are similar data points that belong to different clusters according to small embedder. We empirically show that this strategy is both effective for fine-tuning small embedder and cost-efficient to query {ChatGPT}. Second, we prompt {ChatGPT} for helps on clustering granularity by carefully designed pairwise questions {\textless}do A and B belong to the same category{\textgreater}, and tune the granularity from cluster hierarchies that is the most consistent with the {ChatGPT} answers. Extensive experiments on 14 datasets show that {ClusterLLM} consistently improves clustering quality, at an average cost of {\textasciitilde}\$0.6 per dataset.},
|
||||||
|
number = {{arXiv}:2305.14871},
|
||||||
|
publisher = {{arXiv}},
|
||||||
|
author = {Zhang, Yuwei and Wang, Zihan and Shang, Jingbo},
|
||||||
|
urldate = {2023-08-08},
|
||||||
|
date = {2023-05-24},
|
||||||
|
eprinttype = {arxiv},
|
||||||
|
eprint = {2305.14871 [cs]},
|
||||||
|
keywords = {Computer Science - Computation and Language},
|
||||||
|
}
|
||||||
|
|
||||||
|
@misc{zhang_clusterllm_2023-2,
|
||||||
|
title = {{ClusterLLM}: Large Language Models as a Guide for Text Clustering},
|
||||||
|
url = {http://arxiv.org/abs/2305.14871},
|
||||||
|
shorttitle = {{ClusterLLM}},
|
||||||
|
abstract = {We introduce {ClusterLLM}, a novel text clustering framework that leverages feedback from an instruction-tuned large language model, such as {ChatGPT}. Compared with traditional unsupervised methods that builds upon "small" embedders, {ClusterLLM} exhibits two intriguing advantages: (1) it enjoys the emergent capability of {LLM} even if its embeddings are inaccessible; and (2) it understands the user's preference on clustering through textual instruction and/or a few annotated data. First, we prompt {ChatGPT} for insights on clustering perspective by constructing hard triplet questions {\textless}does A better correspond to B than C{\textgreater}, where A, B and C are similar data points that belong to different clusters according to small embedder. We empirically show that this strategy is both effective for fine-tuning small embedder and cost-efficient to query {ChatGPT}. Second, we prompt {ChatGPT} for helps on clustering granularity by carefully designed pairwise questions {\textless}do A and B belong to the same category{\textgreater}, and tune the granularity from cluster hierarchies that is the most consistent with the {ChatGPT} answers. Extensive experiments on 14 datasets show that {ClusterLLM} consistently improves clustering quality, at an average cost of {\textasciitilde}\$0.6 per dataset.},
|
||||||
|
number = {{arXiv}:2305.14871},
|
||||||
|
publisher = {{arXiv}},
|
||||||
|
author = {Zhang, Yuwei and Wang, Zihan and Shang, Jingbo},
|
||||||
|
urldate = {2023-08-08},
|
||||||
|
date = {2023-05-24},
|
||||||
|
eprinttype = {arxiv},
|
||||||
|
eprint = {2305.14871 [cs]},
|
||||||
|
keywords = {Computer Science - Computation and Language},
|
||||||
|
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/68L6AESY/2305.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/VGWL9LRC/Zhang et al. - 2023 - ClusterLLM Large Language Models as a Guide for T.pdf:application/pdf},
|
||||||
|
}
|
||||||
|
|
||||||
|
@article{vijeikis_efficient_2022,
|
||||||
|
title = {Efficient Violence Detection in Surveillance},
|
||||||
|
volume = {22},
|
||||||
|
rights = {http://creativecommons.org/licenses/by/3.0/},
|
||||||
|
issn = {1424-8220},
|
||||||
|
url = {https://www.mdpi.com/1424-8220/22/6/2216},
|
||||||
|
doi = {10.3390/s22062216},
|
||||||
|
abstract = {Intelligent video surveillance systems are rapidly being introduced to public places. The adoption of computer vision and machine learning techniques enables various applications for collected video features; one of the major is safety monitoring. The efficacy of violent event detection is measured by the efficiency and accuracy of violent event detection. In this paper, we present a novel architecture for violence detection from video surveillance cameras. Our proposed model is a spatial feature extracting a U-Net-like network that uses {MobileNet} V2 as an encoder followed by {LSTM} for temporal feature extraction and classification. The proposed model is computationally light and still achieves good results—experiments showed that an average accuracy is 0.82 ± 2\% and average precision is 0.81 ± 3\% using a complex real-world security camera footage dataset based on {RWF}-2000.},
|
||||||
|
pages = {2216},
|
||||||
|
number = {6},
|
||||||
|
journaltitle = {Sensors},
|
||||||
|
author = {Vijeikis, Romas and Raudonis, Vidas and Dervinis, Gintaras},
|
||||||
|
urldate = {2023-08-08},
|
||||||
|
date = {2022-01},
|
||||||
|
langid = {english},
|
||||||
|
note = {Number: 6
|
||||||
|
Publisher: Multidisciplinary Digital Publishing Institute},
|
||||||
|
keywords = {computer vision, deep learning, intelligent video surveillance, {LSTM}, U-Net, violence detection, violent behavior},
|
||||||
|
file = {Full Text PDF:/Users/victormylle/Zotero/storage/PSYA8YSJ/Vijeikis et al. - 2022 - Efficient Violence Detection in Surveillance.pdf:application/pdf},
|
||||||
|
}
|
||||||
|
|
||||||
|
@article{toubeau_interpretable_2022,
|
||||||
|
title = {Interpretable Probabilistic Forecasting of Imbalances in Renewable-Dominated Electricity Systems},
|
||||||
|
volume = {13},
|
||||||
|
issn = {1949-3029, 1949-3037},
|
||||||
|
url = {https://ieeexplore.ieee.org/document/9464660/},
|
||||||
|
doi = {10.1109/TSTE.2021.3092137},
|
||||||
|
abstract = {High penetration of renewable energy such as wind power and photovoltaic ({PV}) requires large amounts of flexibility to balance their inherent variability. Making an accurate prediction of the future power system imbalance is an efficient approach to reduce these balancing costs. However, the imbalance is affected not only by renewables but also by complex market dynamics and technology constraints, for which the dependence structure is unknown. Therefore, this paper introduces a new architecture of sequence-to-sequence recurrent neural networks to efficiently process time-based information in an interpretable fashion. To that end, the selection of relevant variables is internalized into the model, which provides insights on the relative importance of individual inputs, while bypassing the cumbersome need for data preprocessing. Then, the model is further enriched with an attention mechanism that is tailored to focus on the relevant contextual information, which is useful to better understand the underlying dynamics such as seasonal patterns. Outcomes show that adding modules to generate explainable forecasts makes the model more efficient and robust, thus leading to enhanced performance.},
|
||||||
|
pages = {1267--1277},
|
||||||
|
number = {2},
|
||||||
|
journaltitle = {{IEEE} Transactions on Sustainable Energy},
|
||||||
|
shortjournal = {{IEEE} Trans. Sustain. Energy},
|
||||||
|
author = {Toubeau, Jean-Francois and Bottieau, Jeremie and Wang, Yi and Vallee, Francois},
|
||||||
|
urldate = {2023-09-28},
|
||||||
|
date = {2022-04},
|
||||||
|
langid = {english},
|
||||||
|
file = {Toubeau et al. - 2022 - Interpretable Probabilistic Forecasting of Imbalan.pdf:/Users/victormylle/Zotero/storage/WA7DZBXX/Toubeau et al. - 2022 - Interpretable Probabilistic Forecasting of Imbalan.pdf:application/pdf},
|
||||||
|
}
|
||||||
|
|
||||||
|
@online{noauthor_deep_nodate,
title = {Deep Generative Modelling: A Comparative Review of {VAEs}, {GANs}, Normalizing Flows, Energy-Based and Autoregressive Models {\textbar} {IEEE} Journals \& Magazine {\textbar} {IEEE} Xplore},
url = {https://ieeexplore.ieee.org/document/9555209},
urldate = {2023-10-11},
}

@article{bond-taylor_deep_2022,
title = {Deep Generative Modelling: A Comparative Review of {VAEs}, {GANs}, Normalizing Flows, Energy-Based and Autoregressive Models},
volume = {44},
issn = {0162-8828, 2160-9292, 1939-3539},
url = {https://ieeexplore.ieee.org/document/9555209/},
doi = {10.1109/TPAMI.2021.3116668},
shorttitle = {Deep Generative Modelling},
abstract = {Deep generative models are a class of techniques that train deep neural networks to model the distribution of training samples. Research has fragmented into various interconnected approaches, each of which make trade-offs including run-time, diversity, and architectural restrictions. In particular, this compendium covers energy-based models, variational autoencoders, generative adversarial networks, autoregressive models, normalizing flows, in addition to numerous hybrid approaches. These techniques are compared and contrasted, explaining the premises behind each and how they are interrelated, while reviewing current state-of-the-art advances and implementations.},
pages = {7327--7347},
number = {11},
journaltitle = {{IEEE} Transactions on Pattern Analysis and Machine Intelligence},
shortjournal = {{IEEE} Trans. Pattern Anal. Mach. Intell.},
author = {Bond-Taylor, Sam and Leach, Adam and Long, Yang and Willcocks, Chris G.},
urldate = {2023-10-11},
date = {2022-11-01},
langid = {english},
file = {Bond-Taylor et al. - 2022 - Deep Generative Modelling A Comparative Review of.pdf:/Users/victormylle/Zotero/storage/UNAST9UC/Bond-Taylor et al. - 2022 - Deep Generative Modelling A Comparative Review of.pdf:application/pdf},
}

@article{lecun_tutorial_nodate,
title = {A Tutorial on Energy-Based Learning},
abstract = {Energy-Based Models ({EBMs}) capture dependencies between variables by associating a scalar energy to each configuration of the variables. Inference consists in clamping the value of observed variables and finding configurations of the remaining variables that minimize the energy. Learning consists in finding an energy function in which observed configurations of the variables are given lower energies than unobserved ones. The {EBM} approach provides a common theoretical framework for many learning models, including traditional discriminative and generative approaches, as well as graph-transformer networks, conditional random fields, maximum margin Markov networks, and several manifold learning methods.},
author = {{LeCun}, Yann and Chopra, Sumit and Hadsell, Raia and Ranzato, Marc’Aurelio and Huang, Fu Jie},
langid = {english},
file = {LeCun et al. - A Tutorial on Energy-Based Learning.pdf:/Users/victormylle/Zotero/storage/8932975Z/LeCun et al. - A Tutorial on Energy-Based Learning.pdf:application/pdf},
}

@article{gatta_neural_2022,
title = {Neural networks generative models for time series},
volume = {34},
issn = {1319-1578},
url = {https://www.sciencedirect.com/science/article/pii/S1319157822002361},
doi = {10.1016/j.jksuci.2022.07.010},
abstract = {Nowadays, time series are a widely-exploited methodology to describe phenomena belonging to different fields. In fact, electrical consumption can be explained, from a data analysis perspective, with a time series, as for healthcare, financial index, air pollution or parking occupancy rate. Applying time series to different areas of interest has contributed to the exponential rise in interest by both practitioners and academics. On the other side, especially regarding static data, a new trend is acquiring even more relevance in the data analysis community, namely neural network generative approaches. Generative approaches aim to generate new, fake samples given a dataset of real data by implicitly learning the probability distribution underlining data. In this way, several tasks can be addressed, such as data augmentation, class imbalance, anomaly detection or privacy. However, even if this topic is relatively well-established in the literature related to static data regarding time series, the debate is still open. This paper contributes to this debate by comparing four neural network-based generative approaches for time series belonging to the state-of-the-art methodologies in literature. The comparison has been carried out on five public and private datasets and on different time granularities, with a total number of 13 experimental scenario. Our work aims to provide a wide overview of the performances of the compared methodologies when working in different conditions like seasonality, strong autoregressive components and long or short sequences.},
pages = {7920--7939},
number = {10},
journaltitle = {Journal of King Saud University - Computer and Information Sciences},
shortjournal = {Journal of King Saud University - Computer and Information Sciences},
author = {Gatta, Federico and Giampaolo, Fabio and Prezioso, Edoardo and Mei, Gang and Cuomo, Salvatore and Piccialli, Francesco},
urldate = {2023-10-11},
date = {2022-11-01},
keywords = {Deep learning, Generative adversarial networks, Healthcare, Industry 4.0, Time series},
file = {Full Text:/Users/victormylle/Zotero/storage/ZU6BCM28/Gatta et al. - 2022 - Neural networks generative models for time series.pdf:application/pdf;ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/2HSHCJN7/S1319157822002361.html:text/html},
}

@article{dumas_deep_2022,
title = {A deep generative model for probabilistic energy forecasting in power systems: normalizing flows},
volume = {305},
issn = {03062619},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0306261921011909},
doi = {10.1016/j.apenergy.2021.117871},
shorttitle = {A deep generative model for probabilistic energy forecasting in power systems},
abstract = {Greater direct electrification of end-use sectors with a higher share of renewables is one of the pillars to power a carbon-neutral society by 2050. However, in contrast to conventional power plants, renewable energy is subject to uncertainty raising challenges for their interaction with power systems. Scenario-based probabilistic forecasting models have become a vital tool to equip decision-makers. This paper presents to the power systems forecasting practitioners a recent deep learning technique, the normalizing flows, to produce accurate scenario-based probabilistic forecasts that are crucial to face the new challenges in power systems applications. The strength of this technique is to directly learn the stochastic multivariate distribution of the underlying process by maximizing the likelihood. Through comprehensive empirical evaluations using the open data of the Global Energy Forecasting Competition 2014, we demonstrate that this methodology is competitive with other state-of-the-art deep learning generative models: generative adversarial networks and variational autoencoders. The models producing weather-based wind, solar power, and load scenarios are properly compared in terms of forecast value by considering the case study of an energy retailer and quality using several complementary metrics. The numerical experiments are simple and easily reproducible. Thus, we hope it will encourage other forecasting practitioners to test and use normalizing flows in power system applications such as bidding on electricity markets, scheduling power systems with high renewable energy sources penetration, energy management of virtual power plan or microgrids, and unit commitment.},
pages = {117871},
journaltitle = {Applied Energy},
shortjournal = {Applied Energy},
author = {Dumas, Jonathan and Wehenkel, Antoine and Lanaspeze, Damien and Cornélusse, Bertrand and Sutera, Antonio},
urldate = {2023-10-11},
date = {2022-01},
langid = {english},
file = {Dumas et al. - 2022 - A deep generative model for probabilistic energy f.pdf:/Users/victormylle/Zotero/storage/3CW249QI/Dumas et al. - 2022 - A deep generative model for probabilistic energy f.pdf:application/pdf},
}

@article{lu_scenarios_2022,
title = {Scenarios modelling for forecasting day-ahead electricity prices: Case studies in Australia},
volume = {308},
issn = {0306-2619},
url = {https://www.sciencedirect.com/science/article/pii/S0306261921015555},
doi = {10.1016/j.apenergy.2021.118296},
shorttitle = {Scenarios modelling for forecasting day-ahead electricity prices},
abstract = {Electricity prices in spot markets are volatile and can be affected by various factors, such as generation and demand, system contingencies, local weather patterns, bidding strategies of market participants, and uncertain renewable energy outputs. Because of these factors, electricity price forecasting is challenging. This paper proposes a scenario modeling approach to improve forecasting accuracy, conditioning time series generative adversarial networks on external factors. After data pre-processing and condition selection, a conditional {TSGAN} or {CTSGAN} is designed to forecast electricity prices. Wasserstein Distance, weights limitation, and {RMSProp} optimizer are used to ensure that the {CTGAN} training process is stable. By changing the dimensionality of random noise input, the point forecasting model can be transformed into a probabilistic forecasting model. For electricity price point forecasting, the proposed {CTSGAN} model has better accuracy and has better generalization ability than the {TSGAN} and other deep learning methods. For probabilistic forecasting, the proposed {CTSGAN} model can significantly improve the continuously ranked probability score and Winkler score. The effectiveness and superiority of the proposed {CTSGAN} forecasting model are verified by case studies.},
pages = {118296},
journaltitle = {Applied Energy},
shortjournal = {Applied Energy},
author = {Lu, Xin and Qiu, Jing and Lei, Gang and Zhu, Jianguo},
urldate = {2023-10-13},
date = {2022-02-15},
keywords = {Generative adversarial networks, Conditions, Electricity Price, Point forecasting, Probabilistic forecasting},
file = {Lu et al. - 2022 - Scenarios modelling for forecasting day-ahead elec.pdf:/Users/victormylle/Zotero/storage/3XL3T253/Lu et al. - 2022 - Scenarios modelling for forecasting day-ahead elec.pdf:application/pdf;ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/9K2RFGGU/S0306261921015555.html:text/html},
}

@article{gabrielli_data-driven_2022,
title = {Data-driven modeling for long-term electricity price forecasting},
volume = {244},
issn = {03605442},
url = {https://linkinghub.elsevier.com/retrieve/pii/S036054422200010X},
doi = {10.1016/j.energy.2022.123107},
abstract = {Estimating the financial viability of renewable energy investments requires the availability of long-term, finely-resolved electricity prices over the investment lifespan. This entails, however, two major challenges: (i) the combination of extensive time horizons and fine time resolutions, and (ii) the prediction of out-of-sample electricity prices in future energy and market scenarios, or shifts in pricing regime, that were not observed in the past. This paper tackles such challenges by proposing a data-driven model for the long-term prediction of electricity market prices that is based on Fourier analysis. The electricity price is decomposed into components leading to its base evolution, which are described through the amplitudes of the main frequencies of the Fourier series, and components leading to high price volatility, which are described by the residual frequencies. The former are predicted via a regression model that uses as input annual values of relevant energy and market quantities, such as electricity generation, prices and demands. The proposed method shows capable of (i) predicting the most relevant dynamics of the electricity price; (ii) generalization by capturing the market mechanisms of previously unseen electricity markets. These findings support the relevance and validity of data-driven, finely-resolved, long-term predictions and highlight the potential for hybrid data-driven and market-based models.},
pages = {123107},
journaltitle = {Energy},
shortjournal = {Energy},
author = {Gabrielli, Paolo and Wüthrich, Moritz and Blume, Steffen and Sansavini, Giovanni},
urldate = {2023-10-15},
date = {2022-04},
langid = {english},
file = {Gabrielli et al. - 2022 - Data-driven modeling for long-term electricity pri.pdf:/Users/victormylle/Zotero/storage/YHDVP399/Gabrielli et al. - 2022 - Data-driven modeling for long-term electricity pri.pdf:application/pdf},
}

@misc{kollovieh_predict_2023,
title = {Predict, Refine, Synthesize: Self-Guiding Diffusion Models for Probabilistic Time Series Forecasting},
url = {http://arxiv.org/abs/2307.11494},
shorttitle = {Predict, Refine, Synthesize},
abstract = {Diffusion models have achieved state-of-the-art performance in generative modeling tasks across various domains. Prior works on time series diffusion models have primarily focused on developing conditional models tailored to specific forecasting or imputation tasks. In this work, we explore the potential of task-agnostic, unconditional diffusion models for several time series applications. We propose {TSDiff}, an unconditionally trained diffusion model for time series. Our proposed self-guidance mechanism enables conditioning {TSDiff} for downstream tasks during inference, without requiring auxiliary networks or altering the training procedure. We demonstrate the effectiveness of our method on three different time series tasks: forecasting, refinement, and synthetic data generation. First, we show that {TSDiff} is competitive with several task-specific conditional forecasting methods (predict). Second, we leverage the learned implicit probability density of {TSDiff} to iteratively refine the predictions of base forecasters with reduced computational overhead over reverse diffusion (refine). Notably, the generative performance of the model remains intact -- downstream forecasters trained on synthetic samples from {TSDiff} outperform forecasters that are trained on samples from other state-of-the-art generative time series models, occasionally even outperforming models trained on real data (synthesize).},
number = {{arXiv}:2307.11494},
publisher = {{arXiv}},
author = {Kollovieh, Marcel and Ansari, Abdul Fatir and Bohlke-Schneider, Michael and Zschiegner, Jasper and Wang, Hao and Wang, Yuyang},
urldate = {2023-10-15},
date = {2023-07-21},
eprinttype = {arxiv},
eprint = {2307.11494 [cs, stat]},
keywords = {Computer Science - Machine Learning, Computer Science - Artificial Intelligence, Statistics - Machine Learning, {TODO}},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/PBVHEPD9/2307.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/QIBWKG57/Kollovieh et al. - 2023 - Predict, Refine, Synthesize Self-Guiding Diffusio.pdf:application/pdf},
}

@misc{rasul_autoregressive_2021,
title = {Autoregressive Denoising Diffusion Models for Multivariate Probabilistic Time Series Forecasting},
url = {http://arxiv.org/abs/2101.12072},
abstract = {In this work, we propose {\textbackslash}texttt\{{TimeGrad}\}, an autoregressive model for multivariate probabilistic time series forecasting which samples from the data distribution at each time step by estimating its gradient. To this end, we use diffusion probabilistic models, a class of latent variable models closely connected to score matching and energy-based methods. Our model learns gradients by optimizing a variational bound on the data likelihood and at inference time converts white noise into a sample of the distribution of interest through a Markov chain using Langevin sampling. We demonstrate experimentally that the proposed autoregressive denoising diffusion model is the new state-of-the-art multivariate probabilistic forecasting method on real-world data sets with thousands of correlated dimensions. We hope that this method is a useful tool for practitioners and lays the foundation for future research in this area.},
number = {{arXiv}:2101.12072},
publisher = {{arXiv}},
author = {Rasul, Kashif and Seward, Calvin and Schuster, Ingmar and Vollgraf, Roland},
urldate = {2023-10-15},
date = {2021-02-02},
eprinttype = {arxiv},
eprint = {2101.12072 [cs]},
keywords = {Computer Science - Machine Learning, Computer Science - Artificial Intelligence},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/8LIRWZ4G/2101.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/QPPFJVR5/Rasul et al. - 2021 - Autoregressive Denoising Diffusion Models for Mult.pdf:application/pdf},
}

@online{noauthor_spacy_nodate,
title = {{spaCy} · Industrial-strength Natural Language Processing in Python},
url = {https://spacy.io/},
abstract = {{spaCy} is a free open-source library for Natural Language Processing in Python. It features {NER}, {POS} tagging, dependency parsing, word vectors and more.},
urldate = {2023-10-17},
langid = {english},
file = {Snapshot:/Users/victormylle/Zotero/storage/8WWDDEH4/spacy.io.html:text/html},
}

@online{noauthor_intfloatmultilingual-e5-base_nodate,
title = {intfloat/multilingual-e5-base · Hugging Face},
url = {https://huggingface.co/intfloat/multilingual-e5-base},
abstract = {We’re on a journey to advance and democratize artificial intelligence through open source and open science.},
urldate = {2023-10-17},
file = {Snapshot:/Users/victormylle/Zotero/storage/LYSDP8CD/multilingual-e5-base.html:text/html},
}

@online{noauthor_googleflan-t5-base_nodate,
title = {google/flan-t5-base · Hugging Face},
url = {https://huggingface.co/google/flan-t5-base},
urldate = {2023-10-17},
file = {flan-t5-base · Hugging Face:/Users/victormylle/Zotero/storage/284DLNVT/flan-t5-base.html:text/html},
}

@online{noauthor_openai_nodate,
title = {{OpenAI} Platform},
url = {https://platform.openai.com},
abstract = {Explore developer resources, tutorials, {API} docs, and dynamic examples to get the most out of {OpenAI}'s platform.},
urldate = {2023-10-17},
langid = {english},
file = {Snapshot:/Users/victormylle/Zotero/storage/9NFW3FCP/gpt-3-5.html:text/html},
}

@article{cramer_normalizing_2022,
title = {Normalizing flow-based day-ahead wind power scenario generation for profitable and reliable delivery commitments by wind farm operators},
volume = {166},
issn = {0098-1354},
url = {https://www.sciencedirect.com/science/article/pii/S0098135422002617},
doi = {10.1016/j.compchemeng.2022.107923},
abstract = {We present a specialized scenario generation method that utilizes forecast information to generate scenarios for day-ahead scheduling problems. In particular, we use normalizing flows to generate wind power scenarios by sampling from a conditional distribution that uses wind speed forecasts to tailor the scenarios to a specific day. We apply the generated scenarios in a stochastic day-ahead bidding problem of a wind electricity producer and analyze whether the scenarios yield profitable decisions. Compared to Gaussian copulas and Wasserstein-generative adversarial networks, the normalizing flow successfully narrows the range of scenarios around the daily trends while maintaining a diverse variety of possible realizations. In the stochastic day-ahead bidding problem, the conditional scenarios from all methods lead to significantly more stable profitable results compared to an unconditional selection of historical scenarios. The normalizing flow consistently obtains the highest profits, even for small sets scenarios.},
pages = {107923},
journaltitle = {Computers \& Chemical Engineering},
shortjournal = {Computers \& Chemical Engineering},
author = {Cramer, Eike and Paeleke, Leonard and Mitsos, Alexander and Dahmen, Manuel},
urldate = {2023-10-18},
date = {2022-10-01},
keywords = {Scenario generation, Stability, Stochastic programming, Wind power},
file = {ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/PT76E9DL/S0098135422002617.html:text/html;Submitted Version:/Users/victormylle/Zotero/storage/M9KFSG3M/Cramer et al. - 2022 - Normalizing flow-based day-ahead wind power scenar.pdf:application/pdf},
}

@inproceedings{zhang_diffusion_2021,
title = {Diffusion Normalizing Flow},
volume = {34},
url = {https://proceedings.neurips.cc/paper/2021/hash/876f1f9954de0aa402d91bb988d12cd4-Abstract.html},
abstract = {We present a novel generative modeling method called diffusion normalizing flow based on stochastic differential equations ({SDEs}). The algorithm consists of two neural {SDEs}: a forward {SDE} that gradually adds noise to the data to transform the data into Gaussian random noise, and a backward {SDE} that gradually removes the noise to sample from the data distribution. By jointly training the two neural {SDEs} to minimize a common cost function that quantifies the difference between the two, the backward {SDE} converges to a diffusion process the starts with a Gaussian distribution and ends with the desired data distribution. Our method is closely related to normalizing flow and diffusion probabilistic models, and can be viewed as a combination of the two. Compared with normalizing flow, diffusion normalizing flow is able to learn distributions with sharp boundaries. Compared with diffusion probabilistic models, diffusion normalizing flow requires fewer discretization steps and thus has better sampling efficiency. Our algorithm demonstrates competitive performance in both high-dimension data density estimation and image generation tasks.},
pages = {16280--16291},
booktitle = {Advances in Neural Information Processing Systems},
publisher = {Curran Associates, Inc.},
author = {Zhang, Qinsheng and Chen, Yongxin},
urldate = {2023-10-18},
date = {2021},
keywords = {{TODO}},
file = {Full Text PDF:/Users/victormylle/Zotero/storage/U45EUFZU/Zhang and Chen - 2021 - Diffusion Normalizing Flow.pdf:application/pdf},
}

@misc{rezende_variational_2016,
title = {Variational Inference with Normalizing Flows},
url = {http://arxiv.org/abs/1505.05770},
abstract = {The choice of approximate posterior distribution is one of the core problems in variational inference. Most applications of variational inference employ simple families of posterior approximations in order to allow for efficient inference, focusing on mean-field or other simple structured approximations. This restriction has a significant impact on the quality of inferences made using variational methods. We introduce a new approach for specifying flexible, arbitrarily complex and scalable approximate posterior distributions. Our approximations are distributions constructed through a normalizing flow, whereby a simple initial density is transformed into a more complex one by applying a sequence of invertible transformations until a desired level of complexity is attained. We use this view of normalizing flows to develop categories of finite and infinitesimal flows and provide a unified view of approaches for constructing rich posterior approximations. We demonstrate that the theoretical advantages of having posteriors that better match the true posterior, combined with the scalability of amortized variational approaches, provides a clear improvement in performance and applicability of variational inference.},
number = {{arXiv}:1505.05770},
publisher = {{arXiv}},
author = {Rezende, Danilo Jimenez and Mohamed, Shakir},
urldate = {2023-10-18},
date = {2016-06-14},
eprinttype = {arxiv},
eprint = {1505.05770 [cs, stat]},
note = {version: 6},
keywords = {Computer Science - Machine Learning, Computer Science - Artificial Intelligence, Statistics - Machine Learning, Statistics - Computation, Statistics - Methodology},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/2J7MPVV5/1505.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/GQWIFAAN/Rezende and Mohamed - 2016 - Variational Inference with Normalizing Flows.pdf:application/pdf},
}

@misc{gruver_large_2023,
title = {Large Language Models Are Zero-Shot Time Series Forecasters},
url = {http://arxiv.org/abs/2310.07820},
doi = {10.48550/arXiv.2310.07820},
abstract = {By encoding time series as a string of numerical digits, we can frame time series forecasting as next-token prediction in text. Developing this approach, we find that large language models ({LLMs}) such as {GPT}-3 and {LLaMA}-2 can surprisingly zero-shot extrapolate time series at a level comparable to or exceeding the performance of purpose-built time series models trained on the downstream tasks. To facilitate this performance, we propose procedures for effectively tokenizing time series data and converting discrete distributions over tokens into highly flexible densities over continuous values. We argue the success of {LLMs} for time series stems from their ability to naturally represent multimodal distributions, in conjunction with biases for simplicity, and repetition, which align with the salient features in many time series, such as repeated seasonal trends. We also show how {LLMs} can naturally handle missing data without imputation through non-numerical text, accommodate textual side information, and answer questions to help explain predictions. While we find that increasing model size generally improves performance on time series, we show {GPT}-4 can perform worse than {GPT}-3 because of how it tokenizes numbers, and poor uncertainty calibration, which is likely the result of alignment interventions such as {RLHF}.},
number = {{arXiv}:2310.07820},
author = {Gruver, Nate and Finzi, Marc and Qiu, Shikai and Wilson, Andrew Gordon},
urldate = {2023-10-13},
date = {2023-10-11},
eprinttype = {arxiv},
eprint = {2310.07820 [cs]},
keywords = {Computer Science - Machine Learning},
file = {Gruver et al. - 2023 - Large Language Models Are Zero-Shot Time Series Forecasters.pdf:/Users/victormylle/Zotero/storage/T5XZ227W/Gruver et al. - 2023 - Large Language Models Are Zero-Shot Time Series Forecasters.pdf:application/pdf},
}

@article{sweidan_probabilistic_nodate,
title = {Probabilistic Prediction in scikit-learn},
abstract = {Adding confidence measures to predictive models should increase the trustworthiness, but only if the models are well-calibrated. Historically, some algorithms like logistic regression, but also neural networks, have been considered to produce well-calibrated probability estimates off-the-shelf. Other techniques, like decision trees and Naive Bayes, on the other hand, are infamous for being significantly overconfident in their probabilistic predictions. In this paper, a large experimental study is conducted to investigate how well-calibrated models produced by a number of algorithms in the scikit-learn library are out-of-the-box, but also if either the built-in calibration techniques Platt scaling and isotonic regression, or Venn-Abers, can be used to improve the calibration. The results show that of the seven algorithms evaluated, the only one obtaining well-calibrated models without the external calibration is logistic regression. All other algorithms, i.e., decision trees, adaboost, gradient boosting, {kNN}, naive Bayes and random forest benefit from using any of the calibration techniques. In particular, decision trees, Naive Bayes and the boosted models are substantially improved using external calibration. From a practitioner’s perspective, the obvious recommendation becomes to incorporate calibration when using probabilistic prediction. Comparing the different calibration techniques, Platt scaling and {VennAbers} generally outperform isotonic regression, on these rather small datasets. Finally, the unique ability of Venn-Abers to output not only well-calibrated probability estimates, but also the confidence in these estimates is demonstrated.},
author = {Sweidan, Dirar and Johansson, Ulf},
langid = {english},
file = {Sweidan and Johansson - Probabilistic Prediction in scikit-learn.pdf:/Users/victormylle/Zotero/storage/8LDMB83T/Sweidan and Johansson - Probabilistic Prediction in scikit-learn.pdf:application/pdf},
}

@article{baskan_scenario-based_2023,
|
||||||
|
title = {A Scenario-Based Model Comparison for Short-Term Day-Ahead Electricity Prices in Times of Economic and Political Tension},
|
||||||
|
volume = {16},
|
||||||
|
issn = {1999-4893},
|
||||||
|
url = {https://www.mdpi.com/1999-4893/16/4/177},
|
||||||
|
doi = {10.3390/a16040177},
|
||||||
|
abstract = {In recent years, energy prices have become increasingly volatile, making it more challenging to predict them accurately. This uncertain market trend behavior makes it harder for market participants, e.g., power plant dispatchers, to make reliable decisions. Machine learning ({ML}) has recently emerged as a powerful artificial intelligence ({AI}) technique to get reliable predictions in particularly volatile and unforeseeable situations. This development makes {ML} models an attractive complement to other approaches that require more extensive human modeling effort and assumptions about market mechanisms. This study investigates the application of machine and deep learning approaches to predict day-ahead electricity prices for a 7-day horizon on the German spot market to give power plants enough time to ramp up or down. A qualitative and quantitative analysis is conducted, assessing model performance concerning the forecast horizon and their robustness depending on the selected hyperparameters. For evaluation purposes, three test scenarios with different characteristics are manually chosen. Various models are trained, optimized, and compared with each other using common performance metrics. This study shows that deep learning models outperform tree-based and statistical models despite or because of the volatile energy prices.},
|
||||||
|
pages = {177},
|
||||||
|
number = {4},
|
||||||
|
journaltitle = {Algorithms},
|
||||||
|
shortjournal = {Algorithms},
|
||||||
|
author = {Baskan, Denis E. and Meyer, Daniel and Mieck, Sebastian and Faubel, Leonhard and Klöpper, Benjamin and Strem, Nika and Wagner, Johannes A. and Koltermann, Jan J.},
|
||||||
|
urldate = {2023-10-22},
|
||||||
|
date = {2023-03-24},
|
||||||
|
langid = {english},
|
||||||
|
file = {Baskan et al. - 2023 - A Scenario-Based Model Comparison for Short-Term D.pdf:/Users/victormylle/Zotero/storage/TU5JX5D4/Baskan et al. - 2023 - A Scenario-Based Model Comparison for Short-Term D.pdf:application/pdf},
|
||||||
|
}

@online{tsaprounis_metrics_2023,
title = {Metrics for Distributional Forecasts},
url = {https://medium.com/trusted-data-science-haleon/metrics-for-distributional-forecasts-60e156c60177},
abstract = {How to evaluate distributional/probabilistic time series forecasts in Python.},
titleaddon = {Trusted Data Science @ Haleon},
author = {Tsaprounis, Leonidas},
urldate = {2023-10-24},
date = {2023-02-27},
langid = {english},
}

@misc{roy_recent_2021,
title = {Recent Trends in Named Entity Recognition ({NER})},
url = {http://arxiv.org/abs/2101.11420},
doi = {10.48550/arXiv.2101.11420},
abstract = {The availability of large amounts of computer-readable textual data and hardware that can process the data has shifted the focus of knowledge projects towards deep learning architecture. Natural Language Processing, particularly the task of Named Entity Recognition is no exception. The bulk of the learning methods that have produced state-of-the-art results have changed the deep learning model, the training method used, the training data itself or the encoding of the output of the {NER} system. In this paper, we review significant learning methods that have been employed for {NER} in the recent past and how they came about from the linear learning methods of the past. We also cover the progress of related tasks that are upstream or downstream to {NER}, e.g., sequence tagging, entity linking, etc., wherever the processes in question have also improved {NER} results.},
number = {{arXiv}:2101.11420},
publisher = {{arXiv}},
author = {Roy, Arya},
urldate = {2023-10-24},
date = {2021-01-25},
eprinttype = {arxiv},
eprint = {2101.11420 [cs]},
keywords = {Computer Science - Computation and Language},
file = {arXiv Fulltext PDF:/Users/victormylle/Zotero/storage/AAZ3I43G/Roy - 2021 - Recent Trends in Named Entity Recognition (NER).pdf:application/pdf;arXiv.org Snapshot:/Users/victormylle/Zotero/storage/DWNPFLCX/2101.html:text/html},
}

@online{noauthor_sentencetransformers_nodate,
title = {{SentenceTransformers} Documentation — Sentence-Transformers documentation},
url = {https://www.sbert.net/},
urldate = {2023-10-29},
file = {SentenceTransformers Documentation — Sentence-Transformers documentation:/Users/victormylle/Zotero/storage/7ZPK2DIZ/www.sbert.net.html:text/html},
}

@online{noauthor_hugging_2023,
title = {Hugging Face – The {AI} community building the future.},
url = {https://huggingface.co/},
abstract = {We’re on a journey to advance and democratize artificial intelligence through open source and open science.},
urldate = {2023-10-29},
date = {2023-10-22},
file = {Snapshot:/Users/victormylle/Zotero/storage/8U9I2BD9/huggingface.co.html:text/html},
}

@misc{narayan_regularization_2021,
title = {Regularization Strategies for Quantile Regression},
url = {http://arxiv.org/abs/2102.05135},
abstract = {We investigate different methods for regularizing quantile regression when predicting either a subset of quantiles or the full inverse {CDF}. We show that minimizing an expected pinball loss over a continuous distribution of quantiles is a good regularizer even when only predicting a specific quantile. For predicting multiple quantiles, we propose achieving the classic goal of non-crossing quantiles by using deep lattice networks that treat the quantile as a monotonic input feature, and we discuss why monotonicity on other features is an apt regularizer for quantile regression. We show that lattice models enable regularizing the predicted distribution to a location-scale family. Lastly, we propose applying rate constraints to improve the calibration of the quantile predictions on specific subsets of interest and improve fairness metrics. We demonstrate our contributions on simulations, benchmark datasets, and real quantile regression problems.},
number = {{arXiv}:2102.05135},
publisher = {{arXiv}},
author = {Narayan, Taman and Wang, Serena and Canini, Kevin and Gupta, Maya},
urldate = {2023-11-14},
date = {2021-02-09},
eprinttype = {arxiv},
eprint = {2102.05135 [cs, stat]},
note = {version: 1},
keywords = {Computer Science - Machine Learning, Statistics - Machine Learning, Statistics - Methodology},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/DQZGHBIS/2102.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/W6WTUZQ3/Narayan et al. - 2021 - Regularization Strategies for Quantile Regression.pdf:application/pdf},
}

@misc{chung_beyond_2021,
title = {Beyond Pinball Loss: Quantile Methods for Calibrated Uncertainty Quantification},
url = {http://arxiv.org/abs/2011.09588},
shorttitle = {Beyond Pinball Loss},
abstract = {Among the many ways of quantifying uncertainty in a regression setting, specifying the full quantile function is attractive, as quantiles are amenable to interpretation and evaluation. A model that predicts the true conditional quantiles for each input, at all quantile levels, presents a correct and efficient representation of the underlying uncertainty. To achieve this, many current quantile-based methods focus on optimizing the so-called pinball loss. However, this loss restricts the scope of applicable regression models, limits the ability to target many desirable properties (e.g. calibration, sharpness, centered intervals), and may produce poor conditional quantiles. In this work, we develop new quantile methods that address these shortcomings. In particular, we propose methods that can apply to any class of regression model, allow for selecting a trade-off between calibration and sharpness, optimize for calibration of centered intervals, and produce more accurate conditional quantiles. We provide a thorough experimental evaluation of our methods, which includes a high dimensional uncertainty quantification task in nuclear fusion.},
number = {{arXiv}:2011.09588},
publisher = {{arXiv}},
author = {Chung, Youngseog and Neiswanger, Willie and Char, Ian and Schneider, Jeff},
urldate = {2023-12-14},
date = {2021-12-09},
eprinttype = {arxiv},
eprint = {2011.09588 [cs, stat]},
keywords = {Computer Science - Machine Learning, Statistics - Machine Learning},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/WWFHI3UN/2011.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/SHMRZ3Q7/Chung et al. - 2021 - Beyond Pinball Loss Quantile Methods for Calibrat.pdf:application/pdf},
}

@misc{van_hautte_bad_2019,
title = {Bad Form: Comparing Context-Based and Form-Based Few-Shot Learning in Distributional Semantic Models},
url = {http://arxiv.org/abs/1910.00275},
shorttitle = {Bad Form},
abstract = {Word embeddings are an essential component in a wide range of natural language processing applications. However, distributional semantic models are known to struggle when only a small number of context sentences are available. Several methods have been proposed to obtain higher-quality vectors for these words, leveraging both this context information and sometimes the word forms themselves through a hybrid approach. We show that the current tasks do not suffice to evaluate models that use word-form information, as such models can easily leverage word forms in the training data that are related to word forms in the test data. We introduce 3 new tasks, allowing for a more balanced comparison between models. Furthermore, we show that hyperparameters that have largely been ignored in previous work can consistently improve the performance of both baseline and advanced models, achieving a new state of the art on 4 out of 6 tasks.},
number = {{arXiv}:1910.00275},
publisher = {{arXiv}},
author = {Van Hautte, Jeroen and Emerson, Guy and Rei, Marek},
urldate = {2024-03-09},
date = {2019-10-01},
eprinttype = {arxiv},
eprint = {1910.00275 [cs]},
keywords = {Computer Science - Computation and Language, Computer Science - Machine Learning},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/PUBS4DRK/1910.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/VY5YGVXU/Van Hautte et al. - 2019 - Bad Form Comparing Context-Based and Form-Based F.pdf:application/pdf},
}

@misc{decorte_jobbert_2021-1,
title = {{JobBERT}: Understanding Job Titles through Skills},
url = {http://arxiv.org/abs/2109.09605},
shorttitle = {{JobBERT}},
abstract = {Job titles form a cornerstone of today's human resources ({HR}) processes. Within online recruitment, they allow candidates to understand the contents of a vacancy at a glance, while internal {HR} departments use them to organize and structure many of their processes. As job titles are a compact, convenient, and readily available data source, modeling them with high accuracy can greatly benefit many {HR} tech applications. In this paper, we propose a neural representation model for job titles, by augmenting a pre-trained language model with co-occurrence information from skill labels extracted from vacancies. Our {JobBERT} method leads to considerable improvements compared to using generic sentence encoders, for the task of job title normalization, for which we release a new evaluation benchmark.},
number = {{arXiv}:2109.09605},
publisher = {{arXiv}},
author = {Decorte, Jens-Joris and Van Hautte, Jeroen and Demeester, Thomas and Develder, Chris},
urldate = {2024-03-09},
date = {2021-09-20},
eprinttype = {arxiv},
eprint = {2109.09605 [cs]},
keywords = {Computer Science - Computation and Language},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/6FMYQ68Y/2109.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/2SD3P252/Decorte et al. - 2021 - JobBERT Understanding Job Titles through Skills.pdf:application/pdf},
}

@misc{van_hautte_leveraging_2020,
title = {Leveraging the Inherent Hierarchy of Vacancy Titles for Automated Job Ontology Expansion},
url = {http://arxiv.org/abs/2004.02814},
abstract = {Machine learning plays an ever-bigger part in online recruitment, powering intelligent matchmaking and job recommendations across many of the world's largest job platforms. However, the main text is rarely enough to fully understand a job posting: more often than not, much of the required information is condensed into the job title. Several organised efforts have been made to map job titles onto a hand-made knowledge base as to provide this information, but these only cover around 60{\textbackslash}\% of online vacancies. We introduce a novel, purely data-driven approach towards the detection of new job titles. Our method is conceptually simple, extremely efficient and competitive with traditional {NER}-based approaches. Although the standalone application of our method does not outperform a finetuned {BERT} model, it can be applied as a preprocessing step as well, substantially boosting accuracy across several architectures.},
number = {{arXiv}:2004.02814},
publisher = {{arXiv}},
author = {Van Hautte, Jeroen and Schelstraete, Vincent and Wornoo, Mikaël},
urldate = {2024-03-09},
date = {2020-04-06},
eprinttype = {arxiv},
eprint = {2004.02814 [cs]},
keywords = {Computer Science - Computation and Language, Computer Science - Machine Learning},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/6FAKZYDM/2004.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/EY3RNC6S/Van Hautte et al. - 2020 - Leveraging the Inherent Hierarchy of Vacancy Title.pdf:application/pdf},
}

@misc{decorte_design_2022,
title = {Design of Negative Sampling Strategies for Distantly Supervised Skill Extraction},
url = {http://arxiv.org/abs/2209.05987},
abstract = {Skills play a central role in the job market and many human resources ({HR}) processes. In the wake of other digital experiences, today's online job market has candidates expecting to see the right opportunities based on their skill set. Similarly, enterprises increasingly need to use data to guarantee that the skills within their workforce remain future-proof. However, structured information about skills is often missing, and processes building on self- or manager-assessment have shown to struggle with issues around adoption, completeness, and freshness of the resulting data. Extracting skills is a highly challenging task, given the many thousands of possible skill labels mentioned either explicitly or merely described implicitly and the lack of finely annotated training corpora. Previous work on skill extraction overly simplifies the task to an explicit entity detection task or builds on manually annotated training data that would be infeasible if applied to a complete vocabulary of skills. We propose an end-to-end system for skill extraction, based on distant supervision through literal matching. We propose and evaluate several negative sampling strategies, tuned on a small validation dataset, to improve the generalization of skill extraction towards implicitly mentioned skills, despite the lack of such implicit skills in the distantly supervised data. We observe that using the {ESCO} taxonomy to select negative examples from related skills yields the biggest improvements, and combining three different strategies in one model further increases the performance, up to 8 percentage points in {RP}@5. We introduce a manually annotated evaluation benchmark for skill extraction based on the {ESCO} taxonomy, on which we validate our models. We release the benchmark dataset for research purposes to stimulate further research on the task.},
number = {{arXiv}:2209.05987},
publisher = {{arXiv}},
author = {Decorte, Jens-Joris and Van Hautte, Jeroen and Deleu, Johannes and Develder, Chris and Demeester, Thomas},
urldate = {2024-03-09},
date = {2022-09-13},
eprinttype = {arxiv},
eprint = {2209.05987 [cs]},
keywords = {Computer Science - Computation and Language},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/E79F2EV8/2209.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/SBEAYV66/Decorte et al. - 2022 - Design of Negative Sampling Strategies for Distant.pdf:application/pdf},
}

@misc{decorte_extreme_2023,
title = {Extreme Multi-Label Skill Extraction Training using Large Language Models},
url = {http://arxiv.org/abs/2307.10778},
abstract = {Online job ads serve as a valuable source of information for skill requirements, playing a crucial role in labor market analysis and e-recruitment processes. Since such ads are typically formatted in free text, natural language processing ({NLP}) technologies are required to automatically process them. We specifically focus on the task of detecting skills (mentioned literally, or implicitly described) and linking them to a large skill ontology, making it a challenging case of extreme multi-label classification ({XMLC}). Given that there is no sizable labeled (training) dataset are available for this specific {XMLC} task, we propose techniques to leverage general Large Language Models ({LLMs}). We describe a cost-effective approach to generate an accurate, fully synthetic labeled dataset for skill extraction, and present a contrastive learning strategy that proves effective in the task. Our results across three skill extraction benchmarks show a consistent increase of between 15 to 25 percentage points in {\textbackslash}textit\{R-Precision@5\} compared to previously published results that relied solely on distant supervision through literal matches.},
number = {{arXiv}:2307.10778},
publisher = {{arXiv}},
author = {Decorte, Jens-Joris and Verlinden, Severine and Van Hautte, Jeroen and Deleu, Johannes and Develder, Chris and Demeester, Thomas},
urldate = {2024-03-09},
date = {2023-07-20},
eprinttype = {arxiv},
eprint = {2307.10778 [cs]},
keywords = {Computer Science - Computation and Language},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/VVZZQW45/2307.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/8U7P43IE/Decorte et al. - 2023 - Extreme Multi-Label Skill Extraction Training usin.pdf:application/pdf},
}

@misc{decorte_career_2023,
title = {Career Path Prediction using Resume Representation Learning and Skill-based Matching},
url = {http://arxiv.org/abs/2310.15636},
abstract = {The impact of person-job fit on job satisfaction and performance is widely acknowledged, which highlights the importance of providing workers with next steps at the right time in their career. This task of predicting the next step in a career is known as career path prediction, and has diverse applications such as turnover prevention and internal job mobility. Existing methods to career path prediction rely on large amounts of private career history data to model the interactions between job titles and companies. We propose leveraging the unexplored textual descriptions that are part of work experience sections in resumes. We introduce a structured dataset of 2,164 anonymized career histories, annotated with {ESCO} occupation labels. Based on this dataset, we present a novel representation learning approach, {CareerBERT}, specifically designed for work history data. We develop a skill-based model and a text-based model for career path prediction, which achieve 35.24\% and 39.61\% recall@10 respectively on our dataset. Finally, we show that both approaches are complementary as a hybrid approach achieves the strongest result with 43.01\% recall@10.},
number = {{arXiv}:2310.15636},
publisher = {{arXiv}},
author = {Decorte, Jens-Joris and Van Hautte, Jeroen and Deleu, Johannes and Develder, Chris and Demeester, Thomas},
urldate = {2024-03-09},
date = {2023-10-24},
eprinttype = {arxiv},
eprint = {2310.15636 [cs]},
keywords = {Computer Science - Computation and Language, Computer Science - Artificial Intelligence},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/I6AMKGVA/2310.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/AKTKCWBR/Decorte et al. - 2023 - Career Path Prediction using Resume Representation.pdf:application/pdf},
}

@online{noauthor_liberalised_nodate,
title = {The liberalised electricity market includes many parties who all have to work together and at the same time try to make a profit. An overview of the most...},
url = {https://www.next-kraftwerke.be/en/knowledge-hub/players-in-the-belgian-power-market/},
abstract = {The liberalised electricity market includes many parties who all have to work together and at the same time try to make a profit. An overview of the most...},
urldate = {2024-03-20},
file = {Snapshot:/Users/victormylle/Zotero/storage/M9XWVY6F/players-in-the-belgian-power-market.html:text/html},
}

@misc{ho_denoising_2020,
title = {Denoising Diffusion Probabilistic Models},
url = {http://arxiv.org/abs/2006.11239},
doi = {10.48550/arXiv.2006.11239},
abstract = {We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional {CIFAR}10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art {FID} score of 3.17. On 256x256 {LSUN}, we obtain sample quality similar to {ProgressiveGAN}. Our implementation is available at https://github.com/hojonathanho/diffusion},
number = {{arXiv}:2006.11239},
publisher = {{arXiv}},
author = {Ho, Jonathan and Jain, Ajay and Abbeel, Pieter},
urldate = {2024-04-02},
date = {2020-12-16},
eprinttype = {arxiv},
eprint = {2006.11239 [cs, stat]},
keywords = {Computer Science - Machine Learning, Statistics - Machine Learning},
file = {arXiv Fulltext PDF:/Users/victormylle/Zotero/storage/CYMHCMUT/Ho et al. - 2020 - Denoising Diffusion Probabilistic Models.pdf:application/pdf;arXiv.org Snapshot:/Users/victormylle/Zotero/storage/CE8R84V5/2006.html:text/html},
}

@inproceedings{dumas_probabilistic_2019,
title = {Probabilistic Forecasting of Imbalance Prices in the Belgian Context},
url = {http://arxiv.org/abs/2106.07361},
doi = {10.1109/EEM.2019.8916375},
abstract = {Forecasting imbalance prices is essential for strategic participation in the short-term energy markets. A novel two-step probabilistic approach is proposed, with a particular focus on the Belgian case. The first step consists of computing the net regulation volume state transition probabilities. It is modeled as a matrix computed using historical data. This matrix is then used to infer the imbalance prices since the net regulation volume can be related to the level of reserves activated and the corresponding marginal prices for each activation level are published by the Belgian Transmission System Operator one day before electricity delivery. This approach is compared to a deterministic model, a multi-layer perceptron, and a widely used probabilistic technique, Gaussian Processes.},
pages = {1--7},
booktitle = {2019 16th International Conference on the European Energy Market ({EEM})},
author = {Dumas, Jonathan and Boukas, Ioannis and de Villena, Miguel Manuel and Mathieu, Sébastien and Cornélusse, Bertrand},
urldate = {2024-04-17},
date = {2019-09},
eprinttype = {arxiv},
eprint = {2106.07361 [cs, eess, q-fin]},
keywords = {Computer Science - Machine Learning, Electrical Engineering and Systems Science - Signal Processing, Quantitative Finance - Statistical Finance},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/3N56FPYP/2106.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/958MBH5M/Dumas et al. - 2019 - Probabilistic Forecasting of Imbalance Prices in t.pdf:application/pdf},
}

@online{noauthor_ghent_nodate,
title = {Ghent University: Master of Science in Computer Science Engineering},
url = {https://studiekiezer.ugent.be/2024/master-of-science-in-computer-science-engineering-en},
urldate = {2024-04-17},
file = {master-of-science-in-computer-science-engineering-en:/Users/victormylle/Zotero/storage/JCELQ9VV/master-of-science-in-computer-science-engineering-en.html:text/html},
}

@article{gunduz_transfer_2023,
title = {Transfer learning for electricity price forecasting},
volume = {34},
issn = {2352-4677},
url = {https://www.sciencedirect.com/science/article/pii/S2352467723000048},
doi = {10.1016/j.segan.2023.100996},
abstract = {Electricity price forecasting is an essential task in all the deregulated markets of the world. The accurate prediction of day-ahead electricity prices is an active research field and available data from various markets can be used as input for forecasting. A collection of models have been proposed for this task, but the fundamental question on how to use the available big data is often neglected. In this paper, we propose to use transfer learning as a tool for utilizing information from other electricity price markets for forecasting. We pre-train a neural network model on source markets and finally do a fine-tuning for the target market. Moreover, we test different ways to use the rich input data from various electricity price markets to forecast 24 steps ahead in hourly frequency. Our experiments on four different day-ahead markets indicate that transfer learning improves the electricity price forecasting performance in a statistically significant manner. Furthermore, we compare our results with state-of-the-art methods in a rolling window scheme to demonstrate the performance of the transfer learning approach. Our method improves the performance of the state-of-the-art algorithms by 7\% for the French market and 3\% for the German market.},
pages = {100996},
journaltitle = {Sustainable Energy, Grids and Networks},
shortjournal = {Sustainable Energy, Grids and Networks},
author = {Gunduz, Salih and Ugurlu, Umut and Oksuz, Ilkay},
urldate = {2024-04-17},
date = {2023-06-01},
keywords = {Artificial neural networks, Electricity price forecasting, Market integration, Transfer learning},
file = {ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/BWI5FHS4/S2352467723000048.html:text/html;Submitted Version:/Users/victormylle/Zotero/storage/62FHBWJ8/Gunduz et al. - 2023 - Transfer learning for electricity price forecastin.pdf:application/pdf},
}

@article{lago_forecasting_2018,
title = {Forecasting spot electricity prices: Deep learning approaches and empirical comparison of traditional algorithms},
volume = {221},
issn = {0306-2619},
url = {https://www.sciencedirect.com/science/article/pii/S030626191830196X},
doi = {10.1016/j.apenergy.2018.02.069},
shorttitle = {Forecasting spot electricity prices},
abstract = {In this paper, a novel modeling framework for forecasting electricity prices is proposed. While many predictive models have been already proposed to perform this task, the area of deep learning algorithms remains yet unexplored. To fill this scientific gap, we propose four different deep learning models for predicting electricity prices and we show how they lead to improvements in predictive accuracy. In addition, we also consider that, despite the large number of proposed methods for predicting electricity prices, an extensive benchmark is still missing. To tackle that, we compare and analyze the accuracy of 27 common approaches for electricity price forecasting. Based on the benchmark results, we show how the proposed deep learning models outperform the state-of-the-art methods and obtain results that are statistically significant. Finally, using the same results, we also show that: (i) machine learning methods yield, in general, a better accuracy than statistical models; (ii) moving average terms do not improve the predictive accuracy; (iii) hybrid models do not outperform their simpler counterparts.},
pages = {386--405},
journaltitle = {Applied Energy},
shortjournal = {Applied Energy},
author = {Lago, Jesus and De Ridder, Fjo and De Schutter, Bart},
urldate = {2024-04-17},
date = {2018-07-01},
keywords = {Deep learning, Electricity price forecasting, Benchmark study},
file = {Full Text:/Users/victormylle/Zotero/storage/SZAAF5RK/Lago et al. - 2018 - Forecasting spot electricity prices Deep learning.pdf:application/pdf;ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/5JH9JLSM/S030626191830196X.html:text/html},
}

@article{weron_electricity_2014,
title = {Electricity price forecasting: A review of the state-of-the-art with a look into the future},
volume = {30},
issn = {0169-2070},
url = {https://www.sciencedirect.com/science/article/pii/S0169207014001083},
doi = {10.1016/j.ijforecast.2014.08.008},
shorttitle = {Electricity price forecasting},
abstract = {A variety of methods and ideas have been tried for electricity price forecasting ({EPF}) over the last 15 years, with varying degrees of success. This review article aims to explain the complexity of available solutions, their strengths and weaknesses, and the opportunities and threats that the forecasting tools offer or that may be encountered. The paper also looks ahead and speculates on the directions {EPF} will or should take in the next decade or so. In particular, it postulates the need for objective comparative {EPF} studies involving (i) the same datasets, (ii) the same robust error evaluation procedures, and (iii) statistical testing of the significance of one model’s outperformance of another.},
pages = {1030--1081},
number = {4},
journaltitle = {International Journal of Forecasting},
shortjournal = {International Journal of Forecasting},
author = {Weron, Rafał},
urldate = {2024-05-02},
date = {2014-10-01},
keywords = {Autoregression, Day-ahead market, Electricity price forecasting, Factor model, Forecast combination, Neural network, Probabilistic forecast, Seasonality},
file = {ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/DDGF263F/S0169207014001083.html:text/html},
}
|
||||||
|
|
||||||
|
@article{poggi_electricity_2023,
|
||||||
|
title = {Electricity Price Forecasting via Statistical and Deep Learning Approaches: The German Case},
|
||||||
|
volume = {3},
|
||||||
|
rights = {http://creativecommons.org/licenses/by/3.0/},
|
||||||
|
issn = {2673-9909},
|
||||||
|
url = {https://www.mdpi.com/2673-9909/3/2/18},
|
||||||
|
doi = {10.3390/appliedmath3020018},
|
||||||
|
shorttitle = {Electricity Price Forecasting via Statistical and Deep Learning Approaches},
|
||||||
|
abstract = {Our research involves analyzing the latest models used for electricity price forecasting, which include both traditional inferential statistical methods and newer deep learning techniques. Through our analysis of historical data and the use of multiple weekday dummies, we have proposed an innovative solution for forecasting electricity spot prices. This solution involves breaking down the spot price series into two components: a seasonal trend component and a stochastic component. By utilizing this approach, we are able to provide highly accurate predictions for all considered time frames.},
|
||||||
|
pages = {316--342},
|
||||||
|
number = {2},
|
||||||
|
journaltitle = {{AppliedMath}},
|
||||||
|
author = {Poggi, Aurora and Di Persio, Luca and Ehrhardt, Matthias},
|
||||||
|
urldate = {2024-05-02},
|
||||||
|
date = {2023-06},
|
||||||
|
langid = {english},
|
||||||
|
note = {Number: 2
|
||||||
|
Publisher: Multidisciplinary Digital Publishing Institute},
|
||||||
|
keywords = {autoregressive, deep learning, electricity price forecasting, machine learning, neural network, statistical method, univariate model},
|
||||||
|
file = {Full Text PDF:/Users/victormylle/Zotero/storage/3IR29RU3/Poggi et al. - 2023 - Electricity Price Forecasting via Statistical and .pdf:application/pdf},
|
||||||
|
}
|
||||||
@@ -1,22 +1,37 @@
|
|||||||
\relax
|
\relax
|
||||||
\providecommand\hyper@newdestlabel[2]{}
|
\providecommand\hyper@newdestlabel[2]{}
|
||||||
\@writefile{toc}{\contentsline {section}{\numberline {2}Background}{3}{section.2}\protected@file@percent }
|
\@writefile{toc}{\contentsline {section}{\numberline {2}Electricity market}{3}{section.2}\protected@file@percent }
|
||||||
\@writefile{toc}{\contentsline {subsection}{\numberline {2.1}Electricity market}{3}{subsection.2.1}\protected@file@percent }
|
|
||||||
\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces Overview of the most important parties in the electricity market\relax }}{3}{table.caption.1}\protected@file@percent }
|
\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces Overview of the most important parties in the electricity market\relax }}{3}{table.caption.1}\protected@file@percent }
|
||||||
\providecommand*\caption@xref[2]{\@setref\relax\@undefined{#1}}
|
\providecommand*\caption@xref[2]{\@setref\relax\@undefined{#1}}
|
||||||
\newlabel{tab:parties}{{1}{3}{Overview of the most important parties in the electricity market\relax }{table.caption.1}{}}
|
\newlabel{tab:parties}{{1}{3}{Overview of the most important parties in the electricity market\relax }{table.caption.1}{}}
|
||||||
\@writefile{lot}{\contentsline {table}{\numberline {2}{\ignorespaces Prices paid by the BRPs\relax }}{5}{table.caption.2}\protected@file@percent }
|
\@writefile{lot}{\contentsline {table}{\numberline {2}{\ignorespaces Prices paid by the BRPs\relax }}{5}{table.caption.2}\protected@file@percent }
|
||||||
\newlabel{tab:imbalance_price}{{2}{5}{Prices paid by the BRPs\relax }{table.caption.2}{}}
|
\newlabel{tab:imbalance_price}{{2}{5}{Prices paid by the BRPs\relax }{table.caption.2}{}}
|
||||||
\@writefile{toc}{\contentsline {subsection}{\numberline {2.2}Generative modeling}{5}{subsection.2.2}\protected@file@percent }
|
\@writefile{toc}{\contentsline {section}{\numberline {3}Generative modeling}{5}{section.3}\protected@file@percent }
|
||||||
\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}Diffusion models}{6}{subsection.2.3}\protected@file@percent }
|
\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Quantile Regression}{6}{subsection.3.1}\protected@file@percent }
|
||||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {2.3.1}Overview}{6}{subsubsection.2.3.1}\protected@file@percent }
|
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Example of quantiles\relax }}{6}{figure.caption.3}\protected@file@percent }
|
||||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {2.3.2}Applications}{7}{subsubsection.2.3.2}\protected@file@percent }
|
\newlabel{fig:quantile_example}{{1}{6}{Example of quantiles\relax }{figure.caption.3}{}}
|
||||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {2.3.3}Generation process}{7}{subsubsection.2.3.3}\protected@file@percent }
|
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Example of quantile regression output for one-quarter of the NRV, showing interpolated values for quantiles at 1\%, 5\%, 10\%, 15\%, 30\%, 40\%, 50\%, 60\%, 70\%, 85\%, 90\%, 95\%, and 99\%. These quantiles are used to reconstruct the cumulative distribution function.\relax }}{7}{figure.caption.4}\protected@file@percent }
|
||||||
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Diffusion process}}{8}{figure.caption.3}\protected@file@percent }
|
\newlabel{fig:quantile_regression_example}{{2}{7}{Example of quantile regression output for one-quarter of the NRV, showing interpolated values for quantiles at 1\%, 5\%, 10\%, 15\%, 30\%, 40\%, 50\%, 60\%, 70\%, 85\%, 90\%, 95\%, and 99\%. These quantiles are used to reconstruct the cumulative distribution function.\relax }{figure.caption.4}{}}
|
||||||
\newlabel{fig:diffusion_process}{{1}{8}{Diffusion process}{figure.caption.3}{}}
|
\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Autoregressive vs Non-Autoregressive models}{8}{subsection.3.2}\protected@file@percent }
|
||||||
|
\@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Model Types}{9}{subsection.3.3}\protected@file@percent }
|
||||||
|
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.3.1}Linear Model}{9}{subsubsection.3.3.1}\protected@file@percent }
|
||||||
|
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.3.2}Non-Linear Model}{10}{subsubsection.3.3.2}\protected@file@percent }
|
||||||
|
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.3.3}Recurrent Neural Network (RNN)}{10}{subsubsection.3.3.3}\protected@file@percent }
|
||||||
|
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces RNN model input and output visualization\relax }}{11}{figure.caption.5}\protected@file@percent }
|
||||||
|
\newlabel{fig:rnn_model_visualization}{{3}{11}{RNN model input and output visualization\relax }{figure.caption.5}{}}
|
||||||
|
\@writefile{toc}{\contentsline {subsection}{\numberline {3.4}Diffusion models}{11}{subsection.3.4}\protected@file@percent }
|
||||||
|
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.1}Overview}{11}{subsubsection.3.4.1}\protected@file@percent }
|
||||||
|
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.2}Applications}{12}{subsubsection.3.4.2}\protected@file@percent }
|
||||||
|
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Example of the diffusion process. The image of a cat is generated by starting from noise and iteratively denoising the image.\relax }}{12}{figure.caption.6}\protected@file@percent }
|
||||||
|
\newlabel{fig:diffusion_example}{{4}{12}{Example of the diffusion process. The image of a cat is generated by starting from noise and iteratively denoising the image.\relax }{figure.caption.6}{}}
|
||||||
|
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.3}Generation process}{12}{subsubsection.3.4.3}\protected@file@percent }
|
||||||
|
\newlabel{fig:diffusion_process}{{\caption@xref {fig:diffusion_process}{ on input line 279}}{14}{Generation process}{figure.caption.7}{}}
|
||||||
|
\@writefile{toc}{\contentsline {subsection}{\numberline {3.5}Evaluation}{14}{subsection.3.5}\protected@file@percent }
|
||||||
|
\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces Visualization of the CRPS metric\relax }}{15}{figure.caption.8}\protected@file@percent }
|
||||||
|
\newlabel{fig:crps_visualization}{{5}{15}{Visualization of the CRPS metric\relax }{figure.caption.8}{}}
|
||||||
\@setckpt{sections/background}{
|
\@setckpt{sections/background}{
|
||||||
\setcounter{page}{9}
|
\setcounter{page}{16}
|
||||||
\setcounter{equation}{0}
|
\setcounter{equation}{7}
|
||||||
\setcounter{enumi}{0}
|
\setcounter{enumi}{0}
|
||||||
\setcounter{enumii}{0}
|
\setcounter{enumii}{0}
|
||||||
\setcounter{enumiii}{0}
|
\setcounter{enumiii}{0}
|
||||||
@@ -24,12 +39,12 @@
|
|||||||
\setcounter{footnote}{0}
|
\setcounter{footnote}{0}
|
||||||
\setcounter{mpfootnote}{0}
|
\setcounter{mpfootnote}{0}
|
||||||
\setcounter{part}{0}
|
\setcounter{part}{0}
|
||||||
\setcounter{section}{2}
|
\setcounter{section}{3}
|
||||||
\setcounter{subsection}{3}
|
\setcounter{subsection}{5}
|
||||||
\setcounter{subsubsection}{3}
|
\setcounter{subsubsection}{0}
|
||||||
\setcounter{paragraph}{0}
|
\setcounter{paragraph}{0}
|
||||||
\setcounter{subparagraph}{0}
|
\setcounter{subparagraph}{0}
|
||||||
\setcounter{figure}{1}
|
\setcounter{figure}{5}
|
||||||
\setcounter{table}{2}
|
\setcounter{table}{2}
|
||||||
\setcounter{parentequation}{0}
|
\setcounter{parentequation}{0}
|
||||||
\setcounter{float@type}{4}
|
\setcounter{float@type}{4}
|
||||||
@@ -164,5 +179,5 @@
|
|||||||
\setcounter{section@level}{0}
|
\setcounter{section@level}{0}
|
||||||
\setcounter{Item}{0}
|
\setcounter{Item}{0}
|
||||||
\setcounter{Hfootnote}{0}
|
\setcounter{Hfootnote}{0}
|
||||||
\setcounter{bookmark@seq@number}{8}
|
\setcounter{bookmark@seq@number}{14}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
\section{Background}
|
|
||||||
% Achtergrond informatie
|
% Achtergrond informatie
|
||||||
% Generatief modelleren
|
% Generatief modelleren
|
||||||
% -> enkel forecast is vaak brak -> reinforcement learning is lastig -> generatief modelleren, veel generaties om mee te trainen
|
% -> enkel forecast is vaak brak -> reinforcement learning is lastig -> generatief modelleren, veel generaties om mee te trainen
|
||||||
@@ -6,7 +5,7 @@
|
|||||||
% - Achtergrond Generatief modelleren (van NRV)
|
% - Achtergrond Generatief modelleren (van NRV)
|
||||||
% - TODO: Achtergrond RNN?
|
% - TODO: Achtergrond RNN?
|
||||||
|
|
||||||
\subsection{Electricity market}
|
\section{Electricity market}
|
||||||
The electricity market consists of many different parties who all work together and want to make a profit in the end. An overview of the most important parties can be found in Table \ref{tab:parties}.
|
The electricity market consists of many different parties who all work together and want to make a profit in the end. An overview of the most important parties can be found in Table \ref{tab:parties}.
|
||||||
|
|
||||||
% table
|
% table
|
||||||
@@ -35,25 +34,25 @@ The electricity market consists of many different parties who all work together
|
|||||||
\end{table}
|
\end{table}
|
||||||
|
|
||||||
Elia, the Transmission system operator (TSO) in Belgium is responsible for keeping the grid stable. They do this by balancing the electricity consumption and generation. If there is an imbalance, Elia will use reserves to balance the grid. These reserves are expensive and are paid by the market participants. The prices paid for the activations of these reserves is called the imbalance price. Keeping the grid balanced is a very important but also a very difficult task. If the grid is not balanced, it can lead to blackouts but also other problems like damage to equipment and so on.
|
Elia, the Transmission system operator (TSO) in Belgium is responsible for keeping the grid stable. They do this by balancing the electricity consumption and generation. If there is an imbalance, Elia will use reserves to balance the grid. These reserves are expensive and are paid by the market participants. The prices paid for the activations of these reserves is called the imbalance price. Keeping the grid balanced is a very important but also a very difficult task. If the grid is not balanced, it can lead to blackouts but also other problems like damage to equipment and so on.
|
||||||
\\\\
|
|
||||||
Balance responsible parties (BRPs) forecast the electricity consumption and generation of their portfolio to effectively manage the balance between supply and demand within the grid they operate in. They submit a daily balance schedule for their portfolio the day before to the transmission system operator. This consists of the expected physical injections and offtakes from the grid and the commercial power trades. The power trades can be purchases and sales between BRPs or they can even be trades with other countries. BRPs must provide and deploy all reasonable resources to be balanced on a quarter-hourly basis. They can exchange electricity with other BRPs for the following day or the same day. There is one exception where a BRP can deviate from the balance schedule. This is when the grid is not balanced and they can help Elia to stabilize the grid. In this case, they will receive a compensation for their help. When a BRP deviates from the balance schedule in a way that destabilizes the grid, it will need to pay the imbalance price for the deviation.
|
Balance responsible parties (BRPs) forecast the electricity consumption and generation of their portfolio to effectively manage the balance between supply and demand within the grid they operate in. They submit a daily balance schedule for their portfolio the day before to the transmission system operator. This consists of the expected physical injections and offtakes from the grid and the commercial power trades. The power trades can be purchases and sales between BRPs or they can even be trades with other countries. BRPs must provide and deploy all reasonable resources to be balanced on a quarter-hourly basis. They can exchange electricity with other BRPs for the following day or the same day. There is one exception where a BRP can deviate from the balance schedule. This is when the grid is not balanced and they can help Elia to stabilize the grid. In this case, they will receive a compensation for their help. When a BRP deviates from the balance schedule in a way that destabilizes the grid, it will need to pay the imbalance price for the deviation.
|
||||||
\\\\
|
|
||||||
The imbalance price is determined based on which reserves Elia needs to activate to stabilize the grid. The imbalance of a BRP is the quarter-hourly difference between total injections and offtakes from the grid. The Net Regulation Volume (NRV) is the net control volume of energy that Elia applies to maintain balance in the Elia control area. The Area Control Error is the current difference between the scheduled values and the actual values of power exchanged in the Belgian control area. The imbalance of the system (SI) is the Area Control Error minus the NRV. Using the System Imbalance, the imbalance price is calculated.
|
The imbalance price is determined based on which reserves Elia needs to activate to stabilize the grid. The imbalance of a BRP is the quarter-hourly difference between total injections and offtakes from the grid. The Net Regulation Volume (NRV) is the net control volume of energy that Elia applies to maintain balance in the Elia control area. The Area Control Error is the current difference between the scheduled values and the actual values of power exchanged in the Belgian control area. The imbalance of the system (SI) is the Area Control Error minus the NRV. Using the System Imbalance, the imbalance price is calculated.
|
||||||
\\\\
|
|
||||||
Elia, the Transmission System Operator (TSO) in Belgium, maintains grid stability by activating three types of reserves, each designed to address specific conditions of imbalance. These reserves are crucial for ensuring that the electricity supply continuously meets the demand, thereby maintaining the frequency within the required operational limits. The reserves include:
|
Elia, the Transmission System Operator (TSO) in Belgium, maintains grid stability by activating three types of reserves, each designed to address specific conditions of imbalance. These reserves are crucial for ensuring that the electricity supply continuously meets the demand, thereby maintaining the frequency within the required operational limits. The reserves include:
|
||||||
|
|
||||||
1) \textbf{Frequency Containment Reserve (FCR)} \\
|
1) \textbf{Frequency Containment Reserve (FCR)} \\
|
||||||
FCR is a reserve that responds automatically to frequency deviations in the grid. The reserve responds automatically in seconds and provides a proportional response to the frequency deviation. Elia must provide a minimal share of this volume within the Belgian control area. This type of volume can also be offered by the BSPs.
|
FCR is a reserve that responds automatically to frequency deviations in the grid. The reserve responds automatically in seconds and provides a proportional response to the frequency deviation. Elia must provide a minimal share of this volume within the Belgian control area. This type of volume can also be offered by the BSPs.
|
||||||
\\\\
|
|
||||||
2) \textbf{Automatic Frequency Restoration Process (aFRR)} \\
|
2) \textbf{Automatic Frequency Restoration Process (aFRR)} \\
|
||||||
aFRR is the second reserve that Elia can activate to restore the frequency to 50Hz. The aFRR is activated when the FCR is not sufficient to restore the frequency. Every 4 seconds, Elia sends a set-point to the BSPs. The BSPs use this set-point to adjust their production or consumption. The BSPs have a 7.5-minute window to activate the full requested energy volume.
|
aFRR is the second reserve that Elia can activate to restore the frequency to 50Hz. The aFRR is activated when the FCR is not sufficient to restore the frequency. Every 4 seconds, Elia sends a set-point to the BSPs. The BSPs use this set-point to adjust their production or consumption. The BSPs have a 7.5-minute window to activate the full requested energy volume.
|
||||||
\\\\
|
|
||||||
3) \textbf{Manual Frequency Restoration (mFRR)} \\
|
3) \textbf{Manual Frequency Restoration (mFRR)} \\
|
||||||
Sometimes the FCR and aFRR are not enough to restore the imbalance between generation and consumption. Elia activates the mFRR manually and the requested energy volume is to be activated in 15 minutes.
|
Sometimes the FCR and aFRR are not enough to restore the imbalance between generation and consumption. Elia activates the mFRR manually and the requested energy volume is to be activated in 15 minutes.
|
||||||
|
|
||||||
The order in which the reserves are activated is as follows: FCR, aFRR and mFRR. BSPs provide bids for the aFRR and mFRR volumes. The provided bids consist of the type (aFRR or mFRR), bid volume (MW), bid price (per MWh) and start price (per MWh).
|
The order in which the reserves are activated is as follows: FCR, aFRR and mFRR. BSPs provide bids for the aFRR and mFRR volumes. The provided bids consist of the type (aFRR or mFRR), bid volume (MW), bid price (per MWh) and start price (per MWh).
|
||||||
The start price is used to cover the costs of starting a unit.
|
The start price is used to cover the costs of starting a unit.
|
||||||
\\\\
|
|
||||||
Elia selects the bids based on the order of activation and then the price. The highest marginal price paid for upward or downward activation determines the imbalance price. This means that the last bid that is activated determines the imbalance price. This price is paid by the BRPs that are not balanced. The imbalance price calculation is shown in Table \ref{tab:imbalance_price}.
|
Elia selects the bids based on the order of activation and then the price. The highest marginal price paid for upward or downward activation determines the imbalance price. This means that the last bid that is activated determines the imbalance price. This price is paid by the BRPs that are not balanced. The imbalance price calculation is shown in Table \ref{tab:imbalance_price}.
|
||||||
|
|
||||||
\begin{table}[h]
|
\begin{table}[h]
|
||||||
@@ -83,13 +82,13 @@ TODO: Add more information about the imbalance price calculation, alpha?
|
|||||||
|
|
||||||
The imbalance price can be reconstructed given the bids of a certain quarter/day and the System Imbalance. During this thesis, the system imbalance is assumed to be almost the same as the Net Regulation Volume. This is a simplification but it is a good approximation. The goal of this thesis is to model the Net Regulation Volume which can then be used to reconstruct the imbalance price and to make decisions on when to buy or sell electricity.
|
The imbalance price can be reconstructed given the bids of a certain quarter/day and the System Imbalance. During this thesis, the system imbalance is assumed to be almost the same as the Net Regulation Volume. This is a simplification but it is a good approximation. The goal of this thesis is to model the Net Regulation Volume which can then be used to reconstruct the imbalance price and to make decisions on when to buy or sell electricity.
|
||||||
|
|
||||||
\subsection{Generative modeling}
|
\section{Generative modeling}
|
||||||
Simple forecasting of the NRV is often not accurate and defining a policy using this forecast will lead to wrong decisions. A better method would be to try to model the NRV and sample multiple generations of the NRV. This should give better predictions and confidence intervals can be calculated from these.
|
Simple forecasting of the NRV is often not accurate and defining a policy using this forecast will lead to wrong decisions. A better method would be to try to model the NRV and sample multiple generations of the NRV for a whole day. This can give a better understanding of the uncertainty of the NRV. Better decisions can then be made based on multiple generations of the NRV.
|
||||||
\\\\
|
|
||||||
Generative modeling is a type of machine learning that is used to generate new data samples that look like the training data. The goal of generative modeling is to learn the true data distribution and use this distribution to generate new samples. Generative modeling is used in many different fields including image generation, text generation etc.
|
Generative modeling is a type of machine learning that is used to generate new data samples that look like the training data. The goal of generative modeling is to learn the true data distribution and use this distribution to generate new samples. Generative modeling is used in many different fields including image generation, text generation etc.
|
||||||
\\\\
|
|
||||||
In this thesis, generative modeling can be used to model the NRV of the Belgian electricity market using different conditional input features like the weather, the load forecast etc. The model can then be used to generate new samples of the NRV.
|
In this thesis, generative modeling can be used to model the NRV of the Belgian electricity market using different conditional input features like the weather, the load forecast etc. The model can then be used to generate new samples of the NRV.
|
||||||
\\\\
|
|
||||||
There exist many different types of generative models. Some of the most popular ones are:
|
There exist many different types of generative models. Some of the most popular ones are:
|
||||||
\begin{itemize}
|
\begin{itemize}
|
||||||
\item Generative Adversarial Networks (GANs)
|
\item Generative Adversarial Networks (GANs)
|
||||||
@@ -98,9 +97,124 @@ There exist many different types of generative models. Some of the most popular
|
|||||||
\item Diffusion models
|
\item Diffusion models
|
||||||
\end{itemize}
|
\end{itemize}
|
||||||
|
|
||||||
In this thesis, autoregressive models will be used to model the NRV. Autoregressive models are models that predict the next value in a sequence based on the previous values. The model can be trained to predict the next value in the NRV sequence based on the previous values of the NRV, the weather, the load forecast etc. Using this method, the model will always generate the same sequence of values given the same input features. Instead of using the autoregressive model to predict the next value in the sequence, the model can also be trained to predict the distribution of the next value. This way, the model can generate multiple generations of the NRV given the same input features. For example, Quantile Regression can be used to predict the distribution of the next value in the sequence.
|
\subsection{Quantile Regression}
|
||||||
\\\\
|
Another method can be used to use any feedforward neural network as a generative model. This method is called quantile regression. This method enables the model to output quantile values from which the distribution of the target variable can be reconstructed, instead of producing a single point prediction for a quarter. This distribution can then be used to sample the NRV value for a quarter. The sampling allows for multiple full-day generations of the NRV.
|
||||||
In this thesis, the utilization of diffusion models is also explored. Diffusion models are a type of generative model that can be used to generate new data samples that follow the distribution of the input data set. Using a structured training process, diffusion models learn to reverse a diffusion process. Starting from a random noise distribution, the model learns to transform the noise into a sample from the data distribution using multiple denoising steps.
|
|
||||||
|
When quantile regression is used, the model outputs the values for multiple quantiles for the target value of a certain quarter. A quantile is a statistical value of a random variable below which a certain proportion of observations fall. Figure \ref{fig:quantile_example} shows the cumulative distribution function of a normal distribution. The figure shows the 25th, 50th and 75th quantiles. The 25th quantile is the value below which 25\% of the observations fall. In the example, this value is -0.67. The other quantiles work in the same way.
|
||||||
|
|
||||||
|
\begin{figure}[H]
|
||||||
|
\centering
|
||||||
|
\includegraphics[width=0.8\textwidth]{images/quantile_regression/cdf_quantiles_example.png}
|
||||||
|
\caption{Example of quantiles}
|
||||||
|
\label{fig:quantile_example}
|
||||||
|
\end{figure}
|
||||||
|
|
||||||
|
Using the outputted quantiles, the cumulative distribution function can be reconstructed and used to sample the NRV value for the quarter to predict. An example of the output of a quantile regression model is shown in figure \ref{fig:quantile_regression_example}. The output values of the different quantiles are plotted and these are interpolated to get the cumulative distribution function. In this thesis, the quantiles used are 1\%, 5\%, 10\%, 15\%, 30\%, 40\%, 50\%, 60\%, 70\%, 85\%, 90\%, 95\%, and 99\%. These are chosen to get a good approximation of the cumulative distribution function. More quantiles at the tails of the distribution are used because the edges of the distribution are more important for the imbalance price calculation. The outputted quantile values are then interpolated using cubic spline and samples can be drawn from the reconstructed cumulative distribution function.
|
||||||
|
|
||||||
|
TODO: figure goes under 0, maybe use other values or other interpolation? + inverse the values to real values
|
||||||
|
\begin{figure}[H]
|
||||||
|
\centering
|
||||||
|
\includegraphics[width=0.8\textwidth]{images/quantile_regression/reconstructed_cdf.png}
|
||||||
|
\caption{Example of quantile regression output for one-quarter of the NRV, showing interpolated values for quantiles at 1\%, 5\%, 10\%, 15\%, 30\%, 40\%, 50\%, 60\%, 70\%, 85\%, 90\%, 95\%, and 99\%. These quantiles are used to reconstruct the cumulative distribution function.}
|
||||||
|
\label{fig:quantile_regression_example}
|
||||||
|
\end{figure}
|
||||||
|
|
||||||
|
The NRV value for a quarter can be sampled from the reconstructed cumulative distribution function. A full-day prediction for the NRV consists of 96 values. This means 96 cumulative distributions need to be reconstructed and samples need to be drawn from each of the distributions.
|
||||||
|
|
||||||
|
The quantile regression model is trained using the pinball loss function, also known as the quantile loss. The model outputs the quantile values for the NRV. The quantile values themselves are not available in the training data. Only the real NRV values are known. The loss function is defined as:
|
||||||
|
\begin{equation}
|
||||||
|
L_\tau(y, \hat{y}) = \begin{cases}
|
||||||
|
\tau(y - \hat{y}) & \text{if } y \geq \hat{y} \\
|
||||||
|
(1 - \tau)(\hat{y} - y) & \text{if } y < \hat{y}
|
||||||
|
\end{cases}
|
||||||
|
\end{equation}
|
||||||
|
\begin{align*}
|
||||||
|
\textbf{Where:} \\
|
||||||
|
\tau & = \text{Quantile of interest} \\
|
||||||
|
y & = \text{Actual observed value of NRV} \\
|
||||||
|
\hat{y} & = \text{Predicted quantile value of NRV} \\
|
||||||
|
\end{align*}
|
||||||
|
The loss function works by penalizing underestimation and overestimation differently. When a quantile is predicted that is lower than or equal to the actual value, the loss is calculated as the difference between the actual value and the predicted quantile value multiplied by the quantile of interest. This means that underestimations for high quantiles are penalized more heavily than for lower quantiles.
|
||||||
|
|
||||||
|
When the quantile value prediction is higher than the real NRV value, the loss is calculated as the difference between the predicted quantile value and the real NRV multiplied by $(1-\tau)$. This means that overestimations are penalized less for high quantiles of interest.
|
||||||
|
|
||||||
|
\begin{equation}
|
||||||
|
L = \frac{1}{N} \sum_{i=1}^{N} \sum_{\tau \in T} L_\tau(y_i, \hat{y}_i)
|
||||||
|
\end{equation}
|
||||||
|
|
||||||
|
\begin{align*}
|
||||||
|
\textbf{Where:} \\
|
||||||
|
N & = \text{Number of samples} \\
|
||||||
|
T & = \text{Quantiles of interest} \\
|
||||||
|
y_i & = \text{Actual observed value of NRV for sample i} \\
|
||||||
|
\hat{y}_i & = \text{Predicted quantile value of NRV for sample i} \\
|
||||||
|
\end{align*}
|
||||||
|
|
||||||
|
To calculate the pinball loss, the mean over the quantiles of interest and the samples needs to be taken. This gives a scalar loss value which can be used to perform backpropagation. The lower this value, the better the NRV distribution is modeled.
|
||||||
|
|
||||||
|
\subsection{Autoregressive vs Non-Autoregressive models}
|
||||||
|
Two types of generative models exist: autoregressive and non-autoregressive models. Autoregressive models generate samples by sampling from the model one step at a time. The model generates the next value based on the previous values. This means that the model generates samples sequentially. Non-autoregressive models on the other hand generate samples in one step. The model generates the whole sample, consisting of multiple values, at once. This means that the model can generate samples in parallel, which is much faster than autoregressive models. The downside of non-autoregressive models is that the model itself is more complex and harder to train. It needs to predict all values at once, which can be harder than predicting one value at a time.
|
||||||
|
|
||||||
|
The quantile regression method can be used with both types of models. The autoregressive model will only output the quantiles for the next quarter based on the given input features. The cumulative distribution function can be reconstructed from these and be used to sample the NRV value. To obtain a full-day sample, the model needs to be run 96 times sequentially. The sample for the next quarter depends on the sample of the previous quarter.
|
||||||
|
|
||||||
|
The non-autoregressive model will output the quantiles for all quarters of the day based on the input features. The cumulative distribution functions all need to be reconstructed and samples can be drawn from each of the distributions. When sampling from the distributions at once, the samples are independent of each other. The sample for the next quarter does not depend on the sample of the previous quarter which can result in some unrealistic samples.
|
||||||
|
|
||||||
|
\subsection{Model Types}
|
||||||
|
\subsubsection{Linear Model}
|
||||||
|
A simple linear model can be used as a baseline to compare the more complex models. This model assumes a linear relation exists between the input features and the output. The relationship is modeled using the following formula:
|
||||||
|
\begin{equation}
|
||||||
|
y = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + ... + \beta_n x_n
|
||||||
|
\end{equation}
|
||||||
|
|
||||||
|
\begin{align*}
|
||||||
|
\textbf{Where:} \\
|
||||||
|
y & = \text{Output value} \\
|
||||||
|
\beta_0 & = \text{Intercept} \\
|
||||||
|
\beta_1, ..., \beta_n & = \text{Coefficients} \\
|
||||||
|
x_1, ..., x_n & = \text{Input features} \\
|
||||||
|
\end{align*}
|
||||||
|
|
||||||
|
This model needs to be adapted to be used for quantile regression. The model needs to output the quantiles for the target value. This can be done by training multiple linear models for each of the quantiles. The model can be trained using the pinball loss function. The number of parameters in this model is quite low which makes it easier and faster to train. The downside of this model is that it is very simple and might not be able to capture the complexity of the data. The number of parameters of this model is $\text{number of quantiles} \times (\text{number of input features} + 1)$.
|
||||||
|
|
||||||
|
\begin{equation}
|
||||||
|
\hat{y}_\tau = \beta_{0, \tau} + \beta_{1, \tau} x_1 + \beta_{2, \tau} x_2 + ... + \beta_{n, \tau} x_n
|
||||||
|
\end{equation}
|
||||||
|
|
||||||
|
\begin{align*}
|
||||||
|
\textbf{Where:} \\
|
||||||
|
\tau & = \text{Quantile of interest} \\
|
||||||
|
\hat{y}_\tau & = \text{Predicted quantile value for the target value} \\
|
||||||
|
\beta_{0, \tau} & = \text{Intercept for the quantile of interest} \\
|
||||||
|
\beta_{1, \tau}, ..., \beta_{n, \tau} & = \text{Coefficients for the quantile of interest} \\
|
||||||
|
x_1, ..., x_n & = \text{Input features} \\
|
||||||
|
\end{align*}
|
||||||
|
|
||||||
|
\subsubsection{Non-Linear Model}
|
||||||
|
A more complex model can be used to model the NRV. A feedforward neural network with multiple hidden layers and activation functions can be used. This model can then capture the non-linear relationships between the input features and the output. This model has more parameters and is harder to train than the linear model. The non-linear model also has some hyperparameters that need to be chosen like the number of hidden layers, the number of neurons in each layer, the activation function etc. The model can be trained to output the quantiles for the NRV based on the input features. The same pinball loss function can be used to train the model.
|
||||||
|
|
||||||
|
\subsubsection{Recurrent Neural Network (RNN)}
|
||||||
|
Another more complex model that can be used is a Recurrent Neural Network (RNN). The RNN can be used to model the NRV data because of the sequential nature of the input features. The RNN keeps a hidden state that is updated at every time step using the new input data. The hidden state contains information about the previous time steps and can be used to make predictions for the next time step. These models are used in multiple fields like natural language processing, time series forecasting etc.
|
||||||
|
|
||||||
|
The RNN model can be used to model the NRV data. The input features are structured in a way that the model can learn the sequential patterns in the data. The model can be trained to output the quantiles for the NRV based on the input features using the pinball loss function.
|
||||||
|
|
||||||
|
Multiple types of RNN models exist. The two most common types of RNNs are the Long Short-Term Memory (LSTM) and the Gated Recurrent Unit (GRU). The GRU is a simpler version of the LSTM. The GRU has fewer parameters which results in faster training times. The GRU still can capture long-term dependencies in the data and can achieve similar performance to the LSTM.
|
||||||
|
|
||||||
|
\begin{figure}[H]
|
||||||
|
\centering
|
||||||
|
\includegraphics[width=0.8\textwidth]{images/quantile_regression/rnn/RNN_diagram.png}
|
||||||
|
\caption{RNN model input and output visualization}
|
||||||
|
\label{fig:rnn_model_visualization}
|
||||||
|
\end{figure}
|
||||||
|
|
||||||
|
The input features for the RNN model are carefully structured to capture the relevant information from the previous quarters and the forecasted values. Each input feature vector represents a quarter and consists of the following components:
|
||||||
|
|
||||||
|
\begin{itemize}
|
||||||
|
\item The actual NRV value from the current quarter (T-1), which provides the model with the historical context of the NRV.
|
||||||
|
\item The forecasted or real values for the next quarter (T), including load, PV, wind, and net position. If the next quarter is not the quarter to predict, the real values for that quarter are used. If the next quarter is the quarter to predict, the forecasted values are used.
|
||||||
|
\item A quarter embedding vector representing the current quarter (T-1). The embedding vector gives the model information about the time of day, which can help it learn the daily patterns in the NRV data.
|
||||||
|
\end{itemize}
|
||||||
|
|
||||||
|
The input feature structure is designed to provide the model with a comprehensive view of the previous quarters and the forecasted values for the current quarter. By incorporating both historical and forecasted information sequentially, the model can learn to predict the NRV quantiles for the next quarter more accurately.
|
||||||
|
|
||||||
\subsection{Diffusion models}
|
\subsection{Diffusion models}
|
||||||
TODO: reference the paper
|
TODO: reference the paper
|
||||||
@@ -110,32 +224,49 @@ Diffusion models are a type of probabilistic model designed to generate high-qua
|
|||||||
|
|
||||||
\subsubsection{Applications}
|
\subsubsection{Applications}
|
||||||
Diffusion models gained popularity in the field of computer vision. They are used for inpainting, super-resolution, image generation, image editing etc. The paper introducing "Denoising Diffusion Probabilistic Models" (DDPM) showed that diffusion models can achieve state-of-the-art results in image generation. This type of model was then applied to other fields like text generation, audio generation etc. The most popular application of diffusion models is still image generation. Many different models and products exist that make use of diffusion models to generate images. Some examples are DALL·E, Stable Diffusion, Midjourney, etc. These models can generate or edit images based on a given text description.
|
Diffusion models gained popularity in the field of computer vision. They are used for inpainting, super-resolution, image generation, image editing etc. The paper introducing "Denoising Diffusion Probabilistic Models" (DDPM) showed that diffusion models can achieve state-of-the-art results in image generation. This type of model was then applied to other fields like text generation, audio generation etc. The most popular application of diffusion models is still image generation. Many different models and products exist that make use of diffusion models to generate images. Some examples are DALL·E, Stable Diffusion, Midjourney, etc. These models can generate or edit images based on a given text description.
|
||||||
\\\\
|
|
||||||
This method can also be applied to other fields like audio generation, text generation etc. In this thesis, diffusion models are explored to model time series data conditioned on additional information.
|
This method can also be applied to other fields like audio generation, text generation etc. In this thesis, diffusion models are explored to model time series data conditioned on additional information. A small example of the diffusion process is shown in Figure \ref{fig:diffusion_example}. An image of a cat is generated by starting from noise and iteratively denoising the image.
|
||||||
|
|
||||||
|
\begin{figure}[H]
|
||||||
|
\centering
|
||||||
|
\includegraphics[width=0.8\textwidth]{images/diffusion/Generation-with-Diffusion-Models.png}
|
||||||
|
\caption{Example of the diffusion process. The image of a cat is generated by starting from noise and iteratively denoising the image.}
|
||||||
|
\label{fig:diffusion_example}
|
||||||
|
\end{figure}
|
||||||
|
|
||||||
\subsubsection{Generation process}
|
\subsubsection{Generation process}
|
||||||
The generation process is quite different in comparison to other models. For example, GANs and VAEs generate samples by sampling from a noise distribution and then transforming the noise into a sample that looks like the training data in one step using a generator network. Diffusion models generate samples by starting from a noise distribution and then applying a series of denoising steps to the noise. The diffusion process consists of 3 main components: the forward process, the reverse process and the sampling process.
|
The generation process is quite different in comparison to other models. For example, GANs and VAEs generate samples by sampling from a noise distribution and then transforming the noise into a sample that looks like the training data in one step using a generator network. Diffusion models generate samples by starting from a noise distribution and then applying a series of denoising steps to the noise. The diffusion process consists of 3 main components: the forward process, the reverse process and the sampling process.
|
||||||
|
|
||||||
\begin{itemize}
|
\begin{itemize}
|
||||||
\item \textbf{Forward process} \\
|
\item \textbf{Forward process} \\
|
||||||
During this process, Gaussian noise is added to the data in each of the T time steps according to a variance schedule $\beta_1, ..., \beta_T$. \\\\
|
This forward process is a Markov chain that starts from the data and applies a series of diffusion steps to the data. During this process, Gaussian noise is added to the data in each of the T time steps according to a variance schedule $\beta_1, ..., \beta_T$.
|
||||||
|
|
||||||
$q(\mathbf{x}_{1:T}|\mathbf{x}_0) \coloneqq \prod_{t=1}^{T} q(\mathbf{x}_t|\mathbf{x}_{t-1}) \quad$ with $\quad q(\mathbf{x}_t|\mathbf{x}_{t-1}) \coloneqq \mathcal{N}(\mathbf{x}_t; \sqrt{1-\beta_t}\mathbf{x}_{t-1}, \beta_t\mathbf{I})$
|
$q(\mathbf{x}_{1:T}|\mathbf{x}_0) \coloneqq \prod_{t=1}^{T} q(\mathbf{x}_t|\mathbf{x}_{t-1}) \quad$ with $\quad q(\mathbf{x}_t|\mathbf{x}_{t-1}) \coloneqq \mathcal{N}(\mathbf{x}_t; \sqrt{1-\beta_t}\mathbf{x}_{t-1}, \beta_t\mathbf{I})$
|
||||||
\\\\
|
|
||||||
|
|
||||||
This formula shows that the noisy data distribution after T diffusion steps is the product of the transition probabilities at each step t. The noise added in each time step is a Gaussian distribution with mean $\sqrt{1-\beta_t}\mathbf{x}_{t-1}$ and variance $\beta_t\mathbf{I}$. The variance schedule $\beta_1, ..., \beta_T$ is a hyperparameter that needs to be chosen or optimized during training.
|
This formula shows that the noisy data distribution after T diffusion steps is the product of the transition probabilities at each step t. The noise added in each time step is a Gaussian distribution with mean $\sqrt{1-\beta_t}\mathbf{x}_{t-1}$ and variance $\beta_t\mathbf{I}$. The variance schedule $\beta_1, ..., \beta_T$ is a hyperparameter that needs to be chosen or optimized during training.
|
||||||
|
|
||||||
\item \textbf{Reverse process} \\
|
\item \textbf{Reverse process} \\
|
||||||
The diffusion process must then be reversed. The model is trained to model the noise distribution given the data and timestep. \\\\
|
The diffusion process must then be reversed. The model is trained to model the noise distribution given the data and timestep.
|
||||||
|
|
||||||
$p_{\theta}(\mathbf{x}_{0:T}) \coloneqq p(\mathbf{x}_T) \prod_{t=1}^{T} p_{\theta}(\mathbf{x}_{t-1}|\mathbf{x}_t) \quad$ with $\quad p_{\theta}(\mathbf{x}_{t-1}|\mathbf{x}_t) \coloneqq \mathcal{N}(\mathbf{x}_{t-1}; \mu_{\theta}(\mathbf{x}_t, t), \Sigma_{\theta}(\mathbf{x}_t, t))$
|
$p_{\theta}(\mathbf{x}_{0:T}) \coloneqq p(\mathbf{x}_T) \prod_{t=1}^{T} p_{\theta}(\mathbf{x}_{t-1}|\mathbf{x}_t) \quad$ with $\quad p_{\theta}(\mathbf{x}_{t-1}|\mathbf{x}_t) \coloneqq \mathcal{N}(\mathbf{x}_{t-1}; \mu_{\theta}(\mathbf{x}_t, t), \Sigma_{\theta}(\mathbf{x}_t, t))$
|
||||||
\\\\
|
|
||||||
|
|
||||||
In the reverse process, each step aims to undo the diffusion by estimating what the previous, less noisy state might have been. This is done using a series of conditional Gaussian distributions $p_{\theta}(\mathbf{x}_{t-1}|\mathbf{x}_t)$. For each of these Gaussians, a neural network with parameters $\theta$ is used to estimate the mean $\mu_{\theta}(\mathbf{x}_t, t)$ and the covariance $\Sigma_{\theta}(\mathbf{x}_t, t)$ of the distribution. The joint distribution $p_{\theta}(\mathbf{x}_{0:T})$ is then the product of the marginal distribution of the last timestep $p(\mathbf{x}_T)$ and the conditional distributions $p_{\theta}(\mathbf{x}_{t-1}|\mathbf{x}_t)$ for each timestep.
|
In the reverse process, each step aims to undo the diffusion by estimating what the previous, less noisy state might have been. This is done using a series of conditional Gaussian distributions $p_{\theta}(\mathbf{x}_{t-1}|\mathbf{x}_t)$. For each of these Gaussians, a neural network with parameters $\theta$ is used to estimate the mean $\mu_{\theta}(\mathbf{x}_t, t)$ and the covariance $\Sigma_{\theta}(\mathbf{x}_t, t)$ of the distribution. The joint distribution $p_{\theta}(\mathbf{x}_{0:T})$ is then the product of the marginal distribution of the last timestep $p(\mathbf{x}_T)$ and the conditional distributions $p_{\theta}(\mathbf{x}_{t-1}|\mathbf{x}_t)$ for each timestep.
|
||||||
|
|
||||||
\item \textbf{Training} \\
|
\item \textbf{Training} \\
|
||||||
The model training is done by optimizing the variational bound of the negative log-likelihood. This is also called the evidence lower bound (ELBO) in the context of generative models. \\\\
|
TODO: explain better! \\
|
||||||
TODO: add formula and explain?
|
The model training is done by optimizing the variational bound of the negative log-likelihood. This is also called the evidence lower bound (ELBO) in the context of generative models.
|
||||||
|
\begin{align*}
|
||||||
|
\log p(x) \geq & \mathbb{E}_q \left[ \log p_{\theta} (x_0 | x_1) | x_1 , x_0 \right] \\
|
||||||
|
& - D_{KL} \left( q(x_T | x_0) || p(x_T) \right) \\
|
||||||
|
& - \sum_{t=2}^{T} \mathbb{E}_q \left[ D_{KL} \left( q(x_{t-1} | x_t, x_0) || p_{\theta}(x_{t-1} | x_t) \right) | x_t, x_0 \right] \\
|
||||||
|
= & L_0 - L_T - \sum_{t=2}^{T} L_{t-1}
|
||||||
|
\end{align*}
|
||||||
|
The formula shows that maximizing the likelihood can be done by minimizing the KL divergence between the noise distribution and the data distribution for each timestep. Through further derivation, it can be shown that this objective simplifies to minimizing the mean squared error between the noise predicted by the model and the actual noise added in each timestep.
|
||||||
|
|
||||||
\item \textbf{Conditioning} \\
|
\item \textbf{Conditioning} \\
|
||||||
The model can be conditioned on additional information. This can be used to guide the generation process. In the context of image generation, this can be used to generate images of a certain class or with certain attributes. This requires some changes in the model architecture and training process.
|
The model can be conditioned on additional information. This can be used to guide the generation process. In the context of image generation, this can be used to generate images of a certain class or with certain attributes. This requires some changes in the model architecture and training process. A simple way to condition the model is to add additional information to the input of the model. This can be done by concatenating the additional information to the input of the model. The model can then learn to generate samples that follow the distribution of the data conditioned on the additional information.
|
||||||
TODO: add more information about conditioning
|
|
||||||
\end{itemize}
|
\end{itemize}
|
||||||
|
|
||||||
The diffusion process can be seen in Figure \ref{fig:diffusion_process}. The model is trained to reverse this process. Starting from the noise, the model learns to generate samples that look like the data.
|
The diffusion process can be seen in Figure \ref{fig:diffusion_process}. The model is trained to reverse this process. Starting from the noise, the model learns to generate samples that look like the data.
|
||||||
@@ -144,8 +275,52 @@ The diffusion process can be seen in Figure \ref{fig:diffusion_process}. The mod
|
|||||||
\centering
|
\centering
|
||||||
\includegraphics[width=0.8\textwidth]{images/diffusion/diffusion_graphical_model.png}
|
\includegraphics[width=0.8\textwidth]{images/diffusion/diffusion_graphical_model.png}
|
||||||
TODO: fix citation
|
TODO: fix citation
|
||||||
\caption[Diffusion process]{Diffusion process (adapted from \cite{ho2020denoising}).}
|
%\caption[Diffusion process]{Diffusion process (adapted from \cite{ho2020denoising}).}
|
||||||
\label{fig:diffusion_process}
|
\label{fig:diffusion_process}
|
||||||
\end{figure}
|
\end{figure}
|
||||||
|
|
||||||
|
\subsection{Evaluation}
|
||||||
|
To evaluate the performance of the quantile regression models, multiple metrics can be used. The pinball loss itself can be used to compare models on the test set. Other metrics that can be used are the mean absolute error (MAE) and the mean squared error (MSE). This can be done by generating multiple full-day NRV samples for each day of the test set and calculating the error metrics for each of the samples. The mean can then be taken over the different samples to get a single value for the error metrics.
|
||||||
|
|
||||||
|
MAE does not consider the direction of the error. It is the average of the absolute differences between the predicted and actual values. The formula in our case with full-day NRV samples is:
|
||||||
|
\begin{equation}
|
||||||
|
MAE = \frac{1}{N} \sum_{i=1}^{N} \frac{1}{96} \sum_{j=1}^{96} |y_{ij} - \hat{y}_{ij}|
|
||||||
|
\end{equation}
|
||||||
|
|
||||||
|
\begin{align*}
|
||||||
|
\textbf{Where:} \\
|
||||||
|
N & = \text{Number of samples} \\
|
||||||
|
y_{ij} & = \text{Actual observed value of NRV for sample i and quarter j} \\
|
||||||
|
\hat{y}_{ij} & = \text{Sampled value of NRV for sample i and quarter j} \\
|
||||||
|
\end{align*}
|
||||||
|
|
||||||
|
MSE is more sensitive to outliers than MAE because it squares the error between the predicted and actual values. The formula in our case with full-day NRV samples is:
|
||||||
|
\begin{equation}
|
||||||
|
MSE = \frac{1}{N} \sum_{i=1}^{N} \frac{1}{96} \sum_{j=1}^{96} (y_{ij} - \hat{y}_{ij})^2
|
||||||
|
\end{equation}
|
||||||
|
|
||||||
|
The MAE and MSE metrics do not compare the distribution of the NRV to the real NRV value but only take into account the sampled values. Evaluating the outputted distribution for the NRV must be done differently. The Continuous Ranked Probability Score (CRPS) can be used to compare the predicted distribution to the real NRV value. The CRPS metric is used to evaluate the accuracy of the predicted cumulative distribution function. The CRPS can be seen as a generalization of the MAE for probabilistic forecasts. The formula for the CRPS is:
|
||||||
|
|
||||||
|
\begin{equation}
|
||||||
|
CRPS(F, x) = \int_{-\infty}^{\infty} (F(y) - \mathbbm{1}(y - x))^2 \, dy
|
||||||
|
\end{equation}
|
||||||
|
|
||||||
|
\begin{align*}
|
||||||
|
\textbf{Where:} \\
|
||||||
|
F & = \text{Predicted cumulative distribution function} \\
|
||||||
|
x & = \text{Real NRV value} \\
|
||||||
|
\mathbbm{1}(x) & = \text{Heaviside step function} = \begin{cases}
|
||||||
|
1 & \text{if } x \geq 0 \\
|
||||||
|
0 & \text{if } x < 0
|
||||||
|
\end{cases} \\
|
||||||
|
\end{align*}
|
||||||
|
|
||||||
|
The mean CRPS can be calculated over the different days to get a single value. The lower this value, the better the NRV is modeled. The CRPS metric can be visualized as shown in figure \ref{fig:crps_visualization}. The CRPS corresponds to the squared area between the predicted cumulative distribution function and the Heaviside step function. The smaller the area between the curves, the better the NRV is modeled.
|
||||||
|
|
||||||
|
TODO: improve visualisation? -> echte NRV + y as cummulative prob
|
||||||
|
\begin{figure}[H]
|
||||||
|
\centering
|
||||||
|
\includegraphics[width=0.8\textwidth]{images/quantile_regression/crps_visualization.png}
|
||||||
|
\caption{Visualization of the CRPS metric}
|
||||||
|
\label{fig:crps_visualization}
|
||||||
|
\end{figure}
|
||||||
|
|||||||
@@ -1,9 +1,11 @@
|
|||||||
\relax
|
\relax
|
||||||
\providecommand\hyper@newdestlabel[2]{}
|
\providecommand\hyper@newdestlabel[2]{}
|
||||||
\@writefile{toc}{\contentsline {section}{\numberline {3}Literature Study}{9}{section.3}\protected@file@percent }
|
\@writefile{toc}{\contentsline {section}{\numberline {5}Literature Study}{18}{section.5}\protected@file@percent }
|
||||||
|
\@writefile{toc}{\contentsline {subsection}{\numberline {5.1}Electricity Price Forecasting}{18}{subsection.5.1}\protected@file@percent }
|
||||||
|
\@writefile{toc}{\contentsline {subsection}{\numberline {5.2}Policies for Battery Optimization}{19}{subsection.5.2}\protected@file@percent }
|
||||||
\@setckpt{sections/literature_study}{
|
\@setckpt{sections/literature_study}{
|
||||||
\setcounter{page}{10}
|
\setcounter{page}{20}
|
||||||
\setcounter{equation}{0}
|
\setcounter{equation}{7}
|
||||||
\setcounter{enumi}{0}
|
\setcounter{enumi}{0}
|
||||||
\setcounter{enumii}{0}
|
\setcounter{enumii}{0}
|
||||||
\setcounter{enumiii}{0}
|
\setcounter{enumiii}{0}
|
||||||
@@ -11,12 +13,12 @@
|
|||||||
\setcounter{footnote}{0}
|
\setcounter{footnote}{0}
|
||||||
\setcounter{mpfootnote}{0}
|
\setcounter{mpfootnote}{0}
|
||||||
\setcounter{part}{0}
|
\setcounter{part}{0}
|
||||||
\setcounter{section}{3}
|
\setcounter{section}{5}
|
||||||
\setcounter{subsection}{0}
|
\setcounter{subsection}{2}
|
||||||
\setcounter{subsubsection}{0}
|
\setcounter{subsubsection}{0}
|
||||||
\setcounter{paragraph}{0}
|
\setcounter{paragraph}{0}
|
||||||
\setcounter{subparagraph}{0}
|
\setcounter{subparagraph}{0}
|
||||||
\setcounter{figure}{1}
|
\setcounter{figure}{5}
|
||||||
\setcounter{table}{2}
|
\setcounter{table}{2}
|
||||||
\setcounter{parentequation}{0}
|
\setcounter{parentequation}{0}
|
||||||
\setcounter{float@type}{4}
|
\setcounter{float@type}{4}
|
||||||
@@ -33,7 +35,7 @@
|
|||||||
\setcounter{citetotal}{0}
|
\setcounter{citetotal}{0}
|
||||||
\setcounter{multicitecount}{0}
|
\setcounter{multicitecount}{0}
|
||||||
\setcounter{multicitetotal}{0}
|
\setcounter{multicitetotal}{0}
|
||||||
\setcounter{instcount}{0}
|
\setcounter{instcount}{5}
|
||||||
\setcounter{maxnames}{2}
|
\setcounter{maxnames}{2}
|
||||||
\setcounter{minnames}{1}
|
\setcounter{minnames}{1}
|
||||||
\setcounter{maxitems}{999}
|
\setcounter{maxitems}{999}
|
||||||
@@ -151,5 +153,5 @@
|
|||||||
\setcounter{section@level}{0}
|
\setcounter{section@level}{0}
|
||||||
\setcounter{Item}{0}
|
\setcounter{Item}{0}
|
||||||
\setcounter{Hfootnote}{0}
|
\setcounter{Hfootnote}{0}
|
||||||
\setcounter{bookmark@seq@number}{9}
|
\setcounter{bookmark@seq@number}{20}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,4 +1,15 @@
|
|||||||
\section{Literature Study}
|
\section{Literature Study}
|
||||||
% - Literatuur forecasting imbalance price
|
% - Literatuur forecasting imbalance price
|
||||||
% - Literatuur policies adhv forecasts
|
% - Literatuur policies adhv forecasts
|
||||||
Forecasting the electricity price is a challenging task that has been researched extensively. Knowing the future electricity price is crucial for market participants to make informed decisions and optimize their operations and profit.
|
|
||||||
|
|
||||||
|
\subsection{Electricity Price Forecasting}
|
||||||
|
Forecasting the electricity price is a challenging task that has been researched extensively. Knowing the future electricity price is crucial for market participants to make informed decisions and optimize their operations and profit. Already since the early 2000s, researchers have been trying to predict the electricity price. The first models were based on time series analysis, but with the rise of machine learning, more advanced models have been developed. A rise in publications on this topic can be observed since 2005. This is described in the literature review by \parencite{weron_electricity_2014}. An overview is given of the evolution of the methods used for electricity price forecasting. A significant shift can be observed towards integrating machine learning techniques with traditional statistical methods. The earliest models were based on time series analysis involving methods like autoregression, moving averages and their combinations (ARMA, ARIMA). These methods are not always able to capture the complex patterns in the electricity price. Therefore, researchers started to use more advanced models like neural networks, support vector machines, and random forests. The combination of statistical and machine learning models is more accurate. The statistical models are used to capture the linear patterns, while the machine learning models are used to capture the more complex non-linear patterns. This results in a more accurate and robust model. The more recent paper \parencite{poggi_electricity_2023} compares the performance of statistical and machine learning methods for electricity price forecasting. The authors use ARIMA and SARIMA as statistical methods and XGBoost as a machine learning method. They also compare the performance of Long Short-Term Memory (LSTM) networks for electricity price forecasting.
|
||||||
|
|
||||||
|
Because forecasting the electricity price is a challenging task with a lot of uncertainty, other generative methods to model the electricity price were researched. Generative modeling is a type of unsupervised learning that can be used to generate new samples from the same distribution as the training data. This can be used to generate new electricity price samples. The authors of \parencite{lu_scenarios_2022} use General Adversarial Networks (GANs) to generate new electricity price scenarios. They introduce a deep learning framework called Conditional Time Series Generative Adversarial Networks (CTSGAN) to generate electricity price scenarios. This enhances the traditional forecasting models by allowing the generation of a diverse set of potential future scenarios. This capability allows the modeling of the uncertainty in the electricity price. The authors show that the CTSGAN model outperforms traditional forecasting models in terms of forecasting accuracy. Other generative models like normalizing flows can also be used to generate new electricity price samples. The authors of \parencite{dumas_deep_2022} use normalizing flows to generate new electricity price samples. They show that normalizing flow models for electricity price forecasting are more accurate in quality than other generative models like GANs and Variational Autoencoders (VAEs). Not a lot of research has been done on using diffusion models for electricity price forecasting. The authors of \parencite{rasul_autoregressive_2021}, however, show that autoregressive diffusion models can be used for time series forecasting and achieve good results. They apply the model on multiple datasets which includes an electricity price dataset. The use of diffusion models for NRV modeling is further explored in this thesis.
|
||||||
|
|
||||||
|
Most research on forecasting for the electricity market focuses on the electricity price for consumers. Another important aspect of the electricity market is the imbalance price. Not many papers have been published on forecasting the imbalance price. One paper \parencite{dumas_deep_2022} describes the forecasting of the imbalance price. They do not forecast the price itself but rather forecast the NRV and use this to reconstruct the imbalance price. This approach will also be used in this thesis.
|
||||||
|
|
||||||
|
TODO: more information?
|
||||||
|
|
||||||
|
\subsection{Policies for Battery Optimization}
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
\section{NRV Prediction}
|
\section{Results \& Discussion}
|
||||||
As discussed in the background information, the imbalance prices are based on the Net Regulation Volume (NRV). This means that the imbalance prices can be reconstructed from the sampled NRV. Multiple baselines and models will be compared that forecast and model the NRV using different metrics. The data utilized in this thesis is provided by Elia. Elia makes a lot of data public and provides them in quarterly hour or minute intervals. The data used in this thesis is on a quarterly hourly basis. This makes the number of input features and output features way more manageable and makes the training more computationally efficient. A full-day sample of the NRV consists of 96 values. One value for every quarter. Further research could be done using smaller data intervals to see if this improves the models.
|
As discussed in the background information, the imbalance prices are based on the Net Regulation Volume (NRV). This means that the imbalance prices can be reconstructed from the sampled NRV. Multiple baselines and models will be compared that forecast and model the NRV using different metrics. The data utilized in this thesis is provided by Elia. Elia makes a lot of data public and provides them in quarterly hour or minute intervals. The data used in this thesis is on a quarterly hourly basis. This makes the number of input features and output features way more manageable and makes the training more computationally efficient. A full-day sample of the NRV consists of 96 values. One value for every quarter. Further research could be done using smaller data intervals to see if this improves the models.
|
||||||
|
|
||||||
\subsection{Data}
|
\subsection{Data}
|
||||||
@@ -40,129 +40,7 @@ TODO: ask Jonas: add urls to the correct data? via citation?
|
|||||||
|
|
||||||
A lot of data is available but only the most relevant data needs to be used. Experiments will be done to identify which data and features improve the NRV modeling. The data will be split into a training and test set. The training dataset starts depending on which data features are used but ends on 31-12-2022. The test set starts on 01-01-2023 and ends on (TODO: check the end date). This makes sure enough data is available to train the models and the test set is large enough to evaluate the models. The year 2023 is chosen as the test set because it is the most recent data available when the thesis experiments were conducted. Using data from 2022 in the test set also does not make a lot of sense because the trained models would be used to predict the future. Data from 2022 is not relevant anymore to evaluate the models.
|
A lot of data is available but only the most relevant data needs to be used. Experiments will be done to identify which data and features improve the NRV modeling. The data will be split into a training and test set. The training dataset starts depending on which data features are used but ends on 31-12-2022. The test set starts on 01-01-2023 and ends on (TODO: check the end date). This makes sure enough data is available to train the models and the test set is large enough to evaluate the models. The year 2023 is chosen as the test set because it is the most recent data available when the thesis experiments were conducted. Using data from 2022 in the test set also does not make a lot of sense because the trained models would be used to predict the future. Data from 2022 is not relevant anymore to evaluate the models.
|
||||||
|
|
||||||
\subsection{Quantile Regression}
|
|
||||||
% TODO: Talk about the different number and which quantiles are used
|
|
||||||
Forecasting the NRV is very difficult and most of the time not accurate. It is a very volatile time series and is hard to predict. Instead of just forecasting the NRV, a generative model can be trained and used to sample multiple full NRV samples for the next day. Sampling multiple times can give a better understanding of the uncertainty of the NRV. To be able to sample multiple times, a distribution of the NRV is needed. Only one value for the NRV for each quarter is available in the training and test data. There is no information on the distribution of this value.
|
|
||||||
\\\\
|
|
||||||
Quantile regression can be used to model the distribution of the NRV. This is a technique that estimates the conditional quantiles of the target variable. A quantile is a statistical value of a random variable below which a certain proportion of observations fall. Figure \ref{fig:quantile_example} shows the cumulative distribution function of a normal distribution. The figure shows the 25th, 50th and 75th quantiles. The 25th quantile is the value below which 25\% of the observations fall. In the example, this value is -0.67. The other quantiles work in the same way.
|
|
||||||
|
|
||||||
\begin{figure}[H]
|
|
||||||
\centering
|
|
||||||
\includegraphics[width=0.8\textwidth]{images/quantile_regression/cdf_quantiles_example.png}
|
|
||||||
\caption{Example of quantiles}
|
|
||||||
\label{fig:quantile_example}
|
|
||||||
\end{figure}
|
|
||||||
|
|
||||||
Instead of training the model to output the NRV forecast, the model is trained to output the values for different quantiles. The amount and which quantiles can be chosen. Experiments are done to identify what quantiles are the most useful. Using the outputted quantiles, the cumulative distribution function can be reconstructed and used to sample the NRV value for the quarter to predict. An example of the output of a quantile regression model is shown in figure \ref{fig:quantile_regression_example}. The values of the different quantiles are plotted and these are interpolated to get the cumulative distribution function.
|
|
||||||
|
|
||||||
TODO: figure goes under 0, maybe use other values or other interpolation?
|
|
||||||
\begin{figure}[H]
|
|
||||||
\centering
|
|
||||||
\includegraphics[width=0.8\textwidth]{images/quantile_regression/reconstructed_cdf.png}
|
|
||||||
\caption{Example of quantile regression output for one-quarter of the NRV, showing interpolated values for quantiles at 1\%, 5\%, 10\%, 15\%, 30\%, 40\%, 50\%, 60\%, 70\%, 85\%, 90\%, 95\%, and 99\%. These quantiles are used to reconstruct the cumulative distribution function.}
|
|
||||||
\label{fig:quantile_regression_example}
|
|
||||||
\end{figure}
|
|
||||||
|
|
||||||
|
|
||||||
TODO: reduce the use of the words NRV and cumulative distribution function
|
|
||||||
The NRV value for a quarter can be sampled from the reconstructed cumulative distribution function. A full-day prediction for the NRV consists of 96 values. The cumulative distribution function which is sampled is only used for a certain quarter.
|
|
||||||
\\\\
|
|
||||||
TODO: Explain non autoregressive and autoregressive models\\\\
|
|
||||||
Two methods exist to sample full-day NRV values. The first method is a non-autoregressive model. This model outputs the quantiles for every quarter. For each quarter, the cumulative distribution function is reconstructed and sampled. The model is conditioned on the NRV timeline of the previous day. This consists of 96 values. The second method is an autoregressive model. This model outputs the quantiles for the next quarter for which the NRV distribution is wanted. This model is conditioned on the 96 previous NRV values. When a full-day sample of the NRV is wanted, the model is used recursively. The model predicts the quantiles for the next quarter, the cumulative distribution function is reconstructed and the NRV value is sampled. This value can then be used as input for the next quarter. This process is repeated until a full-day sample is obtained. Autoregressive models suffer from the problem that errors in the prediction of early quarters propagate through the model and can lead to larger errors in later quarters.
|
|
||||||
\\\\
|
|
||||||
\subsubsection{Training} \label{subsubsec:quantile_regression_training}
|
|
||||||
The quantile regression model is trained using the pinball loss function, also known as the quantile loss. The model outputs the quantile values for the NRV. The quantile values themselves are not available in the training data. Only the real NRV values are known. The loss function is defined as:
|
|
||||||
\begin{equation}
|
|
||||||
L_\tau(y, \hat{y}) = \begin{cases}
|
|
||||||
\tau(y - \hat{y}) & \text{if } y \geq \hat{y} \\
|
|
||||||
(1 - \tau)(\hat{y} - y) & \text{if } y < \hat{y}
|
|
||||||
\end{cases}
|
|
||||||
\end{equation}
|
|
||||||
\begin{align*}
|
|
||||||
\textbf{Where:} \\
|
|
||||||
\tau & = \text{Quantile of interest} \\
|
|
||||||
y & = \text{Actual observed value of NRV} \\
|
|
||||||
\hat{y} & = \text{Predicted quantile value of NRV} \\
|
|
||||||
\end{align*}
|
|
||||||
The loss function works by penalizing underestimation and overestimation differently. When a quantile is predicted that is lower than or equal to the actual value, the loss is calculated as the difference between the actual value and the predicted quantile value multiplied by the quantile of interest. This means that underestimations for high quantiles are penalized higher than for lower quantiles.
|
|
||||||
\\\\
|
|
||||||
When the quantile value prediction is higher than the real NRV value, the loss is calculated as the difference between the predicted quantile value and the real NRV multiplied by $(1-\tau)$. This means that overestimations are penalized less for high quantiles of interest.
|
|
||||||
|
|
||||||
\begin{equation}
|
|
||||||
L = \frac{1}{N} \sum_{i=1}^{N} \sum_{\tau \in T} L_\tau(y_i, \hat{y}_i)
|
|
||||||
\end{equation}
|
|
||||||
|
|
||||||
\begin{align*}
|
|
||||||
\textbf{Where:} \\
|
|
||||||
N & = \text{Number of samples} \\
|
|
||||||
T & = \text{Quantiles of interest} \\
|
|
||||||
y_i & = \text{Actual observed value of NRV for sample i} \\
|
|
||||||
\hat{y}_i & = \text{Predicted quantile value of NRV for sample i} \\
|
|
||||||
\end{align*}
|
|
||||||
|
|
||||||
To calculate the pinball loss, the mean over the quantiles of interest and samples needs to be taken. This gives a scalar loss value which can be used to do backpropagation. The lower this value, the better the NRV distribution is modeled.
|
|
||||||
|
|
||||||
\subsubsection{Evaluation}
|
|
||||||
To evaluate the performance of the quantile regression models, multiple metrics can be used. The pinball loss itself can be used to compare models on the test set. Other metrics that can be used are the mean absolute error (MAE) and the mean squared error (MSE). This can be done by generating multiple full-day NRV samples for each day of the test set and calculating the error metrics for each of the samples. The mean can then be taken over the different samples to get a single value for the error metrics.
|
|
||||||
\\\\
|
|
||||||
MAE does not consider the direction of the error. It is the average of the absolute differences between the predicted and actual values. The formula in our case with full-day NRV samples is:
|
|
||||||
\begin{equation}
|
|
||||||
MAE = \frac{1}{N} \sum_{i=1}^{N} \frac{1}{96} \sum_{j=1}^{96} |y_{ij} - \hat{y}_{ij}|
|
|
||||||
\end{equation}
|
|
||||||
|
|
||||||
\begin{align*}
|
|
||||||
\textbf{Where:} \\
|
|
||||||
N & = \text{Number of samples} \\
|
|
||||||
y_{ij} & = \text{Actual observed value of NRV for sample i and quarter j} \\
|
|
||||||
\hat{y}_{ij} & = \text{Sampled value of NRV for sample i and quarter j} \\
|
|
||||||
\end{align*}
|
|
||||||
|
|
||||||
MSE is more sensitive to outliers than MAE because it squares the error between the predicted and actual values. The formula in our case with full-day NRV samples is:
|
|
||||||
\begin{equation}
|
|
||||||
MSE = \frac{1}{N} \sum_{i=1}^{N} \frac{1}{96} \sum_{j=1}^{96} (y_{ij} - \hat{y}_{ij})^2
|
|
||||||
\end{equation}
|
|
||||||
|
|
||||||
The MAE and MSE metrics do not compare the distribution of the NRV to the real NRV value but only take into account the sampled values. Evaluating the outputted distribution for the NRV must be done differently. The Continuous Ranked Probability Score (CRPS) can be used to compare the predicted distribution to the real NRV value. The CRPS metric is used to evaluate the accuracy of the predicted cumulative distribution function. The CRPS can be seen as a generalization of the MAE for probabilistic forecasts. The formula for the CRPS is:
|
|
||||||
|
|
||||||
\begin{equation}
|
|
||||||
CRPS(F, x) = \int_{-\infty}^{\infty} (F(y) - \mathbbm{1}(y - x))^2 \, dy
|
|
||||||
\end{equation}
|
|
||||||
|
|
||||||
\begin{align*}
|
|
||||||
\textbf{Where:} \\
|
|
||||||
F & = \text{Predicted cumulative distribution function} \\
|
|
||||||
x & = \text{Real NRV value} \\
|
|
||||||
\mathbbm{1}(x) & = \text{Heaviside function} = \begin{cases}
|
|
||||||
1 & \text{if } x \geq 0 \\
|
|
||||||
0 & \text{if } x < 0
|
|
||||||
\end{cases} \\
|
|
||||||
\end{align*}
|
|
||||||
|
|
||||||
The mean CRPS can be calculated over the different days to get a single value. The lower this value, the better the NRV is modeled. The CRPS metric can be visualized as shown in figure \ref{fig:crps_visualization}. The CRPS is the area between the predicted cumulative distribution function and the Heaviside function. The lower the area between the curves, the better the NRV is modeled.
|
|
||||||
|
|
||||||
TODO: improve visualisation? -> echte NRV + y as cummulative prob
|
|
||||||
\begin{figure}[H]
|
|
||||||
\centering
|
|
||||||
\includegraphics[width=0.8\textwidth]{images/quantile_regression/crps_visualization.png}
|
|
||||||
\caption{Visualization of the CRPS metric}
|
|
||||||
\label{fig:crps_visualization}
|
|
||||||
\end{figure}
|
|
||||||
|
|
||||||
\subsubsection{Linear Model}
|
\subsubsection{Linear Model}
|
||||||
A linear quantile regression model is used as a baseline. This model is very simple and can be used to compare the more complex models. Starting with a simple autoregressive model, the model is conditioned on the 96 previous NRV values. The model outputs the quantiles for the next quarter. The model is trained using the pinball loss function and the MAE, MSE, CRPS metrics are calculated on the test set. The linear model for the quantile regression is defined as:
|
|
||||||
|
|
||||||
TODO: better equation for linear quantile regression model
|
|
||||||
\begin{equation}
|
|
||||||
Q_{\tau}(Y | \mathbf{X}) = \beta_{0,\tau} + \beta_{1,\tau}x_1 + \beta_{2,\tau}x_2 + \ldots + \beta_{n,\tau}x_n
|
|
||||||
\end{equation}
|
|
||||||
|
|
||||||
where:
|
|
||||||
\begin{itemize}
|
|
||||||
\item \( Q_{\tau}(y | \mathbf{x}) \) is the \( \tau \)-th quantile of the conditional distribution of the NRV
|
|
||||||
\item \( \mathbf{X} \) is the input features (e.g. the 96 previous NRV values)
|
|
||||||
\item \(\beta_{0,\tau}, \beta_{1,\tau}, \beta_{2,\tau}, \ldots, \beta_{n,\tau} \) are the coefficients including the bias
|
|
||||||
\end{itemize}
|
|
||||||
|
|
||||||
% TODO: is it necessary to provide the parameter calculation?
|
% TODO: is it necessary to provide the parameter calculation?
|
||||||
The linear model outputs the values for the chosen quantiles. The total amount of parameters depends on the input features and the number of chosen quantiles. Assuming the input features are the 96 previous NRV values and 13 quantiles are chosen, the total amount of parameters is $96 * 13 + 13 = 1261$. The linear model is trained using the Adam optimizer with a learning rate of 1e-4. Early stopping is used with a patience of 5 epochs. Different sets of input features are experimented with and are compared to each other based on the previously mentioned metrics. All results are shown in Table \ref{tab:autoregressive_linear_model_baseline_results}.
|
The linear model outputs the values for the chosen quantiles. The total amount of parameters depends on the input features and the number of chosen quantiles. Assuming the input features are the 96 previous NRV values and 13 quantiles are chosen, the total amount of parameters is $96 * 13 + 13 = 1261$. The linear model is trained using the Adam optimizer with a learning rate of 1e-4. Early stopping is used with a patience of 5 epochs. Different sets of input features are experimented with and are compared to each other based on the previously mentioned metrics. All results are shown in Table \ref{tab:autoregressive_linear_model_baseline_results}.
|
||||||
\\\\
|
\\\\
|
||||||
@@ -189,8 +67,15 @@ NRV + Load + PV + Wind + Net Position & 29034.53 & \textbf{35725.42} & 131.87 &
|
|||||||
\label{tab:autoregressive_linear_model_baseline_results}
|
\label{tab:autoregressive_linear_model_baseline_results}
|
||||||
\end{table}
|
\end{table}
|
||||||
|
|
||||||
The linear model outputs the quantiles for the next quarter based on the given input features. The input features consist of previous history values of a certain feature or forecasts of a certain feature. The model, however, does not know which quarter of the day it is modeling.
|
The linear model outputs the quantiles for the next quarter based on the given input features. The input features consist of previous history values of a certain feature or forecasts of a certain feature. The model, however, does not know which quarter of the day it is modeling. This is important information because as seen in Figure \ref{fig:nrv_mean_std_over_quarter}, the mean and standard deviation of the NRV values change over the day. The model should be able to capture this information given the quarter of the day.
|
||||||
\\\\
|
|
||||||
|
\begin{figure}[ht]
|
||||||
|
\centering
|
||||||
|
\includegraphics[width=\textwidth]{images/quantile_regression/nrv_mean_std_over_quarter.png}
|
||||||
|
\caption{Mean and standard deviation of the NRV values over the quarter of the day}
|
||||||
|
\label{fig:nrv_mean_std_over_quarter}
|
||||||
|
\end{figure}
|
||||||
|
|
||||||
Multiple methods exist to provide such information to the model. The quarter of the day can be provided as a one-hot encoded vector. The cyclic nature of the quarter would not be captured using a one-hot encoded vector. The vectors for quarter 0 and quarter 95 would be very different while they should be very close to each other. Other methods exist that do take the cyclic property of the quarter into account. Trigonometric functions can be used to provide the quarter of the day information. The quarter of the day can be mapped to a sine and cosine value which can be used as input features. The sine and cosine values are calculated as follows:
|
Multiple methods exist to provide such information to the model. The quarter of the day can be provided as a one-hot encoded vector. The cyclic nature of the quarter would not be captured using a one-hot encoded vector. The vectors for quarter 0 and quarter 95 would be very different while they should be very close to each other. Other methods exist that do take the cyclic property of the quarter into account. Trigonometric functions can be used to provide the quarter of the day information. The quarter of the day can be mapped to a sine and cosine value which can be used as input features. The sine and cosine values are calculated as follows:
|
||||||
|
|
||||||
\begin{equation}
|
\begin{equation}
|
||||||
@@ -361,8 +246,6 @@ Because of error propagation in the autoregressive model, the outputted quantile
|
|||||||
% TODO: Talk about the over/underestimation of the quantiles for the models. Plots have been made for this.
|
% TODO: Talk about the over/underestimation of the quantiles for the models. Plots have been made for this.
|
||||||
|
|
||||||
\subsubsection{Non-linear Model}
|
\subsubsection{Non-linear Model}
|
||||||
More complex models, such as non-linear neural networks, offer the possibility of capturing more complex patterns in the NRV data. These patterns can include nonlinear relationships between the input features and the NRV. The training and evaluation process for these non-linear models follows the same procedure as that used for the linear model. This ensures the different models can be compared to one another using the same metrics.
|
|
||||||
\\\\
|
|
||||||
In this context, a simple feedforward neural network is trained to predict the quantiles for the NRV. The quantiles are then used to reconstruct the cumulative distribution function (CDF) for the NRV of a quarter. Predictions for the NRV can then be sampled from this reconstructed CDF. The neural network is trained using the pinball loss function explained in section \ref{subsubsec:quantile_regression_training}.
|
In this context, a simple feedforward neural network is trained to predict the quantiles for the NRV. The quantiles are then used to reconstruct the cumulative distribution function (CDF) for the NRV of a quarter. Predictions for the NRV can then be sampled from this reconstructed CDF. The neural network is trained using the pinball loss function explained in section \ref{subsubsec:quantile_regression_training}.
|
||||||
\\\\
|
\\\\
|
||||||
The architecture of the non-linear model is illustrated in Table \ref{tab:non_linear_model_architecture}. The model begins with an input layer that converts the quarter of the day into an embedding. This layer concatenates the other input features with the quarter embedding. These combined features are then processed through a sequence of layers:
|
The architecture of the non-linear model is illustrated in Table \ref{tab:non_linear_model_architecture}. The model begins with an input layer that converts the quarter of the day into an embedding. This layer concatenates the other input features with the quarter embedding. These combined features are then processed through a sequence of layers:
|
||||||
@@ -469,29 +352,8 @@ Figure \ref{fig:linear_non_linear_sample_comparison} shows a comparison between
|
|||||||
TODO: non autoregressive non-linear model for quantile regression
|
TODO: non autoregressive non-linear model for quantile regression
|
||||||
|
|
||||||
\subsubsection{GRU Model}
|
\subsubsection{GRU Model}
|
||||||
Another more complex model that can be used is a Recurrent Neural Network (RNN). The RNN can be used to model the NRV data because of the sequential nature of the input features. The RNN keeps a hidden state that is updated at every time step using the new input data. The hidden state is then used to make the prediction. At each time step, the data needed to predict the NRV of the following quarter is fed into the model. This is done for the 96 quarters in a day. The last hidden state can then be extracted from the GRU model and be used to make a prediction for the quantiles of the next quarter.
|
|
||||||
\\\\
|
|
||||||
The two most common types of RNNs are the Long Short-Term Memory (LSTM) and the Gated Recurrent Unit (GRU). The GRU is a simpler version of the LSTM. The GRU has fewer parameters which results in faster training times. The GRU still can capture long-term dependencies in the data and can achieve similar performance to the LSTM.
|
|
||||||
\\\\
|
|
||||||
\begin{figure}[H]
|
|
||||||
\centering
|
|
||||||
\includegraphics[width=0.8\textwidth]{images/quantile_regression/rnn/RNN_diagram.png}
|
|
||||||
\caption{RNN model input and output visualization}
|
|
||||||
\label{fig:rnn_model_visualization}
|
|
||||||
\end{figure}
|
|
||||||
|
|
||||||
The input features for the RNN model are carefully structured to capture the relevant information from the previous quarters and the forecasted values. Each input feature vector represents a quarter and consists of the following components:
|
|
||||||
|
|
||||||
\begin{itemize}
|
|
||||||
\item The actual NRV value from the current quarter (T-1), which provides the model with the historical context of the NRV.
|
|
||||||
\item The forecasted or real values for the next quarter (T), including load, PV, wind, and net position. If the next quarter is not the quarter to predict, the real values for that quarter are used. If the next quarter is the quarter to predict, the forecasted values are used.
|
|
||||||
\item A quarter embedding vector representing the current quarter (T-1). The embedding vector gives the model information about the time of day, which can help it learn the daily patterns in the NRV data.
|
|
||||||
\end{itemize}
|
|
||||||
|
|
||||||
The input feature structure is designed to provide the model with a comprehensive view of the previous quarters and the forecasted values for the current quarter. By incorporating both historical and forecasted information sequentially, the model can learn to predict the NRV quantiles for the next quarter more accurately.
|
|
||||||
\\\\
|
|
||||||
The GRU model architecture is shown in Table \ref{tab:gru_model_architecture}. The model starts with an embedding layer that converts the quarter of the day into an embedding. This layer concatenates the other input features with the quarter embedding. The input of the TimeEmbedding is of shape (Batch Size, Time Steps, Input Features Size). The output of this layer is then passed to the GRU layer. The GRU layer outputs the hidden state for every time step. This results in a tensor of shape (Batch Size, Time Steps, Hidden Size). Only the last hidden state is relevant for the prediction of the NRV quantiles for the next quarter. The last hidden state should contain all the necessary information from the previous quarters to make the prediction. The last hidden state is then passed through a linear layer to output the quantiles for the NRV prediction.
|
The GRU model architecture is shown in Table \ref{tab:gru_model_architecture}. The model starts with an embedding layer that converts the quarter of the day into an embedding. This layer concatenates the other input features with the quarter embedding. The input of the TimeEmbedding is of shape (Batch Size, Time Steps, Input Features Size). The output of this layer is then passed to the GRU layer. The GRU layer outputs the hidden state for every time step. This results in a tensor of shape (Batch Size, Time Steps, Hidden Size). Only the last hidden state is relevant for the prediction of the NRV quantiles for the next quarter. The last hidden state should contain all the necessary information from the previous quarters to make the prediction. The last hidden state is then passed through a linear layer to output the quantiles for the NRV prediction.
|
||||||
\\\\
|
|
||||||
TODO: Zielige visualisatie van model nu
|
TODO: Zielige visualisatie van model nu
|
||||||
\begin{table}[H]
|
\begin{table}[H]
|
||||||
\centering
|
\centering
|
||||||
|
|||||||
158
Reports/Thesis/sections/policies.aux
Normal file
158
Reports/Thesis/sections/policies.aux
Normal file
@@ -0,0 +1,158 @@
|
|||||||
|
\relax
|
||||||
|
\providecommand\hyper@newdestlabel[2]{}
|
||||||
|
\@writefile{toc}{\contentsline {section}{\numberline {4}Policies}{16}{section.4}\protected@file@percent }
|
||||||
|
\newlabel{sec:policies}{{4}{16}{Policies}{section.4}{}}
|
||||||
|
\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}Baselines}{16}{subsection.4.1}\protected@file@percent }
|
||||||
|
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Policies based on NRV generations}{17}{subsection.4.2}\protected@file@percent }
|
||||||
|
\@setckpt{sections/policies}{
|
||||||
|
\setcounter{page}{18}
|
||||||
|
\setcounter{equation}{7}
|
||||||
|
\setcounter{enumi}{0}
|
||||||
|
\setcounter{enumii}{0}
|
||||||
|
\setcounter{enumiii}{0}
|
||||||
|
\setcounter{enumiv}{0}
|
||||||
|
\setcounter{footnote}{0}
|
||||||
|
\setcounter{mpfootnote}{0}
|
||||||
|
\setcounter{part}{0}
|
||||||
|
\setcounter{section}{4}
|
||||||
|
\setcounter{subsection}{2}
|
||||||
|
\setcounter{subsubsection}{0}
|
||||||
|
\setcounter{paragraph}{0}
|
||||||
|
\setcounter{subparagraph}{0}
|
||||||
|
\setcounter{figure}{5}
|
||||||
|
\setcounter{table}{2}
|
||||||
|
\setcounter{parentequation}{0}
|
||||||
|
\setcounter{float@type}{4}
|
||||||
|
\setcounter{caption@flags}{2}
|
||||||
|
\setcounter{continuedfloat}{0}
|
||||||
|
\setcounter{subfigure}{0}
|
||||||
|
\setcounter{subtable}{0}
|
||||||
|
\setcounter{tabx@nest}{0}
|
||||||
|
\setcounter{listtotal}{0}
|
||||||
|
\setcounter{listcount}{0}
|
||||||
|
\setcounter{liststart}{0}
|
||||||
|
\setcounter{liststop}{0}
|
||||||
|
\setcounter{citecount}{0}
|
||||||
|
\setcounter{citetotal}{0}
|
||||||
|
\setcounter{multicitecount}{0}
|
||||||
|
\setcounter{multicitetotal}{0}
|
||||||
|
\setcounter{instcount}{0}
|
||||||
|
\setcounter{maxnames}{2}
|
||||||
|
\setcounter{minnames}{1}
|
||||||
|
\setcounter{maxitems}{999}
|
||||||
|
\setcounter{minitems}{1}
|
||||||
|
\setcounter{citecounter}{0}
|
||||||
|
\setcounter{maxcitecounter}{0}
|
||||||
|
\setcounter{savedcitecounter}{0}
|
||||||
|
\setcounter{uniquelist}{0}
|
||||||
|
\setcounter{uniquename}{0}
|
||||||
|
\setcounter{refsection}{0}
|
||||||
|
\setcounter{refsegment}{0}
|
||||||
|
\setcounter{maxextratitle}{0}
|
||||||
|
\setcounter{maxextratitleyear}{0}
|
||||||
|
\setcounter{maxextraname}{0}
|
||||||
|
\setcounter{maxextradate}{0}
|
||||||
|
\setcounter{maxextraalpha}{0}
|
||||||
|
\setcounter{abbrvpenalty}{50}
|
||||||
|
\setcounter{highnamepenalty}{50}
|
||||||
|
\setcounter{lownamepenalty}{25}
|
||||||
|
\setcounter{maxparens}{3}
|
||||||
|
\setcounter{parenlevel}{0}
|
||||||
|
\setcounter{blx@maxsection}{0}
|
||||||
|
\setcounter{mincomprange}{10}
|
||||||
|
\setcounter{maxcomprange}{100000}
|
||||||
|
\setcounter{mincompwidth}{1}
|
||||||
|
\setcounter{afterword}{0}
|
||||||
|
\setcounter{savedafterword}{0}
|
||||||
|
\setcounter{annotator}{0}
|
||||||
|
\setcounter{savedannotator}{0}
|
||||||
|
\setcounter{author}{0}
|
||||||
|
\setcounter{savedauthor}{0}
|
||||||
|
\setcounter{bookauthor}{0}
|
||||||
|
\setcounter{savedbookauthor}{0}
|
||||||
|
\setcounter{commentator}{0}
|
||||||
|
\setcounter{savedcommentator}{0}
|
||||||
|
\setcounter{editor}{0}
|
||||||
|
\setcounter{savededitor}{0}
|
||||||
|
\setcounter{editora}{0}
|
||||||
|
\setcounter{savededitora}{0}
|
||||||
|
\setcounter{editorb}{0}
|
||||||
|
\setcounter{savededitorb}{0}
|
||||||
|
\setcounter{editorc}{0}
|
||||||
|
\setcounter{savededitorc}{0}
|
||||||
|
\setcounter{foreword}{0}
|
||||||
|
\setcounter{savedforeword}{0}
|
||||||
|
\setcounter{holder}{0}
|
||||||
|
\setcounter{savedholder}{0}
|
||||||
|
\setcounter{introduction}{0}
|
||||||
|
\setcounter{savedintroduction}{0}
|
||||||
|
\setcounter{namea}{0}
|
||||||
|
\setcounter{savednamea}{0}
|
||||||
|
\setcounter{nameb}{0}
|
||||||
|
\setcounter{savednameb}{0}
|
||||||
|
\setcounter{namec}{0}
|
||||||
|
\setcounter{savednamec}{0}
|
||||||
|
\setcounter{translator}{0}
|
||||||
|
\setcounter{savedtranslator}{0}
|
||||||
|
\setcounter{shortauthor}{0}
|
||||||
|
\setcounter{savedshortauthor}{0}
|
||||||
|
\setcounter{shorteditor}{0}
|
||||||
|
\setcounter{savedshorteditor}{0}
|
||||||
|
\setcounter{narrator}{0}
|
||||||
|
\setcounter{savednarrator}{0}
|
||||||
|
\setcounter{execproducer}{0}
|
||||||
|
\setcounter{savedexecproducer}{0}
|
||||||
|
\setcounter{execdirector}{0}
|
||||||
|
\setcounter{savedexecdirector}{0}
|
||||||
|
\setcounter{with}{0}
|
||||||
|
\setcounter{savedwith}{0}
|
||||||
|
\setcounter{labelname}{0}
|
||||||
|
\setcounter{savedlabelname}{0}
|
||||||
|
\setcounter{institution}{0}
|
||||||
|
\setcounter{savedinstitution}{0}
|
||||||
|
\setcounter{lista}{0}
|
||||||
|
\setcounter{savedlista}{0}
|
||||||
|
\setcounter{listb}{0}
|
||||||
|
\setcounter{savedlistb}{0}
|
||||||
|
\setcounter{listc}{0}
|
||||||
|
\setcounter{savedlistc}{0}
|
||||||
|
\setcounter{listd}{0}
|
||||||
|
\setcounter{savedlistd}{0}
|
||||||
|
\setcounter{liste}{0}
|
||||||
|
\setcounter{savedliste}{0}
|
||||||
|
\setcounter{listf}{0}
|
||||||
|
\setcounter{savedlistf}{0}
|
||||||
|
\setcounter{location}{0}
|
||||||
|
\setcounter{savedlocation}{0}
|
||||||
|
\setcounter{organization}{0}
|
||||||
|
\setcounter{savedorganization}{0}
|
||||||
|
\setcounter{origlocation}{0}
|
||||||
|
\setcounter{savedoriglocation}{0}
|
||||||
|
\setcounter{origpublisher}{0}
|
||||||
|
\setcounter{savedorigpublisher}{0}
|
||||||
|
\setcounter{publisher}{0}
|
||||||
|
\setcounter{savedpublisher}{0}
|
||||||
|
\setcounter{language}{0}
|
||||||
|
\setcounter{savedlanguage}{0}
|
||||||
|
\setcounter{origlanguage}{0}
|
||||||
|
\setcounter{savedoriglanguage}{0}
|
||||||
|
\setcounter{citation}{0}
|
||||||
|
\setcounter{savedcitation}{0}
|
||||||
|
\setcounter{pageref}{0}
|
||||||
|
\setcounter{savedpageref}{0}
|
||||||
|
\setcounter{textcitecount}{0}
|
||||||
|
\setcounter{textcitetotal}{0}
|
||||||
|
\setcounter{textcitemaxnames}{0}
|
||||||
|
\setcounter{biburlbigbreakpenalty}{100}
|
||||||
|
\setcounter{biburlbreakpenalty}{200}
|
||||||
|
\setcounter{biburlnumpenalty}{0}
|
||||||
|
\setcounter{biburlucpenalty}{0}
|
||||||
|
\setcounter{biburllcpenalty}{0}
|
||||||
|
\setcounter{smartand}{1}
|
||||||
|
\setcounter{bbx:relatedcount}{0}
|
||||||
|
\setcounter{bbx:relatedtotal}{0}
|
||||||
|
\setcounter{section@level}{0}
|
||||||
|
\setcounter{Item}{0}
|
||||||
|
\setcounter{Hfootnote}{0}
|
||||||
|
\setcounter{bookmark@seq@number}{17}
|
||||||
|
}
|
||||||
18
Reports/Thesis/sections/policies.tex
Normal file
18
Reports/Thesis/sections/policies.tex
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
\section{Policies}
|
||||||
|
\label{sec:policies}
|
||||||
|
Organizations that own a battery and are active in the electricity market have to decide when to charge and discharge their battery. These decisions are based on the current state of the battery, the current state of the market, and the future state of the market. The future state of the market can be predicted using generative models like the ones discussed in the previous sections. The organizations want to maximize their profit by buying electricity when it is cheap and selling electricity when it is expensive. The policies employed decide when to charge and discharge the battery. Another important aspect of these policies is keeping the battery in a healthy state: charging and discharging a battery too much can reduce its lifetime, and the policies have to take this into account.
|
||||||
|
|
||||||
|
In this thesis, a simple policy is used to optimize the profit made by charging and discharging a battery. The policy is based on the Net Regulation Volume (NRV) predictions for the next day. This shows the potential of using NRV predictions to optimize the policy. In the real world, more complex policies can be used to optimize the profit. These policies can be trained using reinforcement learning or other optimization techniques. Multiple baseline policies are defined to compare the performance of the policy based on NRV predictions.
|
||||||
|
|
||||||
|
The simple policy uses two thresholds to decide when to charge and discharge the battery based on the imbalance price. When the imbalance price drops below the charging threshold, the battery is fully charged; when the imbalance price rises above the discharging threshold, the battery is fully discharged again. This policy is very simple and does not take some important aspects into account.
|
||||||
|
|
||||||
|
\subsection{Baselines}
|
||||||
|
% Baseline fixed thresholds
|
||||||
|
The simplest baseline policy is to define two fixed thresholds for charging and discharging the battery. These thresholds can be determined from the historical data of the imbalance price by performing a simple grid search: the thresholds that maximize the profit on the historical data are used as the fixed thresholds. During the optimization, a penalty parameter can be added to the profit function to penalize charging or discharging the battery too much.
|
||||||
|
|
||||||
|
% Baseline thresholds determined on the previous day
|
||||||
|
Another baseline policy is to determine the thresholds for charging and discharging the battery based on the NRV of the previous day. This policy rests on the assumption that the NRV of the next day will be similar to that of the previous day, so the NRV of the previous day serves as the NRV prediction for the next day. The thresholds are then determined by performing a simple grid search over this prediction. The same penalty parameter can be added to the profit function to reduce the charge cycles of the battery.
|
||||||
|
|
||||||
|
\subsection{Policies based on NRV generations}
|
||||||
|
% Policy based on NRV generations
|
||||||
|
The simple baseline policy can be combined with the NRV predictions for the next day. First, multiple full-day NRV samples are generated using a generative model; each of these samples is treated as a prediction of the NRV for the next day. The charge and discharge thresholds are determined for each sample using a simple grid search, as in the baseline policy, and the mean over all these thresholds gives the final thresholds for the next day. This results in a policy that uses the NRV samples of the generative model; it also uses the penalty parameter to reduce the charge cycles of the battery.
|
||||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -20,73 +20,89 @@
|
|||||||
\babel@aux{english}{}
|
\babel@aux{english}{}
|
||||||
\@input{sections/introduction.aux}
|
\@input{sections/introduction.aux}
|
||||||
\@input{sections/background.aux}
|
\@input{sections/background.aux}
|
||||||
\abx@aux@refcontext{nyt/apasortcite//global/global}
|
\@input{sections/policies.aux}
|
||||||
\abx@aux@cite{0}{ho2020denoising}
|
|
||||||
\abx@aux@segm{0}{0}{ho2020denoising}
|
|
||||||
\abx@aux@refcontext{nyt/apasortcite//global/global}
|
|
||||||
\abx@aux@cite{0}{ho2020denoising}
|
|
||||||
\abx@aux@segm{0}{0}{ho2020denoising}
|
|
||||||
\@input{sections/literature_study.aux}
|
\@input{sections/literature_study.aux}
|
||||||
\@writefile{toc}{\contentsline {section}{\numberline {4}TODO: Better title for this section}{10}{section.4}\protected@file@percent }
|
\abx@aux@refcontext{nyt/apasortcite//global/global}
|
||||||
\@writefile{toc}{\contentsline {section}{\numberline {5}NRV Prediction}{10}{section.5}\protected@file@percent }
|
\abx@aux@cite{0}{weron_electricity_2014}
|
||||||
\@writefile{toc}{\contentsline {subsection}{\numberline {5.1}Data}{10}{subsection.5.1}\protected@file@percent }
|
\abx@aux@segm{0}{0}{weron_electricity_2014}
|
||||||
\@writefile{toc}{\contentsline {subsection}{\numberline {5.2}Quantile Regression}{12}{subsection.5.2}\protected@file@percent }
|
\abx@aux@refcontext{nyt/apasortcite//global/global}
|
||||||
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Example of quantiles\relax }}{12}{figure.caption.4}\protected@file@percent }
|
\abx@aux@cite{0}{poggi_electricity_2023}
|
||||||
\newlabel{fig:quantile_example}{{2}{12}{Example of quantiles\relax }{figure.caption.4}{}}
|
\abx@aux@segm{0}{0}{poggi_electricity_2023}
|
||||||
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Example of quantile regression output for one-quarter of the NRV, showing interpolated values for quantiles at 1\%, 5\%, 10\%, 15\%, 30\%, 40\%, 50\%, 60\%, 70\%, 85\%, 90\%, 95\%, and 99\%. These quantiles are used to reconstruct the cumulative distribution function.\relax }}{13}{figure.caption.5}\protected@file@percent }
|
\abx@aux@refcontext{nyt/apasortcite//global/global}
|
||||||
\newlabel{fig:quantile_regression_example}{{3}{13}{Example of quantile regression output for one-quarter of the NRV, showing interpolated values for quantiles at 1\%, 5\%, 10\%, 15\%, 30\%, 40\%, 50\%, 60\%, 70\%, 85\%, 90\%, 95\%, and 99\%. These quantiles are used to reconstruct the cumulative distribution function.\relax }{figure.caption.5}{}}
|
\abx@aux@cite{0}{lu_scenarios_2022}
|
||||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.2.1}Training}{14}{subsubsection.5.2.1}\protected@file@percent }
|
\abx@aux@segm{0}{0}{lu_scenarios_2022}
|
||||||
\newlabel{subsubsec:quantile_regression_training}{{5.2.1}{14}{Training}{subsubsection.5.2.1}{}}
|
\abx@aux@refcontext{nyt/apasortcite//global/global}
|
||||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.2.2}Evaluation}{15}{subsubsection.5.2.2}\protected@file@percent }
|
\abx@aux@cite{0}{dumas_deep_2022}
|
||||||
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Visualization of the CRPS metric\relax }}{16}{figure.caption.6}\protected@file@percent }
|
\abx@aux@segm{0}{0}{dumas_deep_2022}
|
||||||
\newlabel{fig:crps_visualization}{{4}{16}{Visualization of the CRPS metric\relax }{figure.caption.6}{}}
|
\abx@aux@refcontext{nyt/apasortcite//global/global}
|
||||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.2.3}Linear Model}{16}{subsubsection.5.2.3}\protected@file@percent }
|
\abx@aux@cite{0}{rasul_autoregressive_2021}
|
||||||
\@writefile{lot}{\contentsline {table}{\numberline {3}{\ignorespaces Autoregressive linear model results\relax }}{17}{table.caption.7}\protected@file@percent }
|
\abx@aux@segm{0}{0}{rasul_autoregressive_2021}
|
||||||
\newlabel{tab:autoregressive_linear_model_baseline_results}{{3}{17}{Autoregressive linear model results\relax }{table.caption.7}{}}
|
\abx@aux@page{1}{18}
|
||||||
\@writefile{lot}{\contentsline {table}{\numberline {4}{\ignorespaces Autoregressive linear model results with time features\relax }}{18}{table.caption.8}\protected@file@percent }
|
\abx@aux@page{2}{18}
|
||||||
\newlabel{tab:autoregressive_linear_model_quarter_embedding_baseline_results}{{4}{18}{Autoregressive linear model results with time features\relax }{table.caption.8}{}}
|
\abx@aux@page{3}{18}
|
||||||
\newlabel{fig:autoregressive_linear_model_sample_1}{{5a}{19}{Sample 1\relax }{figure.caption.9}{}}
|
\abx@aux@page{4}{18}
|
||||||
\newlabel{sub@fig:autoregressive_linear_model_sample_1}{{a}{19}{Sample 1\relax }{figure.caption.9}{}}
|
\abx@aux@refcontext{nyt/apasortcite//global/global}
|
||||||
\newlabel{fig:autoregressive_linear_model_sample_2}{{5b}{19}{Sample 2\relax }{figure.caption.9}{}}
|
\abx@aux@cite{0}{dumas_deep_2022}
|
||||||
\newlabel{sub@fig:autoregressive_linear_model_sample_2}{{b}{19}{Sample 2\relax }{figure.caption.9}{}}
|
\abx@aux@segm{0}{0}{dumas_deep_2022}
|
||||||
\newlabel{fig:autoregressive_linear_model_sample_3}{{5c}{19}{Sample 3\relax }{figure.caption.9}{}}
|
\abx@aux@page{5}{19}
|
||||||
\newlabel{sub@fig:autoregressive_linear_model_sample_3}{{c}{19}{Sample 3\relax }{figure.caption.9}{}}
|
\@writefile{toc}{\contentsline {section}{\numberline {6}TODO: Better title for this section}{20}{section.6}\protected@file@percent }
|
||||||
\newlabel{fig:autoregressive_linear_model_sample_4}{{5d}{19}{Sample 4\relax }{figure.caption.9}{}}
|
\@writefile{toc}{\contentsline {section}{\numberline {7}Results \& Discussion}{20}{section.7}\protected@file@percent }
|
||||||
\newlabel{sub@fig:autoregressive_linear_model_sample_4}{{d}{19}{Sample 4\relax }{figure.caption.9}{}}
|
\@writefile{toc}{\contentsline {subsection}{\numberline {7.1}Data}{20}{subsection.7.1}\protected@file@percent }
|
||||||
\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces Test examples of the autoregressive linear model. The plots show the confidence intervals calculated from 1000 generated full-day NRV samples. The samples were generated using input features NRV, Load, Wind, PV, Net Position and the quarter embedding.\relax }}{19}{figure.caption.9}\protected@file@percent }
|
\@writefile{toc}{\contentsline {subsubsection}{\numberline {7.1.1}Linear Model}{22}{subsubsection.7.1.1}\protected@file@percent }
|
||||||
\newlabel{fig:autoregressive_linear_model_samples}{{5}{19}{Test examples of the autoregressive linear model. The plots show the confidence intervals calculated from 1000 generated full-day NRV samples. The samples were generated using input features NRV, Load, Wind, PV, Net Position and the quarter embedding.\relax }{figure.caption.9}{}}
|
\@writefile{lot}{\contentsline {table}{\numberline {3}{\ignorespaces Autoregressive linear model results\relax }}{22}{table.caption.9}\protected@file@percent }
|
||||||
\@writefile{lot}{\contentsline {table}{\numberline {5}{\ignorespaces Non-Autoregressive linear model results\relax }}{20}{table.caption.10}\protected@file@percent }
|
\newlabel{tab:autoregressive_linear_model_baseline_results}{{3}{22}{Autoregressive linear model results\relax }{table.caption.9}{}}
|
||||||
\newlabel{tab:non_autoregressive_linear_model_baseline_results}{{5}{20}{Non-Autoregressive linear model results\relax }{table.caption.10}{}}
|
\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Mean and standard deviation of the NRV values over the quarter of the day\relax }}{23}{figure.caption.10}\protected@file@percent }
|
||||||
\newlabel{fig:non_autoregressive_linear_model_sample_1}{{6a}{20}{Sample 1\relax }{figure.caption.11}{}}
|
\newlabel{fig:nrv_mean_std_over_quarter}{{6}{23}{Mean and standard deviation of the NRV values over the quarter of the day\relax }{figure.caption.10}{}}
|
||||||
\newlabel{sub@fig:non_autoregressive_linear_model_sample_1}{{a}{20}{Sample 1\relax }{figure.caption.11}{}}
|
\@writefile{lot}{\contentsline {table}{\numberline {4}{\ignorespaces Autoregressive linear model results with time features\relax }}{24}{table.caption.11}\protected@file@percent }
|
||||||
\newlabel{fig:non_autoregressive_linear_model_sample_2}{{6b}{20}{Sample 2\relax }{figure.caption.11}{}}
|
\newlabel{tab:autoregressive_linear_model_quarter_embedding_baseline_results}{{4}{24}{Autoregressive linear model results with time features\relax }{table.caption.11}{}}
|
||||||
\newlabel{sub@fig:non_autoregressive_linear_model_sample_2}{{b}{20}{Sample 2\relax }{figure.caption.11}{}}
|
\newlabel{fig:autoregressive_linear_model_sample_1}{{7a}{25}{Sample 1\relax }{figure.caption.12}{}}
|
||||||
\newlabel{fig:non_autoregressive_linear_model_sample_3}{{6c}{20}{Sample 3\relax }{figure.caption.11}{}}
|
\newlabel{sub@fig:autoregressive_linear_model_sample_1}{{a}{25}{Sample 1\relax }{figure.caption.12}{}}
|
||||||
\newlabel{sub@fig:non_autoregressive_linear_model_sample_3}{{c}{20}{Sample 3\relax }{figure.caption.11}{}}
|
\newlabel{fig:autoregressive_linear_model_sample_2}{{7b}{25}{Sample 2\relax }{figure.caption.12}{}}
|
||||||
\newlabel{fig:non_autoregressive_linear_model_sample_4}{{6d}{20}{Sample 4\relax }{figure.caption.11}{}}
|
\newlabel{sub@fig:autoregressive_linear_model_sample_2}{{b}{25}{Sample 2\relax }{figure.caption.12}{}}
|
||||||
\newlabel{sub@fig:non_autoregressive_linear_model_sample_4}{{d}{20}{Sample 4\relax }{figure.caption.11}{}}
|
\newlabel{fig:autoregressive_linear_model_sample_3}{{7c}{25}{Sample 3\relax }{figure.caption.12}{}}
|
||||||
\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Test examples of the non-autoregressive linear model. The plots show the confidence intervals calculated from 1000 generated full-day NRV samples. The samples were generated using input features NRV, Load, Wind, PV and the Net Position.\relax }}{20}{figure.caption.11}\protected@file@percent }
|
\newlabel{sub@fig:autoregressive_linear_model_sample_3}{{c}{25}{Sample 3\relax }{figure.caption.12}{}}
|
||||||
\newlabel{fig:non_autoregressive_linear_model_samples}{{6}{20}{Test examples of the non-autoregressive linear model. The plots show the confidence intervals calculated from 1000 generated full-day NRV samples. The samples were generated using input features NRV, Load, Wind, PV and the Net Position.\relax }{figure.caption.11}{}}
|
\newlabel{fig:autoregressive_linear_model_sample_4}{{7d}{25}{Sample 4\relax }{figure.caption.12}{}}
|
||||||
\@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces Comparison of the autoregressive and non-autoregressive linear models samples. The plots show ten samples of the full-day NRV for the autoregressive and non-autoregressive linear models. The samples were generated using input features NRV, Load, Wind, PV, and the Net Position. The autoregressive model also uses the quarter embedding with 5 dimensions.\relax }}{21}{figure.caption.12}\protected@file@percent }
|
\newlabel{sub@fig:autoregressive_linear_model_sample_4}{{d}{25}{Sample 4\relax }{figure.caption.12}{}}
|
||||||
\newlabel{fig:linear_model_sample_comparison}{{7}{21}{Comparison of the autoregressive and non-autoregressive linear models samples. The plots show ten samples of the full-day NRV for the autoregressive and non-autoregressive linear models. The samples were generated using input features NRV, Load, Wind, PV, and the Net Position. The autoregressive model also uses the quarter embedding with 5 dimensions.\relax }{figure.caption.12}{}}
|
\@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces Test examples of the autoregressive linear model. The plots show the confidence intervals calculated from 1000 generated full-day NRV samples. The samples were generated using input features NRV, Load, Wind, PV, Net Position and the quarter embedding.\relax }}{25}{figure.caption.12}\protected@file@percent }
|
||||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.2.4}Non-linear Model}{22}{subsubsection.5.2.4}\protected@file@percent }
|
\newlabel{fig:autoregressive_linear_model_samples}{{7}{25}{Test examples of the autoregressive linear model. The plots show the confidence intervals calculated from 1000 generated full-day NRV samples. The samples were generated using input features NRV, Load, Wind, PV, Net Position and the quarter embedding.\relax }{figure.caption.12}{}}
|
||||||
\@writefile{lot}{\contentsline {table}{\numberline {6}{\ignorespaces Non-linear Quantile Regression Model Architecture\relax }}{23}{table.caption.13}\protected@file@percent }
|
\@writefile{toc}{\contentsline {subsubsection}{\numberline {7.1.2}Non-linear Model}{25}{subsubsection.7.1.2}\protected@file@percent }
|
||||||
\newlabel{tab:non_linear_model_architecture}{{6}{23}{Non-linear Quantile Regression Model Architecture\relax }{table.caption.13}{}}
|
\@writefile{lot}{\contentsline {table}{\numberline {5}{\ignorespaces Non-Autoregressive linear model results\relax }}{26}{table.caption.13}\protected@file@percent }
|
||||||
\@writefile{lot}{\contentsline {table}{\numberline {7}{\ignorespaces Autoregressive non-linear quantile regression model results. All the models used a dropout of 0.2 .\relax }}{23}{table.caption.14}\protected@file@percent }
|
\newlabel{tab:non_autoregressive_linear_model_baseline_results}{{5}{26}{Non-Autoregressive linear model results\relax }{table.caption.13}{}}
|
||||||
\newlabel{tab:autoregressive_non_linear_model_results}{{7}{23}{Autoregressive non-linear quantile regression model results. All the models used a dropout of 0.2 .\relax }{table.caption.14}{}}
|
\newlabel{fig:non_autoregressive_linear_model_sample_1}{{8a}{27}{Sample 1\relax }{figure.caption.14}{}}
|
||||||
\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces Comparison for examples from test set between the autoregressive linear and non-linear models. The plots show the confidence intervals calculated from 1000 generated full-day NRV samples. The samples were generated using input features NRV, Load, Wind, PV, Net Position and the quarter embedding. The non-linear model used 8 layers with a hidden size of 256 and a dropout rate of 0.2.\relax }}{24}{figure.caption.15}\protected@file@percent }
|
\newlabel{sub@fig:non_autoregressive_linear_model_sample_1}{{a}{27}{Sample 1\relax }{figure.caption.14}{}}
|
||||||
\newlabel{fig:linear_non_linear_sample_comparison}{{8}{24}{Comparison for examples from test set between the autoregressive linear and non-linear models. The plots show the confidence intervals calculated from 1000 generated full-day NRV samples. The samples were generated using input features NRV, Load, Wind, PV, Net Position and the quarter embedding. The non-linear model used 8 layers with a hidden size of 256 and a dropout rate of 0.2.\relax }{figure.caption.15}{}}
|
\newlabel{fig:non_autoregressive_linear_model_sample_2}{{8b}{27}{Sample 2\relax }{figure.caption.14}{}}
|
||||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.2.5}GRU Model}{25}{subsubsection.5.2.5}\protected@file@percent }
|
\newlabel{sub@fig:non_autoregressive_linear_model_sample_2}{{b}{27}{Sample 2\relax }{figure.caption.14}{}}
|
||||||
\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces RNN model input and output visualization\relax }}{25}{figure.caption.16}\protected@file@percent }
|
\newlabel{fig:non_autoregressive_linear_model_sample_3}{{8c}{27}{Sample 3\relax }{figure.caption.14}{}}
|
||||||
\newlabel{fig:rnn_model_visualization}{{9}{25}{RNN model input and output visualization\relax }{figure.caption.16}{}}
|
\newlabel{sub@fig:non_autoregressive_linear_model_sample_3}{{c}{27}{Sample 3\relax }{figure.caption.14}{}}
|
||||||
\@writefile{lot}{\contentsline {table}{\numberline {8}{\ignorespaces GRU Model Architecture\relax }}{26}{table.caption.17}\protected@file@percent }
|
\newlabel{fig:non_autoregressive_linear_model_sample_4}{{8d}{27}{Sample 4\relax }{figure.caption.14}{}}
|
||||||
\newlabel{tab:gru_model_architecture}{{8}{26}{GRU Model Architecture\relax }{table.caption.17}{}}
|
\newlabel{sub@fig:non_autoregressive_linear_model_sample_4}{{d}{27}{Sample 4\relax }{figure.caption.14}{}}
|
||||||
\@writefile{lot}{\contentsline {table}{\numberline {9}{\ignorespaces Autoregressive GRU quantile regression model results. All the models used a dropout of 0.2 .\relax }}{27}{table.caption.18}\protected@file@percent }
|
\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces Test examples of the non-autoregressive linear model. The plots show the confidence intervals calculated from 1000 generated full-day NRV samples. The samples were generated using input features NRV, Load, Wind, PV and the Net Position.\relax }}{27}{figure.caption.14}\protected@file@percent }
|
||||||
\newlabel{tab:autoregressive_gru_model_results}{{9}{27}{Autoregressive GRU quantile regression model results. All the models used a dropout of 0.2 .\relax }{table.caption.18}{}}
|
\newlabel{fig:non_autoregressive_linear_model_samples}{{8}{27}{Test examples of the non-autoregressive linear model. The plots show the confidence intervals calculated from 1000 generated full-day NRV samples. The samples were generated using input features NRV, Load, Wind, PV and the Net Position.\relax }{figure.caption.14}{}}
|
||||||
\@writefile{toc}{\contentsline {subsection}{\numberline {5.3}Diffusion}{28}{subsection.5.3}\protected@file@percent }
|
\@writefile{lot}{\contentsline {table}{\numberline {6}{\ignorespaces Non-linear Quantile Regression Model Architecture\relax }}{27}{table.caption.16}\protected@file@percent }
|
||||||
\@writefile{toc}{\contentsline {section}{\numberline {6}Policies for battery optimization}{28}{section.6}\protected@file@percent }
|
\newlabel{tab:non_linear_model_architecture}{{6}{27}{Non-linear Quantile Regression Model Architecture\relax }{table.caption.16}{}}
|
||||||
\@writefile{toc}{\contentsline {subsection}{\numberline {6.1}Baselines}{28}{subsection.6.1}\protected@file@percent }
|
\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces Comparison of the autoregressive and non-autoregressive linear models samples. The plots show ten samples of the full-day NRV for the autoregressive and non-autoregressive linear models. The samples were generated using input features NRV, Load, Wind, PV, and the Net Position. The autoregressive model also uses the quarter embedding with 5 dimensions.\relax }}{28}{figure.caption.15}\protected@file@percent }
|
||||||
\@writefile{toc}{\contentsline {subsection}{\numberline {6.2}Policies using NRV predictions}{28}{subsection.6.2}\protected@file@percent }
|
\newlabel{fig:linear_model_sample_comparison}{{9}{28}{Comparison of the autoregressive and non-autoregressive linear models samples. The plots show ten samples of the full-day NRV for the autoregressive and non-autoregressive linear models. The samples were generated using input features NRV, Load, Wind, PV, and the Net Position. The autoregressive model also uses the quarter embedding with 5 dimensions.\relax }{figure.caption.15}{}}
|
||||||
\abx@aux@read@bbl@mdfivesum{D41D8CD98F00B204E9800998ECF8427E}
|
\@writefile{lot}{\contentsline {table}{\numberline {7}{\ignorespaces Autoregressive non-linear quantile regression model results. All the models used a dropout of 0.2 .\relax }}{28}{table.caption.17}\protected@file@percent }
|
||||||
|
\newlabel{tab:autoregressive_non_linear_model_results}{{7}{28}{Autoregressive non-linear quantile regression model results. All the models used a dropout of 0.2 .\relax }{table.caption.17}{}}
|
||||||
|
\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces Comparison for examples from test set between the autoregressive linear and non-linear models. The plots show the confidence intervals calculated from 1000 generated full-day NRV samples. The samples were generated using input features NRV, Load, Wind, PV, Net Position and the quarter embedding. The non-linear model used 8 layers with a hidden size of 256 and a dropout rate of 0.2.\relax }}{29}{figure.caption.18}\protected@file@percent }
|
||||||
|
\newlabel{fig:linear_non_linear_sample_comparison}{{10}{29}{Comparison for examples from test set between the autoregressive linear and non-linear models. The plots show the confidence intervals calculated from 1000 generated full-day NRV samples. The samples were generated using input features NRV, Load, Wind, PV, Net Position and the quarter embedding. The non-linear model used 8 layers with a hidden size of 256 and a dropout rate of 0.2.\relax }{figure.caption.18}{}}
|
||||||
|
\@writefile{toc}{\contentsline {subsubsection}{\numberline {7.1.3}GRU Model}{30}{subsubsection.7.1.3}\protected@file@percent }
|
||||||
|
\@writefile{lot}{\contentsline {table}{\numberline {8}{\ignorespaces GRU Model Architecture\relax }}{30}{table.caption.19}\protected@file@percent }
|
||||||
|
\newlabel{tab:gru_model_architecture}{{8}{30}{GRU Model Architecture\relax }{table.caption.19}{}}
|
||||||
|
\@writefile{lot}{\contentsline {table}{\numberline {9}{\ignorespaces Autoregressive GRU quantile regression model results. All the models used a dropout of 0.2 .\relax }}{31}{table.caption.20}\protected@file@percent }
|
||||||
|
\newlabel{tab:autoregressive_gru_model_results}{{9}{31}{Autoregressive GRU quantile regression model results. All the models used a dropout of 0.2 .\relax }{table.caption.20}{}}
|
||||||
|
\@writefile{toc}{\contentsline {subsection}{\numberline {7.2}Diffusion}{32}{subsection.7.2}\protected@file@percent }
|
||||||
|
\@writefile{toc}{\contentsline {section}{\numberline {8}Policies for battery optimization}{32}{section.8}\protected@file@percent }
|
||||||
|
\@writefile{toc}{\contentsline {subsection}{\numberline {8.1}Baselines}{32}{subsection.8.1}\protected@file@percent }
|
||||||
|
\@writefile{toc}{\contentsline {subsection}{\numberline {8.2}Policies using NRV predictions}{32}{subsection.8.2}\protected@file@percent }
|
||||||
|
\abx@aux@page{6}{33}
|
||||||
|
\abx@aux@page{7}{33}
|
||||||
|
\abx@aux@page{8}{33}
|
||||||
|
\abx@aux@page{9}{33}
|
||||||
|
\abx@aux@read@bbl@mdfivesum{5DC935CC8C8FAB8A3CAF97A486ED2386}
|
||||||
\abx@aux@read@bblrerun
|
\abx@aux@read@bblrerun
|
||||||
\gdef \@abspage@last{29}
|
\abx@aux@defaultrefcontext{0}{dumas_deep_2022}{nyt/global//global/global}
|
||||||
|
\abx@aux@defaultrefcontext{0}{lu_scenarios_2022}{nyt/global//global/global}
|
||||||
|
\abx@aux@defaultrefcontext{0}{poggi_electricity_2023}{nyt/global//global/global}
|
||||||
|
\abx@aux@defaultrefcontext{0}{weron_electricity_2014}{nyt/global//global/global}
|
||||||
|
\gdef \@abspage@last{34}
|
||||||
|
|||||||
@@ -0,0 +1,561 @@
|
|||||||
|
% $ biblatex auxiliary file $
|
||||||
|
% $ biblatex bbl format version 3.2 $
|
||||||
|
% Do not modify the above lines!
|
||||||
|
%
|
||||||
|
% This is an auxiliary file used by the 'biblatex' package.
|
||||||
|
% This file may safely be deleted. It will be recreated by
|
||||||
|
% biber as required.
|
||||||
|
%
|
||||||
|
\begingroup
|
||||||
|
\makeatletter
|
||||||
|
\@ifundefined{ver@biblatex.sty}
|
||||||
|
{\@latex@error
|
||||||
|
{Missing 'biblatex' package}
|
||||||
|
{The bibliography requires the 'biblatex' package.}
|
||||||
|
\aftergroup\endinput}
|
||||||
|
{}
|
||||||
|
\endgroup
|
||||||
|
|
||||||
|
|
||||||
|
\refsection{0}
|
||||||
|
\datalist[entry]{nyt/apasortcite//global/global}
|
||||||
|
\entry{dumas_deep_2022}{article}{}
|
||||||
|
\name{author}{5}{}{%
|
||||||
|
{{un=0,uniquepart=base,hash=bc1b38697de64bfe3f5e7876e531bd45}{%
|
||||||
|
family={Dumas},
|
||||||
|
familyi={D\bibinitperiod},
|
||||||
|
given={Jonathan},
|
||||||
|
giveni={J\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=8de9ff5d0722fc618c35312130499a63}{%
|
||||||
|
family={Wehenkel},
|
||||||
|
familyi={W\bibinitperiod},
|
||||||
|
given={Antoine},
|
||||||
|
giveni={A\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=26d23470f883355413d99765a97d26f9}{%
|
||||||
|
family={Lanaspeze},
|
||||||
|
familyi={L\bibinitperiod},
|
||||||
|
given={Damien},
|
||||||
|
giveni={D\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=62ac593786e28c2e56fab28d272346bf}{%
|
||||||
|
family={Cornélusse},
|
||||||
|
familyi={C\bibinitperiod},
|
||||||
|
given={Bertrand},
|
||||||
|
giveni={B\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=2b5aabf60e51678797e395af8c441fd7}{%
|
||||||
|
family={Sutera},
|
||||||
|
familyi={S\bibinitperiod},
|
||||||
|
given={Antonio},
|
||||||
|
giveni={A\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
}
|
||||||
|
\strng{namehash}{cac0f5a33afb491da6830cd9b00071a2}
|
||||||
|
\strng{fullhash}{176facf650dcf7220eec24af7e81dc53}
|
||||||
|
\strng{bibnamehash}{176facf650dcf7220eec24af7e81dc53}
|
||||||
|
\strng{authorbibnamehash}{176facf650dcf7220eec24af7e81dc53}
|
||||||
|
\strng{authornamehash}{cac0f5a33afb491da6830cd9b00071a2}
|
||||||
|
\strng{authorfullhash}{176facf650dcf7220eec24af7e81dc53}
|
||||||
|
\field{sortinit}{D}
|
||||||
|
\field{sortinithash}{6f385f66841fb5e82009dc833c761848}
|
||||||
|
\field{extradatescope}{labelyear}
|
||||||
|
\field{labeldatesource}{}
|
||||||
|
\true{uniqueprimaryauthor}
|
||||||
|
\field{labelnamesource}{author}
|
||||||
|
\field{labeltitlesource}{shorttitle}
|
||||||
|
\field{abstract}{Greater direct electrification of end-use sectors with a higher share of renewables is one of the pillars to power a carbon-neutral society by 2050. However, in contrast to conventional power plants, renewable energy is subject to uncertainty raising challenges for their interaction with power systems. Scenario-based probabilistic forecasting models have become a vital tool to equip decision-makers. This paper presents to the power systems forecasting practitioners a recent deep learning technique, the normalizing flows, to produce accurate scenario-based probabilistic forecasts that are crucial to face the new challenges in power systems applications. The strength of this technique is to directly learn the stochastic multivariate distribution of the underlying process by maximizing the likelihood. Through comprehensive empirical evaluations using the open data of the Global Energy Forecasting Competition 2014, we demonstrate that this methodology is competitive with other state-of-the-art deep learning generative models: generative adversarial networks and variational autoencoders. The models producing weather-based wind, solar power, and load scenarios are properly compared in terms of forecast value by considering the case study of an energy retailer and quality using several complementary metrics. The numerical experiments are simple and easily reproducible. Thus, we hope it will encourage other forecasting practitioners to test and use normalizing flows in power system applications such as bidding on electricity markets, scheduling power systems with high renewable energy sources penetration, energy management of virtual power plan or microgrids, and unit commitment.}
|
||||||
|
\field{issn}{03062619}
|
||||||
|
\field{journaltitle}{Applied Energy}
|
||||||
|
\field{langid}{english}
|
||||||
|
\field{month}{1}
|
||||||
|
\field{shortjournal}{Applied Energy}
|
||||||
|
\field{shorttitle}{A deep generative model for probabilistic energy forecasting in power systems}
|
||||||
|
\field{title}{A deep generative model for probabilistic energy forecasting in power systems: normalizing flows}
|
||||||
|
\field{urlday}{11}
|
||||||
|
\field{urlmonth}{10}
|
||||||
|
\field{urlyear}{2023}
|
||||||
|
\field{volume}{305}
|
||||||
|
\field{year}{2022}
|
||||||
|
\field{dateera}{ce}
|
||||||
|
\field{urldateera}{ce}
|
||||||
|
\field{pages}{117871}
|
||||||
|
\range{pages}{1}
|
||||||
|
\verb{doi}
|
||||||
|
\verb 10.1016/j.apenergy.2021.117871
|
||||||
|
\endverb
|
||||||
|
\verb{file}
|
||||||
|
\verb Dumas et al. - 2022 - A deep generative model for probabilistic energy f.pdf:/Users/victormylle/Zotero/storage/3CW249QI/Dumas et al. - 2022 - A deep generative model for probabilistic energy f.pdf:application/pdf
|
||||||
|
\endverb
|
||||||
|
\verb{urlraw}
|
||||||
|
\verb https://linkinghub.elsevier.com/retrieve/pii/S0306261921011909
|
||||||
|
\endverb
|
||||||
|
\verb{url}
|
||||||
|
\verb https://linkinghub.elsevier.com/retrieve/pii/S0306261921011909
|
||||||
|
\endverb
|
||||||
|
\endentry
|
||||||
|
\entry{lu_scenarios_2022}{article}{}
|
||||||
|
\name{author}{4}{}{%
|
||||||
|
{{un=0,uniquepart=base,hash=e20b6fceb410a42e1abe17804a826487}{%
|
||||||
|
family={Lu},
|
||||||
|
familyi={L\bibinitperiod},
|
||||||
|
given={Xin},
|
||||||
|
giveni={X\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=b8739ebbf3f871d471b6720a02ed541f}{%
|
||||||
|
family={Qiu},
|
||||||
|
familyi={Q\bibinitperiod},
|
||||||
|
given={Jing},
|
||||||
|
giveni={J\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=0747ee8b32f11d5702821c50e1913c80}{%
|
||||||
|
family={Lei},
|
||||||
|
familyi={L\bibinitperiod},
|
||||||
|
given={Gang},
|
||||||
|
giveni={G\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=182338f1495a183751feca529c98f0b8}{%
|
||||||
|
family={Zhu},
|
||||||
|
familyi={Z\bibinitperiod},
|
||||||
|
given={Jianguo},
|
||||||
|
giveni={J\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
}
|
||||||
|
\strng{namehash}{81685244731524c537a08650b8b97caa}
|
||||||
|
\strng{fullhash}{eb24983b890cffceb6c682e309bc533b}
|
||||||
|
\strng{bibnamehash}{eb24983b890cffceb6c682e309bc533b}
|
||||||
|
\strng{authorbibnamehash}{eb24983b890cffceb6c682e309bc533b}
|
||||||
|
\strng{authornamehash}{81685244731524c537a08650b8b97caa}
|
||||||
|
\strng{authorfullhash}{eb24983b890cffceb6c682e309bc533b}
|
||||||
|
\field{sortinit}{L}
|
||||||
|
\field{sortinithash}{7c47d417cecb1f4bd38d1825c427a61a}
|
||||||
|
\field{extradatescope}{labelyear}
|
||||||
|
\field{labeldatesource}{}
|
||||||
|
\true{uniqueprimaryauthor}
|
||||||
|
\field{labelnamesource}{author}
|
||||||
|
\field{labeltitlesource}{shorttitle}
|
||||||
|
\field{abstract}{Electricity prices in spot markets are volatile and can be affected by various factors, such as generation and demand, system contingencies, local weather patterns, bidding strategies of market participants, and uncertain renewable energy outputs. Because of these factors, electricity price forecasting is challenging. This paper proposes a scenario modeling approach to improve forecasting accuracy, conditioning time series generative adversarial networks on external factors. After data pre-processing and condition selection, a conditional {TSGAN} or {CTSGAN} is designed to forecast electricity prices. Wasserstein Distance, weights limitation, and {RMSProp} optimizer are used to ensure that the {CTGAN} training process is stable. By changing the dimensionality of random noise input, the point forecasting model can be transformed into a probabilistic forecasting model. For electricity price point forecasting, the proposed {CTSGAN} model has better accuracy and has better generalization ability than the {TSGAN} and other deep learning methods. For probabilistic forecasting, the proposed {CTSGAN} model can significantly improve the continuously ranked probability score and Winkler score. The effectiveness and superiority of the proposed {CTSGAN} forecasting model are verified by case studies.}
|
||||||
|
\field{day}{15}
|
||||||
|
\field{issn}{0306-2619}
|
||||||
|
\field{journaltitle}{Applied Energy}
|
||||||
|
\field{month}{2}
|
||||||
|
\field{shortjournal}{Applied Energy}
|
||||||
|
\field{shorttitle}{Scenarios modelling for forecasting day-ahead electricity prices}
|
||||||
|
\field{title}{Scenarios modelling for forecasting day-ahead electricity prices: Case studies in Australia}
|
||||||
|
\field{urlday}{13}
|
||||||
|
\field{urlmonth}{10}
|
||||||
|
\field{urlyear}{2023}
|
||||||
|
\field{volume}{308}
|
||||||
|
\field{year}{2022}
|
||||||
|
\field{dateera}{ce}
|
||||||
|
\field{urldateera}{ce}
|
||||||
|
\field{pages}{118296}
|
||||||
|
\range{pages}{1}
|
||||||
|
\verb{doi}
|
||||||
|
\verb 10.1016/j.apenergy.2021.118296
|
||||||
|
\endverb
|
||||||
|
\verb{file}
|
||||||
|
\verb Lu et al. - 2022 - Scenarios modelling for forecasting day-ahead elec.pdf:/Users/victormylle/Zotero/storage/3XL3T253/Lu et al. - 2022 - Scenarios modelling for forecasting day-ahead elec.pdf:application/pdf;ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/9K2RFGGU/S0306261921015555.html:text/html
|
||||||
|
\endverb
|
||||||
|
\verb{urlraw}
|
||||||
|
\verb https://www.sciencedirect.com/science/article/pii/S0306261921015555
|
||||||
|
\endverb
|
||||||
|
\verb{url}
|
||||||
|
\verb https://www.sciencedirect.com/science/article/pii/S0306261921015555
|
||||||
|
\endverb
|
||||||
|
\keyw{Generative adversarial networks,Conditions,Electricity Price,Point forecasting,Probabilistic forecasting}
|
||||||
|
\endentry
|
||||||
|
\entry{poggi_electricity_2023}{article}{}
|
||||||
|
\name{author}{3}{}{%
|
||||||
|
{{un=0,uniquepart=base,hash=d5449fc584ab2f2182b0b791e9e2524e}{%
|
||||||
|
family={Poggi},
|
||||||
|
familyi={P\bibinitperiod},
|
||||||
|
given={Aurora},
|
||||||
|
giveni={A\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=2263c4fd4598d3f295a8f0f20909866b}{%
|
||||||
|
family={Di\bibnamedelima Persio},
|
||||||
|
familyi={D\bibinitperiod\bibinitdelim P\bibinitperiod},
|
||||||
|
given={Luca},
|
||||||
|
giveni={L\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=06162f840633e2adae140efa3535ba7a}{%
|
||||||
|
family={Ehrhardt},
|
||||||
|
familyi={E\bibinitperiod},
|
||||||
|
given={Matthias},
|
||||||
|
giveni={M\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
}
|
||||||
|
\strng{namehash}{29cf9f4a8a7a7514cf77120f6bd399d1}
|
||||||
|
\strng{fullhash}{7f87567729d3046538f23b0d733313fe}
|
||||||
|
\strng{bibnamehash}{7f87567729d3046538f23b0d733313fe}
|
||||||
|
\strng{authorbibnamehash}{7f87567729d3046538f23b0d733313fe}
|
||||||
|
\strng{authornamehash}{29cf9f4a8a7a7514cf77120f6bd399d1}
|
||||||
|
\strng{authorfullhash}{7f87567729d3046538f23b0d733313fe}
|
||||||
|
\field{sortinit}{P}
|
||||||
|
\field{sortinithash}{ff3bcf24f47321b42cb156c2cc8a8422}
|
||||||
|
\field{extradatescope}{labelyear}
|
||||||
|
\field{labeldatesource}{}
|
||||||
|
\true{uniqueprimaryauthor}
|
||||||
|
\field{labelnamesource}{author}
|
||||||
|
\field{labeltitlesource}{shorttitle}
|
||||||
|
\field{abstract}{Our research involves analyzing the latest models used for electricity price forecasting, which include both traditional inferential statistical methods and newer deep learning techniques. Through our analysis of historical data and the use of multiple weekday dummies, we have proposed an innovative solution for forecasting electricity spot prices. This solution involves breaking down the spot price series into two components: a seasonal trend component and a stochastic component. By utilizing this approach, we are able to provide highly accurate predictions for all considered time frames.}
|
||||||
|
\field{issn}{2673-9909}
|
||||||
|
\field{journaltitle}{{AppliedMath}}
|
||||||
|
\field{langid}{english}
|
||||||
|
\field{month}{6}
|
||||||
|
\field{note}{Number: 2 Publisher: Multidisciplinary Digital Publishing Institute}
|
||||||
|
\field{number}{2}
|
||||||
|
\field{shorttitle}{Electricity Price Forecasting via Statistical and Deep Learning Approaches}
|
||||||
|
\field{title}{Electricity Price Forecasting via Statistical and Deep Learning Approaches: The German Case}
|
||||||
|
\field{urlday}{2}
|
||||||
|
\field{urlmonth}{5}
|
||||||
|
\field{urlyear}{2024}
|
||||||
|
\field{volume}{3}
|
||||||
|
\field{year}{2023}
|
||||||
|
\field{dateera}{ce}
|
||||||
|
\field{urldateera}{ce}
|
||||||
|
\field{pages}{316\bibrangedash 342}
|
||||||
|
\range{pages}{27}
|
||||||
|
\verb{doi}
|
||||||
|
\verb 10.3390/appliedmath3020018
|
||||||
|
\endverb
|
||||||
|
\verb{file}
|
||||||
|
\verb Full Text PDF:/Users/victormylle/Zotero/storage/3IR29RU3/Poggi et al. - 2023 - Electricity Price Forecasting via Statistical and .pdf:application/pdf
|
||||||
|
\endverb
|
||||||
|
\verb{urlraw}
|
||||||
|
\verb https://www.mdpi.com/2673-9909/3/2/18
|
||||||
|
\endverb
|
||||||
|
\verb{url}
|
||||||
|
\verb https://www.mdpi.com/2673-9909/3/2/18
|
||||||
|
\endverb
|
||||||
|
\keyw{autoregressive,deep learning,electricity price forecasting,machine learning,neural network,statistical method,univariate model}
|
||||||
|
\endentry
|
||||||
|
\entry{weron_electricity_2014}{article}{}
|
||||||
|
\name{author}{1}{}{%
|
||||||
|
{{un=0,uniquepart=base,hash=e1f0ef6630db56bf45bb6ba2fbf8f108}{%
|
||||||
|
family={Weron},
|
||||||
|
familyi={W\bibinitperiod},
|
||||||
|
given={Rafał},
|
||||||
|
giveni={R\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
}
|
||||||
|
\strng{namehash}{e1f0ef6630db56bf45bb6ba2fbf8f108}
|
||||||
|
\strng{fullhash}{e1f0ef6630db56bf45bb6ba2fbf8f108}
|
||||||
|
\strng{bibnamehash}{e1f0ef6630db56bf45bb6ba2fbf8f108}
|
||||||
|
\strng{authorbibnamehash}{e1f0ef6630db56bf45bb6ba2fbf8f108}
|
||||||
|
\strng{authornamehash}{e1f0ef6630db56bf45bb6ba2fbf8f108}
|
||||||
|
\strng{authorfullhash}{e1f0ef6630db56bf45bb6ba2fbf8f108}
|
||||||
|
\field{sortinit}{W}
|
||||||
|
\field{sortinithash}{4315d78024d0cea9b57a0c6f0e35ed0d}
|
||||||
|
\field{extradatescope}{labelyear}
|
||||||
|
\field{labeldatesource}{}
|
||||||
|
\true{uniqueprimaryauthor}
|
||||||
|
\field{labelnamesource}{author}
|
||||||
|
\field{labeltitlesource}{shorttitle}
|
||||||
|
\field{abstract}{A variety of methods and ideas have been tried for electricity price forecasting ({EPF}) over the last 15 years, with varying degrees of success. This review article aims to explain the complexity of available solutions, their strengths and weaknesses, and the opportunities and threats that the forecasting tools offer or that may be encountered. The paper also looks ahead and speculates on the directions {EPF} will or should take in the next decade or so. In particular, it postulates the need for objective comparative {EPF} studies involving (i) the same datasets, (ii) the same robust error evaluation procedures, and (iii) statistical testing of the significance of one model’s outperformance of another.}
|
||||||
|
\field{day}{1}
|
||||||
|
\field{issn}{0169-2070}
|
||||||
|
\field{journaltitle}{International Journal of Forecasting}
|
||||||
|
\field{month}{10}
|
||||||
|
\field{number}{4}
|
||||||
|
\field{shortjournal}{International Journal of Forecasting}
|
||||||
|
\field{shorttitle}{Electricity price forecasting}
|
||||||
|
\field{title}{Electricity price forecasting: A review of the state-of-the-art with a look into the future}
|
||||||
|
\field{urlday}{2}
|
||||||
|
\field{urlmonth}{5}
|
||||||
|
\field{urlyear}{2024}
|
||||||
|
\field{volume}{30}
|
||||||
|
\field{year}{2014}
|
||||||
|
\field{dateera}{ce}
|
||||||
|
\field{urldateera}{ce}
|
||||||
|
\field{pages}{1030\bibrangedash 1081}
|
||||||
|
\range{pages}{52}
|
||||||
|
\verb{doi}
|
||||||
|
\verb 10.1016/j.ijforecast.2014.08.008
|
||||||
|
\endverb
|
||||||
|
\verb{file}
|
||||||
|
\verb ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/DDGF263F/S0169207014001083.html:text/html
|
||||||
|
\endverb
|
||||||
|
\verb{urlraw}
|
||||||
|
\verb https://www.sciencedirect.com/science/article/pii/S0169207014001083
|
||||||
|
\endverb
|
||||||
|
\verb{url}
|
||||||
|
\verb https://www.sciencedirect.com/science/article/pii/S0169207014001083
|
||||||
|
\endverb
|
||||||
|
\keyw{Autoregression,Day-ahead market,Electricity price forecasting,Factor model,Forecast combination,Neural network,Probabilistic forecast,Seasonality}
|
||||||
|
\endentry
|
||||||
|
\enddatalist
|
||||||
|
\datalist[entry]{nyt/global//global/global}
|
||||||
|
\entry{dumas_deep_2022}{article}{}
|
||||||
|
\name{author}{5}{}{%
|
||||||
|
{{un=0,uniquepart=base,hash=bc1b38697de64bfe3f5e7876e531bd45}{%
|
||||||
|
family={Dumas},
|
||||||
|
familyi={D\bibinitperiod},
|
||||||
|
given={Jonathan},
|
||||||
|
giveni={J\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=8de9ff5d0722fc618c35312130499a63}{%
|
||||||
|
family={Wehenkel},
|
||||||
|
familyi={W\bibinitperiod},
|
||||||
|
given={Antoine},
|
||||||
|
giveni={A\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=26d23470f883355413d99765a97d26f9}{%
|
||||||
|
family={Lanaspeze},
|
||||||
|
familyi={L\bibinitperiod},
|
||||||
|
given={Damien},
|
||||||
|
giveni={D\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=62ac593786e28c2e56fab28d272346bf}{%
|
||||||
|
family={Cornélusse},
|
||||||
|
familyi={C\bibinitperiod},
|
||||||
|
given={Bertrand},
|
||||||
|
giveni={B\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=2b5aabf60e51678797e395af8c441fd7}{%
|
||||||
|
family={Sutera},
|
||||||
|
familyi={S\bibinitperiod},
|
||||||
|
given={Antonio},
|
||||||
|
giveni={A\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
}
|
||||||
|
\strng{namehash}{cac0f5a33afb491da6830cd9b00071a2}
|
||||||
|
\strng{fullhash}{176facf650dcf7220eec24af7e81dc53}
|
||||||
|
\strng{bibnamehash}{176facf650dcf7220eec24af7e81dc53}
|
||||||
|
\strng{authorbibnamehash}{176facf650dcf7220eec24af7e81dc53}
|
||||||
|
\strng{authornamehash}{cac0f5a33afb491da6830cd9b00071a2}
|
||||||
|
\strng{authorfullhash}{176facf650dcf7220eec24af7e81dc53}
|
||||||
|
\field{sortinit}{D}
|
||||||
|
\field{sortinithash}{6f385f66841fb5e82009dc833c761848}
|
||||||
|
\field{extradatescope}{labelyear}
|
||||||
|
\field{labeldatesource}{}
|
||||||
|
\true{uniqueprimaryauthor}
|
||||||
|
\field{labelnamesource}{author}
|
||||||
|
\field{labeltitlesource}{shorttitle}
|
||||||
|
\field{abstract}{Greater direct electrification of end-use sectors with a higher share of renewables is one of the pillars to power a carbon-neutral society by 2050. However, in contrast to conventional power plants, renewable energy is subject to uncertainty raising challenges for their interaction with power systems. Scenario-based probabilistic forecasting models have become a vital tool to equip decision-makers. This paper presents to the power systems forecasting practitioners a recent deep learning technique, the normalizing flows, to produce accurate scenario-based probabilistic forecasts that are crucial to face the new challenges in power systems applications. The strength of this technique is to directly learn the stochastic multivariate distribution of the underlying process by maximizing the likelihood. Through comprehensive empirical evaluations using the open data of the Global Energy Forecasting Competition 2014, we demonstrate that this methodology is competitive with other state-of-the-art deep learning generative models: generative adversarial networks and variational autoencoders. The models producing weather-based wind, solar power, and load scenarios are properly compared in terms of forecast value by considering the case study of an energy retailer and quality using several complementary metrics. The numerical experiments are simple and easily reproducible. Thus, we hope it will encourage other forecasting practitioners to test and use normalizing flows in power system applications such as bidding on electricity markets, scheduling power systems with high renewable energy sources penetration, energy management of virtual power plan or microgrids, and unit commitment.}
|
||||||
|
\field{issn}{03062619}
|
||||||
|
\field{journaltitle}{Applied Energy}
|
||||||
|
\field{langid}{english}
|
||||||
|
\field{month}{1}
|
||||||
|
\field{shortjournal}{Applied Energy}
|
||||||
|
\field{shorttitle}{A deep generative model for probabilistic energy forecasting in power systems}
|
||||||
|
\field{title}{A deep generative model for probabilistic energy forecasting in power systems: normalizing flows}
|
||||||
|
\field{urlday}{11}
|
||||||
|
\field{urlmonth}{10}
|
||||||
|
\field{urlyear}{2023}
|
||||||
|
\field{volume}{305}
|
||||||
|
\field{year}{2022}
|
||||||
|
\field{dateera}{ce}
|
||||||
|
\field{urldateera}{ce}
|
||||||
|
\field{pages}{117871}
|
||||||
|
\range{pages}{1}
|
||||||
|
\verb{doi}
|
||||||
|
\verb 10.1016/j.apenergy.2021.117871
|
||||||
|
\endverb
|
||||||
|
\verb{file}
|
||||||
|
\verb Dumas et al. - 2022 - A deep generative model for probabilistic energy f.pdf:/Users/victormylle/Zotero/storage/3CW249QI/Dumas et al. - 2022 - A deep generative model for probabilistic energy f.pdf:application/pdf
|
||||||
|
\endverb
|
||||||
|
\verb{urlraw}
|
||||||
|
\verb https://linkinghub.elsevier.com/retrieve/pii/S0306261921011909
|
||||||
|
\endverb
|
||||||
|
\verb{url}
|
||||||
|
\verb https://linkinghub.elsevier.com/retrieve/pii/S0306261921011909
|
||||||
|
\endverb
|
||||||
|
\endentry
|
||||||
|
\entry{lu_scenarios_2022}{article}{}
|
||||||
|
\name{author}{4}{}{%
|
||||||
|
{{un=0,uniquepart=base,hash=e20b6fceb410a42e1abe17804a826487}{%
|
||||||
|
family={Lu},
|
||||||
|
familyi={L\bibinitperiod},
|
||||||
|
given={Xin},
|
||||||
|
giveni={X\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=b8739ebbf3f871d471b6720a02ed541f}{%
|
||||||
|
family={Qiu},
|
||||||
|
familyi={Q\bibinitperiod},
|
||||||
|
given={Jing},
|
||||||
|
giveni={J\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=0747ee8b32f11d5702821c50e1913c80}{%
|
||||||
|
family={Lei},
|
||||||
|
familyi={L\bibinitperiod},
|
||||||
|
given={Gang},
|
||||||
|
giveni={G\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=182338f1495a183751feca529c98f0b8}{%
|
||||||
|
family={Zhu},
|
||||||
|
familyi={Z\bibinitperiod},
|
||||||
|
given={Jianguo},
|
||||||
|
giveni={J\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
}
|
||||||
|
\strng{namehash}{81685244731524c537a08650b8b97caa}
|
||||||
|
\strng{fullhash}{eb24983b890cffceb6c682e309bc533b}
|
||||||
|
\strng{bibnamehash}{eb24983b890cffceb6c682e309bc533b}
|
||||||
|
\strng{authorbibnamehash}{eb24983b890cffceb6c682e309bc533b}
|
||||||
|
\strng{authornamehash}{81685244731524c537a08650b8b97caa}
|
||||||
|
\strng{authorfullhash}{eb24983b890cffceb6c682e309bc533b}
|
||||||
|
\field{sortinit}{L}
|
||||||
|
\field{sortinithash}{7c47d417cecb1f4bd38d1825c427a61a}
|
||||||
|
\field{extradatescope}{labelyear}
|
||||||
|
\field{labeldatesource}{}
|
||||||
|
\true{uniqueprimaryauthor}
|
||||||
|
\field{labelnamesource}{author}
|
||||||
|
\field{labeltitlesource}{shorttitle}
|
||||||
|
\field{abstract}{Electricity prices in spot markets are volatile and can be affected by various factors, such as generation and demand, system contingencies, local weather patterns, bidding strategies of market participants, and uncertain renewable energy outputs. Because of these factors, electricity price forecasting is challenging. This paper proposes a scenario modeling approach to improve forecasting accuracy, conditioning time series generative adversarial networks on external factors. After data pre-processing and condition selection, a conditional {TSGAN} or {CTSGAN} is designed to forecast electricity prices. Wasserstein Distance, weights limitation, and {RMSProp} optimizer are used to ensure that the {CTGAN} training process is stable. By changing the dimensionality of random noise input, the point forecasting model can be transformed into a probabilistic forecasting model. For electricity price point forecasting, the proposed {CTSGAN} model has better accuracy and has better generalization ability than the {TSGAN} and other deep learning methods. For probabilistic forecasting, the proposed {CTSGAN} model can significantly improve the continuously ranked probability score and Winkler score. The effectiveness and superiority of the proposed {CTSGAN} forecasting model are verified by case studies.}
|
||||||
|
\field{day}{15}
|
||||||
|
\field{issn}{0306-2619}
|
||||||
|
\field{journaltitle}{Applied Energy}
|
||||||
|
\field{month}{2}
|
||||||
|
\field{shortjournal}{Applied Energy}
|
||||||
|
\field{shorttitle}{Scenarios modelling for forecasting day-ahead electricity prices}
|
||||||
|
\field{title}{Scenarios modelling for forecasting day-ahead electricity prices: Case studies in Australia}
|
||||||
|
\field{urlday}{13}
|
||||||
|
\field{urlmonth}{10}
|
||||||
|
\field{urlyear}{2023}
|
||||||
|
\field{volume}{308}
|
||||||
|
\field{year}{2022}
|
||||||
|
\field{dateera}{ce}
|
||||||
|
\field{urldateera}{ce}
|
||||||
|
\field{pages}{118296}
|
||||||
|
\range{pages}{1}
|
||||||
|
\verb{doi}
|
||||||
|
\verb 10.1016/j.apenergy.2021.118296
|
||||||
|
\endverb
|
||||||
|
\verb{file}
|
||||||
|
\verb Lu et al. - 2022 - Scenarios modelling for forecasting day-ahead elec.pdf:/Users/victormylle/Zotero/storage/3XL3T253/Lu et al. - 2022 - Scenarios modelling for forecasting day-ahead elec.pdf:application/pdf;ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/9K2RFGGU/S0306261921015555.html:text/html
|
||||||
|
\endverb
|
||||||
|
\verb{urlraw}
|
||||||
|
\verb https://www.sciencedirect.com/science/article/pii/S0306261921015555
|
||||||
|
\endverb
|
||||||
|
\verb{url}
|
||||||
|
\verb https://www.sciencedirect.com/science/article/pii/S0306261921015555
|
||||||
|
\endverb
|
||||||
|
\keyw{Generative adversarial networks,Conditions,Electricity Price,Point forecasting,Probabilistic forecasting}
|
||||||
|
\endentry
|
||||||
|
\entry{poggi_electricity_2023}{article}{}
|
||||||
|
\name{author}{3}{}{%
|
||||||
|
{{un=0,uniquepart=base,hash=d5449fc584ab2f2182b0b791e9e2524e}{%
|
||||||
|
family={Poggi},
|
||||||
|
familyi={P\bibinitperiod},
|
||||||
|
given={Aurora},
|
||||||
|
giveni={A\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=2263c4fd4598d3f295a8f0f20909866b}{%
|
||||||
|
family={Di\bibnamedelima Persio},
|
||||||
|
familyi={D\bibinitperiod\bibinitdelim P\bibinitperiod},
|
||||||
|
given={Luca},
|
||||||
|
giveni={L\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
{{un=0,uniquepart=base,hash=06162f840633e2adae140efa3535ba7a}{%
|
||||||
|
family={Ehrhardt},
|
||||||
|
familyi={E\bibinitperiod},
|
||||||
|
given={Matthias},
|
||||||
|
giveni={M\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
}
|
||||||
|
\strng{namehash}{29cf9f4a8a7a7514cf77120f6bd399d1}
|
||||||
|
\strng{fullhash}{7f87567729d3046538f23b0d733313fe}
|
||||||
|
\strng{bibnamehash}{7f87567729d3046538f23b0d733313fe}
|
||||||
|
\strng{authorbibnamehash}{7f87567729d3046538f23b0d733313fe}
|
||||||
|
\strng{authornamehash}{29cf9f4a8a7a7514cf77120f6bd399d1}
|
||||||
|
\strng{authorfullhash}{7f87567729d3046538f23b0d733313fe}
|
||||||
|
\field{sortinit}{P}
|
||||||
|
\field{sortinithash}{ff3bcf24f47321b42cb156c2cc8a8422}
|
||||||
|
\field{extradatescope}{labelyear}
|
||||||
|
\field{labeldatesource}{}
|
||||||
|
\true{uniqueprimaryauthor}
|
||||||
|
\field{labelnamesource}{author}
|
||||||
|
\field{labeltitlesource}{shorttitle}
|
||||||
|
\field{abstract}{Our research involves analyzing the latest models used for electricity price forecasting, which include both traditional inferential statistical methods and newer deep learning techniques. Through our analysis of historical data and the use of multiple weekday dummies, we have proposed an innovative solution for forecasting electricity spot prices. This solution involves breaking down the spot price series into two components: a seasonal trend component and a stochastic component. By utilizing this approach, we are able to provide highly accurate predictions for all considered time frames.}
|
||||||
|
\field{issn}{2673-9909}
|
||||||
|
\field{journaltitle}{{AppliedMath}}
|
||||||
|
\field{langid}{english}
|
||||||
|
\field{month}{6}
|
||||||
|
\field{note}{Number: 2 Publisher: Multidisciplinary Digital Publishing Institute}
|
||||||
|
\field{number}{2}
|
||||||
|
\field{shorttitle}{Electricity Price Forecasting via Statistical and Deep Learning Approaches}
|
||||||
|
\field{title}{Electricity Price Forecasting via Statistical and Deep Learning Approaches: The German Case}
|
||||||
|
\field{urlday}{2}
|
||||||
|
\field{urlmonth}{5}
|
||||||
|
\field{urlyear}{2024}
|
||||||
|
\field{volume}{3}
|
||||||
|
\field{year}{2023}
|
||||||
|
\field{dateera}{ce}
|
||||||
|
\field{urldateera}{ce}
|
||||||
|
\field{pages}{316\bibrangedash 342}
|
||||||
|
\range{pages}{27}
|
||||||
|
\verb{doi}
|
||||||
|
\verb 10.3390/appliedmath3020018
|
||||||
|
\endverb
|
||||||
|
\verb{file}
|
||||||
|
\verb Full Text PDF:/Users/victormylle/Zotero/storage/3IR29RU3/Poggi et al. - 2023 - Electricity Price Forecasting via Statistical and .pdf:application/pdf
|
||||||
|
\endverb
|
||||||
|
\verb{urlraw}
|
||||||
|
\verb https://www.mdpi.com/2673-9909/3/2/18
|
||||||
|
\endverb
|
||||||
|
\verb{url}
|
||||||
|
\verb https://www.mdpi.com/2673-9909/3/2/18
|
||||||
|
\endverb
|
||||||
|
\keyw{autoregressive,deep learning,electricity price forecasting,machine learning,neural network,statistical method,univariate model}
|
||||||
|
\endentry
|
||||||
|
\entry{weron_electricity_2014}{article}{}
|
||||||
|
\name{author}{1}{}{%
|
||||||
|
{{un=0,uniquepart=base,hash=e1f0ef6630db56bf45bb6ba2fbf8f108}{%
|
||||||
|
family={Weron},
|
||||||
|
familyi={W\bibinitperiod},
|
||||||
|
given={Rafał},
|
||||||
|
giveni={R\bibinitperiod},
|
||||||
|
givenun=0}}%
|
||||||
|
}
|
||||||
|
\strng{namehash}{e1f0ef6630db56bf45bb6ba2fbf8f108}
|
||||||
|
\strng{fullhash}{e1f0ef6630db56bf45bb6ba2fbf8f108}
|
||||||
|
\strng{bibnamehash}{e1f0ef6630db56bf45bb6ba2fbf8f108}
|
||||||
|
\strng{authorbibnamehash}{e1f0ef6630db56bf45bb6ba2fbf8f108}
|
||||||
|
\strng{authornamehash}{e1f0ef6630db56bf45bb6ba2fbf8f108}
|
||||||
|
\strng{authorfullhash}{e1f0ef6630db56bf45bb6ba2fbf8f108}
|
||||||
|
\field{sortinit}{W}
|
||||||
|
\field{sortinithash}{4315d78024d0cea9b57a0c6f0e35ed0d}
|
||||||
|
\field{extradatescope}{labelyear}
|
||||||
|
\field{labeldatesource}{}
|
||||||
|
\true{uniqueprimaryauthor}
|
||||||
|
\field{labelnamesource}{author}
|
||||||
|
\field{labeltitlesource}{shorttitle}
|
||||||
|
\field{abstract}{A variety of methods and ideas have been tried for electricity price forecasting ({EPF}) over the last 15 years, with varying degrees of success. This review article aims to explain the complexity of available solutions, their strengths and weaknesses, and the opportunities and threats that the forecasting tools offer or that may be encountered. The paper also looks ahead and speculates on the directions {EPF} will or should take in the next decade or so. In particular, it postulates the need for objective comparative {EPF} studies involving (i) the same datasets, (ii) the same robust error evaluation procedures, and (iii) statistical testing of the significance of one model’s outperformance of another.}
|
||||||
|
\field{day}{1}
|
||||||
|
\field{issn}{0169-2070}
|
||||||
|
\field{journaltitle}{International Journal of Forecasting}
|
||||||
|
\field{month}{10}
|
||||||
|
\field{number}{4}
|
||||||
|
\field{shortjournal}{International Journal of Forecasting}
|
||||||
|
\field{shorttitle}{Electricity price forecasting}
|
||||||
|
\field{title}{Electricity price forecasting: A review of the state-of-the-art with a look into the future}
|
||||||
|
\field{urlday}{2}
|
||||||
|
\field{urlmonth}{5}
|
||||||
|
\field{urlyear}{2024}
|
||||||
|
\field{volume}{30}
|
||||||
|
\field{year}{2014}
|
||||||
|
\field{dateera}{ce}
|
||||||
|
\field{urldateera}{ce}
|
||||||
|
\field{pages}{1030\bibrangedash 1081}
|
||||||
|
\range{pages}{52}
|
||||||
|
\verb{doi}
|
||||||
|
\verb 10.1016/j.ijforecast.2014.08.008
|
||||||
|
\endverb
|
||||||
|
\verb{file}
|
||||||
|
\verb ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/DDGF263F/S0169207014001083.html:text/html
|
||||||
|
\endverb
|
||||||
|
\verb{urlraw}
|
||||||
|
\verb https://www.sciencedirect.com/science/article/pii/S0169207014001083
|
||||||
|
\endverb
|
||||||
|
\verb{url}
|
||||||
|
\verb https://www.sciencedirect.com/science/article/pii/S0169207014001083
|
||||||
|
\endverb
|
||||||
|
\keyw{Autoregression,Day-ahead market,Electricity price forecasting,Factor model,Forecast combination,Neural network,Probabilistic forecast,Seasonality}
|
||||||
|
\endentry
|
||||||
|
\enddatalist
|
||||||
|
\endrefsection
|
||||||
|
\endinput
|
||||||
|
|
||||||
|
|||||||
@@ -2829,8 +2829,12 @@
|
|||||||
<bcf:datasource type="file" datatype="bibtex" glob="false">./references.bib</bcf:datasource>
|
<bcf:datasource type="file" datatype="bibtex" glob="false">./references.bib</bcf:datasource>
|
||||||
</bcf:bibdata>
|
</bcf:bibdata>
|
||||||
<bcf:section number="0">
|
<bcf:section number="0">
|
||||||
<bcf:citekey order="1" intorder="1">ho2020denoising</bcf:citekey>
|
<bcf:citekey order="1" intorder="1">weron_electricity_2014</bcf:citekey>
|
||||||
<bcf:citekey order="2" intorder="1">ho2020denoising</bcf:citekey>
|
<bcf:citekey order="2" intorder="1">poggi_electricity_2023</bcf:citekey>
|
||||||
|
<bcf:citekey order="3" intorder="1">lu_scenarios_2022</bcf:citekey>
|
||||||
|
<bcf:citekey order="4" intorder="1">dumas_deep_2022</bcf:citekey>
|
||||||
|
<bcf:citekey order="5" intorder="1">rasul_autoregressive_2021</bcf:citekey>
|
||||||
|
<bcf:citekey order="6" intorder="1">dumas_deep_2022</bcf:citekey>
|
||||||
</bcf:section>
|
</bcf:section>
|
||||||
<!-- SORTING TEMPLATES -->
|
<!-- SORTING TEMPLATES -->
|
||||||
<bcf:sortingtemplate name="nyt">
|
<bcf:sortingtemplate name="nyt">
|
||||||
@@ -2871,4 +2875,13 @@
|
|||||||
uniquenametemplatename="global"
|
uniquenametemplatename="global"
|
||||||
labelalphanametemplatename="global">
|
labelalphanametemplatename="global">
|
||||||
</bcf:datalist>
|
</bcf:datalist>
|
||||||
|
<bcf:datalist section="0"
|
||||||
|
name="nyt/global//global/global"
|
||||||
|
type="entry"
|
||||||
|
sortingtemplatename="nyt"
|
||||||
|
sortingnamekeytemplatename="global"
|
||||||
|
labelprefix=""
|
||||||
|
uniquenametemplatename="global"
|
||||||
|
labelalphanametemplatename="global">
|
||||||
|
</bcf:datalist>
|
||||||
</bcf:controlfile>
|
</bcf:controlfile>
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
This is pdfTeX, Version 3.141592653-2.6-1.40.25 (TeX Live 2023) (preloaded format=pdflatex 2023.9.17) 25 APR 2024 14:08
|
This is pdfTeX, Version 3.141592653-2.6-1.40.25 (TeX Live 2023) (preloaded format=pdflatex 2023.9.17) 3 MAY 2024 17:59
|
||||||
entering extended mode
|
entering extended mode
|
||||||
restricted \write18 enabled.
|
restricted \write18 enabled.
|
||||||
file:line:error style messages enabled.
|
file:line:error style messages enabled.
|
||||||
@@ -857,7 +857,7 @@ File: T1LinuxLibertineT-TLF.fd 2017/03/20 (autoinst) Font definitions for T1/Lin
|
|||||||
)
|
)
|
||||||
LaTeX Font Info: Font shape `T1/LinuxLibertineT-TLF/m/n' will be
|
LaTeX Font Info: Font shape `T1/LinuxLibertineT-TLF/m/n' will be
|
||||||
(Font) scaled to size 12.0pt on input line 127.
|
(Font) scaled to size 12.0pt on input line 127.
|
||||||
(./verslag.aux (./sections/introduction.aux) (./sections/background.aux) (./sections/literature_study.aux))
|
(./verslag.aux (./sections/introduction.aux) (./sections/background.aux) (./sections/policies.aux) (./sections/literature_study.aux))
|
||||||
\openout1 = `verslag.aux'.
|
\openout1 = `verslag.aux'.
|
||||||
|
|
||||||
LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 127.
|
LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 127.
|
||||||
@@ -1029,7 +1029,7 @@ LaTeX Font Info: Font shape `T1/LinuxBiolinumT-TLF/m/n' will be
|
|||||||
|
|
||||||
|
|
||||||
pdfTeX warning: pdflatex (file ./ea-en.pdf): PDF inclusion: found PDF version <1.7>, but at most version <1.5> allowed
|
pdfTeX warning: pdflatex (file ./ea-en.pdf): PDF inclusion: found PDF version <1.7>, but at most version <1.5> allowed
|
||||||
<ea-en.pdf, id=92, 192.05753pt x 64.01918pt>
|
<ea-en.pdf, id=124, 192.05753pt x 64.01918pt>
|
||||||
File: ea-en.pdf Graphic file (type pdf)
|
File: ea-en.pdf Graphic file (type pdf)
|
||||||
<use ea-en.pdf>
|
<use ea-en.pdf>
|
||||||
Package pdftex.def Info: ea-en.pdf used on input line 135.
|
Package pdftex.def Info: ea-en.pdf used on input line 135.
|
||||||
@@ -1105,7 +1105,7 @@ Overfull \hbox (33.0pt too wide) in paragraph at lines 135--135
|
|||||||
|
|
||||||
|
|
||||||
pdfTeX warning: pdflatex (file ./ugent-en.pdf): PDF inclusion: found PDF version <1.7>, but at most version <1.5> allowed
|
pdfTeX warning: pdflatex (file ./ugent-en.pdf): PDF inclusion: found PDF version <1.7>, but at most version <1.5> allowed
|
||||||
<ugent-en.pdf, id=93, 106.69862pt x 85.3589pt>
|
<ugent-en.pdf, id=125, 106.69862pt x 85.3589pt>
|
||||||
File: ugent-en.pdf Graphic file (type pdf)
|
File: ugent-en.pdf Graphic file (type pdf)
|
||||||
<use ugent-en.pdf>
|
<use ugent-en.pdf>
|
||||||
Package pdftex.def Info: ugent-en.pdf used on input line 135.
|
Package pdftex.def Info: ugent-en.pdf used on input line 135.
|
||||||
@@ -1138,420 +1138,301 @@ l.143 \newpage
|
|||||||
]
|
]
|
||||||
\openout2 = `sections/background.aux'.
|
\openout2 = `sections/background.aux'.
|
||||||
|
|
||||||
(./sections/background.tex
|
(./sections/background.tex [3
|
||||||
LaTeX Font Info: Font shape `T1/LinuxBiolinumT-TLF/m/n' will be
|
|
||||||
(Font) scaled to size 14.4pt on input line 9.
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 37--44
|
|
||||||
|
|
||||||
|
] [4]
|
||||||
|
Underfull \hbox (badness 10000) in paragraph at lines 75--80
|
||||||
|
|
||||||
[]
|
[]
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 37--44
|
Underfull \hbox (badness 10000) in paragraph at lines 75--80
|
||||||
|
|
||||||
[]
|
[]
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 37--44
|
Overfull \hbox (4.77582pt too wide) in paragraph at lines 92--94
|
||||||
|
[]\T1/LinuxLibertineT-TLF/m/n/12 There ex-ist many dif-fer-ent types of gen-er-a-tive mod-els. Some of the most pop-u-lar ones are:
|
||||||
[]
|
[]
|
||||||
|
|
||||||
[3
|
LaTeX Font Info: Trying to load font information for TS1+LinuxLibertineT-TLF on input line 94.
|
||||||
|
(/usr/local/texlive/2023/texmf-dist/tex/latex/libertine/TS1LinuxLibertineT-TLF.fd
|
||||||
|
|
||||||
|
|
||||||
]
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 45--53
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 45--53
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 54--58
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
[4]
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 76--81
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 76--81
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 87--95
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 87--95
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 87--95
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Overfull \hbox (4.77582pt too wide) in paragraph at lines 87--95
|
|
||||||
\T1/LinuxLibertineT-TLF/m/n/12 There ex-ist many dif-fer-ent types of gen-er-a-tive mod-els. Some of the most pop-u-lar ones are:
|
|
||||||
[]
|
|
||||||
|
|
||||||
[5{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusMI.enc}]
|
|
||||||
LaTeX Font Info: Trying to load font information for TS1+LinuxLibertineT-TLF on input line 95.
|
|
||||||
(/usr/local/texlive/2023/texmf-dist/tex/latex/libertine/TS1LinuxLibertineT-TLF.fd
|
|
||||||
File: TS1LinuxLibertineT-TLF.fd 2017/03/20 (autoinst) Font definitions for TS1/LinuxLibertineT-TLF.
|
File: TS1LinuxLibertineT-TLF.fd 2017/03/20 (autoinst) Font definitions for TS1/LinuxLibertineT-TLF.
|
||||||
)
|
)
|
||||||
LaTeX Font Info: Font shape `TS1/LinuxLibertineT-TLF/m/n' will be
|
LaTeX Font Info: Font shape `TS1/LinuxLibertineT-TLF/m/n' will be
|
||||||
(Font) scaled to size 12.0pt on input line 95.
|
(Font) scaled to size 12.0pt on input line 94.
|
||||||
|
[5{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusMI.enc}]
|
||||||
|
LaTeX Font Info: Font shape `T1/LinuxBiolinumT-TLF/m/n' will be
|
||||||
|
(Font) scaled to size 14.4pt on input line 100.
|
||||||
|
<images/quantile_regression/cdf_quantiles_example.png, id=197, 722.7pt x 433.62pt>
|
||||||
|
File: images/quantile_regression/cdf_quantiles_example.png Graphic file (type png)
|
||||||
|
<use images/quantile_regression/cdf_quantiles_example.png>
|
||||||
|
Package pdftex.def Info: images/quantile_regression/cdf_quantiles_example.png used on input line 107.
|
||||||
|
(pdftex.def) Requested size: 364.19667pt x 218.51653pt.
|
||||||
|
[6{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertine/lbtn_naooyc.enc} <./images/quantile_regression/cdf_quantiles_example.png>]
|
||||||
|
<images/quantile_regression/reconstructed_cdf.png, id=209, 722.7pt x 433.62pt>
|
||||||
|
File: images/quantile_regression/reconstructed_cdf.png Graphic file (type png)
|
||||||
|
<use images/quantile_regression/reconstructed_cdf.png>
|
||||||
|
Package pdftex.def Info: images/quantile_regression/reconstructed_cdf.png used on input line 117.
|
||||||
|
(pdftex.def) Requested size: 364.19667pt x 218.51653pt.
|
||||||
|
LaTeX Font Info: Font shape `T1/LinuxLibertineT-TLF/m/n' will be
|
||||||
|
(Font) scaled to size 8.0pt on input line 127.
|
||||||
|
LaTeX Font Info: Font shape `T1/LinuxLibertineT-TLF/m/n' will be
|
||||||
|
(Font) scaled to size 6.0pt on input line 127.
|
||||||
|
[7{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusMR.enc}{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusEX.enc}{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusSYM.enc} <./images/quantile_regression/reconstructed_cdf.png>] [8] [9]
|
||||||
|
<images/quantile_regression/rnn/RNN_diagram.png, id=230, 753.81625pt x 324.21124pt>
|
||||||
|
File: images/quantile_regression/rnn/RNN_diagram.png Graphic file (type png)
|
||||||
|
<use images/quantile_regression/rnn/RNN_diagram.png>
|
||||||
|
Package pdftex.def Info: images/quantile_regression/rnn/RNN_diagram.png used on input line 204.
|
||||||
|
(pdftex.def) Requested size: 364.19667pt x 156.63872pt.
|
||||||
|
[10] [11 <./images/quantile_regression/rnn/RNN_diagram.png>]
|
||||||
|
<images/diffusion/Generation-with-Diffusion-Models.png, id=242, 926.00159pt x 228.96758pt>
|
||||||
|
File: images/diffusion/Generation-with-Diffusion-Models.png Graphic file (type png)
|
||||||
|
<use images/diffusion/Generation-with-Diffusion-Models.png>
|
||||||
|
Package pdftex.def Info: images/diffusion/Generation-with-Diffusion-Models.png used on input line 232.
|
||||||
|
(pdftex.def) Requested size: 364.19667pt x 90.05513pt.
|
||||||
|
[12 <./images/diffusion/Generation-with-Diffusion-Models.png>] [13{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusBMR.enc}{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusBB.enc}]
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 101--104
|
Package caption Warning: \label without proper reference on input line 279.
|
||||||
|
See the caption package documentation for explanation.
|
||||||
[]
|
|
||||||
|
|
||||||
[6{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertine/lbtn_naooyc.enc}]
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 112--115
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 120--125
|
LaTeX Warning: Reference `fig:diffusion_process' on page 14 undefined on input line 272.
|
||||||
|
|
||||||
[]
|
<images/diffusion/diffusion_graphical_model.png, id=257, 979.66pt x 185.69376pt>
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 120--125
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 126--131
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 126--131
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
[7{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusMR.enc}{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusBMR.enc}{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusEX.enc}]
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 132--135
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
<images/diffusion/diffusion_graphical_model.png, id=171, 979.66pt x 185.69376pt>
|
|
||||||
File: images/diffusion/diffusion_graphical_model.png Graphic file (type png)
|
File: images/diffusion/diffusion_graphical_model.png Graphic file (type png)
|
||||||
<use images/diffusion/diffusion_graphical_model.png>
|
<use images/diffusion/diffusion_graphical_model.png>
|
||||||
Package pdftex.def Info: images/diffusion/diffusion_graphical_model.png used on input line 145.
|
Package pdftex.def Info: images/diffusion/diffusion_graphical_model.png used on input line 276.
|
||||||
(pdftex.def) Requested size: 364.19667pt x 69.03145pt.
|
(pdftex.def) Requested size: 364.19667pt x 69.03145pt.
|
||||||
|
[14 <./images/diffusion/diffusion_graphical_model.png>]
|
||||||
|
LaTeX Font Info: Trying to load font information for U+bbm on input line 305.
|
||||||
|
(/usr/local/texlive/2023/texmf-dist/tex/latex/bbm-macros/ubbm.fd
|
||||||
|
File: ubbm.fd 1999/03/15 V 1.2 Font definition for bbm font - TH
|
||||||
|
)
|
||||||
|
<images/quantile_regression/crps_visualization.png, id=267, 611.4042pt x 395.3169pt>
|
||||||
|
File: images/quantile_regression/crps_visualization.png Graphic file (type png)
|
||||||
|
<use images/quantile_regression/crps_visualization.png>
|
||||||
|
Package pdftex.def Info: images/quantile_regression/crps_visualization.png used on input line 323.
|
||||||
|
(pdftex.def) Requested size: 364.19667pt x 235.4849pt.
|
||||||
|
) [15 <./images/quantile_regression/crps_visualization.png>]
|
||||||
|
\openout2 = `sections/policies.aux'.
|
||||||
|
|
||||||
LaTeX Warning: Citation 'ho2020denoising' on page 8 undefined on input line 147.
|
(./sections/policies.tex) [16
|
||||||
|
|
||||||
|
|
||||||
LaTeX Warning: Citation 'ho2020denoising' on page 8 undefined on input line 147.
|
|
||||||
|
|
||||||
) [8 <./images/diffusion/diffusion_graphical_model.png>]
|
] [17]
|
||||||
\openout2 = `sections/literature_study.aux'.
|
\openout2 = `sections/literature_study.aux'.
|
||||||
|
|
||||||
(./sections/literature_study.tex) [9
|
(./sections/literature_study.tex
|
||||||
|
|
||||||
|
LaTeX Warning: Citation 'rasul_autoregressive_2021' on page 18 undefined on input line 9.
|
||||||
|
|
||||||
|
[18
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
] (./sections/nrv_prediction.tex [10
|
]) [19] (./sections/nrv_prediction.tex [20
|
||||||
|
|
||||||
|
|
||||||
]
|
]
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 45--48
|
Overfull \hbox (23.14783pt too wide) in paragraph at lines 50--66
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
[11]
|
|
||||||
<images/quantile_regression/cdf_quantiles_example.png, id=192, 722.7pt x 433.62pt>
|
|
||||||
File: images/quantile_regression/cdf_quantiles_example.png Graphic file (type png)
|
|
||||||
<use images/quantile_regression/cdf_quantiles_example.png>
|
|
||||||
Package pdftex.def Info: images/quantile_regression/cdf_quantiles_example.png used on input line 51.
|
|
||||||
(pdftex.def) Requested size: 364.19667pt x 218.51653pt.
|
|
||||||
<images/quantile_regression/reconstructed_cdf.png, id=194, 722.7pt x 433.62pt>
|
|
||||||
File: images/quantile_regression/reconstructed_cdf.png Graphic file (type png)
|
|
||||||
<use images/quantile_regression/reconstructed_cdf.png>
|
|
||||||
Package pdftex.def Info: images/quantile_regression/reconstructed_cdf.png used on input line 61.
|
|
||||||
(pdftex.def) Requested size: 364.19667pt x 218.51653pt.
|
|
||||||
[12 <./images/quantile_regression/cdf_quantiles_example.png>]
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 67--73
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 67--73
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 67--73
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 67--73
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
[13 <./images/quantile_regression/reconstructed_cdf.png>]
|
|
||||||
LaTeX Font Info: Font shape `T1/LinuxLibertineT-TLF/m/n' will be
|
|
||||||
(Font) scaled to size 8.0pt on input line 77.
|
|
||||||
LaTeX Font Info: Font shape `T1/LinuxLibertineT-TLF/m/n' will be
|
|
||||||
(Font) scaled to size 6.0pt on input line 77.
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 86--90
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 106--109
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
[14{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusSYM.enc}]
|
|
||||||
LaTeX Font Info: Trying to load font information for U+bbm on input line 128.
|
|
||||||
(/usr/local/texlive/2023/texmf-dist/tex/latex/bbm-macros/ubbm.fd
|
|
||||||
File: ubbm.fd 1999/03/15 V 1.2 Font definition for bbm font - TH
|
|
||||||
) [15]
|
|
||||||
<images/quantile_regression/crps_visualization.png, id=224, 611.4042pt x 395.3169pt>
|
|
||||||
File: images/quantile_regression/crps_visualization.png Graphic file (type png)
|
|
||||||
<use images/quantile_regression/crps_visualization.png>
|
|
||||||
Package pdftex.def Info: images/quantile_regression/crps_visualization.png used on input line 146.
|
|
||||||
(pdftex.def) Requested size: 364.19667pt x 235.4849pt.
|
|
||||||
[16 <./images/quantile_regression/crps_visualization.png>]
|
|
||||||
Overfull \hbox (23.14783pt too wide) in paragraph at lines 172--188
|
|
||||||
[][]
|
[][]
|
||||||
[]
|
[]
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 167--191
|
Underfull \hbox (badness 10000) in paragraph at lines 45--69
|
||||||
|
|
||||||
[]
|
[]
|
||||||
|
|
||||||
|
[21]
|
||||||
|
<images/quantile_regression/nrv_mean_std_over_quarter.png, id=313, 722.7pt x 433.62pt>
|
||||||
|
File: images/quantile_regression/nrv_mean_std_over_quarter.png Graphic file (type png)
|
||||||
|
<use images/quantile_regression/nrv_mean_std_over_quarter.png>
|
||||||
|
Package pdftex.def Info: images/quantile_regression/nrv_mean_std_over_quarter.png used on input line 74.
|
||||||
|
(pdftex.def) Requested size: 455.24411pt x 273.14896pt.
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 192--195
|
Overfull \hbox (23.14783pt too wide) in paragraph at lines 91--109
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
[17]
|
|
||||||
Overfull \hbox (23.14783pt too wide) in paragraph at lines 206--224
|
|
||||||
[][]
|
[][]
|
||||||
[]
|
[]
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 200--227
|
Underfull \hbox (badness 10000) in paragraph at lines 85--112
|
||||||
|
|
||||||
[]
|
[]
|
||||||
|
|
||||||
[18]
|
[22]
|
||||||
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png, id=248, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png, id=323, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png Graphic file (type png)
|
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png Graphic file (type png)
|
||||||
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png>
|
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png used on input line 234.
|
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png used on input line 119.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
LaTeX Font Info: Font shape `T1/LinuxLibertineT-TLF/m/n' will be
|
LaTeX Font Info: Font shape `T1/LinuxLibertineT-TLF/m/n' will be
|
||||||
(Font) scaled to size 10.95pt on input line 235.
|
(Font) scaled to size 10.95pt on input line 120.
|
||||||
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png, id=249, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png, id=324, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png Graphic file (type png)
|
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png Graphic file (type png)
|
||||||
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png>
|
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png used on input line 240.
|
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png used on input line 125.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png, id=250, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png, id=325, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png Graphic file (type png)
|
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png Graphic file (type png)
|
||||||
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png>
|
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png used on input line 246.
|
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png used on input line 131.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png, id=251, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png, id=326, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png Graphic file (type png)
|
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png Graphic file (type png)
|
||||||
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png>
|
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png used on input line 252.
|
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png used on input line 137.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 260--266
|
Underfull \hbox (badness 10000) in paragraph at lines 145--151
|
||||||
|
|
||||||
[]
|
[]
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 260--266
|
Underfull \hbox (badness 10000) in paragraph at lines 145--151
|
||||||
|
|
||||||
[]
|
[]
|
||||||
|
|
||||||
[19 <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png>]
|
[23 <./images/quantile_regression/nrv_mean_std_over_quarter.png>]
|
||||||
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png, id=265, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png, id=338, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png Graphic file (type png)
|
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png Graphic file (type png)
|
||||||
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png>
|
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png used on input line 292.
|
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png used on input line 177.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png, id=266, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png, id=339, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png Graphic file (type png)
|
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png Graphic file (type png)
|
||||||
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png>
|
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png used on input line 298.
|
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png used on input line 183.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png, id=267, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png, id=340, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png Graphic file (type png)
|
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png Graphic file (type png)
|
||||||
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png>
|
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png used on input line 304.
|
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png used on input line 189.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png, id=268, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png, id=341, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png Graphic file (type png)
|
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png Graphic file (type png)
|
||||||
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png>
|
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png used on input line 310.
|
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png used on input line 195.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png, id=272, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png, id=345, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png Graphic file (type png)
|
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png Graphic file (type png)
|
||||||
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png>
|
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png used on input line 327.
|
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png used on input line 212.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png, id=273, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png, id=346, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png Graphic file (type png)
|
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png Graphic file (type png)
|
||||||
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png>
|
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png used on input line 331.
|
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png used on input line 216.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png, id=274, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png, id=347, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png Graphic file (type png)
|
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png Graphic file (type png)
|
||||||
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png>
|
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png used on input line 334.
|
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png used on input line 219.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png, id=275, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png, id=348, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png Graphic file (type png)
|
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png Graphic file (type png)
|
||||||
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png>
|
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png used on input line 338.
|
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png used on input line 223.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_6336_samples.png, id=276, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_6336_samples.png, id=349, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_6336_samples.png Graphic file (type png)
|
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_6336_samples.png Graphic file (type png)
|
||||||
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_6336_samples.png>
|
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_6336_samples.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_6336_samples.png used on input line 341.
|
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_6336_samples.png used on input line 226.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_6336_samples.png, id=277, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_6336_samples.png, id=350, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_6336_samples.png Graphic file (type png)
|
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_6336_samples.png Graphic file (type png)
|
||||||
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_6336_samples.png>
|
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_6336_samples.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_6336_samples.png used on input line 346.
|
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_6336_samples.png used on input line 231.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 319--362
|
Underfull \hbox (badness 10000) in paragraph at lines 204--247
|
||||||
|
|
||||||
[]
|
[]
|
||||||
|
|
||||||
[20 <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png>]
|
[24]
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 364--370
|
|
||||||
|
LaTeX Warning: Reference `subsubsec:quantile_regression_training' on page 25 undefined on input line 249.
|
||||||
|
|
||||||
|
|
||||||
|
Underfull \hbox (badness 10000) in paragraph at lines 249--253
|
||||||
|
|
||||||
|
[]
|
||||||
|
|
||||||
|
[25 <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png>]
|
||||||
|
Underfull \hbox (badness 10000) in paragraph at lines 258--279
|
||||||
|
|
||||||
[]
|
[]
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 364--370
|
Underfull \hbox (badness 10000) in paragraph at lines 258--279
|
||||||
|
|
||||||
[]
|
[]
|
||||||
|
|
||||||
[21 <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_6336_samples.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_6336_samples.png>]
|
[26] [27{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertine/lbtn_7grukw.enc} <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png>]
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 375--396
|
Overfull \vbox (101.81699pt too high) has occurred while \output is active []
|
||||||
|
|
||||||
[]
|
[28 <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_6336_samples.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_6336_samples.png>]
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 375--396
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
[22] [23{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertine/lbtn_7grukw.enc}]
|
|
||||||
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png Graphic file (type png)
|
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png Graphic file (type png)
|
||||||
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png>
|
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png used on input line 439.
|
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png used on input line 322.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png, id=314, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png, id=395, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png Graphic file (type png)
|
File: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png Graphic file (type png)
|
||||||
<use images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png>
|
<use images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png used on input line 443.
|
Package pdftex.def Info: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png used on input line 326.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png Graphic file (type png)
|
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png Graphic file (type png)
|
||||||
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png>
|
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png used on input line 446.
|
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png used on input line 329.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png, id=315, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png, id=396, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png Graphic file (type png)
|
File: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png Graphic file (type png)
|
||||||
<use images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png>
|
<use images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png used on input line 450.
|
Package pdftex.def Info: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png used on input line 333.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png Graphic file (type png)
|
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png Graphic file (type png)
|
||||||
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png>
|
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png used on input line 453.
|
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png used on input line 336.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png, id=316, 1180.8918pt x 595.5048pt>
|
<images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png, id=397, 1180.8918pt x 595.5048pt>
|
||||||
File: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png Graphic file (type png)
|
File: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png Graphic file (type png)
|
||||||
<use images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png>
|
<use images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png>
|
||||||
Package pdftex.def Info: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png used on input line 458.
|
Package pdftex.def Info: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png used on input line 341.
|
||||||
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
|
||||||
<images/quantile_regression/rnn/RNN_diagram.png, id=318, 753.81625pt x 324.21124pt>
|
[29 <./images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png> <./images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png> <./images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png>] [30] [31]
|
||||||
File: images/quantile_regression/rnn/RNN_diagram.png Graphic file (type png)
|
Underfull \hbox (badness 10000) in paragraph at lines 427--435
|
||||||
<use images/quantile_regression/rnn/RNN_diagram.png>
|
|
||||||
Package pdftex.def Info: images/quantile_regression/rnn/RNN_diagram.png used on input line 478.
|
|
||||||
(pdftex.def) Requested size: 364.19667pt x 156.63872pt.
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 472--481
|
|
||||||
|
|
||||||
[]
|
[]
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 472--481
|
Underfull \hbox (badness 10000) in paragraph at lines 427--435
|
||||||
|
|
||||||
[]
|
[]
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 472--481
|
Underfull \hbox (badness 10000) in paragraph at lines 427--435
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
[24 <./images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png> <./images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png> <./images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png>]
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 491--511
|
|
||||||
|
|
||||||
[]
|
[]
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 491--511
|
Underfull \hbox (badness 10000) in paragraph at lines 427--435
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
[25 <./images/quantile_regression/rnn/RNN_diagram.png>] [26] [27]
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 565--573
|
|
||||||
|
|
||||||
[]
|
[]
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 565--573
|
Underfull \hbox (badness 10000) in paragraph at lines 427--435
|
||||||
|
|
||||||
[]
|
[]
|
||||||
|
|
||||||
|
) [32] [33] (./verslag.aux (./sections/introduction.aux) (./sections/background.aux) (./sections/policies.aux) (./sections/literature_study.aux))
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 565--573
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 565--573
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
|
|
||||||
Underfull \hbox (badness 10000) in paragraph at lines 565--573
|
|
||||||
|
|
||||||
[]
|
|
||||||
|
|
||||||
) [28] (./verslag.aux (./sections/introduction.aux) (./sections/background.aux) (./sections/literature_study.aux))
|
|
||||||
|
|
||||||
LaTeX Warning: There were undefined references.
|
LaTeX Warning: There were undefined references.
|
||||||
|
|
||||||
Package rerunfilecheck Info: File `verslag.out' has not changed.
|
Package rerunfilecheck Info: File `verslag.out' has not changed.
|
||||||
(rerunfilecheck) Checksum: 75EF59753ADB7EF255D73ECE4E8DD382;3011.
|
(rerunfilecheck) Checksum: C706F3FF5465D88DFF2FD800E01FDA3B;4583.
|
||||||
|
|
||||||
Package biblatex Warning: Please (re)run Biber on the file:
|
Package biblatex Warning: Please (re)run Biber on the file:
|
||||||
(biblatex) verslag
|
(biblatex) verslag
|
||||||
@@ -1562,18 +1443,18 @@ Package logreq Info: Writing requests to 'verslag.run.xml'.
|
|||||||
|
|
||||||
)
|
)
|
||||||
Here is how much of TeX's memory you used:
|
Here is how much of TeX's memory you used:
|
||||||
26750 strings out of 476025
|
26903 strings out of 476025
|
||||||
486953 string characters out of 5790017
|
491570 string characters out of 5790017
|
||||||
1883388 words of memory out of 5000000
|
1883388 words of memory out of 5000000
|
||||||
46684 multiletter control sequences out of 15000+600000
|
46807 multiletter control sequences out of 15000+600000
|
||||||
603223 words of font info for 88 fonts, out of 8000000 for 9000
|
603223 words of font info for 88 fonts, out of 8000000 for 9000
|
||||||
1141 hyphenation exceptions out of 8191
|
1141 hyphenation exceptions out of 8191
|
||||||
83i,16n,131p,1405b,5180s stack positions out of 10000i,1000n,20000p,200000b,200000s
|
83i,16n,131p,2100b,5180s stack positions out of 10000i,1000n,20000p,200000b,200000s
|
||||||
</Users/victormylle/Library/texlive/2023/texmf-var/fonts/pk/ljfour/public/bbm/bbm12.600pk></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertinust1math/LibertinusT1Math.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinBiolinumT.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinBiolinumTB.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinLibertineT.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinLibertineTB.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinLibertineTI.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/stix/stix-mathcal.pfb>
|
</Users/victormylle/Library/texlive/2023/texmf-var/fonts/pk/ljfour/public/bbm/bbm12.600pk></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertinust1math/LibertinusT1Math.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinBiolinumT.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinBiolinumTB.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinLibertineT.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinLibertineTB.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinLibertineTI.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/stix/stix-mathcal.pfb>
|
||||||
Output written on verslag.pdf (29 pages, 4068943 bytes).
|
Output written on verslag.pdf (34 pages, 5396984 bytes).
|
||||||
PDF statistics:
|
PDF statistics:
|
||||||
425 PDF objects out of 1000 (max. 8388607)
|
517 PDF objects out of 1000 (max. 8388607)
|
||||||
322 compressed objects within 4 object streams
|
404 compressed objects within 5 object streams
|
||||||
76 named destinations out of 1000 (max. 500000)
|
96 named destinations out of 1000 (max. 500000)
|
||||||
297 words of extra memory for PDF output out of 10000 (max. 10000000)
|
371 words of extra memory for PDF output out of 10000 (max. 10000000)
|
||||||
|
|
||||||
|
|||||||
@@ -1,22 +1,30 @@
|
|||||||
\BOOKMARK [1][-]{section.1}{\376\377\000I\000n\000t\000r\000o\000d\000u\000c\000t\000i\000o\000n}{}% 1
|
\BOOKMARK [1][-]{section.1}{\376\377\000I\000n\000t\000r\000o\000d\000u\000c\000t\000i\000o\000n}{}% 1
|
||||||
\BOOKMARK [1][-]{section.2}{\376\377\000B\000a\000c\000k\000g\000r\000o\000u\000n\000d}{}% 2
|
\BOOKMARK [1][-]{section.2}{\376\377\000E\000l\000e\000c\000t\000r\000i\000c\000i\000t\000y\000\040\000m\000a\000r\000k\000e\000t}{}% 2
|
||||||
\BOOKMARK [2][-]{subsection.2.1}{\376\377\000E\000l\000e\000c\000t\000r\000i\000c\000i\000t\000y\000\040\000m\000a\000r\000k\000e\000t}{section.2}% 3
|
\BOOKMARK [1][-]{section.3}{\376\377\000G\000e\000n\000e\000r\000a\000t\000i\000v\000e\000\040\000m\000o\000d\000e\000l\000i\000n\000g}{}% 3
|
||||||
\BOOKMARK [2][-]{subsection.2.2}{\376\377\000G\000e\000n\000e\000r\000a\000t\000i\000v\000e\000\040\000m\000o\000d\000e\000l\000i\000n\000g}{section.2}% 4
|
\BOOKMARK [2][-]{subsection.3.1}{\376\377\000Q\000u\000a\000n\000t\000i\000l\000e\000\040\000R\000e\000g\000r\000e\000s\000s\000i\000o\000n}{section.3}% 4
|
||||||
\BOOKMARK [2][-]{subsection.2.3}{\376\377\000D\000i\000f\000f\000u\000s\000i\000o\000n\000\040\000m\000o\000d\000e\000l\000s}{section.2}% 5
|
\BOOKMARK [2][-]{subsection.3.2}{\376\377\000A\000u\000t\000o\000r\000e\000g\000r\000e\000s\000s\000i\000v\000e\000\040\000v\000s\000\040\000N\000o\000n\000-\000A\000u\000t\000o\000r\000e\000g\000r\000e\000s\000s\000i\000v\000e\000\040\000m\000o\000d\000e\000l\000s}{section.3}% 5
|
||||||
\BOOKMARK [3][-]{subsubsection.2.3.1}{\376\377\000O\000v\000e\000r\000v\000i\000e\000w}{subsection.2.3}% 6
|
\BOOKMARK [2][-]{subsection.3.3}{\376\377\000M\000o\000d\000e\000l\000\040\000T\000y\000p\000e\000s}{section.3}% 6
|
||||||
\BOOKMARK [3][-]{subsubsection.2.3.2}{\376\377\000A\000p\000p\000l\000i\000c\000a\000t\000i\000o\000n\000s}{subsection.2.3}% 7
|
\BOOKMARK [3][-]{subsubsection.3.3.1}{\376\377\000L\000i\000n\000e\000a\000r\000\040\000M\000o\000d\000e\000l}{subsection.3.3}% 7
|
||||||
\BOOKMARK [3][-]{subsubsection.2.3.3}{\376\377\000G\000e\000n\000e\000r\000a\000t\000i\000o\000n\000\040\000p\000r\000o\000c\000e\000s\000s}{subsection.2.3}% 8
|
\BOOKMARK [3][-]{subsubsection.3.3.2}{\376\377\000N\000o\000n\000-\000L\000i\000n\000e\000a\000r\000\040\000M\000o\000d\000e\000l}{subsection.3.3}% 8
|
||||||
\BOOKMARK [1][-]{section.3}{\376\377\000L\000i\000t\000e\000r\000a\000t\000u\000r\000e\000\040\000S\000t\000u\000d\000y}{}% 9
|
\BOOKMARK [3][-]{subsubsection.3.3.3}{\376\377\000R\000e\000c\000u\000r\000r\000e\000n\000t\000\040\000N\000e\000u\000r\000a\000l\000\040\000N\000e\000t\000w\000o\000r\000k\000\040\000\050\000R\000N\000N\000\051}{subsection.3.3}% 9
|
||||||
\BOOKMARK [1][-]{section.4}{\376\377\000T\000O\000D\000O\000:\000\040\000B\000e\000t\000t\000e\000r\000\040\000t\000i\000t\000l\000e\000\040\000f\000o\000r\000\040\000t\000h\000i\000s\000\040\000s\000e\000c\000t\000i\000o\000n}{}% 10
|
\BOOKMARK [2][-]{subsection.3.4}{\376\377\000D\000i\000f\000f\000u\000s\000i\000o\000n\000\040\000m\000o\000d\000e\000l\000s}{section.3}% 10
|
||||||
\BOOKMARK [1][-]{section.5}{\376\377\000N\000R\000V\000\040\000P\000r\000e\000d\000i\000c\000t\000i\000o\000n}{}% 11
|
\BOOKMARK [3][-]{subsubsection.3.4.1}{\376\377\000O\000v\000e\000r\000v\000i\000e\000w}{subsection.3.4}% 11
|
||||||
\BOOKMARK [2][-]{subsection.5.1}{\376\377\000D\000a\000t\000a}{section.5}% 12
|
\BOOKMARK [3][-]{subsubsection.3.4.2}{\376\377\000A\000p\000p\000l\000i\000c\000a\000t\000i\000o\000n\000s}{subsection.3.4}% 12
|
||||||
\BOOKMARK [2][-]{subsection.5.2}{\376\377\000Q\000u\000a\000n\000t\000i\000l\000e\000\040\000R\000e\000g\000r\000e\000s\000s\000i\000o\000n}{section.5}% 13
|
\BOOKMARK [3][-]{subsubsection.3.4.3}{\376\377\000G\000e\000n\000e\000r\000a\000t\000i\000o\000n\000\040\000p\000r\000o\000c\000e\000s\000s}{subsection.3.4}% 13
|
||||||
\BOOKMARK [3][-]{subsubsection.5.2.1}{\376\377\000T\000r\000a\000i\000n\000i\000n\000g}{subsection.5.2}% 14
|
\BOOKMARK [2][-]{subsection.3.5}{\376\377\000E\000v\000a\000l\000u\000a\000t\000i\000o\000n}{section.3}% 14
|
||||||
\BOOKMARK [3][-]{subsubsection.5.2.2}{\376\377\000E\000v\000a\000l\000u\000a\000t\000i\000o\000n}{subsection.5.2}% 15
|
\BOOKMARK [1][-]{section.4}{\376\377\000P\000o\000l\000i\000c\000i\000e\000s}{}% 15
|
||||||
\BOOKMARK [3][-]{subsubsection.5.2.3}{\376\377\000L\000i\000n\000e\000a\000r\000\040\000M\000o\000d\000e\000l}{subsection.5.2}% 16
|
\BOOKMARK [2][-]{subsection.4.1}{\376\377\000B\000a\000s\000e\000l\000i\000n\000e\000s}{section.4}% 16
|
||||||
\BOOKMARK [3][-]{subsubsection.5.2.4}{\376\377\000N\000o\000n\000-\000l\000i\000n\000e\000a\000r\000\040\000M\000o\000d\000e\000l}{subsection.5.2}% 17
|
\BOOKMARK [2][-]{subsection.4.2}{\376\377\000P\000o\000l\000i\000c\000i\000e\000s\000\040\000b\000a\000s\000e\000d\000\040\000o\000n\000\040\000N\000R\000V\000\040\000g\000e\000n\000e\000r\000a\000t\000i\000o\000n\000s}{section.4}% 17
|
||||||
\BOOKMARK [3][-]{subsubsection.5.2.5}{\376\377\000G\000R\000U\000\040\000M\000o\000d\000e\000l}{subsection.5.2}% 18
|
\BOOKMARK [1][-]{section.5}{\376\377\000L\000i\000t\000e\000r\000a\000t\000u\000r\000e\000\040\000S\000t\000u\000d\000y}{}% 18
|
||||||
\BOOKMARK [2][-]{subsection.5.3}{\376\377\000D\000i\000f\000f\000u\000s\000i\000o\000n}{section.5}% 19
|
\BOOKMARK [2][-]{subsection.5.1}{\376\377\000E\000l\000e\000c\000t\000r\000i\000c\000i\000t\000y\000\040\000P\000r\000i\000c\000e\000\040\000F\000o\000r\000e\000c\000a\000s\000t\000i\000n\000g}{section.5}% 19
|
||||||
\BOOKMARK [1][-]{section.6}{\376\377\000P\000o\000l\000i\000c\000i\000e\000s\000\040\000f\000o\000r\000\040\000b\000a\000t\000t\000e\000r\000y\000\040\000o\000p\000t\000i\000m\000i\000z\000a\000t\000i\000o\000n}{}% 20
|
\BOOKMARK [2][-]{subsection.5.2}{\376\377\000P\000o\000l\000i\000c\000i\000e\000s\000\040\000f\000o\000r\000\040\000B\000a\000t\000t\000e\000r\000y\000\040\000O\000p\000t\000i\000m\000i\000z\000a\000t\000i\000o\000n}{section.5}% 20
|
||||||
\BOOKMARK [2][-]{subsection.6.1}{\376\377\000B\000a\000s\000e\000l\000i\000n\000e\000s}{section.6}% 21
|
\BOOKMARK [1][-]{section.6}{\376\377\000T\000O\000D\000O\000:\000\040\000B\000e\000t\000t\000e\000r\000\040\000t\000i\000t\000l\000e\000\040\000f\000o\000r\000\040\000t\000h\000i\000s\000\040\000s\000e\000c\000t\000i\000o\000n}{}% 21
|
||||||
\BOOKMARK [2][-]{subsection.6.2}{\376\377\000P\000o\000l\000i\000c\000i\000e\000s\000\040\000u\000s\000i\000n\000g\000\040\000N\000R\000V\000\040\000p\000r\000e\000d\000i\000c\000t\000i\000o\000n\000s}{section.6}% 22
|
\BOOKMARK [1][-]{section.7}{\376\377\000R\000e\000s\000u\000l\000t\000s\000\040\000\046\000\040\000D\000i\000s\000c\000u\000s\000s\000i\000o\000n}{}% 22
|
||||||
|
\BOOKMARK [2][-]{subsection.7.1}{\376\377\000D\000a\000t\000a}{section.7}% 23
|
||||||
|
\BOOKMARK [3][-]{subsubsection.7.1.1}{\376\377\000L\000i\000n\000e\000a\000r\000\040\000M\000o\000d\000e\000l}{subsection.7.1}% 24
|
||||||
|
\BOOKMARK [3][-]{subsubsection.7.1.2}{\376\377\000N\000o\000n\000-\000l\000i\000n\000e\000a\000r\000\040\000M\000o\000d\000e\000l}{subsection.7.1}% 25
|
||||||
|
\BOOKMARK [3][-]{subsubsection.7.1.3}{\376\377\000G\000R\000U\000\040\000M\000o\000d\000e\000l}{subsection.7.1}% 26
|
||||||
|
\BOOKMARK [2][-]{subsection.7.2}{\376\377\000D\000i\000f\000f\000u\000s\000i\000o\000n}{section.7}% 27
|
||||||
|
\BOOKMARK [1][-]{section.8}{\376\377\000P\000o\000l\000i\000c\000i\000e\000s\000\040\000f\000o\000r\000\040\000b\000a\000t\000t\000e\000r\000y\000\040\000o\000p\000t\000i\000m\000i\000z\000a\000t\000i\000o\000n}{}% 28
|
||||||
|
\BOOKMARK [2][-]{subsection.8.1}{\376\377\000B\000a\000s\000e\000l\000i\000n\000e\000s}{section.8}% 29
|
||||||
|
\BOOKMARK [2][-]{subsection.8.2}{\376\377\000P\000o\000l\000i\000c\000i\000e\000s\000\040\000u\000s\000i\000n\000g\000\040\000N\000R\000V\000\040\000p\000r\000e\000d\000i\000c\000t\000i\000o\000n\000s}{section.8}% 30
|
||||||
|
|||||||
Binary file not shown.
Binary file not shown.
@@ -175,6 +175,8 @@
|
|||||||
|
|
||||||
\include{sections/background}
|
\include{sections/background}
|
||||||
|
|
||||||
|
\include{sections/policies}
|
||||||
|
|
||||||
\include{sections/literature_study}
|
\include{sections/literature_study}
|
||||||
|
|
||||||
% In introduction
|
% In introduction
|
||||||
@@ -189,4 +191,8 @@ This thesis can be divided into two main parts. The first part focuses on modeli
|
|||||||
|
|
||||||
\subsection{Policies using NRV predictions}
|
\subsection{Policies using NRV predictions}
|
||||||
|
|
||||||
|
\newpage
|
||||||
|
% bibliography
|
||||||
|
\printbibliography
|
||||||
|
|
||||||
\end{document}
|
\end{document}
|
||||||
|
|||||||
@@ -1,23 +1,31 @@
|
|||||||
\babel@toc {english}{}\relax
|
\babel@toc {english}{}\relax
|
||||||
\contentsline {section}{\numberline {1}Introduction}{2}{section.1}%
|
\contentsline {section}{\numberline {1}Introduction}{2}{section.1}%
|
||||||
\contentsline {section}{\numberline {2}Background}{3}{section.2}%
|
\contentsline {section}{\numberline {2}Electricity market}{3}{section.2}%
|
||||||
\contentsline {subsection}{\numberline {2.1}Electricity market}{3}{subsection.2.1}%
|
\contentsline {section}{\numberline {3}Generative modeling}{5}{section.3}%
|
||||||
\contentsline {subsection}{\numberline {2.2}Generative modeling}{5}{subsection.2.2}%
|
\contentsline {subsection}{\numberline {3.1}Quantile Regression}{6}{subsection.3.1}%
|
||||||
\contentsline {subsection}{\numberline {2.3}Diffusion models}{6}{subsection.2.3}%
|
\contentsline {subsection}{\numberline {3.2}Autoregressive vs Non-Autoregressive models}{8}{subsection.3.2}%
|
||||||
\contentsline {subsubsection}{\numberline {2.3.1}Overview}{6}{subsubsection.2.3.1}%
|
\contentsline {subsection}{\numberline {3.3}Model Types}{9}{subsection.3.3}%
|
||||||
\contentsline {subsubsection}{\numberline {2.3.2}Applications}{7}{subsubsection.2.3.2}%
|
\contentsline {subsubsection}{\numberline {3.3.1}Linear Model}{9}{subsubsection.3.3.1}%
|
||||||
\contentsline {subsubsection}{\numberline {2.3.3}Generation process}{7}{subsubsection.2.3.3}%
|
\contentsline {subsubsection}{\numberline {3.3.2}Non-Linear Model}{10}{subsubsection.3.3.2}%
|
||||||
\contentsline {section}{\numberline {3}Literature Study}{9}{section.3}%
|
\contentsline {subsubsection}{\numberline {3.3.3}Recurrent Neural Network (RNN)}{10}{subsubsection.3.3.3}%
|
||||||
\contentsline {section}{\numberline {4}TODO: Better title for this section}{10}{section.4}%
|
\contentsline {subsection}{\numberline {3.4}Diffusion models}{11}{subsection.3.4}%
|
||||||
\contentsline {section}{\numberline {5}NRV Prediction}{10}{section.5}%
|
\contentsline {subsubsection}{\numberline {3.4.1}Overview}{11}{subsubsection.3.4.1}%
|
||||||
\contentsline {subsection}{\numberline {5.1}Data}{10}{subsection.5.1}%
|
\contentsline {subsubsection}{\numberline {3.4.2}Applications}{12}{subsubsection.3.4.2}%
|
||||||
\contentsline {subsection}{\numberline {5.2}Quantile Regression}{12}{subsection.5.2}%
|
\contentsline {subsubsection}{\numberline {3.4.3}Generation process}{12}{subsubsection.3.4.3}%
|
||||||
\contentsline {subsubsection}{\numberline {5.2.1}Training}{14}{subsubsection.5.2.1}%
|
\contentsline {subsection}{\numberline {3.5}Evaluation}{14}{subsection.3.5}%
|
||||||
\contentsline {subsubsection}{\numberline {5.2.2}Evaluation}{15}{subsubsection.5.2.2}%
|
\contentsline {section}{\numberline {4}Policies}{16}{section.4}%
|
||||||
\contentsline {subsubsection}{\numberline {5.2.3}Linear Model}{16}{subsubsection.5.2.3}%
|
\contentsline {subsection}{\numberline {4.1}Baselines}{16}{subsection.4.1}%
|
||||||
\contentsline {subsubsection}{\numberline {5.2.4}Non-linear Model}{22}{subsubsection.5.2.4}%
|
\contentsline {subsection}{\numberline {4.2}Policies based on NRV generations}{17}{subsection.4.2}%
|
||||||
\contentsline {subsubsection}{\numberline {5.2.5}GRU Model}{25}{subsubsection.5.2.5}%
|
\contentsline {section}{\numberline {5}Literature Study}{18}{section.5}%
|
||||||
\contentsline {subsection}{\numberline {5.3}Diffusion}{28}{subsection.5.3}%
|
\contentsline {subsection}{\numberline {5.1}Electricity Price Forecasting}{18}{subsection.5.1}%
|
||||||
\contentsline {section}{\numberline {6}Policies for battery optimization}{28}{section.6}%
|
\contentsline {subsection}{\numberline {5.2}Policies for Battery Optimization}{19}{subsection.5.2}%
|
||||||
\contentsline {subsection}{\numberline {6.1}Baselines}{28}{subsection.6.1}%
|
\contentsline {section}{\numberline {6}TODO: Better title for this section}{20}{section.6}%
|
||||||
\contentsline {subsection}{\numberline {6.2}Policies using NRV predictions}{28}{subsection.6.2}%
|
\contentsline {section}{\numberline {7}Results \& Discussion}{20}{section.7}%
|
||||||
|
\contentsline {subsection}{\numberline {7.1}Data}{20}{subsection.7.1}%
|
||||||
|
\contentsline {subsubsection}{\numberline {7.1.1}Linear Model}{22}{subsubsection.7.1.1}%
|
||||||
|
\contentsline {subsubsection}{\numberline {7.1.2}Non-linear Model}{25}{subsubsection.7.1.2}%
|
||||||
|
\contentsline {subsubsection}{\numberline {7.1.3}GRU Model}{30}{subsubsection.7.1.3}%
|
||||||
|
\contentsline {subsection}{\numberline {7.2}Diffusion}{32}{subsection.7.2}%
|
||||||
|
\contentsline {section}{\numberline {8}Policies for battery optimization}{32}{section.8}%
|
||||||
|
\contentsline {subsection}{\numberline {8.1}Baselines}{32}{subsection.8.1}%
|
||||||
|
\contentsline {subsection}{\numberline {8.2}Policies using NRV predictions}{32}{subsection.8.2}%
|
||||||
|
|||||||
File diff suppressed because one or more lines are too long
@@ -69,7 +69,7 @@ class DiffusionTrainer:
|
|||||||
self.model = model
|
self.model = model
|
||||||
self.device = device
|
self.device = device
|
||||||
|
|
||||||
self.noise_steps = 30
|
self.noise_steps = 1000
|
||||||
self.beta_start = 0.0001
|
self.beta_start = 0.0001
|
||||||
self.beta_end = 0.02
|
self.beta_end = 0.02
|
||||||
self.ts_length = 96
|
self.ts_length = 96
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ from src.utils.clearml import ClearMLHelper
|
|||||||
|
|
||||||
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
|
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
|
||||||
task = clearml_helper.get_task(
|
task = clearml_helper.get_task(
|
||||||
task_name="Diffusion Training: hidden_sizes=[256, 256], lr=0.0001, time_dim=8 + Load + PV + Wind + NP"
|
task_name="Diffusion Training: hidden_sizes=[1024, 1024, 1024], lr=0.0001, time_dim=8 + Load + PV + Wind + NP"
|
||||||
)
|
)
|
||||||
task.execute_remotely(queue_name="default", exit_process=True)
|
task.execute_remotely(queue_name="default", exit_process=True)
|
||||||
|
|
||||||
@@ -45,7 +45,7 @@ print("Input dim: ", inputDim)
|
|||||||
model_parameters = {
|
model_parameters = {
|
||||||
"epochs": 15000,
|
"epochs": 15000,
|
||||||
"learning_rate": 0.0001,
|
"learning_rate": 0.0001,
|
||||||
"hidden_sizes": [256, 256, 256],
|
"hidden_sizes": [1024, 1024, 1024],
|
||||||
"time_dim": 8,
|
"time_dim": 8,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user