Implemented Non-Autoregressive Quantile Regression

This commit is contained in:
Victor Mylle
2023-11-18 17:42:06 +00:00
parent 75f1f64c38
commit 1268af47a6
9 changed files with 196493 additions and 161 deletions

View File

@@ -5,13 +5,15 @@ from trainers.trainer import Trainer
from trainers.autoregressive_trainer import AutoRegressiveTrainer
from data.preprocessing import DataProcessor
from utils.clearml import ClearMLHelper
from losses import PinballLoss
from losses import PinballLoss, NonAutoRegressivePinballLoss
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
class QuantileTrainer(AutoRegressiveTrainer):
class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
def __init__(self, model: torch.nn.Module, optimizer: torch.optim.Optimizer, data_processor: DataProcessor, quantiles: list, device: torch.device, clearml_helper: ClearMLHelper = None, debug: bool = True):
quantiles_tensor = torch.tensor(quantiles)
quantiles_tensor = quantiles_tensor.to(device)
@@ -26,29 +28,29 @@ class QuantileTrainer(AutoRegressiveTrainer):
transformed_metrics = { metric.__class__.__name__: 0.0 for metric in self.metrics_to_track }
with torch.no_grad():
for inputs, targets in dataloader:
inputs, targets = inputs.to(self.device), targets
outputs = self.model(inputs)
total_amount_samples = len(dataloader.dataset) - 95
for idx in tqdm(range(total_amount_samples)):
_, outputs, samples, targets = self.auto_regressive(dataloader, idx)
samples = []
for output in outputs:
samples.append(self.sample_from_dist(self.quantiles.cpu().numpy(), output.cpu().numpy()))
samples = torch.tensor(samples).to(self.device).reshape(-1, 1)
inversed_samples = torch.tensor(self.data_processor.inverse_transform(samples))
inversed_targets = torch.tensor(self.data_processor.inverse_transform(targets.reshape(-1, 1)))
inversed_targets = torch.tensor(self.data_processor.inverse_transform(targets))
outputs = outputs.to(self.device)
targets = targets.to(self.device)
samples = samples.to(self.device)
for metric in self.metrics_to_track:
if metric.__class__ != PinballLoss:
transformed_metrics[metric.__class__.__name__] += metric(samples, targets.view(-1, 1).to(self.device))
transformed_metrics[metric.__class__.__name__] += metric(samples, targets)
metrics[metric.__class__.__name__] += metric(inversed_samples, inversed_targets)
else:
transformed_metrics[metric.__class__.__name__] += metric(outputs, targets.view(-1, 1).to(self.device))
transformed_metrics[metric.__class__.__name__] += metric(outputs, targets)
for metric in self.metrics_to_track:
metrics[metric.__class__.__name__] /= len(dataloader)
transformed_metrics[metric.__class__.__name__] /= len(dataloader)
metrics[metric.__class__.__name__] /= total_amount_samples
transformed_metrics[metric.__class__.__name__] /= total_amount_samples
for metric_name, metric_value in metrics.items():
if PinballLoss.__name__ in metric_name:
@@ -125,12 +127,12 @@ class QuantileTrainer(AutoRegressiveTrainer):
sample = self.sample_from_dist(self.quantiles.cpu(), prediction.squeeze(-1).cpu().numpy())
predictions_sampled.append(sample)
return initial_sequence.cpu(), torch.stack(predictions_full).cpu(), torch.stack(target_full).cpu()
return initial_sequence.cpu(), torch.stack(predictions_full).cpu(), torch.tensor(predictions_sampled).reshape(-1, 1), torch.stack(target_full).cpu()
@staticmethod
def sample_from_dist(quantiles, output_values):
# Interpolate the inverse CDF
inverse_cdf = interp1d(quantiles, output_values, kind='quadratic', bounds_error=False, fill_value="extrapolate")
inverse_cdf = interp1d(quantiles, output_values, kind='linear', bounds_error=False, fill_value="extrapolate")
# generate one random uniform number
uniform_random_numbers = np.random.uniform(0, 1, 1000)
@@ -141,4 +143,141 @@ class QuantileTrainer(AutoRegressiveTrainer):
# Return the mean of the samples
return np.mean(samples)
def plot_quantile_percentages(self, task, data_loader, train: bool = True, iteration: int = None):
    """Compare empirical quantile coverage against the nominal levels.

    For each predicted quantile q, counts the fraction of targets that fall
    below the model's q-quantile forecast and plots it next to the ideal
    fraction q as a grouped bar chart, reported to ClearML.

    Args:
        task: ClearML task whose logger receives the matplotlib figure.
        data_loader: iterable of (inputs, targets) batches.
        train: labels the series "Training Set" when True, "Test Set" otherwise.
        iteration: optional iteration index forwarded to the ClearML report.
    """
    total = 0
    # Hoist the device->host conversion out of the loops.
    quantile_levels = self.quantiles.cpu().numpy()
    quantile_counter = {q: 0 for q in quantile_levels}
    with torch.no_grad():
        for inputs, targets in data_loader:
            # Use the trainer's configured device instead of hard-coding "cuda".
            inputs = inputs.to(self.device)
            output = self.model(inputs)
            # output shape: (batch_size, num_quantiles)
            # target shape: (batch_size, 1)
            for i, q in enumerate(quantile_levels):
                quantile_counter[q] += np.sum(targets.squeeze(-1).cpu().numpy() < output[:, i].cpu().numpy())
            total += len(targets)
    # Fraction of targets under each quantile forecast, aligned with quantile_levels.
    percentages = np.array([quantile_counter[q] / total for q in quantile_levels])
    bar_width = 0.35
    index = np.arange(len(quantile_levels))
    # Plotting the bars: ideal coverage vs. observed model coverage.
    fig, ax = plt.subplots(figsize=(15, 10))
    bar1 = ax.bar(index, quantile_levels, bar_width, label='Ideal', color='brown')
    bar2 = ax.bar(index + bar_width, percentages, bar_width, label='NN model', color='blue')
    # Annotate the model bars with their heights (two significant digits).
    for rect in bar2:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2., 1.005 * height,
                f'{height:.2}', ha='center', va='bottom')
    series_name = "Training Set" if train else "Test Set"
    # Adding labels and title
    ax.set_xlabel('Quantile')
    ax.set_ylabel('Fraction of data under quantile forecast')
    ax.set_title(f'Quantile Performance Comparison ({series_name})')
    ax.set_xticks(index + bar_width / 2)
    ax.set_xticklabels(quantile_levels)
    ax.legend()
    task.get_logger().report_matplotlib_figure(title='Quantile Performance Comparison', series=series_name, report_image=True, figure=plt, iteration=iteration)
    plt.close()
class NonAutoRegressiveQuantileRegression(Trainer):
    """Trainer for non-autoregressive quantile regression.

    The model predicts every quantile for the whole horizon in a single
    forward pass; training uses NonAutoRegressivePinballLoss over the
    configured quantile levels.
    """

    def __init__(self, model: torch.nn.Module, optimizer: torch.optim.Optimizer, data_processor: DataProcessor, quantiles: list, device: torch.device, clearml_helper: ClearMLHelper = None, debug: bool = True):
        """Build the trainer with a pinball-loss criterion over `quantiles`.

        Args:
            model: network emitting one value per quantile per time step.
            optimizer: optimizer for `model`'s parameters.
            data_processor: provides inverse_transform back to the data space.
            quantiles: quantile levels in (0, 1), e.g. [0.1, 0.5, 0.9].
            device: device used for training and evaluation.
            clearml_helper: optional ClearML integration helper.
            debug: forwarded to the base Trainer.
        """
        quantiles_tensor = torch.tensor(quantiles)
        quantiles_tensor = quantiles_tensor.to(device)
        # Store the tensor, not the raw list: log_final_metrics calls
        # self.quantiles.cpu(), which only exists on tensors (the list
        # version would raise AttributeError at evaluation time).
        self.quantiles = quantiles_tensor
        criterion = NonAutoRegressivePinballLoss(quantiles=quantiles_tensor)
        super().__init__(model=model, optimizer=optimizer, criterion=criterion, data_processor=data_processor, device=device, clearml_helper=clearml_helper, debug=debug)

    @staticmethod
    def sample_from_dist(quantiles, output_values):
        """Collapse quantile forecasts into point predictions per time step.

        Each row of `output_values` (one row per time step, one column per
        quantile) defines an inverse CDF; we sample it and return the mean.

        Args:
            quantiles: 1-D array-like of quantile levels, ascending.
            output_values: array reshapeable to (n_steps, len(quantiles)).

        Returns:
            np.ndarray of shape (n_steps,) with one mean sample per step.
        """
        reshaped_values = output_values.reshape(-1, len(quantiles))
        samples = []
        for row in reshaped_values:
            # Linear inverse CDF through the predicted quantile values,
            # extrapolating outside the outermost quantiles.
            inverse_cdf = interp1d(quantiles, row, kind='linear', bounds_error=False, fill_value="extrapolate")
            uniform_random_numbers = np.random.uniform(0, 1, 1000)
            new_samples = inverse_cdf(uniform_random_numbers)
            samples.append(np.mean(new_samples))
        return np.array(samples)

    def log_final_metrics(self, task, dataloader, train: bool = True):
        """Accumulate tracked metrics over `dataloader` and report to ClearML.

        Metrics are computed both in the transformed (model) space and, via
        data_processor.inverse_transform, in the original data space, then
        averaged over batches and logged as single values.
        """
        metrics = {metric.__class__.__name__: 0.0 for metric in self.metrics_to_track}
        transformed_metrics = {metric.__class__.__name__: 0.0 for metric in self.metrics_to_track}
        with torch.no_grad():
            for inputs, targets in dataloader:
                inputs = inputs.to(self.device)
                outputs = self.model(inputs)
                # Collapse each sample's quantile forecasts into point predictions.
                outputted_samples = [self.sample_from_dist(self.quantiles.cpu(), output.cpu().numpy()) for output in outputs]
                outputted_samples = torch.tensor(outputted_samples)
                # Map predictions and targets back to the original data space.
                inversed_outputs = torch.tensor(self.data_processor.inverse_transform(outputted_samples))
                inversed_targets = torch.tensor(self.data_processor.inverse_transform(targets))
                # Move everything to the trainer's device before metric calls.
                inversed_outputs = inversed_outputs.to(self.device)
                inversed_targets = inversed_targets.to(self.device)
                outputted_samples = outputted_samples.to(self.device)
                for metric in self.metrics_to_track:
                    transformed_metrics[metric.__class__.__name__] += metric(outputted_samples, targets.to(self.device))
                    metrics[metric.__class__.__name__] += metric(inversed_outputs, inversed_targets)
        # Average the per-batch sums.
        for metric in self.metrics_to_track:
            metrics[metric.__class__.__name__] /= len(dataloader)
            transformed_metrics[metric.__class__.__name__] /= len(dataloader)
        prefix = 'train' if train else 'test'
        for metric_name, metric_value in metrics.items():
            task.get_logger().report_single_value(name=f'{prefix}_{metric_name}', value=metric_value)
        for metric_name, metric_value in transformed_metrics.items():
            task.get_logger().report_single_value(name=f'{prefix}_transformed_{metric_name}', value=metric_value)

    def get_plot(self, current_day, next_day, predictions, show_legend: bool = True):
        """Build a Plotly figure of the current day, next day and quantile forecasts.

        NOTE(review): assumes 96 time steps per day (15-minute resolution),
        matching the hard-coded 96 used elsewhere in this file — confirm.
        """
        fig = go.Figure()
        # Convert to numpy for plotting
        current_day_np = current_day.view(-1).cpu().numpy()
        next_day_np = next_day.view(-1).cpu().numpy()
        # Plain float levels so trace names render as e.g. "Q=0.5".
        quantile_levels = self.quantiles.cpu().numpy().tolist()
        # reshape predictions to (n, len(quantiles))
        predictions_np = predictions.cpu().numpy().reshape(-1, len(quantile_levels))
        # Add traces for current and next day
        fig.add_trace(go.Scatter(x=np.arange(96), y=current_day_np, name="Current Day"))
        fig.add_trace(go.Scatter(x=96 + np.arange(96), y=next_day_np, name="Next Day"))
        for i, q in enumerate(quantile_levels):
            fig.add_trace(go.Scatter(x=96 + np.arange(96), y=predictions_np[:, i],
                                     name=f"Prediction (Q={q})", line=dict(dash='dash')))
        # Update the layout
        fig.update_layout(title="Predictions and Quantiles", showlegend=show_legend)
        return fig