Lot of changes
@@ -10,13 +10,39 @@ from plotly.subplots import make_subplots
 from trainers.trainer import Trainer
+from tqdm import tqdm


 class AutoRegressiveTrainer(Trainer):
+    def __init__(
+        self,
+        model: torch.nn.Module,
+        optimizer: torch.optim.Optimizer,
+        criterion: torch.nn.Module,
+        data_processor: DataProcessor,
+        device: torch.device,
+        clearml_helper: ClearMLHelper = None,
+        debug: bool = True,
+    ):
+        super().__init__(
+            model=model,
+            optimizer=optimizer,
+            criterion=criterion,
+            data_processor=data_processor,
+            device=device,
+            clearml_helper=clearml_helper,
+            debug=debug,
+        )
+        self.model.output_size = 1
+
     def debug_plots(self, task, train: bool, data_loader, sample_indices, epoch):
         num_samples = len(sample_indices)
         rows = num_samples  # One row per sample since we only want one column
         cols = 1

-        fig = make_subplots(rows=rows, cols=cols, subplot_titles=[f'Sample {i+1}' for i in range(num_samples)])
+        fig = make_subplots(
+            rows=rows,
+            cols=cols,
+            subplot_titles=[f"Sample {i+1}" for i in range(num_samples)],
+        )

         for i, idx in enumerate(sample_indices):
             auto_regressive_output = self.auto_regressive(data_loader, idx)
@@ -26,27 +52,30 @@ class AutoRegressiveTrainer(Trainer):
             initial, predictions, _, target = auto_regressive_output

             sub_fig = self.get_plot(initial, target, predictions, show_legend=(i == 0))

             row = i + 1
             col = 1

             for trace in sub_fig.data:
                 fig.add_trace(trace, row=row, col=col)

-            loss = self.criterion(predictions.to(self.device), target.to(self.device)).item()
+            loss = self.criterion(
+                predictions.to(self.device), target.to(self.device)
+            ).item()

-            fig['layout']['annotations'][i].update(text=f"{loss.__class__.__name__}: {loss:.6f}")
+            fig["layout"]["annotations"][i].update(
+                text=f"{loss.__class__.__name__}: {loss:.6f}"
+            )

         # y axis same for all plots
         fig.update_yaxes(range=[-1, 1], col=1)

         fig.update_layout(height=300 * rows)
         task.get_logger().report_plotly(
             title=f"{'Training' if train else 'Test'} Samples",
             series="full_day",
             iteration=epoch,
-            figure=fig
+            figure=fig,
         )

     def auto_regressive(self, data_loader, idx, sequence_length: int = 96):
@@ -61,14 +90,25 @@ class AutoRegressiveTrainer(Trainer):

         target_full.append(target)
         with torch.no_grad():
             print(prev_features.shape)
             prediction = self.model(prev_features.unsqueeze(0))
         predictions_full.append(prediction.squeeze(-1))

         for i in range(sequence_length - 1):
-            new_features = torch.cat((prev_features[1:97].cpu(), prediction.squeeze(-1).cpu()), dim=0)
+            new_features = torch.cat(
+                (
+                    prev_features[1:96].cpu(),
+                    prediction.squeeze(-1).cpu(),
+                ),
+                dim=0,
+            )

             print(new_features.shape)

             # get the other needed features
-            other_features, new_target = data_loader.dataset.random_day_autoregressive(idx + i + 1)
+            other_features, new_target = data_loader.dataset.random_day_autoregressive(
+                idx + i + 1
+            )

             if other_features is not None:
                 prev_features = torch.cat((new_features, other_features), dim=0)
@@ -80,14 +120,20 @@ class AutoRegressiveTrainer(Trainer):

             # predict
             with torch.no_grad():
-                prediction = self.model(new_features.unsqueeze(0).to(self.device))
+                prediction = self.model(prev_features.unsqueeze(0).to(self.device))
             predictions_full.append(prediction.squeeze(-1))

-        return initial_sequence.cpu(), torch.stack(predictions_full).cpu(), torch.stack(target_full).cpu()
+        return (
+            initial_sequence.cpu(),
+            torch.stack(predictions_full).cpu(),
+            torch.stack(target_full).cpu(),
+        )

     def log_final_metrics(self, task, dataloader, train: bool = True):
-        metrics = { metric.__class__.__name__: 0.0 for metric in self.metrics_to_track }
-        transformed_metrics = { metric.__class__.__name__: 0.0 for metric in self.metrics_to_track }
+        metrics = {metric.__class__.__name__: 0.0 for metric in self.metrics_to_track}
+        transformed_metrics = {
+            metric.__class__.__name__: 0.0 for metric in self.metrics_to_track
+        }

         with torch.no_grad():
             # iterate idx over dataset
@@ -96,15 +142,23 @@ class AutoRegressiveTrainer(Trainer):
             for idx in tqdm(range(total_amount_samples)):
                 _, outputs, targets = self.auto_regressive(dataloader, idx)

-                inversed_outputs = torch.tensor(self.data_processor.inverse_transform(outputs))
-                inversed_inputs = torch.tensor(self.data_processor.inverse_transform(targets))
+                inversed_outputs = torch.tensor(
+                    self.data_processor.inverse_transform(outputs)
+                )
+                inversed_inputs = torch.tensor(
+                    self.data_processor.inverse_transform(targets)
+                )

                 outputs = outputs.to(self.device)
                 targets = targets.to(self.device)

                 for metric in self.metrics_to_track:
-                    transformed_metrics[metric.__class__.__name__] += metric(outputs, targets)
-                    metrics[metric.__class__.__name__] += metric(inversed_outputs, inversed_inputs)
+                    transformed_metrics[metric.__class__.__name__] += metric(
+                        outputs, targets
+                    )
+                    metrics[metric.__class__.__name__] += metric(
+                        inversed_outputs, inversed_inputs
+                    )

         for metric in self.metrics_to_track:
             metrics[metric.__class__.__name__] /= total_amount_samples
@@ -112,16 +166,20 @@ class AutoRegressiveTrainer(Trainer):

         for metric_name, metric_value in metrics.items():
             if train:
-                metric_name = f'train_{metric_name}'
+                metric_name = f"train_{metric_name}"
             else:
-                metric_name = f'test_{metric_name}'
-
-            task.get_logger().report_single_value(name=metric_name, value=metric_value)
+                metric_name = f"test_{metric_name}"
+
+            task.get_logger().report_single_value(
+                name=metric_name, value=metric_value
+            )

         for metric_name, metric_value in transformed_metrics.items():
             if train:
-                metric_name = f'train_transformed_{metric_name}'
+                metric_name = f"train_transformed_{metric_name}"
             else:
-                metric_name = f'test_transformed_{metric_name}'
+                metric_name = f"test_transformed_{metric_name}"

-            task.get_logger().report_single_value(name=metric_name, value=metric_value)
+            task.get_logger().report_single_value(
+                name=metric_name, value=metric_value
+            )
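
The hunks in auto_regressive change the rollout to slice prev_features[1:96] and to feed prev_features (rather than new_features) back into the model. Below is a minimal sketch of the sliding-window rollout pattern those lines implement; the window length of 96, the single feature column, and the stand-in linear model are illustrative assumptions, not the repository's actual classes or shapes.

import torch

# Minimal sketch of an autoregressive rollout: drop the oldest step from the
# input window, append the newest prediction, and predict again.
window = 96
model = torch.nn.Linear(window, 1)      # stand-in for the trained model
prev_features = torch.randn(window)     # last fully observed window
predictions = []

with torch.no_grad():                   # no gradients needed for evaluation
    for _ in range(window):
        # predict the next step from the current window
        prediction = model(prev_features.unsqueeze(0)).squeeze(0)
        predictions.append(prediction)
        # slide the window: remove the oldest step, append the new prediction,
        # and reuse the updated window on the next iteration
        prev_features = torch.cat((prev_features[1:], prediction), dim=0)

rollout = torch.stack(predictions)      # shape: (window, 1)

Feeding prev_features back in (instead of a tensor that only contains the newly concatenated slice) is what keeps the model's input the same size on every step of the loop, which is the point of the new_features -> prev_features fix above.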
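debug_plots builds a one-column Plotly subplot grid, copies each sample's traces into its own row, and ships the figure to ClearML via report_plotly. The sketch below shows that pattern in isolation, assuming an already initialised ClearML task; the project/task names and the synthetic sample data are placeholders.

import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from clearml import Task

# Placeholder task; in the trainer this is passed in as `task`.
task = Task.init(project_name="demo", task_name="debug-plots-sketch")

samples = [np.sin(np.linspace(0, 6.28, 96) + phase) for phase in (0.0, 1.0, 2.0)]
rows = len(samples)

fig = make_subplots(rows=rows, cols=1, subplot_titles=[f"Sample {i+1}" for i in range(rows)])
for i, y in enumerate(samples):
    # one row per sample; only the first row contributes to the legend
    fig.add_trace(go.Scatter(y=y, name="prediction", showlegend=(i == 0)), row=i + 1, col=1)

fig.update_yaxes(range=[-1, 1], col=1)   # shared y-range across all rows
fig.update_layout(height=300 * rows)

task.get_logger().report_plotly(
    title="Training Samples", series="full_day", iteration=0, figure=fig
)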