Found the reason gradient accumulation steps were not working: it was because of my accelerate settings

This commit is contained in:
BootsofLagrangian
2024-02-09 17:47:49 +09:00
parent a98fecaeb1
commit 03f0816f86
4 changed files with 14 additions and 7 deletions

View File

@@ -224,8 +224,9 @@ def train(args):
args.max_train_steps = args.max_train_epochs * math.ceil(
len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
)
accelerator.print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
accelerator.print(
f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}"
)
# データセット側にも学習ステップを送信
train_dataset_group.set_max_train_steps(args.max_train_steps)