fix full_fp16 compatibility and train_step

This commit is contained in:
BootsofLagrangian
2024-02-07 16:42:05 +09:00
parent 7d2a9268b9
commit 62556619bd
6 changed files with 121 additions and 40 deletions

View File

@@ -221,18 +221,10 @@ def train(args):
# 学習ステップ数を計算する
if args.max_train_epochs is not None:
if args.deepspeed:
args.max_train_steps = args.max_train_epochs * math.ceil(
len(train_dataloader) / args.gradient_accumulation_steps
)
accelerator.print(
f"[DeepSpeed] override steps not dividing by {accelerator.num_processes}. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}"
)
else:
args.max_train_steps = args.max_train_epochs * math.ceil(
len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
)
accelerator.print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
args.max_train_steps = args.max_train_epochs * math.ceil(
len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
)
accelerator.print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
# データセット側にも学習ステップを送信
train_dataset_group.set_max_train_steps(args.max_train_steps)