From ae0872ba3b461ed9ef4430e0f1bda556ff412c59 Mon Sep 17 00:00:00 2001
From: ykume
Date: Sun, 4 Feb 2024 14:56:31 +0900
Subject: [PATCH] search block-wise application weights

---
 README.md                     |   36 ++
 networks/lora.py              |   41 +-
 train_network_appl_weights.py | 1039 +++++++++++++++++++++++++++++++++
 3 files changed, 1115 insertions(+), 1 deletion(-)
 create mode 100644 train_network_appl_weights.py

diff --git a/README.md b/README.md
index e2c60670..cccfc1e3 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,39 @@
+## Searching for block-wise LoRA application weights
+
+`train_network_appl_weights.py`, which searches for block-wise application weights, has been added. Currently only SDXL is supported.
+
+Given one or more trained networks such as LoRA, the script runs the usual training process while varying the block-wise application weights. In other words, it searches for the per-block weights that make the generated images closest to the training data.
+
+The sum of the block-wise weights can be added to the loss as a penalty, so the search should find weights that reproduce the images while keeping the weights of the less influential blocks low.
+
+Multiple networks can be searched at once. The search requires at least one training image.
+
+(I have not verified the minimum number of images needed for it to work correctly; it has been tested with about 50 images. The training data probably does not have to be the data the LoRA was trained on, but this is also unverified.)
+
+The command-line options are almost the same as those of `sdxl_train_network.py`, with the following options added or extended. An example invocation is shown after this list.
+
+- `--application_loss_weight` : the weight used when adding the block-wise application weights to the loss. The default is 0.0001. Larger values push the search toward lower application weights. With 0, no penalty is applied, and the search is free to find the weights with the highest fidelity.
+- `--network_module` : multiple target modules can be specified, e.g. `--network_module networks.lora networks.lora`.
+- `--network_weights` : the weights of the multiple target networks can be specified, e.g. `--network_weights model1.safetensors model2.safetensors`.
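+
+For example, an invocation might look like the following (hypothetical paths; the remaining options are the usual `sdxl_train_network.py` training options):
+
+```
+accelerate launch train_network_appl_weights.py \
+    --pretrained_model_name_or_path sd_xl_base_1.0.safetensors \
+    --train_data_dir train_images --output_dir output --output_name appl_weights \
+    --optimizer_type AdamW --learning_rate 1e-1 \
+    --network_module networks.lora networks.lora \
+    --network_weights style.safetensors character.safetensors \
+    --application_loss_weight 0.0001
+```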
+
+There are 20 block-wise weight parameters per network: `BASE, IN00-08, MID, OUT00-08`. `BASE` is applied to the Text Encoder. (The behavior with LoRAs that target the Text Encoder is unverified.) The mapping from LoRA module names to these indices is sketched below.
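+
+A sketch of the index layout, mirroring `set_block_wise_weights` in `networks/lora.py` (for orientation only, not part of the script's API):
+
+```python
+# 0: BASE (Text Encoder), 1-9: IN00-IN08, 10: MID, 11-19: OUT00-OUT08
+def block_index(lora_name: str) -> int:
+    key = lora_name[len("lora_unet_") :]
+    if key.startswith("input_blocks"):
+        return int(key.split("_")[2]) + 1  # IN00-IN08 -> 1-9
+    if key.startswith("middle_block"):
+        return 10  # MID
+    if key.startswith("output_blocks"):
+        return int(key.split("_")[2]) + 11  # OUT00-OUT08 -> 11-19
+    return 0  # unknown names fall back to BASE
+```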
+
+The parameters are saved to a file, but I recommend copying the values printed to the console and saving them yourself.
+
+### Notes
+
+Operation has been confirmed with the AdamW optimizer and a learning rate of 1e-1; the learning rate can apparently be set quite high. With these settings, reasonable results are obtained in roughly 1/20 to 1/10 of the number of epochs used for LoRA training.
+
+Setting `application_loss_weight` larger than 0.0001 seems to make the total application weights quite low (i.e. the LoRA is barely applied). This depends on your setup, so adjust as needed.
+
+If negative values were penalized at face value, the search would drive the weights of the less influential blocks to extreme negative values just to reduce the total, so negative values are weighted 10x in the penalty (-0.01 incurs roughly the same penalty as 0.1); see the sketch below. To change this weighting, edit the source.
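+
+Concretely, the penalty added to the loss is computed as follows (a minimal sketch mirroring the training loop, where `w` is one network's vector of application weights):
+
+```python
+import torch
+
+def application_penalty(w: torch.Tensor) -> torch.Tensor:
+    # L1 penalty on the weights, with negative values weighted 10x more
+    return w.abs().sum() + w[w < 0].abs().sum() * 10
+
+# loss = mse_loss + application_penalty(w) * args.application_loss_weight
+```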
+
+Besides the straightforward use of lowering the weights of unnecessary blocks to narrow a LoRA's influence, other uses are conceivable: with images of a character in a certain pose as training data, searching for the application weights of a pose LoRA that keep the character intact; or, with images of a character in a certain art style as training data, searching for the weights of a style LoRA and a character LoRA together.
+
+It might also be possible to use images of a character deliberately rendered in a different art style to find the blocks needed to reproduce the character's attributes, or to apply many promising LoRAs to ideal training images and search for the application weights that reproduce them best (though training slows down as the number of LoRAs grows).
+
+---
+
 __SDXL is now supported. The sdxl branch has been merged into the main branch. If you update the repository, please follow the upgrade instructions. Also, the version of accelerate has been updated, so please run accelerate config again.__ The documentation for SDXL training is [here](./README.md#sdxl-training).
 
 This repository contains training, generation and utility scripts for Stable Diffusion.
diff --git a/networks/lora.py b/networks/lora.py
index 0c75cd42..333abbb2 100644
--- a/networks/lora.py
+++ b/networks/lora.py
@@ -511,7 +511,9 @@ def get_block_dims_and_alphas(
             len(block_dims) == num_total_blocks
         ), f"block_dims must have {num_total_blocks} elements / block_dimsは{num_total_blocks}個指定してください"
     else:
-        print(f"block_dims is not specified. all dims are set to {network_dim} / block_dimsが指定されていません。すべてのdimは{network_dim}になります")
+        print(
+            f"block_dims is not specified. all dims are set to {network_dim} / block_dimsが指定されていません。すべてのdimは{network_dim}になります"
+        )
         block_dims = [network_dim] * num_total_blocks
 
     if block_alphas is not None:
@@ -1223,3 +1225,40 @@ class LoRANetwork(torch.nn.Module):
                 norms.append(scalednorm.item())
 
         return keys_scaled, sum(norms) / len(norms), max(norms)
+
+    # region application weight
+
+    def get_number_of_blocks(self):
+        # only for SDXL
+        return 20
+
+    def has_text_encoder_block(self):
+        return self.text_encoder_loras is not None and len(self.text_encoder_loras) > 0
+
+    def set_block_wise_weights(self, weights):
+        if self.text_encoder_loras:
+            for lora in self.text_encoder_loras:
+                lora.multiplier = weights[0]
+
+        for lora in self.unet_loras:
+            # determine block index
+            key = lora.lora_name[10:]  # remove "lora_unet_"
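+            # e.g. "input_blocks_4_1_proj_in" -> IN04 -> index 5, "middle_block_1_..." -> MID -> index 10,
+            # "output_blocks_3_..." -> OUT03 -> index 14 (index 0 is BASE, i.e. the Text Encoder)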
+            if key.startswith("input_blocks"):
+                block_index = int(key.split("_")[2]) + 1  # 1-9
+            elif key.startswith("middle_block"):
+                block_index = 10  # int(key.split("_")[2]) + 10
+            elif key.startswith("output_blocks"):
+                block_index = int(key.split("_")[2]) + 11  # 11-19
+            else:
+                print(f"unknown block: {key}")
+                block_index = 0
+
+            lora.multiplier = weights[block_index]
+
+            # print(f"{lora.lora_name} block index: {block_index}, weight: {lora.multiplier}")
+        # print(f"set block-wise weights to {weights}")
+
+    # TODO precompute the LoRA weights in advance so that applying them is just a multiply; that should be faster
+
+    # endregion
diff --git a/train_network_appl_weights.py b/train_network_appl_weights.py
new file mode 100644
index 00000000..4b6bb378
--- /dev/null
+++ b/train_network_appl_weights.py
@@ -0,0 +1,1039 @@
+import importlib
+import argparse
+import gc
+import math
+import os
+import sys
+import random
+import time
+import json
+from multiprocessing import Value
+import toml
+
+from tqdm import tqdm
+import torch
+from torch.nn.parallel import DistributedDataParallel as DDP
+
+from library.ipex_interop import init_ipex
+
+init_ipex()
+
+from accelerate.utils import set_seed
+from diffusers import DDPMScheduler
+from library import model_util, sdxl_model_util, sdxl_train_util
+
+import library.train_util as train_util
+from library.train_util import (
+    DreamBoothDataset,
+)
+import library.config_util as config_util
+from library.config_util import (
+    ConfigSanitizer,
+    BlueprintGenerator,
+)
+import library.huggingface_util as huggingface_util
+import library.custom_train_functions as custom_train_functions
+from library.custom_train_functions import (
+    apply_snr_weight,
+    get_weighted_text_embeddings,
+    prepare_scheduler_for_custom_training,
+    scale_v_prediction_loss_like_noise_prediction,
+    add_v_prediction_like_loss,
+    apply_debiased_estimation,
+)
+
+
+class NetworkApplicationWeights(torch.nn.Module):
+    def __init__(self, is_sdxl, num_networks, num_weights_for_network):
+        super().__init__()
+        self.is_sdxl = is_sdxl
+        self.num_networks = num_networks
+        self.num_weights_for_network = num_weights_for_network
+        # self.weights = torch.nn.Parameter(torch.rand(sum(num_weights_for_network), requires_grad=True))
+        # self.weights = torch.nn.Parameter(torch.zeros(sum(num_weights_for_network), requires_grad=True))
+        self.weights = torch.nn.Parameter(torch.full((sum(num_weights_for_network),), 0.5, requires_grad=True))
+
+    def apply_weights(self, networks):
+        weight_index = 0
+        for i, network in enumerate(networks):
+            network_weights = self.weights[weight_index : weight_index + self.num_weights_for_network[i]]
+            weight_index += self.num_weights_for_network[i]
+            network.set_block_wise_weights(network_weights)
+
+    def forward(self, networks, unet_func, unet_args):
+        self.apply_weights(networks)
+
+        # I'm not sure if this is the correct structure. Is it okay not to call the unet here?
+        return unet_func(*unet_args)
+
+    def print_parameters(self):
+        weight_index = 0
+        for i in range(self.num_networks):
+            network_weights = self.weights[weight_index : weight_index + self.num_weights_for_network[i]]
+            weight_index += self.num_weights_for_network[i]
+
+            network_weights = network_weights.detach().cpu().numpy()
+            weights_str = ",".join([f"{w:.3f}" for w in network_weights])
+            print(f"Network {i} weights: {weights_str}")
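+
+
+# A sketch of the flow in train() below, for orientation: the application weights are the
+# only trainable parameters (20 per network for SDXL); the LoRA networks, U-Net and Text
+# Encoders stay frozen, so the gradient flows only into the block-wise multipliers:
+#   network_application = NetworkApplicationWeights(True, len(networks), [n.get_number_of_blocks() for n in networks])
+#   optimizer_name, optimizer_args, optimizer = train_util.get_optimizer(args, network_application.parameters())
+#   noise_pred = network_application(networks, unet_func, unet_args)  # sets multipliers, then calls the U-Net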
+
+
+class NetworkAppTrainer:
+    def __init__(self):
+        self.vae_scale_factor = 0.18215
+        self.is_sdxl = False
+
+    # TODO unify this with the other scripts
+    def generate_step_logs(
+        self, args: argparse.Namespace, current_loss, avr_loss, lr_scheduler, keys_scaled=None, mean_norm=None, maximum_norm=None
+    ):
+        logs = {"loss/current": current_loss, "loss/average": avr_loss}
+
+        if keys_scaled is not None:
+            logs["max_norm/keys_scaled"] = keys_scaled
+            logs["max_norm/average_key_norm"] = mean_norm
+            logs["max_norm/max_key_norm"] = maximum_norm
+
+        lrs = lr_scheduler.get_last_lr()
+
+        if args.network_train_text_encoder_only or len(lrs) <= 2:  # not block lr (or single block)
+            if args.network_train_unet_only:
+                logs["lr/unet"] = float(lrs[0])
+            elif args.network_train_text_encoder_only:
+                logs["lr/textencoder"] = float(lrs[0])
+            else:
+                logs["lr/textencoder"] = float(lrs[0])
+                logs["lr/unet"] = float(lrs[-1])  # may be the same as textencoder
+
+            if (
+                args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower()
+            ):  # tracking d*lr value of unet.
+                logs["lr/d*lr"] = (
+                    lr_scheduler.optimizers[-1].param_groups[0]["d"] * lr_scheduler.optimizers[-1].param_groups[0]["lr"]
+                )
+        else:
+            idx = 0
+            if not args.network_train_unet_only:
+                logs["lr/textencoder"] = float(lrs[0])
+                idx = 1
+
+            for i in range(idx, len(lrs)):
+                logs[f"lr/group{i}"] = float(lrs[i])
+                if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower():
+                    logs[f"lr/d*lr/group{i}"] = (
+                        lr_scheduler.optimizers[-1].param_groups[i]["d"] * lr_scheduler.optimizers[-1].param_groups[i]["lr"]
+                    )
+
+        return logs
+
+    def assert_extra_args(self, args, train_dataset_group):
+        pass
+
+    def load_target_model(self, args, weight_dtype, accelerator):
+        text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype, accelerator)
+        return model_util.get_model_version_str_for_sd1_sd2(args.v2, args.v_parameterization), text_encoder, vae, unet
+
+    def load_tokenizer(self, args):
+        tokenizer = train_util.load_tokenizer(args)
+        return tokenizer
+
+    def is_text_encoder_outputs_cached(self, args):
+        return False
+
+    def is_train_text_encoder(self, args):
+        return not args.network_train_unet_only and not self.is_text_encoder_outputs_cached(args)
+
+    def cache_text_encoder_outputs_if_needed(
+        self, args, accelerator, unet, vae, tokenizers, text_encoders, data_loader, weight_dtype
+    ):
+        for t_enc in text_encoders:
+            t_enc.to(accelerator.device, dtype=weight_dtype)
+
+    def get_text_cond(self, args, accelerator, batch, tokenizers, text_encoders, weight_dtype):
+        input_ids = batch["input_ids"].to(accelerator.device)
+        encoder_hidden_states = train_util.get_hidden_states(args, input_ids, tokenizers[0], text_encoders[0], weight_dtype)
+        return encoder_hidden_states
+
+    def call_unet(self, args, accelerator, unet, noisy_latents, timesteps, text_conds, batch, weight_dtype):
+        noise_pred = unet(noisy_latents, timesteps, text_conds).sample
+        return noise_pred
+
+    def all_reduce_network(self, accelerator, network):
+        for param in network.parameters():
+            if param.grad is not None:
+                param.grad = accelerator.reduce(param.grad, reduction="mean")
+
+    def sample_images(self, accelerator, args, epoch, global_step, device, vae, tokenizer, text_encoder, unet):
+        train_util.sample_images(accelerator, args, epoch, global_step, device, vae, tokenizer, text_encoder, unet)
+
+    def train(self, args):
+        session_id = random.randint(0, 2**32)
+        training_started_at = time.time()
+        train_util.verify_training_args(args)
+        train_util.prepare_dataset_args(args, True)
+
+        cache_latents = args.cache_latents
+        use_dreambooth_method = args.in_json is None
+        use_user_config = args.dataset_config is not None
+
+        if args.seed is None:
+            args.seed = random.randint(0, 2**32)
+        set_seed(args.seed)
+
+        # tokenizer is a single tokenizer or a list; tokenizers is always a list (kept for compatibility with existing code)
+        tokenizer = self.load_tokenizer(args)
+        tokenizers = tokenizer if isinstance(tokenizer, list) else [tokenizer]
+
+        # prepare the dataset
+        if args.dataset_class is None:
+            blueprint_generator = BlueprintGenerator(ConfigSanitizer(True, True, False, True))
+            if use_user_config:
+                print(f"Loading dataset config from {args.dataset_config}")
+                user_config = config_util.load_user_config(args.dataset_config)
+                ignored = ["train_data_dir", "reg_data_dir", "in_json"]
+                if any(getattr(args, attr) is not None for attr in ignored):
+                    print(
+                        "ignoring the following options because config file is found: {0} / 設定ファイルが利用されるため以下のオプションは無視されます: {0}".format(
+                            ", ".join(ignored)
+                        )
+                    )
+            else:
+                if use_dreambooth_method:
+                    print("Using DreamBooth method.")
+                    user_config = {
+                        "datasets": [
+                            {
+                                "subsets": config_util.generate_dreambooth_subsets_config_by_subdirs(
+                                    args.train_data_dir, args.reg_data_dir
+                                )
+                            }
+                        ]
+                    }
+                else:
+                    print("Training with captions.")
+                    user_config = {
+                        "datasets": [
+                            {
+                                "subsets": [
+                                    {
+                                        "image_dir": args.train_data_dir,
+                                        "metadata_file": args.in_json,
+                                    }
+                                ]
+                            }
+                        ]
+                    }
+
+            blueprint = blueprint_generator.generate(user_config, args, tokenizer=tokenizer)
+            train_dataset_group = config_util.generate_dataset_group_by_blueprint(blueprint.dataset_group)
+        else:
+            # use arbitrary dataset class
+            train_dataset_group = train_util.load_arbitrary_dataset(args, tokenizer)
+
+        current_epoch = Value("i", 0)
+        current_step = Value("i", 0)
+        ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+        collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)
+
+        if args.debug_dataset:
+            train_util.debug_dataset(train_dataset_group)
+            return
+        if len(train_dataset_group) == 0:
+            print(
+                "No data found. Please verify arguments (train_data_dir must be the parent of folders with images) / 画像がありません。引数指定を確認してください(train_data_dirには画像があるフォルダではなく、画像があるフォルダの親フォルダを指定する必要があります)"
+            )
+            return
+
+        if cache_latents:
+            assert (
+                train_dataset_group.is_latent_cacheable()
+            ), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
+
+        self.assert_extra_args(args, train_dataset_group)
+
+        # prepare the accelerator
+        print("preparing accelerator")
+        accelerator = train_util.prepare_accelerator(args)
+        is_main_process = accelerator.is_main_process
+
+        # prepare dtypes for mixed precision and cast as needed
+        weight_dtype, save_dtype = train_util.prepare_dtype(args)
+        vae_dtype = torch.float32 if args.no_half_vae else weight_dtype
+
+        # load the target model
+        model_version, text_encoder, vae, unet = self.load_target_model(args, weight_dtype, accelerator)
+
+        # text_encoder is List[CLIPTextModel] or CLIPTextModel
+        text_encoders = text_encoder if isinstance(text_encoder, list) else [text_encoder]
+
+        # incorporate xformers or memory efficient attention into the model
+        train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers, args.sdpa)
+        if torch.__version__ >= "2.0.0":  # usable with xformers that supports PyTorch 2.0.0 or later
+            vae.set_use_memory_efficient_attention_xformers(args.xformers)
+
+        # # load networks to merge into the model before training
+        sys.path.append(os.path.dirname(__file__))
+        # accelerator.print("import network module:", args.network_module)
+        # network_module = importlib.import_module(args.network_module)
+
+        if args.base_weights is not None:
+            # if base_weights is specified, load the weights and merge them into the model
+            # currently the 1st network_module is used for merging weights
+            network_module = importlib.import_module(args.network_module[0])
+            for i, weight_path in enumerate(args.base_weights):
+                if args.base_weights_multiplier is None or len(args.base_weights_multiplier) <= i:
+                    multiplier = 1.0
+                else:
+                    multiplier = args.base_weights_multiplier[i]
+
+                accelerator.print(f"merging module: {weight_path} with multiplier {multiplier}")
+
+                module, weights_sd = network_module.create_network_from_weights(
+                    multiplier, weight_path, vae, text_encoder, unet, for_inference=True
+                )
+                module.merge_to(text_encoder, unet, weights_sd, weight_dtype, accelerator.device if args.lowram else "cpu")
+
+            accelerator.print(f"all weights merged: {', '.join(args.base_weights)}")
+
+        # prepare for training
+        if cache_latents:
+            vae.to(accelerator.device, dtype=vae_dtype)
+            vae.requires_grad_(False)
+            vae.eval()
+            with torch.no_grad():
+                train_dataset_group.cache_latents(vae, args.vae_batch_size, args.cache_latents_to_disk, accelerator.is_main_process)
+            vae.to("cpu")
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()
+            gc.collect()
+
+            accelerator.wait_for_everyone()
+
+        # cache text encoder outputs if needed: the Text Encoder is moved to cpu or gpu
+        self.cache_text_encoder_outputs_if_needed(
+            args, accelerator, unet, vae, tokenizers, text_encoders, train_dataset_group, weight_dtype
+        )
+
+        # prepare the networks to search
+        networks = []
+        for network_module_name, network_weight in zip(args.network_module, args.network_weights):
+            accelerator.print("import network module:", network_module_name)
+            network_module = importlib.import_module(network_module_name)
+
+            # currently network_kwargs is not supported
+            network, _ = network_module.create_network_from_weights(1, network_weight, vae, text_encoder, unet)  # , **net_kwargs)
+            if hasattr(network, "prepare_network"):
+                network.prepare_network(args)
+
+            # Text Encoder-only LoRA is not supported
+            network.apply_to(text_encoder, unet, network.has_text_encoder_block(), True)  # train_unet)
+
+            info = network.load_weights(network_weight)
+            accelerator.print(f"load network weights from {network_weight}: {info}")
+
+            assert hasattr(network, "set_block_wise_weights"), "network should have set_block_wise_weights method"
+
+            networks.append(network)
+
+        if args.gradient_checkpointing:
+            unet.enable_gradient_checkpointing()
+            for t_enc in text_encoders:
+                t_enc.gradient_checkpointing_enable()
+            del t_enc
+
+        # prepare the classes needed for training
+        accelerator.print("prepare optimizer, data loader etc.")
+
+        network_application = NetworkApplicationWeights(
+            self.is_sdxl, len(networks), [network.get_number_of_blocks() for network in networks]
+        )
+        trainable_params = network_application.parameters()
+
+        train_text_encoder = any([network.has_text_encoder_block() for network in networks])
+        print(f"train_text_encoder: {train_text_encoder}")
+
+        optimizer_name, optimizer_args, optimizer = train_util.get_optimizer(args, trainable_params)
+
+        # prepare the DataLoader; a worker count of 0 means the main process loads the data
+        n_workers = min(args.max_data_loader_n_workers, os.cpu_count() - 1)  # cpu_count - 1, capped at the specified number
+
+        train_dataloader = torch.utils.data.DataLoader(
+            train_dataset_group,
+            batch_size=1,
+            shuffle=True,
+            collate_fn=collator,
+            num_workers=n_workers,
+            persistent_workers=args.persistent_data_loader_workers,
+        )
+
+        # calculate the number of training steps
+        if args.max_train_epochs is not None:
+            args.max_train_steps = args.max_train_epochs * math.ceil(
+                len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
+            )
+            accelerator.print(
+                f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}"
+            )
+
+        # send the number of training steps to the dataset side as well
+        train_dataset_group.set_max_train_steps(args.max_train_steps)
+
+        # prepare the lr scheduler
+        lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes)
+
+        # # experimental feature: fp16/bf16 training including gradients; cast the whole model to fp16/bf16
+        # if args.full_fp16:
+        #     assert (
+        #         args.mixed_precision == "fp16"
+        #     ), "full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。"
+        #     accelerator.print("enable full fp16 training.")
+        #     network.to(weight_dtype)
+        # elif args.full_bf16:
+        #     assert (
+        #         args.mixed_precision == "bf16"
+        #     ), "full_bf16 requires mixed precision='bf16' / full_bf16を使う場合はmixed_precision='bf16'を指定してください。"
+        #     accelerator.print("enable full bf16 training.")
+        #     network.to(weight_dtype)
+
+        unet_weight_dtype = te_weight_dtype = weight_dtype
+        # experimental feature: put the base model into fp8 to save VRAM
+        if args.fp8_base:
+            assert torch.__version__ >= "2.1.0", "fp8_base requires torch>=2.1.0 / fp8を使う場合はtorch>=2.1.0が必要です。"
+            assert (
+                args.mixed_precision != "no"
+            ), "fp8_base requires mixed precision='fp16' or 'bf16' / fp8を使う場合はmixed_precision='fp16'または'bf16'が必要です。"
+            accelerator.print("enable fp8 training.")
+            unet_weight_dtype = torch.float8_e4m3fn
+            te_weight_dtype = torch.float8_e4m3fn
+
+        unet.requires_grad_(False)
+        unet.to(dtype=unet_weight_dtype)
+        for t_enc in text_encoders:
+            t_enc.requires_grad_(False)
+
+            # in case of cpu, dtype is already set to fp32 because cpu does not support fp8/fp16/bf16
+            if t_enc.device.type != "cpu":
+                t_enc.to(dtype=te_weight_dtype)
+                # nn.Embedding does not support FP8
+                t_enc.text_model.embeddings.to(dtype=(weight_dtype if te_weight_dtype != weight_dtype else te_weight_dtype))
+
+        for network in networks:
+            network.requires_grad_(False)
+            network.to(dtype=weight_dtype).to(accelerator.device)
+
+        # accelerator wraps the models appropriately
+        # if train_unet:
+        unet = accelerator.prepare(unet)
+        # else:
+        #     unet.to(accelerator.device, dtype=unet_weight_dtype)  # move to device because unet is not prepared by accelerator
+        if train_text_encoder:
+            if len(text_encoders) > 1:
+                text_encoder = text_encoders = [accelerator.prepare(t_enc) for t_enc in text_encoders]
+            else:
+                text_encoder = accelerator.prepare(text_encoder)
+                text_encoders = [text_encoder]
+        # else:
+        #     pass  # if text_encoder is not trained, no need to prepare. and device and dtype are already set
+
+        network_application, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
+            network_application, optimizer, train_dataloader, lr_scheduler
+        )
+
+        if args.gradient_checkpointing:
+            # according to the TI example in Diffusers, train() is required
+            unet.train()
+            for t_enc in text_encoders:
+                t_enc.train()
+
+                # set requires_grad = True on the top-level parameters so that gradient checkpointing works
+                if train_text_encoder:
+                    t_enc.text_model.embeddings.requires_grad_(True)
+
+        else:
+            unet.eval()
+            for t_enc in text_encoders:
+                t_enc.eval()
+
+        for network in networks:
+            network.eval()
+
+        del t_enc
+
+        # accelerator.unwrap_model(network_application).prepare_grad_etc(text_encoder, unet)
+
+        if not cache_latents:  # when not caching latents, the VAE is used, so prepare it
+            vae.requires_grad_(False)
+            vae.eval()
+            vae.to(accelerator.device, dtype=vae_dtype)
+
+        # # experimental feature: fp16 training including gradients; patch PyTorch to enable grad scaling in fp16
+        # if args.full_fp16:
+        #     train_util.patch_accelerator_for_fp16_training(accelerator)
+
+        # resume training state if specified
+        train_util.resume_from_local_or_hf_if_specified(accelerator, args)
+
+        # calculate the number of epochs
+        num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
+        num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
+        if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
+            args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1
+
+        # start training
+        # TODO: find a way to handle total batch size when there are multiple datasets
+        total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
+
+        accelerator.print("running training / 学習開始")
+        accelerator.print(f"  num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}")
+        accelerator.print(f"  num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}")
+        accelerator.print(f"  num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
+        accelerator.print(f"  num epochs / epoch数: {num_train_epochs}")
+        accelerator.print(
+            f"  batch size per device / バッチサイズ: {', '.join([str(d.batch_size) for d in train_dataset_group.datasets])}"
+        )
+        # accelerator.print(f"  total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
+        accelerator.print(f"  gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
+        accelerator.print(f"  total optimization steps / 学習ステップ数: {args.max_train_steps}")
+
+        progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps")
+        global_step = 0
+
+        noise_scheduler = DDPMScheduler(
+            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False
+        )
+        prepare_scheduler_for_custom_training(noise_scheduler, accelerator.device)
+        if args.zero_terminal_snr:
+            custom_train_functions.fix_noise_scheduler_betas_for_zero_terminal_snr(noise_scheduler)
+
+        if accelerator.is_main_process:
+            init_kwargs = {}
+            if args.wandb_run_name:
+                init_kwargs["wandb"] = {"name": args.wandb_run_name}
+            if args.log_tracker_config is not None:
+                init_kwargs = toml.load(args.log_tracker_config)
+            accelerator.init_trackers(
+                "network_train" if args.log_tracker_name is None else args.log_tracker_name, init_kwargs=init_kwargs
+            )
+
+        loss_recorder = train_util.LossRecorder()
+        del train_dataset_group
+
+        # # callback for step start
+        # if hasattr(accelerator.unwrap_model(network_application), "on_step_start"):
+        #     on_step_start = accelerator.unwrap_model(network_application).on_step_start
+        # else:
+        #     on_step_start = lambda *args, **kwargs: None
+
+        # functions for saving/removing checkpoints
+        def save_model(ckpt_name, unwrapped_nw, steps, epoch_no, force_sync_upload=False):
+            os.makedirs(args.output_dir, exist_ok=True)
+            ckpt_file = os.path.join(args.output_dir, ckpt_name)
+            accelerator.print(f"\nsaving checkpoint: {ckpt_file}")
+
+            state_dict = unwrapped_nw.state_dict()
+            if os.path.splitext(ckpt_file)[1] == ".safetensors":
+                from safetensors.torch import save_file
+
+                save_file(state_dict, ckpt_file)
+            else:
+                torch.save(state_dict, ckpt_file)
+
+            # print the parameters so they can be copied from the console
+            unwrapped_nw.print_parameters()
+
+        def remove_model(old_ckpt_name):
+            old_ckpt_file = os.path.join(args.output_dir, old_ckpt_name)
+            if os.path.exists(old_ckpt_file):
+                accelerator.print(f"removing old checkpoint: {old_ckpt_file}")
+                os.remove(old_ckpt_file)
+
+        # For --sample_at_first
+        accelerator.unwrap_model(network_application).apply_weights(networks)
+        accelerator.unwrap_model(network_application).print_parameters()
+
+        self.sample_images(accelerator, args, 0, global_step, accelerator.device, vae, tokenizer, text_encoder, unet)
+
+        # training loop
+        for epoch in range(num_train_epochs):
+            accelerator.print(f"\nepoch {epoch+1}/{num_train_epochs}")
+            current_epoch.value = epoch + 1
+
+            # accelerator.unwrap_model(network_application).on_epoch_start(text_encoder, unet)
+
+            for step, batch in enumerate(train_dataloader):
+                current_step.value = global_step
+                with accelerator.accumulate(network_application):
+                    # on_step_start(text_encoder, unet)
+
+                    with torch.no_grad():
+                        if "latents" in batch and batch["latents"] is not None:
+                            latents = batch["latents"].to(accelerator.device)
+                        else:
+                            # convert images to latents
+                            latents = vae.encode(batch["images"].to(dtype=vae_dtype)).latent_dist.sample()
+
+                            # if NaNs are found, warn and replace them with zeros
+                            if torch.any(torch.isnan(latents)):
+                                accelerator.print("NaN found in latents, replacing with zeros")
+                                latents = torch.nan_to_num(latents, 0, out=latents)
+                        latents = latents * self.vae_scale_factor
+
+                    with torch.set_grad_enabled(train_text_encoder), accelerator.autocast():
+                        # Get the text embedding for conditioning
+                        if args.weighted_captions:
+                            text_encoder_conds = get_weighted_text_embeddings(
+                                tokenizer,
+                                text_encoder,
+                                batch["captions"],
+                                accelerator.device,
+                                args.max_token_length // 75 if args.max_token_length else 1,
+                                clip_skip=args.clip_skip,
+                            )
+                        else:
+                            text_encoder_conds = self.get_text_cond(
+                                args, accelerator, batch, tokenizers, text_encoders, weight_dtype
+                            )
+
+                    # Sample noise, sample a random timestep for each image, and add noise to the latents,
+                    # with noise offset and/or multires noise if specified
+                    noise, noisy_latents, timesteps = train_util.get_noise_noisy_latents_and_timesteps(
+                        args, noise_scheduler, latents
+                    )
+
+                    # ensure the hidden state will require grad
+                    if args.gradient_checkpointing:
+                        for x in noisy_latents:
+                            x.requires_grad_(True)
+                        for t in text_encoder_conds:
+                            t.requires_grad_(True)
+
+                    # Predict the noise residual
+                    with accelerator.autocast():
+                        unet_func = self.call_unet
+                        unet_args = (
+                            args,
+                            accelerator,
+                            unet,
+                            noisy_latents.requires_grad_(True),
+                            timesteps,
+                            text_encoder_conds,
+                            batch,
+                            weight_dtype,
+                        )
+                        noise_pred = network_application(networks, unet_func, unet_args)
+
+                    if args.v_parameterization:
+                        # v-parameterization training
+                        target = noise_scheduler.get_velocity(latents, noise, timesteps)
+                    else:
+                        target = noise
+
+                    loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction="none")
+                    loss = loss.mean([1, 2, 3])
+
+                    loss_weights = batch["loss_weights"]  # per-sample weights
+                    loss = loss * loss_weights
+
+                    if args.min_snr_gamma:
+                        loss = apply_snr_weight(loss, timesteps, noise_scheduler, args.min_snr_gamma, args.v_parameterization)
+                    if args.scale_v_pred_loss_like_noise_pred:
+                        loss = scale_v_prediction_loss_like_noise_prediction(loss, timesteps, noise_scheduler)
+                    if args.v_pred_like_loss:
+                        loss = add_v_prediction_like_loss(loss, timesteps, noise_scheduler, args.v_pred_like_loss)
+                    if args.debiased_estimation_loss:
+                        loss = apply_debiased_estimation(loss, timesteps, noise_scheduler)
+
+                    loss = loss.mean()  # this is a mean, so no need to divide by batch_size
+
+                    # use the sum of the application weights as an additional loss term
+                    weights_loss = 0
+                    for param in network_application.parameters():
+                        # weights_loss += param.abs().sum()
+                        # negative values are weighted 10x because we want to keep the weights positive
+                        weights_loss += param.abs().sum() + param[param < 0].abs().sum() * 10
+                    loss = loss + weights_loss * args.application_loss_weight
+
+                    accelerator.backward(loss)
+                    if accelerator.sync_gradients:
+                        self.all_reduce_network(accelerator, network_application)  # sync DDP grad manually
+                        # if args.max_grad_norm != 0.0:
+                        #     params_to_clip = accelerator.unwrap_model(network_application).get_trainable_params()
+                        #     accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
+
+                    optimizer.step()
+                    lr_scheduler.step()
+                    optimizer.zero_grad(set_to_none=True)
+
+                # if args.scale_weight_norms:
+                #     keys_scaled, mean_norm, maximum_norm = accelerator.unwrap_model(network_application).apply_max_norm_regularization(
+                #         args.scale_weight_norms, accelerator.device
+                #     )
+                #     max_mean_logs = {"Keys Scaled": keys_scaled, "Average key norm": mean_norm}
+                # else:
+                #     keys_scaled, mean_norm, maximum_norm = None, None, None
+
+                # Checks if the accelerator has performed an optimization step behind the scenes
+                if accelerator.sync_gradients:
+                    progress_bar.update(1)
+                    global_step += 1
+
+                    self.sample_images(accelerator, args, None, global_step, accelerator.device, vae, tokenizer, text_encoder, unet)
+
+                    # save the model every specified number of steps
+                    if args.save_every_n_steps is not None and global_step % args.save_every_n_steps == 0:
+                        accelerator.wait_for_everyone()
+                        if accelerator.is_main_process:
+                            ckpt_name = train_util.get_step_ckpt_name(args, "." + args.save_model_as, global_step)
+                            save_model(ckpt_name, accelerator.unwrap_model(network_application), global_step, epoch)
+
+                            if args.save_state:
+                                train_util.save_and_remove_state_stepwise(args, accelerator, global_step)
+
+                            remove_step_no = train_util.get_remove_step_no(args, global_step)
+                            if remove_step_no is not None:
+                                remove_ckpt_name = train_util.get_step_ckpt_name(args, "." + args.save_model_as, remove_step_no)
+                                remove_model(remove_ckpt_name)
+
+                current_loss = loss.detach().item()
+                loss_recorder.add(epoch=epoch, step=step, loss=current_loss)
+                avr_loss: float = loss_recorder.moving_average
+                logs = {"avr_loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
+
+                # also log the application-weight penalty so its contribution can be monitored
+                logs["ap_loss"] = weights_loss.detach().item()
+
+                progress_bar.set_postfix(**logs)
+
+                # if args.scale_weight_norms:
+                #     progress_bar.set_postfix(**{**max_mean_logs, **logs})
+
+                if args.logging_dir is not None:
+                    # logs = self.generate_step_logs(args, current_loss, avr_loss, lr_scheduler, keys_scaled, mean_norm, maximum_norm)
+                    accelerator.log(logs, step=global_step)
+
+                if global_step >= args.max_train_steps:
+                    break
+
+            if args.logging_dir is not None:
+                logs = {"loss/epoch": loss_recorder.moving_average}
+                accelerator.log(logs, step=epoch + 1)
+
+            accelerator.wait_for_everyone()
+
+            # save the model every specified number of epochs
+            if args.save_every_n_epochs is not None:
+                saving = (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs
+                if is_main_process and saving:
+                    ckpt_name = train_util.get_epoch_ckpt_name(args, "." + args.save_model_as, epoch + 1)
+                    save_model(ckpt_name, accelerator.unwrap_model(network_application), global_step, epoch + 1)
+
+                    remove_epoch_no = train_util.get_remove_epoch_no(args, epoch + 1)
+                    if remove_epoch_no is not None:
+                        remove_ckpt_name = train_util.get_epoch_ckpt_name(args, "." + args.save_model_as, remove_epoch_no)
+                        remove_model(remove_ckpt_name)
+
+                    if args.save_state:
+                        train_util.save_and_remove_state_on_epoch_end(args, accelerator, epoch + 1)
+
+            self.sample_images(accelerator, args, epoch + 1, global_step, accelerator.device, vae, tokenizer, text_encoder, unet)
+
+            # end of epoch
+
+        # metadata["ss_epoch"] = str(num_train_epochs)
+        # metadata["ss_training_finished_at"] = str(time.time())
+
+        if is_main_process:
+            network_application = accelerator.unwrap_model(network_application)
+
+        accelerator.end_training()
+
+        if is_main_process and args.save_state:
+            train_util.save_state_on_train_end(args, accelerator)
+
+        if is_main_process:
+            ckpt_name = train_util.get_last_ckpt_name(args, "." + args.save_model_as)
+            save_model(ckpt_name, network_application, global_step, num_train_epochs, force_sync_upload=True)
+
+            print("model saved.")
+
+
+def setup_parser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser()
+
+    train_util.add_sd_models_arguments(parser)
+    train_util.add_dataset_arguments(parser, True, True, True)
+    train_util.add_training_arguments(parser, True)
+    train_util.add_optimizer_arguments(parser)
+    config_util.add_config_arguments(parser)
+    custom_train_functions.add_custom_train_arguments(parser)
+
+    # parser.add_argument(
+    #     "--no_metadata", action="store_true", help="do not save metadata in output model / メタデータを出力先モデルに保存しない"
+    # )
+    parser.add_argument(
+        "--save_model_as",
+        type=str,
+        default="safetensors",
+        choices=[None, "ckpt", "pt", "safetensors"],
+        help="format to save the model (default is .safetensors) / モデル保存時の形式(デフォルトはsafetensors)",
+    )
+
+    # parser.add_argument("--unet_lr", type=float, default=None, help="learning rate for U-Net / U-Netの学習率")
+    # parser.add_argument("--text_encoder_lr", type=float, default=None, help="learning rate for Text Encoder / Text Encoderの学習率")
+
+    parser.add_argument(
+        "--network_weights",
+        type=str,
+        nargs="+",
+        default=None,
+        help="pretrained weights for network / 学習するネットワークの初期重み",
+    )
+    parser.add_argument(
+        "--network_module", type=str, nargs="+", default=None, help="network module to train / 学習対象のネットワークのモジュール"
+    )
+    # parser.add_argument(
+    #     "--network_dim",
+    #     type=int,
+    #     default=None,
+    #     help="network dimensions (depends on each network) / モジュールの次元数(ネットワークにより定義は異なります)",
+    # )
+    # parser.add_argument(
+    #     "--network_alpha",
+    #     type=float,
+    #     default=1,
+    #     help="alpha for LoRA weight scaling, default 1 (same as network_dim for same behavior as old version) / LoRaの重み調整のalpha値、デフォルト1(旧バージョンと同じ動作をするにはnetwork_dimと同じ値を指定)",
+    # )
+    # parser.add_argument(
+    #     "--network_dropout",
+    #     type=float,
+    #     default=None,
+    #     help="Drops neurons out of training every step (0 or None is default behavior (no dropout), 1 would drop all neurons) / 訓練時に毎ステップでニューロンをdropする(0またはNoneはdropoutなし、1は全ニューロンをdropout)",
+    # )
+    # parser.add_argument(
+    #     "--network_args",
+    #     type=str,
+    #     default=None,
+    #     nargs="*",
+    #     help="additional arguments for network (key=value) / ネットワークへの追加の引数",
+    # )
+    # parser.add_argument(
+    #     "--network_train_unet_only", action="store_true", help="only training U-Net part / U-Net関連部分のみ学習する"
+    # )
+    # parser.add_argument(
+    #     "--network_train_text_encoder_only",
+    #     action="store_true",
+    #     help="only training Text Encoder part / Text Encoder関連部分のみ学習する",
+    # )
+    # parser.add_argument(
+    #     "--training_comment",
+    #     type=str,
+    #     default=None,
+    #     help="arbitrary comment string stored in metadata / メタデータに記録する任意のコメント文字列",
+    # )
+    parser.add_argument(
+        "--dim_from_weights",
+        action="store_true",
+        help="automatically determine dim (rank) from network_weights / dim (rank)をnetwork_weightsで指定した重みから自動で決定する",
+    )
+    parser.add_argument(
+        "--scale_weight_norms",
+        type=float,
+        default=None,
+        help="Scale the weight of each key pair to help prevent overtraining via exploding gradients. (1 is a good starting point) / 重みの値をスケーリングして勾配爆発を防ぐ(1が初期値としては適当)",
+    )
+    parser.add_argument(
+        "--base_weights",
+        type=str,
+        default=None,
+        nargs="*",
+        help="network weights to merge into the model before training / 学習前にあらかじめモデルにマージするnetworkの重みファイル",
+    )
+    parser.add_argument(
+        "--base_weights_multiplier",
+        type=float,
+        default=None,
+        nargs="*",
+        help="multiplier for network weights to merge into the model before training / 学習前にあらかじめモデルにマージするnetworkの重みの倍率",
+    )
+    parser.add_argument(
+        "--no_half_vae",
+        action="store_true",
+        help="do not use fp16/bf16 VAE in mixed precision (use float VAE) / mixed precisionでも fp16/bf16 VAEを使わずfloat VAEを使う",
+    )
+
+    parser.add_argument(
+        "--application_loss_weight", type=float, default=0.0001, help="weight for application loss / application lossの重み"
+    )
+
+    sdxl_train_util.add_sdxl_training_arguments(parser)
+
+    return parser
+
+
+class SdxlNetworkAppTrainer(NetworkAppTrainer):
+    def __init__(self):
+        super().__init__()
+        self.vae_scale_factor = sdxl_model_util.VAE_SCALE_FACTOR
+        self.is_sdxl = True
+
+    def assert_extra_args(self, args, train_dataset_group):
+        super().assert_extra_args(args, train_dataset_group)
+        sdxl_train_util.verify_sdxl_training_args(args)
+
+        if args.cache_text_encoder_outputs:
+            assert (
+                train_dataset_group.is_text_encoder_output_cacheable()
+            ), "when caching Text Encoder output, either caption_dropout_rate, shuffle_caption, token_warmup_step or caption_tag_dropout_rate cannot be used / Text Encoderの出力をキャッシュするときはcaption_dropout_rate, shuffle_caption, token_warmup_step, caption_tag_dropout_rateは使えません"
+
+        assert (
+            args.network_train_unet_only or not args.cache_text_encoder_outputs
+        ), "network for Text Encoder cannot be trained with caching Text Encoder outputs / Text Encoderの出力をキャッシュしながらText Encoderのネットワークを学習することはできません"
+
+        train_dataset_group.verify_bucket_reso_steps(32)
+
+    def load_target_model(self, args, weight_dtype, accelerator):
+        (
+            load_stable_diffusion_format,
+            text_encoder1,
+            text_encoder2,
+            vae,
+            unet,
+            logit_scale,
+            ckpt_info,
+        ) = sdxl_train_util.load_target_model(args, accelerator, sdxl_model_util.MODEL_VERSION_SDXL_BASE_V1_0, weight_dtype)
+
+        self.load_stable_diffusion_format = load_stable_diffusion_format
+        self.logit_scale = logit_scale
+        self.ckpt_info = ckpt_info
+
+        return sdxl_model_util.MODEL_VERSION_SDXL_BASE_V1_0, [text_encoder1, text_encoder2], vae, unet
+
+    def load_tokenizer(self, args):
+        tokenizer = sdxl_train_util.load_tokenizers(args)
+        return tokenizer
+
+    def is_text_encoder_outputs_cached(self, args):
+        return args.cache_text_encoder_outputs
+
+    def cache_text_encoder_outputs_if_needed(
+        self, args, accelerator, unet, vae, tokenizers, text_encoders, dataset: train_util.DatasetGroup, weight_dtype
+    ):
+        if args.cache_text_encoder_outputs:
+            if not args.lowram:
+                # reduce memory consumption
+                print("move vae and unet to cpu to save memory")
+                org_vae_device = vae.device
+                org_unet_device = unet.device
+                vae.to("cpu")
+                unet.to("cpu")
+                if torch.cuda.is_available():
+                    torch.cuda.empty_cache()
+
+            # when the TE is not trained, it is not prepared by accelerator, so we need an explicit autocast
+            with accelerator.autocast():
+                dataset.cache_text_encoder_outputs(
+                    tokenizers,
+                    text_encoders,
+                    accelerator.device,
+                    weight_dtype,
+                    args.cache_text_encoder_outputs_to_disk,
+                    accelerator.is_main_process,
+                )
+
+            text_encoders[0].to("cpu", dtype=torch.float32)  # Text Encoder doesn't work with fp16 on CPU
+            text_encoders[1].to("cpu", dtype=torch.float32)
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()
+
+            if not args.lowram:
+                print("move vae and unet back to original device")
+                vae.to(org_vae_device)
+                unet.to(org_unet_device)
+        else:
+            # the outputs are fetched from the Text Encoders every step, so keep them on the GPU
+            text_encoders[0].to(accelerator.device, dtype=weight_dtype)
+            text_encoders[1].to(accelerator.device, dtype=weight_dtype)
+
+    def get_text_cond(self, args, accelerator, batch, tokenizers, text_encoders, weight_dtype):
+        if "text_encoder_outputs1_list" not in batch or batch["text_encoder_outputs1_list"] is None:
+            input_ids1 = batch["input_ids"]
+            input_ids2 = batch["input_ids2"]
+            with torch.enable_grad():
+                # Get the text embedding for conditioning
+                # TODO support weighted captions
+                # if args.weighted_captions:
+                #     encoder_hidden_states = get_weighted_text_embeddings(
+                #         tokenizer,
+                #         text_encoder,
+                #         batch["captions"],
+                #         accelerator.device,
+                #         args.max_token_length // 75 if args.max_token_length else 1,
+                #         clip_skip=args.clip_skip,
+                #     )
+                # else:
+                input_ids1 = input_ids1.to(accelerator.device)
+                input_ids2 = input_ids2.to(accelerator.device)
+                encoder_hidden_states1, encoder_hidden_states2, pool2 = train_util.get_hidden_states_sdxl(
+                    args.max_token_length,
+                    input_ids1,
+                    input_ids2,
+                    tokenizers[0],
+                    tokenizers[1],
+                    text_encoders[0],
+                    text_encoders[1],
+                    None if not args.full_fp16 else weight_dtype,
+                    accelerator=accelerator,
+                )
+        else:
+            encoder_hidden_states1 = batch["text_encoder_outputs1_list"].to(accelerator.device).to(weight_dtype)
+            encoder_hidden_states2 = batch["text_encoder_outputs2_list"].to(accelerator.device).to(weight_dtype)
+            pool2 = batch["text_encoder_pool2_list"].to(accelerator.device).to(weight_dtype)
+
+            # # verify that the text encoder outputs are correct
+            # ehs1, ehs2, p2 = train_util.get_hidden_states_sdxl(
+            #     args.max_token_length,
+            #     batch["input_ids"].to(text_encoders[0].device),
+            #     batch["input_ids2"].to(text_encoders[0].device),
+            #     tokenizers[0],
+            #     tokenizers[1],
+            #     text_encoders[0],
+            #     text_encoders[1],
+            #     None if not args.full_fp16 else weight_dtype,
+            # )
+            # b_size = encoder_hidden_states1.shape[0]
+            # assert ((encoder_hidden_states1.to("cpu") - ehs1.to(dtype=weight_dtype)).abs().max() > 1e-2).sum() <= b_size * 2
+            # assert ((encoder_hidden_states2.to("cpu") - ehs2.to(dtype=weight_dtype)).abs().max() > 1e-2).sum() <= b_size * 2
+            # assert ((pool2.to("cpu") - p2.to(dtype=weight_dtype)).abs().max() > 1e-2).sum() <= b_size * 2
+            # print("text encoder outputs verified")
+
+        return encoder_hidden_states1, encoder_hidden_states2, pool2
+
+    def call_unet(self, args, accelerator, unet, noisy_latents, timesteps, text_conds, batch, weight_dtype):
+        noisy_latents = noisy_latents.to(weight_dtype)  # TODO check why noisy_latents is not weight_dtype
+
+        # get size embeddings
+        orig_size = batch["original_sizes_hw"]
+        crop_size = batch["crop_top_lefts"]
+        target_size = batch["target_sizes_hw"]
+        embs = sdxl_train_util.get_size_embeddings(orig_size, crop_size, target_size, accelerator.device).to(weight_dtype)
+
+        # concat embeddings
+        encoder_hidden_states1, encoder_hidden_states2, pool2 = text_conds
+        vector_embedding = torch.cat([pool2, embs], dim=1).to(weight_dtype)
+        text_embedding = torch.cat([encoder_hidden_states1, encoder_hidden_states2], dim=2).to(weight_dtype)
+
+        noise_pred = unet(noisy_latents, timesteps, text_embedding, vector_embedding)
+        return noise_pred
+
+    def sample_images(self, accelerator, args, epoch, global_step, device, vae, tokenizer, text_encoder, unet):
+        sdxl_train_util.sample_images(accelerator, args, epoch, global_step, device, vae, tokenizer, text_encoder, unet)
+
+
+if __name__ == "__main__":
+    parser = setup_parser()
+
+    args = parser.parse_args()
+    args = train_util.read_config_from_file(args, parser)
+
+    trainer = SdxlNetworkAppTrainer()
+    trainer.train(args)