From b08154dc365c46cb84f736363567e49a25847e15 Mon Sep 17 00:00:00 2001
From: Fair <541077827@qq.com>
Date: Sun, 7 May 2023 02:51:01 +0800
Subject: [PATCH] fix print "saving" and "epoch" in newline

---
 fine_tune.py                         |  2 +-
 library/train_util.py                | 12 ++++++------
 networks/extract_lora_from_dylora.py |  2 +-
 networks/merge_lora.py               |  4 ++--
 networks/merge_lora_old.py           |  4 ++--
 networks/resize_lora.py              |  2 +-
 networks/svd_merge_lora.py           |  2 +-
 train_db.py                          |  2 +-
 train_network.py                     |  4 ++--
 train_textual_inversion.py           |  4 ++--
 train_textual_inversion_XTI.py       |  4 ++--
 11 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/fine_tune.py b/fine_tune.py
index 442bd132..f9fe7b22 100644
--- a/fine_tune.py
+++ b/fine_tune.py
@@ -266,7 +266,7 @@ def train(args):
         accelerator.init_trackers("finetuning" if args.log_tracker_name is None else args.log_tracker_name)
 
     for epoch in range(num_train_epochs):
-        print(f"epoch {epoch+1}/{num_train_epochs}")
+        print(f"\nepoch {epoch+1}/{num_train_epochs}")
         current_epoch.value = epoch + 1
 
         for m in training_models:
diff --git a/library/train_util.py b/library/train_util.py
index ad139c06..9fd2eee0 100644
--- a/library/train_util.py
+++ b/library/train_util.py
@@ -1424,7 +1424,7 @@ def debug_dataset(train_dataset, show_input_ids=False):
 
     epoch = 1
     while True:
-        print(f"epoch: {epoch}")
+        print(f"\nepoch: {epoch}")
 
         steps = (epoch - 1) * len(train_dataset) + 1
         indices = list(range(len(train_dataset)))
@@ -3083,7 +3083,7 @@ def save_sd_model_on_epoch_end_or_stepwise(
                 ckpt_name = get_step_ckpt_name(args, ext, global_step)
 
             ckpt_file = os.path.join(args.output_dir, ckpt_name)
-            print(f"saving checkpoint: {ckpt_file}")
+            print(f"\nsaving checkpoint: {ckpt_file}")
             model_util.save_stable_diffusion_checkpoint(
                 args.v2, ckpt_file, text_encoder, unet, src_path, epoch_no, global_step, save_dtype, vae
             )
@@ -3109,7 +3109,7 @@ def save_sd_model_on_epoch_end_or_stepwise(
             else:
                 out_dir = os.path.join(args.output_dir, STEP_DIFFUSERS_DIR_NAME.format(model_name, global_step))
 
-            print(f"saving model: {out_dir}")
+            print(f"\nsaving model: {out_dir}")
             model_util.save_diffusers_checkpoint(
                 args.v2, out_dir, text_encoder, unet, src_path, vae=vae, use_safetensors=use_safetensors
             )
@@ -3136,7 +3136,7 @@ def save_and_remove_state_on_epoch_end(args: argparse.Namespace, accelerator, epoch_no):
     model_name = default_if_none(args.output_name, DEFAULT_EPOCH_NAME)
 
-    print(f"saving state at epoch {epoch_no}")
+    print(f"\nsaving state at epoch {epoch_no}")
     os.makedirs(args.output_dir, exist_ok=True)
 
     state_dir = os.path.join(args.output_dir, EPOCH_STATE_NAME.format(model_name, epoch_no))
@@ -3157,7 +3157,7 @@ def save_and_remove_state_stepwise(args: argparse.Namespace, accelerator, step_n
 def save_and_remove_state_stepwise(args: argparse.Namespace, accelerator, step_no):
     model_name = default_if_none(args.output_name, DEFAULT_STEP_NAME)
 
-    print(f"saving state at step {step_no}")
+    print(f"\nsaving state at step {step_no}")
     os.makedirs(args.output_dir, exist_ok=True)
 
     state_dir = os.path.join(args.output_dir, STEP_STATE_NAME.format(model_name, step_no))
@@ -3182,7 +3182,7 @@ def save_and_remove_state_stepwise(args: argparse.Namespace, accelerator, step_n
 def save_state_on_train_end(args: argparse.Namespace, accelerator):
     model_name = default_if_none(args.output_name, DEFAULT_LAST_OUTPUT_NAME)
 
-    print("saving last state.")
+    print("\nsaving last state.")
     os.makedirs(args.output_dir, exist_ok=True)
 
     state_dir = os.path.join(args.output_dir, LAST_STATE_NAME.format(model_name))
diff --git a/networks/extract_lora_from_dylora.py b/networks/extract_lora_from_dylora.py
index 0abee983..5aa9403a 100644
--- a/networks/extract_lora_from_dylora.py
+++ b/networks/extract_lora_from_dylora.py
@@ -94,7 +94,7 @@ def split(args):
 
         filename, ext = os.path.splitext(args.save_to)
         model_file_name = filename + f"-{new_rank:04d}{ext}"
 
-        print(f"saving model to: {model_file_name}")
+        print(f"\nsaving model to: {model_file_name}")
         save_to_file(model_file_name, state_dict, new_metadata)
 
diff --git a/networks/merge_lora.py b/networks/merge_lora.py
index 2fa8861b..a7a0d83d 100644
--- a/networks/merge_lora.py
+++ b/networks/merge_lora.py
@@ -193,12 +193,12 @@ def merge(args):
 
         merge_to_sd_model(text_encoder, unet, args.models, args.ratios, merge_dtype)
 
-        print(f"saving SD model to: {args.save_to}")
+        print(f"\nsaving SD model to: {args.save_to}")
         model_util.save_stable_diffusion_checkpoint(args.v2, args.save_to, text_encoder, unet, args.sd_model, 0, 0, save_dtype, vae)
     else:
         state_dict = merge_lora_models(args.models, args.ratios, merge_dtype)
 
-        print(f"saving model to: {args.save_to}")
+        print(f"\nsaving model to: {args.save_to}")
         save_to_file(args.save_to, state_dict, state_dict, save_dtype)
 
diff --git a/networks/merge_lora_old.py b/networks/merge_lora_old.py
index c4b6efce..ffd6b2b4 100644
--- a/networks/merge_lora_old.py
+++ b/networks/merge_lora_old.py
@@ -148,13 +148,13 @@ def merge(args):
 
         merge_to_sd_model(text_encoder, unet, args.models, args.ratios, merge_dtype)
 
-        print(f"saving SD model to: {args.save_to}")
+        print(f"\nsaving SD model to: {args.save_to}")
         model_util.save_stable_diffusion_checkpoint(args.v2, args.save_to, text_encoder, unet, args.sd_model, 0, 0, save_dtype, vae)
     else:
         state_dict, _, _ = merge_lora_models(args.models, args.ratios, merge_dtype)
 
-        print(f"saving model to: {args.save_to}")
+        print(f"\nsaving model to: {args.save_to}")
         save_to_file(args.save_to, state_dict, state_dict, save_dtype)
 
diff --git a/networks/resize_lora.py b/networks/resize_lora.py
index 7b740634..4f7499e8 100644
--- a/networks/resize_lora.py
+++ b/networks/resize_lora.py
@@ -326,7 +326,7 @@ def resize(args):
         metadata["sshs_model_hash"] = model_hash
         metadata["sshs_legacy_hash"] = legacy_hash
 
-    print(f"saving model to: {args.save_to}")
+    print(f"\nsaving model to: {args.save_to}")
     save_to_file(args.save_to, state_dict, state_dict, save_dtype, metadata)
 
diff --git a/networks/svd_merge_lora.py b/networks/svd_merge_lora.py
index 9d17efba..8cd389db 100644
--- a/networks/svd_merge_lora.py
+++ b/networks/svd_merge_lora.py
@@ -160,7 +160,7 @@ def merge(args):
 
     new_conv_rank = args.new_conv_rank if args.new_conv_rank is not None else args.new_rank
     state_dict = merge_lora_models(args.models, args.ratios, args.new_rank, new_conv_rank, args.device, merge_dtype)
 
-    print(f"saving model to: {args.save_to}")
+    print(f"\nsaving model to: {args.save_to}")
     save_to_file(args.save_to, state_dict, save_dtype)
 
diff --git a/train_db.py b/train_db.py
index 90ee1bb1..6ffd2547 100644
--- a/train_db.py
+++ b/train_db.py
@@ -240,7 +240,7 @@ def train(args):
     loss_list = []
     loss_total = 0.0
     for epoch in range(num_train_epochs):
-        print(f"epoch {epoch+1}/{num_train_epochs}")
+        print(f"\nepoch {epoch+1}/{num_train_epochs}")
         current_epoch.value = epoch + 1
 
         # 指定したステップ数までText Encoderを学習する:epoch最初の状態
diff --git a/train_network.py b/train_network.py
index 4c4cc281..76a49246 100644
--- a/train_network.py
+++ b/train_network.py
@@ -532,7 +532,7 @@ def train(args):
             os.makedirs(args.output_dir, exist_ok=True)
             ckpt_file = os.path.join(args.output_dir, ckpt_name)
 
-            print(f"saving checkpoint: {ckpt_file}")
+            print(f"\nsaving checkpoint: {ckpt_file}")
             metadata["ss_training_finished_at"] = str(time.time())
             metadata["ss_steps"] = str(steps)
             metadata["ss_epoch"] = str(epoch_no)
@@ -550,7 +550,7 @@ def train(args):
     # training loop
     for epoch in range(num_train_epochs):
         if is_main_process:
-            print(f"epoch {epoch+1}/{num_train_epochs}")
+            print(f"\nepoch {epoch+1}/{num_train_epochs}")
         current_epoch.value = epoch + 1
 
         metadata["ss_epoch"] = str(epoch + 1)
diff --git a/train_textual_inversion.py b/train_textual_inversion.py
index 301aae7a..609b3294 100644
--- a/train_textual_inversion.py
+++ b/train_textual_inversion.py
@@ -347,7 +347,7 @@ def train(args):
             os.makedirs(args.output_dir, exist_ok=True)
             ckpt_file = os.path.join(args.output_dir, ckpt_name)
 
-            print(f"saving checkpoint: {ckpt_file}")
+            print(f"\nsaving checkpoint: {ckpt_file}")
             save_weights(ckpt_file, embs, save_dtype)
             if args.huggingface_repo_id is not None:
                 huggingface_util.upload(args, ckpt_file, "/" + ckpt_name, force_sync_upload=force_sync_upload)
@@ -360,7 +360,7 @@ def train(args):
 
     # training loop
     for epoch in range(num_train_epochs):
-        print(f"epoch {epoch+1}/{num_train_epochs}")
+        print(f"\nepoch {epoch+1}/{num_train_epochs}")
         current_epoch.value = epoch + 1
 
         text_encoder.train()
diff --git a/train_textual_inversion_XTI.py b/train_textual_inversion_XTI.py
index 2aa6cd7f..32d12ea7 100644
--- a/train_textual_inversion_XTI.py
+++ b/train_textual_inversion_XTI.py
@@ -381,7 +381,7 @@ def train(args):
            os.makedirs(args.output_dir, exist_ok=True)
             ckpt_file = os.path.join(args.output_dir, ckpt_name)
 
-            print(f"saving checkpoint: {ckpt_file}")
+            print(f"\nsaving checkpoint: {ckpt_file}")
             save_weights(ckpt_file, embs, save_dtype)
             if args.huggingface_repo_id is not None:
                 huggingface_util.upload(args, ckpt_file, "/" + ckpt_name, force_sync_upload=force_sync_upload)
@@ -394,7 +394,7 @@ def train(args):
 
     # training loop
     for epoch in range(num_train_epochs):
-        print(f"epoch {epoch+1}/{num_train_epochs}")
+        print(f"\nepoch {epoch+1}/{num_train_epochs}")
         current_epoch.value = epoch + 1
 
         text_encoder.train()
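
Why the leading "\n" helps: these training scripts draw their progress with tqdm, which keeps redrawing the current terminal line, so a bare print() issued mid-training lands on the half-drawn progress-bar line. Below is a minimal sketch of the effect, not part of the patch; it assumes tqdm is installed, and the step count, delay, and checkpoint name are illustrative only.

    # Demo: a status message printed while a tqdm bar is active.
    import time

    from tqdm import tqdm

    for step in tqdm(range(100), desc="steps"):
        time.sleep(0.02)
        if step == 49:
            # Without the leading "\n", this message is appended at the
            # cursor position of the line the bar keeps redrawing, e.g.
            # "steps:  50%|#####     | 50/100 [...]saving checkpoint: ...".
            # The "\n" starts the message on a fresh line first, which is
            # the one-character change this patch applies everywhere.
            print("\nsaving checkpoint: example.ckpt")

tqdm.write() would also keep messages off the bar's line, but the leading newline fixes the output with the smallest possible change to the existing print() calls.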