Support checkpoints without position_ids in SD v1 #687

This commit is contained in:
Kohya S
2023-07-29 14:19:25 +09:00
parent 38b59a93de
commit 1e4512b2c8

View File

@@ -540,6 +540,11 @@ def convert_ldm_clip_checkpoint_v1(checkpoint):
for key in keys:
if key.startswith("cond_stage_model.transformer"):
text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[key]
# support checkpoint without position_ids (invalid checkpoint)
if "text_model.embeddings.position_ids" not in text_model_dict:
text_model_dict["text_model.embeddings.position_ids"] = torch.arange(77).unsqueeze(0) # 77 is the max length of the text
return text_model_dict