From 5e45df722d434bd64b230f462cac632d5ea68c96 Mon Sep 17 00:00:00 2001
From: sdbds <865105819@qq.com>
Date: Tue, 4 Mar 2025 08:07:33 +0800
Subject: [PATCH] Add Gemma2 attention implementation variants to LoRA targets

---
 networks/lora_lumina.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/networks/lora_lumina.py b/networks/lora_lumina.py
index 3f6c9b41..431c183d 100644
--- a/networks/lora_lumina.py
+++ b/networks/lora_lumina.py
@@ -462,7 +462,7 @@ def create_network_from_weights(multiplier, file, ae, text_encoders, lumina, wei
 class LoRANetwork(torch.nn.Module):
     LUMINA_TARGET_REPLACE_MODULE = ["JointTransformerBlock"]
-    TEXT_ENCODER_TARGET_REPLACE_MODULE = ["Gemma2Attention", "Gemma2MLP"]
+    TEXT_ENCODER_TARGET_REPLACE_MODULE = ["Gemma2Attention", "Gemma2FlashAttention2", "Gemma2SdpaAttention", "Gemma2MLP"]
 
     LORA_PREFIX_LUMINA = "lora_unet"
     LORA_PREFIX_TEXT_ENCODER = "lora_te" # Simplified prefix since we only have one text encoder
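
Why the extra class names matter: in the transformers versions this code targets,
Gemma2 instantiates a different attention class depending on attn_implementation
("eager" -> Gemma2Attention, "flash_attention_2" -> Gemma2FlashAttention2,
"sdpa" -> Gemma2SdpaAttention), and the network selects replace targets by
comparing module class names against TEXT_ENCODER_TARGET_REPLACE_MODULE. With
only "Gemma2Attention" listed, a text encoder loaded with SDPA or FlashAttention-2
would match nothing but Gemma2MLP, so the attention projections would never
receive LoRA adapters. Below is a minimal sketch of that class-name matching;
find_lora_targets is an illustrative helper, not a function from this repository.

    import torch

    TEXT_ENCODER_TARGET_REPLACE_MODULE = [
        "Gemma2Attention",        # attn_implementation="eager"
        "Gemma2FlashAttention2",  # attn_implementation="flash_attention_2"
        "Gemma2SdpaAttention",    # attn_implementation="sdpa"
        "Gemma2MLP",
    ]

    def find_lora_targets(root: torch.nn.Module, replace_classes: list[str]):
        # Walk the model; any module whose class name appears in
        # replace_classes is a replace target, and every nn.Linear inside
        # it would get a LoRA adapter.
        for name, module in root.named_modules():
            if module.__class__.__name__ in replace_classes:
                for child_name, child in module.named_modules():
                    if isinstance(child, torch.nn.Linear):
                        yield f"{name}.{child_name}", child

Under this matching scheme, adding the two implementation-specific class names
makes the target list cover all three attention code paths, so the trained
layers no longer depend on which attention backend the model was loaded with.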