'lora_unet_final_layer_adaLN_modulation_1.lora_down.weight'

#2
by kasiasta91 - opened

Hi, I tried to run this code snippet:

```
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev")
pipe.load_lora_weights("gokaygokay/Fuse-it-Kontext-Dev-LoRA")

prompt = "Fuse this image into background"
input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")

image = pipe(image=input_image, prompt=prompt).images[0]
```

But I am getting this error:

```
KeyError                                  Traceback (most recent call last)
/tmp/ipython-input-1963967165.py in <cell line: 0>()
      3 
      4 pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev")
----> 5 pipe.load_lora_weights("gokaygokay/Fuse-it-Kontext-Dev-LoRA")

5 frames
/usr/local/lib/python3.12/dist-packages/diffusers/loaders/lora_pipeline.py in load_lora_weights(self, pretrained_model_name_or_path_or_dict, adapter_name, hotswap, **kwargs)
   2151         # First, ensure that the checkpoint is a compatible one and can be successfully loaded.
   2152         kwargs["return_lora_metadata"] = True
-> 2153         state_dict, network_alphas, metadata = self.lora_state_dict(
   2154             pretrained_model_name_or_path_or_dict, return_alphas=True, **kwargs
   2155         )

/usr/local/lib/python3.12/dist-packages/huggingface_hub/utils/_validators.py in _inner_fn(*args, **kwargs)
    112             kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs)
    113 
--> 114         return fn(*args, **kwargs)
    115 
    116     return _inner_fn  # type: ignore

/usr/local/lib/python3.12/dist-packages/diffusers/loaders/lora_pipeline.py in lora_state_dict(cls, pretrained_model_name_or_path_or_dict, return_alphas, **kwargs)
   2032         is_kohya = any(".lora_down.weight" in k for k in state_dict)
   2033         if is_kohya:
-> 2034             state_dict = _convert_kohya_flux_lora_to_diffusers(state_dict)
   2035             # Kohya already takes care of scaling the LoRA parameters with alpha.
   2036             return cls._prepare_outputs(

/usr/local/lib/python3.12/dist-packages/diffusers/loaders/lora_conversion_utils.py in _convert_kohya_flux_lora_to_diffusers(state_dict)
    919         return _convert_mixture_state_dict_to_diffusers(state_dict)
    920 
--> 921     return _convert_sd_scripts_to_ai_toolkit(state_dict)
    922 
    923 

/usr/local/lib/python3.12/dist-packages/diffusers/loaders/lora_conversion_utils.py in _convert_sd_scripts_to_ai_toolkit(sds_sd)
    627         if any("final_layer" in k for k in sds_sd):
    628             # Notice the swap in processing for "final_layer".
--> 629             assign_remaining_weights(
    630                 [
    631                     (

/usr/local/lib/python3.12/dist-packages/diffusers/loaders/lora_conversion_utils.py in assign_remaining_weights(assignments, source)
    553                     target_key = target_fmt.format(lora_key=lora_key)
    554                     source_key = source_fmt.format(orig_lora_key=orig_lora_key)
--> 555                     value = source.pop(source_key)
    556                     if transform:
    557                         value = transform(value)

KeyError: 'lora_unet_final_layer_adaLN_modulation_1.lora_down.weight'
```
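
For reference, the converter is popping `lora_unet_final_layer_adaLN_modulation_1.lora_down.weight` from the state dict, so the checkpoint presumably doesn't contain a key with that exact name. A quick way to check is to list the `final_layer` keys the file actually holds. This is a minimal diagnostic sketch; it assumes the repo ships the LoRA as a single `.safetensors` file and discovers the file name at runtime rather than hard-coding it:

```
from huggingface_hub import hf_hub_download, list_repo_files
from safetensors import safe_open

repo_id = "gokaygokay/Fuse-it-Kontext-Dev-LoRA"

# Assumption: the LoRA is stored as a single .safetensors file in the repo.
weight_file = next(f for f in list_repo_files(repo_id) if f.endswith(".safetensors"))
path = hf_hub_download(repo_id, weight_file)

# Print every key containing "final_layer" so it can be compared against the
# lora_unet_final_layer_adaLN_modulation_1.lora_down.weight key the converter
# tries to pop.
with safe_open(path, framework="pt") as f:
    for key in f.keys():
        if "final_layer" in key:
            print(key)
```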

Any idea how to fix it?
