pad() TypeError when generating with Llama-4-Scout-17B-16E-Instruct
I'm getting a pad error with the sample code below. Environment: 1x H100 GPU, model meta-llama/Llama-4-Scout-17B-16E-Instruct, latest stable PyTorch and Transformers releases.
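For completeness, the processor and model are set up roughly like this (a minimal loading sketch; the torch_dtype and device_map choices are assumptions, not anything required to reproduce the error):

import torch
from transformers import AutoProcessor, Llama4ForConditionalGeneration

model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct"
processor = AutoProcessor.from_pretrained(model_id)
model = Llama4ForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumption: any standard dtype/device setup hits the same path
    device_map="auto",
)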
url1 = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg"
url2 = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png"

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": url1},
            {"type": "image", "url": url2},
            {"type": "text", "text": "Can you describe how these two images are similar, and how they differ?"},
        ]
    },
]

inputs = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(
    **inputs,
    max_new_tokens=256,
)

response = processor.batch_decode(outputs[:, inputs["input_ids"].shape[-1]:])[0]
print(response)
print(outputs[0])
TypeError Traceback (most recent call last)
in <cell line: 0>()
19 ).to(model.device)
20
---> 21 outputs = model.generate(
22 **inputs,
23 max_new_tokens=256,
/usr/local/lib/python3.11/dist-packages/torch/utils/_contextlib.py in decorate_context(*args, **kwargs)
114 def decorate_context(*args, **kwargs):
115 with ctx_factory():
--> 116 return func(*args, **kwargs)
117
118 return decorate_context
/usr/local/lib/python3.11/dist-packages/transformers/generation/utils.py in generate(self, inputs, generation_config, logits_processor, stopping_criteria, prefix_allowed_tokens_fn, synced_gpus, assistant_model, streamer, negative_prompt_ids, negative_prompt_attention_mask, use_model_defaults, **kwargs)
2458
2459 # 12. run sample (it degenerates to greedy search when generation_config.do_sample=False)
-> 2460 result = self._sample(
2461 input_ids,
2462 logits_processor=prepared_logits_processor,
/usr/local/lib/python3.11/dist-packages/transformers/generation/utils.py in _sample(self, input_ids, logits_processor, stopping_criteria, generation_config, synced_gpus, streamer, **model_kwargs)
3424
3425 if is_prefill:
-> 3426 outputs = self(**model_inputs, return_dict=True)
3427 is_prefill = False
3428 else:
/usr/local/lib/python3.11/dist-packages/torch/nn/modules/module.py in _wrapped_call_impl(self, *args, **kwargs)
1737 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1738 else:
-> 1739 return self._call_impl(*args, **kwargs)
1740
1741 # torchrec tests the code consistency with the following code
/usr/local/lib/python3.11/dist-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
1748 or _global_backward_pre_hooks or _global_backward_hooks
1749 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1750 return forward_call(*args, **kwargs)
1751
1752 result = None
/usr/local/lib/python3.11/dist-packages/accelerate/hooks.py in new_forward(module, *args, **kwargs)
174 output = module._old_forward(*args, **kwargs)
175 else:
--> 176 output = module._old_forward(*args, **kwargs)
177 return module._hf_hook.post_forward(module, output)
178
/usr/local/lib/python3.11/dist-packages/transformers/models/llama4/modeling_llama4.py in forward(self, input_ids, pixel_values, attention_mask, position_ids, past_key_values, inputs_embeds, vision_feature_layer, vision_feature_select_strategy, labels, use_cache, output_attentions, output_hidden_states, return_dict, cache_position, logits_to_keep, image_sizes, **lm_kwargs)
1759 inputs_embeds = inputs_embeds.view(original_inputs_embeds_shape)
1760
-> 1761 outputs = self.language_model(
1762 attention_mask=attention_mask,
1763 position_ids=position_ids,
/usr/local/lib/python3.11/dist-packages/torch/nn/modules/module.py in _wrapped_call_impl(self, *args, **kwargs)
1737 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1738 else:
-> 1739 return self._call_impl(*args, **kwargs)
1740
1741 # torchrec tests the code consistency with the following code
/usr/local/lib/python3.11/dist-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
1748 or _global_backward_pre_hooks or _global_backward_hooks
1749 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1750 return forward_call(*args, **kwargs)
1751
1752 result = None
/usr/local/lib/python3.11/dist-packages/transformers/models/llama4/modeling_llama4.py in forward(self, input_ids, attention_mask, position_ids, past_key_values, inputs_embeds, labels, use_cache, output_attentions, output_hidden_states, return_dict, cache_position, logits_to_keep, **kwargs)
1013
1014 # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
-> 1015 outputs = self.model(
1016 input_ids=input_ids,
1017 attention_mask=attention_mask,
/usr/local/lib/python3.11/dist-packages/torch/nn/modules/module.py in _wrapped_call_impl(self, *args, **kwargs)
1737 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1738 else:
-> 1739 return self._call_impl(*args, **kwargs)
1740
1741 # torchrec tests the code consistency with the following code
/usr/local/lib/python3.11/dist-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
1748 or _global_backward_pre_hooks or _global_backward_hooks
1749 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1750 return forward_call(*args, **kwargs)
1751
1752 result = None
/usr/local/lib/python3.11/dist-packages/transformers/models/llama4/modeling_llama4.py in forward(self, input_ids, attention_mask, position_ids, past_key_values, inputs_embeds, use_cache, output_attentions, output_hidden_states, return_dict, cache_position, **flash_attn_kwargs)
667 position_ids = cache_position.unsqueeze(0)
668
--> 669 causal_mask, chunk_causal_mask = self._update_causal_mask(
670 attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
671 )
/usr/local/lib/python3.11/dist-packages/transformers/models/llama4/modeling_llama4.py in _update_causal_mask(self, attention_mask, input_tensor, cache_position, past_key_values, output_attentions, chunked_attention_mask)
777 attention_mask, self.config.attention_chunk_size, sequence_length, key_length, offsets=offsets
778 )
--> 779 attention_mask = make_flex_block_causal_mask(
780 attention_mask,
781 query_length=sequence_length,
/usr/local/lib/python3.11/dist-packages/transformers/integrations/flex_attention.py in make_flex_block_causal_mask(attention_mask_2d, attention_chunk_size, query_length, key_length, offsets)
101 BlockMask
102 """
--> 103 attention_mask_2d = torch.nn.functional.pad(attention_mask_2d, value=0, pad=(0, key_length))
104 device = attention_mask_2d.device
105 document_ids = attention_mask_2d.clone()
/usr/local/lib/python3.11/dist-packages/torch/nn/functional.py in pad(input, pad, mode, value)
5207 "torch._decomp.decompositions"
5208 )._replication_pad(input, pad)
-> 5209 return torch._C._nn.pad(input, pad, mode, value)
5210
5211
TypeError: pad(): argument 'pad' failed to unpack the object at pos 2 with error "type must be tuple of ints,but got NoneType"
I got the same error message. Any suggestions?
Same here, any suggestions?
I've just opened an issue for this error: https://github.com/huggingface/transformers/issues/37323
key_length is coming in as None; no idea how yet, will fix ASAP.
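The failing call can be reproduced in isolation: torch.nn.functional.pad raises exactly this TypeError when the second pad value is None, which is what happens when key_length is None (minimal sketch, independent of the model):

import torch
import torch.nn.functional as F

attention_mask_2d = torch.ones(1, 8, dtype=torch.long)
key_length = None  # the value make_flex_block_causal_mask ends up receiving here

# Raises: TypeError: pad(): argument 'pad' failed to unpack the object at pos 2 ...
F.pad(attention_mask_2d, value=0, pad=(0, key_length))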
Workaround: use "eager" attention instead of flex attention, e.g.:
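import torch
from transformers import Llama4ForConditionalGeneration

model = Llama4ForConditionalGeneration.from_pretrained(
    "meta-llama/Llama-4-Scout-17B-16E-Instruct",
    attn_implementation="eager",  # bypasses the flex-attention block-mask path from the traceback
    torch_dtype=torch.bfloat16,   # illustrative; match your own setup
    device_map="auto",
)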
@ArthurZ I looked into it a bit and left my findings here: https://github.com/huggingface/transformers/issues/37323#issuecomment-2781396224
I don't have more time right now, but it should give a first idea.
Fixed on main, sorry about that.
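Until the fix is in a release, you can pick it up by installing Transformers from source with pip install git+https://github.com/huggingface/transformers.git (the standard source install, nothing specific to this issue). A quick check that the source build is active:

import transformers
print(transformers.__version__)  # a .dev0 version indicates a source install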