Is this an error in the backend code? This is the full output I get when calling model.generate:
/usr/local/lib/python3.11/dist-packages/transformers/generation/configuration_utils.py:626: UserWarning: `do_sample` is set to `False`. However, `temperature` is set to `0.0` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `temperature`.
  warnings.warn(
The `seen_tokens` attribute is deprecated and will be removed in v4.41. Use the `cache_position` model input instead.
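(The first warning seems to just be about inconsistent sampling flags: as far as I understand, generation_args, the dict I pass into model.generate, should either enable sampling or drop temperature entirely. A minimal sketch with hypothetical values, not my actual settings:

# Hypothetical example values, only to show the two consistent configurations.
# Option A: greedy decoding, so temperature is irrelevant and should be omitted.
generation_args = {"max_new_tokens": 500, "do_sample": False}

# Option B: sampling, where temperature is actually used.
generation_args = {"max_new_tokens": 500, "do_sample": True, "temperature": 0.7}

The actual failure comes after these warnings, in the traceback below.)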
AttributeError Traceback (most recent call last)
in <cell line: 0>()
8 # Generate the response from the Vision model
9 # where it will raise the "FlashAttention only supports Ampere GPUs or newer" error
---> 10 generate_ids = model.generate(**inputs, eos_token_id=processor.tokenizer.eos_token_id, **generation_args)
11
12 # Remove input tokens from the generated response to get the actual output tokens
/usr/local/lib/python3.11/dist-packages/torch/utils/_contextlib.py in decorate_context(*args, **kwargs)
114 def decorate_context(*args, **kwargs):
115 with ctx_factory():
--> 116 return func(*args, **kwargs)
117
118 return decorate_context
/usr/local/lib/python3.11/dist-packages/transformers/generation/utils.py in generate(self, inputs, generation_config, logits_processor, stopping_criteria, prefix_allowed_tokens_fn, synced_gpus, assistant_model, streamer, negative_prompt_ids, negative_prompt_attention_mask, **kwargs)
2250
2251 # 12. run sample (it degenerates to greedy search when `generation_config.do_sample=False`)
-> 2252 result = self._sample(
2253 input_ids,
2254 logits_processor=prepared_logits_processor,
/usr/local/lib/python3.11/dist-packages/transformers/generation/utils.py in _sample(self, input_ids, logits_processor, stopping_criteria, generation_config, synced_gpus, streamer, **model_kwargs)
3222 ):
3223 # prepare model inputs
-> 3224 model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
3225
3226 # prepare variable output controls (note: some models won't accept all output controls)
~/.cache/huggingface/modules/transformers_modules/microsoft/Phi-3-vision-128k-instruct/c45209e90a4c4f7d16b2e9d48503c7f3e83623ed/modeling_phi3_v.py in prepare_inputs_for_generation(self, input_ids, past_key_values, attention_mask, inputs_embeds, pixel_values, image_sizes, **kwargs)
1350 cache_length = past_key_values.get_seq_length()
1351 past_length = past_key_values.seen_tokens
-> 1352 max_cache_length = past_key_values.get_max_length()
1353 else:
1354 cache_length = past_length = past_key_values[0][0].shape[2]
/usr/local/lib/python3.11/dist-packages/torch/nn/modules/module.py in __getattr__(self, name)
1929 if name in modules:
1930 return modules[name]
-> 1931 raise AttributeError(
1932 f"'{type(self).name}' object has no attribute '{name}'"
1933 )
AttributeError: 'DynamicCache' object has no attribute 'get_max_length'
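From the last frame, the Phi-3-vision remote code (modeling_phi3_v.py, loaded via trust_remote_code) calls past_key_values.get_max_length(), which my installed transformers apparently no longer provides on DynamicCache; I believe newer releases renamed it to get_max_cache_shape(). If that rename really is the only missing piece, a minimal workaround sketch (run once before model.generate) might be:

from transformers.cache_utils import DynamicCache

# Workaround sketch, not a proper fix: re-expose the old method name that the
# remote modeling_phi3_v.py still calls, assuming get_max_cache_shape() is its
# direct replacement in the installed transformers version.
if not hasattr(DynamicCache, "get_max_length"):
    DynamicCache.get_max_length = DynamicCache.get_max_cache_shape

Alternatively, downgrading to a transformers release that still defines get_max_length should avoid the AttributeError, but I'd rather understand whether this is a bug in the model's remote code or in transformers itself.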