Update with commit 1709ed96e47f36fe926e2cd2556fa839b41c2afd
See: https://github.com/huggingface/transformers/commit/1709ed96e47f36fe926e2cd2556fa839b41c2afd
- frameworks.json +1 -0
- pipeline_tags.json +2 -0
frameworks.json CHANGED
@@ -8,6 +8,7 @@
 {"model_type":"aria","pytorch":true,"processor":"AutoProcessor"}
 {"model_type":"aria_text","pytorch":true,"processor":"AutoTokenizer"}
 {"model_type":"audio-spectrogram-transformer","pytorch":true,"processor":"AutoFeatureExtractor"}
+{"model_type":"audioflamingo3","pytorch":true,"processor":"AutoProcessor"}
 {"model_type":"autoformer","pytorch":true,"processor":"AutoTokenizer"}
 {"model_type":"aya_vision","pytorch":true,"processor":"AutoProcessor"}
 {"model_type":"bamba","pytorch":true,"processor":"AutoTokenizer"}
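The added row registers audioflamingo3 as a PyTorch model whose preprocessing goes through AutoProcessor. A minimal sketch of how such a row could be looked up, assuming frameworks.json is JSON lines (one object per line, as the diff shows); the local path and helper name below are placeholders for illustration, not part of this commit:

import json

# Placeholder path; point this at a local copy of the metadata file.
FRAMEWORKS_PATH = "frameworks.json"

def find_framework_entry(model_type, path=FRAMEWORKS_PATH):
    """Return the metadata row for the given model_type, or None if absent."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            entry = json.loads(line)
            if entry.get("model_type") == model_type:
                return entry
    return None

print(find_framework_entry("audioflamingo3"))
# Expected per this commit: {'model_type': 'audioflamingo3', 'pytorch': True, 'processor': 'AutoProcessor'}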
pipeline_tags.json CHANGED
@@ -23,6 +23,8 @@
 {"model_class":"AriaModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 {"model_class":"AriaTextForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
 {"model_class":"AriaTextModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
+{"model_class":"AudioFlamingo3Encoder","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
+{"model_class":"AudioFlamingo3ForConditionalGeneration","pipeline_tag":"text2text-generation","auto_class":"AutoModelForSeq2SeqLM"}
 {"model_class":"AutoformerModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 {"model_class":"AyaVisionForConditionalGeneration","pipeline_tag":"image-text-to-text","auto_class":"AutoModelForImageTextToText"}
 {"model_class":"AyaVisionModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
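The two new rows map AudioFlamingo3Encoder to feature-extraction via AutoModel, and AudioFlamingo3ForConditionalGeneration to text2text-generation via AutoModelForSeq2SeqLM. A hedged sketch of resolving the auto class by name from such a row, again assuming a JSON-lines file; the path and function name are placeholders, and only standard transformers auto classes are referenced:

import json
import transformers

# Placeholder path; point this at a local copy of the metadata file.
PIPELINE_TAGS_PATH = "pipeline_tags.json"

def resolve_auto_class(model_class, path=PIPELINE_TAGS_PATH):
    """Return (pipeline_tag, auto class object) for a model class, or None if absent."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            entry = json.loads(line)
            if entry.get("model_class") == model_class:
                # Resolve the auto class by name, e.g. "AutoModelForSeq2SeqLM".
                return entry["pipeline_tag"], getattr(transformers, entry["auto_class"])
    return None

result = resolve_auto_class("AudioFlamingo3ForConditionalGeneration")
# Expected per this commit: ('text2text-generation', transformers.AutoModelForSeq2SeqLM)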