Upload 2025-09-08/ci_results_run_models_gpu/new_failures.json with huggingface_hub
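For context, an upload like the one in this commit is typically done through `huggingface_hub`'s `upload_file` API. The sketch below is illustrative only: the destination `repo_id` is a placeholder and `repo_type="dataset"` is an assumption, since the target repository is not named on this page.

```python
from huggingface_hub import HfApi

api = HfApi()  # authenticates via `huggingface-cli login` or the HF_TOKEN env var

# Hypothetical destination: the actual repo_id/repo_type are not shown here.
api.upload_file(
    path_or_fileobj="new_failures.json",
    path_in_repo="2025-09-08/ci_results_run_models_gpu/new_failures.json",
    repo_id="<org>/<ci-results-repo>",
    repo_type="dataset",
    commit_message="Upload 2025-09-08/ci_results_run_models_gpu/new_failures.json with huggingface_hub",
)
```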
2025-09-08/ci_results_run_models_gpu/new_failures.json
ADDED
@@ -0,0 +1,52 @@
+{
+    "chameleon": {
+        "single-gpu": [
+            "tests/models/chameleon/test_modeling_chameleon.py::ChameleonVision2SeqModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs"
+        ],
+        "multi-gpu": []
+    },
+    "deepseek_vl_hybrid": {
+        "single-gpu": [],
+        "multi-gpu": [
+            "tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py::DeepseekVLHybridIntegrationTest::test_model_text_generation_batched",
+            "tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py::DeepseekVLHybridIntegrationTest::test_model_text_generation_with_multi_image"
+        ]
+    },
+    "ernie4_5_moe": {
+        "single-gpu": [
+            "tests/models/ernie4_5_moe/test_modeling_ernie4_5_moe.py::Ernie4_5_MoeModelTest::test_flash_attn_2_equivalence"
+        ],
+        "multi-gpu": []
+    },
+    "fastspeech2_conformer": {
+        "single-gpu": [],
+        "multi-gpu": [
+            "tests/models/fastspeech2_conformer/test_modeling_fastspeech2_conformer.py::FastSpeech2ConformerModelTest::test_multi_gpu_data_parallel_forward"
+        ]
+    },
+    "gemma3": {
+        "single-gpu": [
+            "tests/models/gemma3/test_modeling_gemma3.py::Gemma3ModelTest::test_flash_attn_2_inference_equivalence"
+        ],
+        "multi-gpu": [
+            "tests/models/gemma3/test_modeling_gemma3.py::Gemma3Vision2TextModelTest::test_flash_attn_2_inference_equivalence"
+        ]
+    },
+    "mllama": {
+        "single-gpu": [
+            "tests/models/mllama/test_modeling_mllama.py::MllamaForCausalLMModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
+            "tests/models/mllama/test_modeling_mllama.py::MllamaForCausalLMModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs"
+        ],
+        "multi-gpu": []
+    },
+    "pipelines": {
+        "single-gpu": [
+            "tests/pipelines/test_pipelines_mask_generation.py::MaskGenerationPipelineTests::test_small_model_pt",
+            "tests/pipelines/test_pipelines_mask_generation.py::MaskGenerationPipelineTests::test_threshold"
+        ],
+        "multi-gpu": [
+            "tests/pipelines/test_pipelines_mask_generation.py::MaskGenerationPipelineTests::test_small_model_pt",
+            "tests/pipelines/test_pipelines_mask_generation.py::MaskGenerationPipelineTests::test_threshold"
+        ]
+    }
+}
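The report is plain JSON keyed by model (or test-suite) name, each entry holding `single-gpu` and `multi-gpu` lists of failing pytest node IDs. A minimal sketch for summarizing it locally, assuming the file has been downloaded to the working directory:

```python
import json

# Load the report added in this commit and print a per-model summary.
with open("new_failures.json") as f:
    new_failures = json.load(f)

for model, per_setting in new_failures.items():
    for setting in ("single-gpu", "multi-gpu"):
        tests = per_setting.get(setting, [])
        if tests:
            print(f"{model} [{setting}]: {len(tests)} new failure(s)")
            for test in tests:
                print(f"  - {test}")
```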