Upload 2025-06-27/runs/71-15925005710/ci_results_run_trainer_and_fsdp_gpu/trainer_and_fsdp_results.json with huggingface_hub
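The commit title says the report was pushed with huggingface_hub. A minimal sketch of such an upload, using the real HfApi.upload_file API; the local filename, repo_id, and repo_type are assumptions (the target repo is not shown in this commit), while path_in_repo and the commit message mirror the ones above:

# Hypothetical upload of this CI report with huggingface_hub.
from huggingface_hub import HfApi

api = HfApi()  # resolves a token from HF_TOKEN or the local login cache
api.upload_file(
    path_or_fileobj="trainer_and_fsdp_results.json",  # assumed local path
    path_in_repo="2025-06-27/runs/71-15925005710/ci_results_run_trainer_and_fsdp_gpu/trainer_and_fsdp_results.json",
    repo_id="some-org/ci-results",  # hypothetical; the actual repo is not shown here
    repo_type="dataset",  # assumption: CI reports are commonly stored in dataset repos
    commit_message="Upload 2025-06-27/runs/71-15925005710/ci_results_run_trainer_and_fsdp_gpu/trainer_and_fsdp_results.json with huggingface_hub",
)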
2025-06-27/runs/71-15925005710/ci_results_run_trainer_and_fsdp_gpu/trainer_and_fsdp_results.json
ADDED
@@ -0,0 +1,186 @@
{
    "trainer": {
        "failed": {
            "PyTorch": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "TensorFlow": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Flax": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Tokenizers": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Pipelines": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Trainer": {
                "unclassified": 0,
                "single": 1,
                "multi": 0
            },
            "ONNX": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Auto": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Quantization": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Unclassified": {
                "unclassified": 0,
                "single": 1,
                "multi": 0
            }
        },
        "success": 402,
        "time_spent": "0:05:11, 0:02:43, ",
        "failures": {
            "single": [
                {
                    "line": "[100%]",
                    "trace": "(line 2853) Failed: (subprocess) AssertionError: {'loss': 8.9688, 'grad_norm': nan, 'learnin[53 chars]': 1} != {'epoch': 0.0049261083743842365, 'grad_norm[53 chars]': 1}"
                },
                {
                    "line": "tests/trainer/test_trainer.py::TrainerIntegrationTest::test_can_resume_training_lm",
                    "trace": "Cannot retrieve error message."
                }
            ]
        },
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/15925005710/job/44920191136",
            "single": "https://github.com/huggingface/transformers/actions/runs/15925005710/job/44920191125"
        }
    },
    "fsdp": {
        "failed": {
            "PyTorch": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "TensorFlow": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Flax": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Tokenizers": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Pipelines": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Trainer": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "ONNX": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Auto": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Quantization": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Unclassified": {
                "unclassified": 0,
                "single": 0,
                "multi": 12
            }
        },
        "success": 25,
        "time_spent": "0:12:11, .34, ",
        "failures": {
            "multi": [
                {
                    "line": "tests/fsdp/test_fsdp.py::TrainerIntegrationFSDP::test_basic_run_full_shard_bf16",
                    "trace": "(line 2490) RuntimeError: 'torchrun --nnodes 1 --nproc-per-node 2 --master-port 10999 /__w/transformers/transformers/examples/pytorch/text-classification/run_glue.py --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir /tmp/tmpyzurb1ek --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 1 --lr_scheduler_type cosine --logging_steps 50 --save_strategy epoch --do_eval --eval_strategy epoch --report_to none --bf16 --fsdp full_shard auto_wrap --fsdp_transformer_layer_cls_to_wrap BertLayer' failed with returncode 1"
                },
                {
                    "line": "tests/fsdp/test_fsdp.py::TrainerIntegrationFSDP::test_basic_run_full_shard_fp16",
                    "trace": "(line 2490) RuntimeError: 'torchrun --nnodes 1 --nproc-per-node 2 --master-port 10999 /__w/transformers/transformers/examples/pytorch/text-classification/run_glue.py --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir /tmp/tmpm2vf57o3 --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 1 --lr_scheduler_type cosine --logging_steps 50 --save_strategy epoch --do_eval --eval_strategy epoch --report_to none --fp16 --fsdp full_shard auto_wrap --fsdp_transformer_layer_cls_to_wrap BertLayer' failed with returncode 1"
                },
                {
                    "line": "tests/fsdp/test_fsdp.py::TrainerIntegrationFSDP::test_basic_run_shard_grad_op_bf16",
                    "trace": "(line 2490) RuntimeError: 'torchrun --nnodes 1 --nproc-per-node 2 --master-port 10999 /__w/transformers/transformers/examples/pytorch/text-classification/run_glue.py --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir /tmp/tmp25ygstg9 --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 1 --lr_scheduler_type cosine --logging_steps 50 --save_strategy epoch --do_eval --eval_strategy epoch --report_to none --bf16 --fsdp shard_grad_op auto_wrap --fsdp_transformer_layer_cls_to_wrap BertLayer' failed with returncode 1"
                },
                {
                    "line": "tests/fsdp/test_fsdp.py::TrainerIntegrationFSDP::test_basic_run_shard_grad_op_fp16",
                    "trace": "(line 2490) RuntimeError: 'torchrun --nnodes 1 --nproc-per-node 2 --master-port 10999 /__w/transformers/transformers/examples/pytorch/text-classification/run_glue.py --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir /tmp/tmpxjxxuv8e --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 1 --lr_scheduler_type cosine --logging_steps 50 --save_strategy epoch --do_eval --eval_strategy epoch --report_to none --fp16 --fsdp shard_grad_op auto_wrap --fsdp_transformer_layer_cls_to_wrap BertLayer' failed with returncode 1"
                },
                {
                    "line": "tests/fsdp/test_fsdp.py::TrainerIntegrationFSDP::test_basic_run_with_cpu_offload_0_bf16",
                    "trace": "(line 2490) RuntimeError: 'torchrun --nnodes 1 --nproc-per-node 2 --master-port 10999 /__w/transformers/transformers/examples/pytorch/text-classification/run_glue.py --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir /tmp/tmpuk5319ne --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 1 --lr_scheduler_type cosine --logging_steps 50 --save_strategy epoch --do_eval --eval_strategy epoch --report_to none --bf16 --max_steps 10 --fsdp full_shard auto_wrap offload --fsdp_transformer_layer_cls_to_wrap BertLayer' failed with returncode 1"
                },
                {
                    "line": "tests/fsdp/test_fsdp.py::TrainerIntegrationFSDP::test_basic_run_with_cpu_offload_1_fp16",
                    "trace": "(line 2490) RuntimeError: 'torchrun --nnodes 1 --nproc-per-node 2 --master-port 10999 /__w/transformers/transformers/examples/pytorch/text-classification/run_glue.py --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir /tmp/tmpdu4sx6qb --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 1 --lr_scheduler_type cosine --logging_steps 50 --save_strategy epoch --do_eval --eval_strategy epoch --report_to none --fp16 --max_steps 10 --fsdp full_shard auto_wrap offload --fsdp_transformer_layer_cls_to_wrap BertLayer' failed with returncode 1"
                },
                {
                    "line": "tests/fsdp/test_fsdp.py::TrainerIntegrationFSDP::test_basic_run_with_gradient_accumulation_full_shard_bf16",
                    "trace": "(line 2490) RuntimeError: 'torchrun --nnodes 1 --nproc-per-node 2 --master-port 10999 /__w/transformers/transformers/examples/pytorch/text-classification/run_glue.py --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir /tmp/tmpstcsoer1 --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 1 --lr_scheduler_type cosine --logging_steps 50 --save_strategy epoch --do_eval --eval_strategy epoch --report_to none --bf16 --gradient_accumulation_steps 2 --fsdp full_shard auto_wrap --fsdp_transformer_layer_cls_to_wrap BertLayer' failed with returncode 1"
                },
                {
                    "line": "tests/fsdp/test_fsdp.py::TrainerIntegrationFSDP::test_basic_run_with_gradient_accumulation_full_shard_fp16",
                    "trace": "(line 2490) RuntimeError: 'torchrun --nnodes 1 --nproc-per-node 2 --master-port 10999 /__w/transformers/transformers/examples/pytorch/text-classification/run_glue.py --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir /tmp/tmp01z_a79j --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 1 --lr_scheduler_type cosine --logging_steps 50 --save_strategy epoch --do_eval --eval_strategy epoch --report_to none --fp16 --gradient_accumulation_steps 2 --fsdp full_shard auto_wrap --fsdp_transformer_layer_cls_to_wrap BertLayer' failed with returncode 1"
                },
                {
                    "line": "tests/fsdp/test_fsdp.py::TrainerIntegrationFSDP::test_basic_run_with_gradient_accumulation_shard_grad_op_bf16",
                    "trace": "(line 2490) RuntimeError: 'torchrun --nnodes 1 --nproc-per-node 2 --master-port 10999 /__w/transformers/transformers/examples/pytorch/text-classification/run_glue.py --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir /tmp/tmpimtz6v1r --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 1 --lr_scheduler_type cosine --logging_steps 50 --save_strategy epoch --do_eval --eval_strategy epoch --report_to none --bf16 --gradient_accumulation_steps 2 --fsdp shard_grad_op auto_wrap --fsdp_transformer_layer_cls_to_wrap BertLayer' failed with returncode 1"
                },
                {
                    "line": "tests/fsdp/test_fsdp.py::TrainerIntegrationFSDP::test_basic_run_with_gradient_accumulation_shard_grad_op_fp16",
                    "trace": "(line 2490) RuntimeError: 'torchrun --nnodes 1 --nproc-per-node 2 --master-port 10999 /__w/transformers/transformers/examples/pytorch/text-classification/run_glue.py --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir /tmp/tmpsi0dxr_9 --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 1 --lr_scheduler_type cosine --logging_steps 50 --save_strategy epoch --do_eval --eval_strategy epoch --report_to none --fp16 --gradient_accumulation_steps 2 --fsdp shard_grad_op auto_wrap --fsdp_transformer_layer_cls_to_wrap BertLayer' failed with returncode 1"
                },
                {
                    "line": "tests/fsdp/test_fsdp.py::TrainerIntegrationFSDP::test_training_and_can_resume_normally_FULL_STATE_DICT",
                    "trace": "(line 2490) RuntimeError: 'torchrun --nnodes 1 --nproc-per-node 2 --master-port 10999 /__w/transformers/transformers/examples/pytorch/text-classification/run_glue.py --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir ./xxx --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 2 --lr_scheduler_type cosine --logging_steps 25 --save_strategy epoch --do_eval --eval_strategy epoch --report_to none --fsdp full_shard auto_wrap --fsdp_transformer_layer_cls_to_wrap BertLayer' failed with returncode 1"
                },
                {
                    "line": "tests/fsdp/test_fsdp.py::TrainerIntegrationFSDP::test_training_and_can_resume_normally_SHARDED_STATE_DICT",
                    "trace": "(line 2490) RuntimeError: 'accelerate launch --num_processes 2 --main_process_port 10999 --use_fsdp --fsdp_auto_wrap_policy TRANSFORMER_BASED_WRAP --fsdp_state_dict_type SHARDED_STATE_DICT --fsdp_transformer_layer_cls_to_wrap BertLayer --fsdp_sharding_strategy 1 /__w/transformers/transformers/examples/pytorch/text-classification/run_glue.py --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir ./xxx --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 2 --lr_scheduler_type cosine --logging_steps 25 --save_strategy epoch --do_eval --eval_strategy epoch --report_to none' failed with returncode 1"
                }
            ]
        },
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/15925005710/job/44920191121",
            "single": "https://github.com/huggingface/transformers/actions/runs/15925005710/job/44920191106"
        }
    }
}
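For reference, a minimal sketch of how a report with this structure can be summarized, assuming it has been downloaded locally (the filename is an assumption; the keys mirror the JSON above):

# Print per-job pass/fail totals and list the failing tests.
import json

with open("trainer_and_fsdp_results.json") as f:
    results = json.load(f)

for job, report in results.items():  # "trainer" and "fsdp"
    # "failed" maps each category to {"unclassified", "single", "multi"} counts
    failed = sum(sum(counts.values()) for counts in report["failed"].values())
    print(f"{job}: {report['success']} passed, {failed} failed")
    for machine, failures in report["failures"].items():  # "single" / "multi"
        for failure in failures:
            print(f"  [{machine}] {failure['line']}")

Against the data above this would report 402 passed / 2 failed for "trainer" and 25 passed / 12 failed for "fsdp".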