Update status of AALF/gemma-2-27b-it-SimPO-37K_eval_request_False_bfloat16_Original to FAILED
AALF/gemma-2-27b-it-SimPO-37K_eval_request_False_bfloat16_Original.json (CHANGED)
```diff
@@ -8,10 +8,12 @@
     "architectures": "Gemma2ForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "
+    "status": "FAILED",
     "submitted_time": "2024-08-29T19:24:31Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 1513,
-    "job_start_time": "2025-02-12T19-41-06.015447"
+    "job_start_time": "2025-02-12T19-41-06.015447",
+    "error_msg": "CUDA out of memory. Tried to allocate 410.00 MiB. GPU 0 has a total capacity of 79.35 GiB of which 334.19 MiB is free. Process 2234264 has 1.55 GiB memory in use. Process 1010245 has 77.47 GiB memory in use. Of the allocated memory 56.70 GiB is allocated by PyTorch, with 1.93 GiB allocated in private pools (e.g., CUDA Graphs), and 1.20 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)",
+    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 106, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 63, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1553, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1075, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/torch/utils/_contextlib.py\", line 116, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/transformers/generation/utils.py\", line 2224, in generate\n result = self._sample(\n ^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/transformers/generation/utils.py\", line 3205, in _sample\n outputs = self(**model_inputs, return_dict=True)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\", line 1736, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\", line 1747, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/transformers/utils/deprecation.py\", line 172, in wrapped_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/transformers/models/gemma2/modeling_gemma2.py\", line 883, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\", line 1736, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\", line 1747, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/transformers/models/gemma2/modeling_gemma2.py\", line 666, in forward\n layer_outputs = decoder_layer(\n ^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\", line 1736, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\", line 1747, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/transformers/models/gemma2/modeling_gemma2.py\", line 336, in forward\n hidden_states = self.mlp(hidden_states)\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\", line 1736, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\", line 1747, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/transformers/models/gemma2/modeling_gemma2.py\", line 90, in forward\n down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~\ntorch.OutOfMemoryError: CUDA out of memory. Tried to allocate 410.00 MiB. GPU 0 has a total capacity of 79.35 GiB of which 334.19 MiB is free. Process 2234264 has 1.55 GiB memory in use. Process 1010245 has 77.47 GiB memory in use. Of the allocated memory 56.70 GiB is allocated by PyTorch, with 1.93 GiB allocated in private pools (e.g., CUDA Graphs), and 1.20 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n",
 }
```
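The commit adds three fields to the failed request: `status`, `error_msg`, and `traceback`. As a minimal sketch of how an eval bot could write such an update, assuming a helper of roughly this shape (the field names come from the diff above; the function itself is hypothetical and is not the actual `llm_leaderboard_eval_bot` code):

```python
# Hypothetical helper: record a failure in an eval request file like the one
# in this commit. Field names ("status", "error_msg", "traceback") follow the
# diff; everything else is illustrative.
import json
import traceback


def mark_request_failed(request_path: str, exc: BaseException) -> None:
    """Flip the request to FAILED and attach the error message and traceback."""
    with open(request_path, "r", encoding="utf-8") as f:
        request = json.load(f)

    request["status"] = "FAILED"
    request["error_msg"] = str(exc)
    request["traceback"] = "".join(
        traceback.format_exception(type(exc), exc, exc.__traceback__)
    )

    with open(request_path, "w", encoding="utf-8") as f:
        json.dump(request, f, indent=4, ensure_ascii=False)
```

Storing the full traceback in the request file, as this commit does, makes the failure diagnosable directly from the repository without access to the worker's logs.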
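The error message itself suggests a mitigation: with 1.20 GiB reserved by PyTorch but unallocated, it recommends `PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True` to reduce fragmentation. A minimal sketch of applying that hint, assuming the evaluation is launched from a Python entry point (the variable must be in the environment before the CUDA caching allocator is initialized):

```python
# Sketch: apply the allocator hint from the error message. The setting must be
# exported before PyTorch creates its CUDA context, hence before the torch
# import (setting it after the first CUDA allocation has no effect).
import os

os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

import torch  # noqa: E402 -- deliberately imported after setting the env var

assert torch.cuda.is_available()
# ... load the model and run the evaluation as usual ...
```

Note, though, that the report also shows another process (1010245) holding 77.47 GiB of the card's 79.35 GiB, so fragmentation is unlikely to be the root cause here: the GPU was effectively already full when the 410.00 MiB allocation was attempted.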
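The figures quoted in the message (free vs. total, PyTorch-allocated vs. reserved) can be reproduced with standard `torch.cuda` introspection calls; a small sketch:

```python
# Sketch: reproduce the memory accounting from the error message.
# mem_get_info returns (free, total) in bytes for the device as a whole;
# memory_allocated/memory_reserved cover only PyTorch's caching allocator.
import torch

free, total = torch.cuda.mem_get_info(0)
allocated = torch.cuda.memory_allocated(0)
reserved = torch.cuda.memory_reserved(0)

print(f"GPU 0: {free / 2**20:.2f} MiB free of {total / 2**30:.2f} GiB total")
print(f"Allocated by PyTorch: {allocated / 2**30:.2f} GiB")
print(f"Reserved but unallocated: {(reserved - allocated) / 2**30:.2f} GiB")
```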