Retry 16 FAILED models
- 22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json +2 -4
- Kukedlc/NeuralSynthesis-7B-v0.1_eval_request_False_bfloat16_Original.json +2 -4
- Kukedlc/NeuralSynthesis-7b-v0.4-slerp_eval_request_bb3bd36_False_bfloat16_Original.json +2 -4
- cognitivecomputations/dolphin-2.6-mixtral-8x7b_eval_request_d099b57_False_bfloat16_Original.json +2 -4
- cognitivecomputations/dolphin-2.9.2-qwen2-7b_eval_request_False_bfloat16_Original.json +2 -4
- gradientai/Llama-3-8B-Instruct-262k_eval_request_False_bfloat16_Original.json +2 -4
- lightblue/suzume-llama-3-8B-multilingual-orpo-borda-full_eval_request_False_bfloat16_Original.json +2 -4
- lightblue/suzume-llama-3-8B-multilingual-orpo-borda-half_eval_request_False_bfloat16_Original.json +2 -4
- lightblue/suzume-llama-3-8B-multilingual-orpo-borda-half_eval_request_b82150a_False_bfloat16_Original.json +2 -4
- lightblue/suzume-llama-3-8B-multilingual-orpo-borda-top75_eval_request_False_bfloat16_Original.json +2 -4
- shadowml/BeagSake-7B_eval_request_False_bfloat16_Original.json +2 -4
- vicgalle/Configurable-Hermes-2-Pro-Llama-3-8B_eval_request_False_float16_Original.json +2 -4
- vicgalle/Configurable-Mistral-7B_eval_request_False_float16_Original.json +2 -4
- vicgalle/ConfigurableBeagle-11B_eval_request_False_float16_Original.json +2 -4
- vicgalle/ConfigurableSOLAR-10.7B_eval_request_False_float16_Original.json +2 -4
- wenbopan/Faro-Yi-9B_eval_request_False_bfloat16_Original.json +2 -4
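
Each of the 16 request files receives the same change: the old "status": "FAILED" entry becomes "status": "RERUN", and the "error_msg"/"traceback" fields recorded by the eval bot are dropped (which also moves the trailing comma off the now-final key). A minimal sketch of how such a bulk retry could be scripted — the requests/ directory layout and the mark_for_rerun helper are assumptions for illustration; the RERUN status and the dropped keys are taken from the diffs below:

import json
from pathlib import Path

def mark_for_rerun(path: Path) -> None:
    """Reset a FAILED eval request so the leaderboard bot queues it again."""
    request = json.loads(path.read_text(encoding="utf-8"))
    if request.get("status") != "FAILED":
        return  # only touch models that actually failed
    request["status"] = "RERUN"
    request.pop("error_msg", None)   # drop the recorded failure message
    request.pop("traceback", None)   # drop the recorded stack trace
    # ensure_ascii=False keeps emoji such as 💬 in model_type intact
    path.write_text(json.dumps(request, indent=4, ensure_ascii=False) + "\n",
                    encoding="utf-8")

# Assumed layout: one JSON file per eval request under requests/<org>/.
for request_file in Path("requests").glob("**/*_eval_request_*.json"):
    mark_for_rerun(request_file)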
22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json
CHANGED
@@ -7,13 +7,11 @@
     "params": 0,
     "architectures": "?",
     "weight_type": "Adapter",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-02-05T23:03:11Z",
     "model_type": "🔶 : fine-tuned",
     "source": "script",
     "job_id": 808,
     "job_start_time": "2024-06-13T03-00-04.333530",
-    "main_language": "Portuguese",
-    "error_msg": "Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 54, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "main_language": "Portuguese"
 }
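
All 16 failures share the root cause visible in the removed traceback above: importing flash_attn_2_cuda aborts with "undefined symbol: _ZN3c104cuda9SetDeviceEi". That mangled name demangles to c10::cuda::SetDevice(int), a libtorch symbol, which typically means the installed flash-attn wheel was compiled against a different PyTorch build than the one in the torch21 environment. A rerun can only pass once the worker's flash-attn matches its torch; a small check one could run there (the reinstall command is a suggested remedy, not part of this commit):

# Verify the flash-attn / PyTorch ABI mismatch behind the tracebacks.
# _ZN3c104cuda9SetDeviceEi demangles to c10::cuda::SetDevice(int):
# flash_attn_2_cuda.so references a symbol this libtorch does not export.
import torch

print("torch", torch.__version__, "built for CUDA", torch.version.cuda)
try:
    import flash_attn  # importing it loads flash_attn_2_cuda
    print("flash-attn", flash_attn.__version__, "imports cleanly")
except ImportError as err:
    print("flash-attn is broken:", err)
    # Assumed remedy: rebuild against the installed torch, e.g.
    #   pip install --no-build-isolation --force-reinstall flash-attn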
Kukedlc/NeuralSynthesis-7B-v0.1_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "MistralForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-12T20:59:36Z",
     "model_type": "🤝 : base merges and moerges",
     "source": "leaderboard",
     "job_id": 806,
-    "job_start_time": "2024-06-13T02-58-53.322017",
-    "error_msg": "Failed to import transformers.models.mistral.modeling_mistral because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mistral/modeling_mistral.py\", line 48, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.mistral.modeling_mistral because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "job_start_time": "2024-06-13T02-58-53.322017"
 }
Kukedlc/NeuralSynthesis-7b-v0.4-slerp_eval_request_bb3bd36_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "MistralForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-14T06:14:24Z",
     "model_type": "🤝 : base merges and moerges",
     "source": "leaderboard",
     "job_id": 815,
-    "job_start_time": "2024-06-14T12-51-33.530183",
-    "error_msg": "Failed to import transformers.models.mistral.modeling_mistral because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mistral/modeling_mistral.py\", line 48, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.mistral.modeling_mistral because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "job_start_time": "2024-06-14T12-51-33.530183"
 }
cognitivecomputations/dolphin-2.6-mixtral-8x7b_eval_request_d099b57_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "MixtralForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-13T18:30:14Z",
     "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
     "source": "leaderboard",
     "job_id": 810,
-    "job_start_time": "2024-06-14T04-46-19.185818",
-    "error_msg": "Failed to import transformers.models.mixtral.modeling_mixtral because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mixtral/modeling_mixtral.py\", line 57, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.mixtral.modeling_mixtral because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "job_start_time": "2024-06-14T04-46-19.185818"
 }
cognitivecomputations/dolphin-2.9.2-qwen2-7b_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "Qwen2ForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-10T15:48:17Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 812,
-    "job_start_time": "2024-06-14T04-46-56.521714",
-    "error_msg": "Failed to import transformers.models.qwen2.modeling_qwen2 because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/qwen2/modeling_qwen2.py\", line 48, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.qwen2.modeling_qwen2 because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "job_start_time": "2024-06-14T04-46-56.521714"
 }
gradientai/Llama-3-8B-Instruct-262k_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-13T20:47:28Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 811,
-    "job_start_time": "2024-06-14T04-46-37.515025",
-    "error_msg": "Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 54, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "job_start_time": "2024-06-14T04-46-37.515025"
 }
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-full_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-06T18:37:18Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 796,
-    "job_start_time": "2024-06-13T00-25-18.427226",
-    "error_msg": "Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 54, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "job_start_time": "2024-06-13T00-25-18.427226"
 }
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-half_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-06T18:40:09Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 798,
-    "job_start_time": "2024-06-13T00-25-58.222542",
-    "error_msg": "Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 54, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "job_start_time": "2024-06-13T00-25-58.222542"
 }
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-half_eval_request_b82150a_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
|
|
8 |
"architectures": "LlamaForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
"main_language": "English",
|
11 |
-
"status": "
|
12 |
"submitted_time": "2024-06-14T06:15:06Z",
|
13 |
"model_type": "๐ฌ : chat (RLHF, DPO, IFT, ...)",
|
14 |
"source": "leaderboard",
|
15 |
"job_id": 815,
|
16 |
-
"job_start_time": "2024-06-14T12-51-55.841919"
|
17 |
-
"error_msg": "Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
|
18 |
-
"traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 54, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = 
_get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
|
19 |
}
|
|
|
8 |
"architectures": "LlamaForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
"main_language": "English",
|
11 |
+
"status": "RERUN",
|
12 |
"submitted_time": "2024-06-14T06:15:06Z",
|
13 |
"model_type": "๐ฌ : chat (RLHF, DPO, IFT, ...)",
|
14 |
"source": "leaderboard",
|
15 |
"job_id": 815,
|
16 |
+
"job_start_time": "2024-06-14T12-51-55.841919"
|
|
|
|
|
17 |
}
|
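Note on the pattern: every file in this commit gets the same two-part edit. The request's `status` is reset from `FAILED` to `RERUN`, and the `error_msg`/`traceback` fields left over from the failed attempt are dropped, so the eval bot will pick the model up again. A minimal sketch of how such a reset could be scripted (a hypothetical helper, not the leaderboard's actual tooling; the `requests/` path and glob pattern are assumptions):

```python
import json
from pathlib import Path


def reset_failed_request(path: Path) -> None:
    """Flip a FAILED eval request back to RERUN and drop stale diagnostics."""
    request = json.loads(path.read_text(encoding="utf-8"))
    if request.get("status") != "FAILED":
        return  # leave PENDING/RUNNING/FINISHED requests alone
    request["status"] = "RERUN"
    request.pop("error_msg", None)   # stale: belongs to the failed attempt
    request.pop("traceback", None)
    path.write_text(json.dumps(request, indent=4, ensure_ascii=False) + "\n",
                    encoding="utf-8")


# Hypothetical usage over a local checkout of the requests dataset:
for p in Path("requests").glob("*/*_eval_request_*.json"):
    reset_failed_request(p)
```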

lightblue/suzume-llama-3-8B-multilingual-orpo-borda-top75_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-06T18:38:56Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 797,
-    "job_start_time": "2024-06-13T00-25-38.678388",
-    "error_msg": "Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 54, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "job_start_time": "2024-06-13T00-25-38.678388"
 }

shadowml/BeagSake-7B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "MistralForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
    "submitted_time": "2024-06-14T06:09:59Z",
     "model_type": "🤝 : base merges and moerges",
     "source": "leaderboard",
     "job_id": 814,
-    "job_start_time": "2024-06-14T12-51-08.691430",
-    "error_msg": "Failed to import transformers.models.mistral.modeling_mistral because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mistral/modeling_mistral.py\", line 48, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.mistral.modeling_mistral because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "job_start_time": "2024-06-14T12-51-08.691430"
 }
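The `error_msg` being deleted above is identical across all 16 files except for the model module (`modeling_llama` vs. `modeling_mistral`): the compiled extension `flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so` references `_ZN3c104cuda9SetDeviceEi`, which demangles to `c10::cuda::SetDevice(int)`, a symbol the torch build in the `torch21` env does not export. One way to confirm which side is at fault is to probe torch's own c10 libraries for the symbol (a Linux-only sketch; whether the symbol would live in `libc10.so` or `libc10_cuda.so` depends on the torch build, so both are checked):

```python
import ctypes
from pathlib import Path

import torch

SYMBOL = "_ZN3c104cuda9SetDeviceEi"  # mangled c10::cuda::SetDevice(int)

# torch ships its C++ runtime libraries next to the Python package.
lib_dir = Path(torch.__file__).parent / "lib"
for name in ("libc10.so", "libc10_cuda.so"):
    so = lib_dir / name
    if not so.exists():
        continue
    lib = ctypes.CDLL(str(so))  # already mapped by `import torch`
    try:
        lib[SYMBOL]
        print(f"{name}: exports {SYMBOL}")
    except AttributeError:
        print(f"{name}: does NOT export {SYMBOL}")
```

If neither library exports it, the flash-attn wheel was built against a different torch than the one installed.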

vicgalle/Configurable-Hermes-2-Pro-Llama-3-8B_eval_request_False_float16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-12T19:41:07Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 803,
-    "job_start_time": "2024-06-13T02-57-19.034617",
-    "error_msg": "Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 54, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "job_start_time": "2024-06-13T02-57-19.034617"
 }

vicgalle/Configurable-Mistral-7B_eval_request_False_float16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "MistralForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-12T19:39:55Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 802,
-    "job_start_time": "2024-06-13T02-56-37.736146",
-    "error_msg": "Failed to import transformers.models.mistral.modeling_mistral because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mistral/modeling_mistral.py\", line 48, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.mistral.modeling_mistral because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "job_start_time": "2024-06-13T02-56-37.736146"
 }

vicgalle/ConfigurableBeagle-11B_eval_request_False_float16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "MistralForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-12T19:43:10Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 804,
-    "job_start_time": "2024-06-13T02-57-46.585778",
-    "error_msg": "Failed to import transformers.models.mistral.modeling_mistral because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mistral/modeling_mistral.py\", line 48, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.mistral.modeling_mistral because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "job_start_time": "2024-06-13T02-57-46.585778"
 }

vicgalle/ConfigurableSOLAR-10.7B_eval_request_False_float16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-12T19:45:05Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 805,
-    "job_start_time": "2024-06-13T02-58-21.877122",
-    "error_msg": "Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 54, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "job_start_time": "2024-06-13T02-58-21.877122"
 }

wenbopan/Faro-Yi-9B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-05-29T09:28:02Z",
     "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
     "source": "leaderboard",
     "job_id": 807,
-    "job_start_time": "2024-06-13T02-59-27.476454",
-    "error_msg": "Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1535, in _get_module\n return importlib.import_module(\".\" + module_name, self.__name__)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 54, in <module>\n from flash_attn import flash_attn_func, flash_attn_varlen_func\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/__init__.py\", line 3, in <module>\n from flash_attn.flash_attn_interface import (\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 10, in <module>\n import flash_attn_2_cuda as flash_attn_cuda\nImportError: /root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 562, in from_pretrained\n model_class = _get_model_class(config, cls._model_mapping)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 383, in _get_model_class\n supported_models = model_mapping[type(config)]\n ~~~~~~~~~~~~~^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 734, in __getitem__\n return self._load_attr_from_module(model_type, model_name)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 748, in _load_attr_from_module\n return getattribute_from_module(self._modules[module_name], attr)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 692, in getattribute_from_module\n if hasattr(module, attr):\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1525, in __getattr__\n module = self._get_module(self._class_to_module[name])\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/import_utils.py\", line 1537, in _get_module\n raise RuntimeError(\nRuntimeError: Failed to import transformers.models.llama.modeling_llama because of the following error (look up to see its traceback):\n/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEi\n"
+    "job_start_time": "2024-06-13T02-59-27.476454"
 }
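Taken together, the 16 failures are environmental rather than model-specific: the prebuilt flash-attn extension was compiled against a different torch ABI than the one installed, so every `from flash_attn import ...` inside transformers' modeling code died with the same undefined symbol. Rebuilding or reinstalling flash-attn against the installed torch (or aligning the torch version) should let these `RERUN` requests complete. A quick compatibility check to run in the eval environment before retrying (a sketch; `flash_attn.__version__` is assumed present, as in recent flash-attn releases):

```python
import torch

print("torch:", torch.__version__, "| CUDA:", torch.version.cuda)
try:
    import flash_attn  # importing triggers the flash_attn_2_cuda extension load
except ImportError as exc:
    # e.g. "undefined symbol: _ZN3c104cuda9SetDeviceEi" -> ABI mismatch
    print("flash-attn is broken against this torch build:", exc)
else:
    print("flash-attn", getattr(flash_attn, "__version__", "?"), "loads cleanly")
```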