Update README.md
README.md CHANGED
@@ -23,7 +23,7 @@ model-index:
     metrics:
     - name: pass@1
       type: pass@1
-      value:
+      value: 70.73
       verified: false
 ---
 
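The hunk above fills in the card's reported pass@1 for HumanEval (70.73, marked unverified). As a reference for how that number is normally produced, here is a minimal sketch of the unbiased pass@k estimator from the HumanEval paper, where `n` samples are generated per problem and `c` of them pass the unit tests; with a single greedy sample per problem, pass@1 reduces to the fraction of problems solved. The function name and the use of NumPy are illustrative choices, not details taken from this card.

```python
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k estimator: 1 - C(n-c, k) / C(n, k),
    computed as a numerically stable running product."""
    if n - c < k:
        return 1.0  # fewer failing samples than k draws: at least one draw must pass
    return float(1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

# e.g. 10 samples per problem, 7 of them passing; estimate pass@1
print(pass_at_k(10, 7, 1))  # 0.7
```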
@@ -36,6 +36,39 @@ Total 122,828 samples.
 - Open-Orca/OpenOrca: Filter the 'cot' category in 1M GPT4 dataset. 74,440 samples.
 - garage-bAInd/Open-Platypus: 100%, 24,926 samples.
 
+## HumanEval
+
+| Metric | Value |
+| --- | --- |
+| humaneval-python | 70.73 |
+
+[Big Code Models Leaderboard](https://huggingface.co/spaces/bigcode/bigcode-models-leaderboard)
+
+CodeLlama-34B-Python: 53.29
+
+CodeLlama-34B-Instruct: 50.79
+
+CodeLlama-13B-Instruct: 50.6
+
+CodeLlama-34B: 45.11
+
+CodeLlama-13B-Python: 42.89
+
+CodeLlama-13B: 35.07
+
+## lm-evaluation-harness
+
+[Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
+| Metric | Value |
+| --- | --- |
+| ARC | |
+| HellaSwag | |
+| MMLU | |
+| TruthfulQA | |
+| Average | |
+
+## Training Arguments
+
 | | |
 |------ | ------ |
 | lr | 2e-4 |
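The dataset bullets in the hunk above describe filtering the 'cot' category out of OpenOrca's 1M GPT-4 split and using Open-Platypus in full. A minimal sketch of that filter with the `datasets` library follows; the parquet file name and the `id` prefix convention (`"cot."`) are assumptions about the OpenOrca repo layout, not details given in the card.

```python
from datasets import load_dataset

# Assumption: the 1M GPT-4 portion of OpenOrca is the file below, and
# chain-of-thought samples carry ids prefixed with "cot.".
gpt4 = load_dataset(
    "Open-Orca/OpenOrca",
    data_files="1M-GPT4-Augmented.parquet",
    split="train",
)
cot = gpt4.filter(lambda row: row["id"].startswith("cot."))

# Open-Platypus is used in full (100%).
platypus = load_dataset("garage-bAInd/Open-Platypus", split="train")

print(len(cot), len(platypus))  # the card reports 74,440 and 24,926
```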
@@ -71,33 +104,6 @@ A40-48G x 2
 | eval_samples_per_second | 2.335 |
 | eval_steps_per_second | 1.167 |
 
-| Metric | Value |
-| --- | --- |
-| humaneval-python | |
-
-[Big Code Models Leaderboard](https://huggingface.co/spaces/bigcode/bigcode-models-leaderboard)
-
-CodeLlama-34B-Python: 53.29
-
-CodeLlama-34B-Instruct: 50.79
-
-CodeLlama-13B-Instruct: 50.6
-
-CodeLlama-34B: 45.11
-
-CodeLlama-13B-Python: 42.89
-
-CodeLlama-13B: 35.07
-
-[Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
-| Metric | Value |
-| --- | --- |
-| ARC | |
-| HellaSwag | |
-| MMLU | |
-| TruthfulQA | |
-| Average | |
-
 
 # **Code Llama**
 
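The "## Training Arguments" heading added in the second hunk sits above a table of which only the first row (lr 2e-4) and the eval-throughput rows above are visible in this diff; per the hunk header, the run used A40-48G x 2. Purely to illustrate where such values live, here is a minimal `transformers.TrainingArguments` sketch; every value other than the learning rate is a placeholder, not taken from the card.

```python
from transformers import TrainingArguments

# Only learning_rate comes from the card's table; the rest are placeholders.
args = TrainingArguments(
    output_dir="outputs",
    learning_rate=2e-4,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=8,
    num_train_epochs=1,
    logging_steps=10,
    bf16=True,
)
```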
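The Open LLM Leaderboard table introduced in the second hunk is still empty (ARC, HellaSwag, MMLU, TruthfulQA, Average). Below is a sketch of how those cells are typically filled with lm-evaluation-harness; the task names, few-shot counts, and `simple_evaluate` call follow the leaderboard's published setup and recent harness versions, and the model id is a placeholder, so treat all of it as an assumption rather than the card's actual evaluation command.

```python
from lm_eval import evaluator

MODEL_ARGS = "pretrained=<this-model-repo>,dtype=bfloat16"  # placeholder repo id

# (task, num_fewshot) pairs matching the Open LLM Leaderboard's setup;
# task names follow lm-evaluation-harness v0.4 and may differ in older releases.
for task, shots in [
    ("arc_challenge", 25),
    ("hellaswag", 10),
    ("mmlu", 5),
    ("truthfulqa_mc2", 0),
]:
    out = evaluator.simple_evaluate(
        model="hf",
        model_args=MODEL_ARGS,
        tasks=[task],
        num_fewshot=shots,
        batch_size=8,
    )
    print(task, out["results"][task])
```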