Update model card with paper link and correct pipeline tag (#6)
- Update model card with paper link and correct pipeline tag (ebc6d02e440a360feb1497ad89c281f7d5e7c919)
Co-authored-by: Niels Rogge <[email protected]>
README.md CHANGED
@@ -1,16 +1,10 @@
 ---
-extra_gated_heading: Acknowledge to follow corresponding license to access the repository
-extra_gated_button_content: Agree and access repository
-extra_gated_fields:
-  First Name: text
-  Last Name: text
-  Country: country
-  Affiliation: text
-license: cc-by-nc-4.0
 datasets:
 - Salesforce/xlam-function-calling-60k
 language:
 - en
+library_name: transformers
+license: cc-by-nc-4.0
 pipeline_tag: text-generation
 tags:
 - function-calling
@@ -18,9 +12,16 @@ tags:
 - tool-use
 - mistral
 - pytorch
-
+extra_gated_heading: Acknowledge to follow corresponding license to access the repository
+extra_gated_button_content: Agree and access repository
+extra_gated_fields:
+  First Name: text
+  Last Name: text
+  Country: country
+  Affiliation: text
 ---
 
+```markdown
 <p align="center">
 <img width="500px" alt="xLAM" src="https://huggingface.co/datasets/jianguozhang/logos/resolve/main/xlam-no-background.png">
 </p>
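After this change the gated-access fields sit below the tags, and `library_name: transformers` joins the existing `pipeline_tag: text-generation` and `license: cc-by-nc-4.0`. A minimal sketch for checking the resulting card metadata with `huggingface_hub`; the repo id is a placeholder, since this diff does not name the repository:

```python
# Sketch: verify the model card metadata after this change.
# The repo id below is a placeholder; this diff does not state the repository name.
from huggingface_hub import ModelCard

card = ModelCard.load("Salesforce/xLAM-7b-fc-r")  # placeholder repo id
meta = card.data.to_dict()
print(meta.get("pipeline_tag"))   # expected: text-generation
print(meta.get("library_name"))   # expected: transformers
print(meta.get("license"))        # expected: cc-by-nc-4.0
```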
@@ -191,15 +192,35 @@ def build_conversation_history_prompt(conversation_history: str):
         })
 
     history_string = json.dumps(parsed_history)
-    return f"\n[BEGIN OF HISTORY STEPS]\n{history_string}\n[END OF HISTORY STEPS]\n"
+    return f"
+[BEGIN OF HISTORY STEPS]
+{history_string}
+[END OF HISTORY STEPS]
+"
 
 
 # Helper function to build the input prompt for our model
 def build_prompt(task_instruction: str, format_instruction: str, tools: list, query: str, conversation_history: list):
-    prompt = f"[BEGIN OF TASK INSTRUCTION]\n{task_instruction}\n[END OF TASK INSTRUCTION]\n\n"
-    prompt += f"[BEGIN OF AVAILABLE TOOLS]\n{json.dumps(xlam_format_tools)}\n[END OF AVAILABLE TOOLS]\n\n"
-    prompt += f"[BEGIN OF FORMAT INSTRUCTION]\n{format_instruction}\n[END OF FORMAT INSTRUCTION]\n\n"
-    prompt += f"[BEGIN OF QUERY]\n{query}\n[END OF QUERY]\n\n"
+    prompt = f"[BEGIN OF TASK INSTRUCTION]
+{task_instruction}
+[END OF TASK INSTRUCTION]
+
+"
+    prompt += f"[BEGIN OF AVAILABLE TOOLS]
+{json.dumps(xlam_format_tools)}
+[END OF AVAILABLE TOOLS]
+
+"
+    prompt += f"[BEGIN OF FORMAT INSTRUCTION]
+{format_instruction}
+[END OF FORMAT INSTRUCTION]
+
+"
+    prompt += f"[BEGIN OF QUERY]
+{query}
+[END OF QUERY]
+
+"
 
     if len(conversation_history) > 0: prompt += build_conversation_history_prompt(conversation_history)
     return prompt
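Note that a plain double-quoted f-string cannot span physical source lines in Python, so the single-line form with `\n` escapes (the removed lines above) is the directly runnable variant. A self-contained sketch of the two helpers in that form; `xlam_format_tools` is stubbed here because it is defined elsewhere in the card, and the history builder is simplified to serialize the history as given:

```python
import json

# Stub: in the model card, xlam_format_tools is built from the tool definitions
# earlier in the README; an empty list keeps this sketch self-contained.
xlam_format_tools = []

def build_conversation_history_prompt(conversation_history: list) -> str:
    # Simplified: the card first reshapes each step into parsed_history;
    # here the history is serialized as given.
    history_string = json.dumps(conversation_history)
    return f"\n[BEGIN OF HISTORY STEPS]\n{history_string}\n[END OF HISTORY STEPS]\n"

# Helper function to build the input prompt for the model
def build_prompt(task_instruction: str, format_instruction: str, tools: list, query: str, conversation_history: list) -> str:
    # As in the card, the tools section uses the module-level xlam_format_tools.
    prompt = f"[BEGIN OF TASK INSTRUCTION]\n{task_instruction}\n[END OF TASK INSTRUCTION]\n\n"
    prompt += f"[BEGIN OF AVAILABLE TOOLS]\n{json.dumps(xlam_format_tools)}\n[END OF AVAILABLE TOOLS]\n\n"
    prompt += f"[BEGIN OF FORMAT INSTRUCTION]\n{format_instruction}\n[END OF FORMAT INSTRUCTION]\n\n"
    prompt += f"[BEGIN OF QUERY]\n{query}\n[END OF QUERY]\n\n"
    if len(conversation_history) > 0:
        prompt += build_conversation_history_prompt(conversation_history)
    return prompt
```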
@@ -479,4 +500,5 @@ If you find this repo helpful, please consider to cite our papers:
 journal={arXiv preprint arXiv:2402.15506},
 year={2024}
 }
+```
 ```
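Finally, a minimal end-to-end sketch of driving `build_prompt` with transformers-based generation, in line with the card's `pipeline_tag: text-generation` and `library_name: transformers`. The repo id, instruction strings, and query below are assumptions for illustration, not taken from this diff; `build_prompt` and `xlam_format_tools` are as in the sketch above:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Salesforce/xLAM-7b-fc-r"  # placeholder repo id
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")

# Assumed instruction texts and query; the card defines its own versions.
task_instruction = "You are an expert in composing function calls."
format_instruction = "Reply with a JSON object containing 'tool_calls'."
query = "What's the weather like in New York?"

content = build_prompt(task_instruction, format_instruction, xlam_format_tools, query, [])
inputs = tokenizer(content, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=512, do_sample=False)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```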