nehcgs commited on
Commit
991fab3
·
verified ·
1 Parent(s): ff63c0e

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ assets/Arch-Agent-1.5B.png filter=lfs diff=lfs merge=lfs -text
37
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Katanemo Labs, Inc. COMMUNITY LICENSE AGREEMENT
2
+ **Version Release Date:** April 11th, 2025
3
+
4
+ This Katanemo Labs, Inc. COMMUNITY LICENSE AGREEMENT is based on the Llama 3.2 Community License, Copyright © Meta Platforms, Inc. The terms and conditions have been adapted to reflect the proprietary nature of Katanemo Labs' materials.
5
+
6
+ ---
7
+
8
+ 1. Definitions
9
+ a. "Agreement": The terms and conditions for use, reproduction, distribution, and modification of the Katanemo Materials set forth herein.
10
+ b. "Documentation": The specifications, manuals, and documentation accompanying Katanemo LLMs v1.
11
+ c. "Licensee" or "you": The individual or entity entering into this Agreement, including your employer if you are acting on their behalf.
12
+ d. "Katanemo": The foundational large language models and software provided by Katanemo Labs, Inc., available at https://huggingface.co/katanemolabs.
13
+ e. "Katanemo Materials": Collectively, Katanemo's proprietary models and Documentation. Some Materials are derived from the Qwen language models licensed under the Qwen RESEARCH LICENSE AGREEMENT.
14
+ f. "Katanemo Labs" or "we": Katanemo Labs Inc., a Delaware, USA Corporation.
15
+
16
+ ---
17
+
18
+ 2. ACCEPTANCE OF TERMS
19
+ By clicking "I Accept" or using any part of the Katanemo Materials, you agree to be bound by this Agreement.
20
+
21
+ ---
22
+
23
+ 3. LICENSE RIGHTS AND REDISTRIBUTION
24
+ a. Grant of Rights
25
+ Subject to the restrictions in Section 4, you are granted a non-exclusive, worldwide, non-transferable, and royalty-free license to:
26
+ - Use, reproduce, distribute, and modify the Katanemo Materials.
27
+ - Create derivative works based on the Katanemo Materials.
28
+
29
+ b. Permitted Redistribution
30
+ If you distribute the Katanemo Materials or any derivative work:
31
+ - You must include a copy of this License.
32
+ - You must prominently display the notice “Built with Katanemo” on a related website or documentation.
33
+
34
+ c. Attribution Requirement
35
+ You must include the following attribution notice in any distributed or public-facing use:
36
+ "Katanemo is licensed under the Katanemo Labs Community License.
37
+ Copyright © Katanemo Labs, Inc. All Rights Reserved."
38
+
39
+ d. Compliance Requirement
40
+ All use of the Katanemo Materials must comply with the Acceptable Use Policy, available at: https://katanemo.com/use-policy
41
+
42
+ ---
43
+
44
+ 4. COMMERCIAL USE AND DISTRIBUTION RESTRICTIONS
45
+
46
+ You may use, reproduce, modify, distribute, and create derivative works from the Katanemo Materials for any purpose, including commercial use, EXCEPT in the following cases:
47
+
48
+ You may NOT package, distribute, or make available the Katanemo Materials as part of:
49
+ - A framework,
50
+ - A proxy server,
51
+ - Middleware,
52
+ - A gateway infrastructure product,
53
+ - Or any product substantially similar in function or purpose to the above,
54
+
55
+ unless you obtain a separate commercial license from Katanemo Labs.
56
+
57
+ ---
58
+
59
+ This license does not grant trademark rights or rights outside the scope described above.
60
+
61
+ 5. Disclaimer of Warranty
62
+ The Katanemo Materials are provided "AS IS" without warranties of any kind, either express or implied, including but not limited to warranties of title, non-infringement, or fitness for a particular purpose.
63
+
64
+ ---
65
+
66
+ 6. Limitation of Liability
67
+ Katanemo Labs is not liable for any indirect, special, or consequential damages arising out of the use of the Katanemo Materials, even if advised of the possibility of such damages.
68
+
69
+ ---
70
+
71
+ 7. Intellectual Property
72
+ a. Trademarks
73
+ No trademark licenses are granted, except as required for attribution as described in Section 3.c. You may use the “Katanemo” mark according to Katanemo Labs' brand guidelines.
74
+
75
+ b. Ownership
76
+ You own any derivative works or modifications you create, except for portions owned by Katanemo Labs.
77
+
78
+ c. Litigation
79
+ If you file a lawsuit against Katanemo Labs regarding intellectual property, your license under this Agreement terminates.
80
+
81
+ ---
82
+
83
+ 8. Term and Termination
84
+ This Agreement continues until terminated. Katanemo Labs may terminate the Agreement if you breach any terms. Upon termination, you must cease using the Katanemo Materials.
85
+
86
+ ---
87
+
88
+ 9. Governing Law and Jurisdiction
89
+ This Agreement is governed by the laws of the State of Washington, USA. Any disputes will be resolved in the courts of California.
README.md ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: other
3
+ license_name: katanemo-research
4
+ license_link: >-
5
+ https://huggingface.co/katanemo/Arch-Agent-1.5B/blob/main/LICENSE
6
+ base_model:
7
+ - Qwen/Qwen2.5-Coder-1.5B-Instruct
8
+ language:
9
+ - en
10
+ pipeline_tag: text-generation
11
+ library_name: transformers
12
+ ---
13
+
14
+ # katanemo/Arch-Agent-1.5B
15
+
16
+ ## Overview
17
+ Arch-Agent is a collection of state-of-the-art (SOTA) LLMs specifically designed for advanced function calling and agent-based applications. Designed to power sophisticated multi-step and multi-turn workflows, Arch-Agent excels at handling complex, multi-step tasks that require intelligent tool selection, adaptive planning, and seamless integration with external APIs and services. Built with a focus on real-world agent deployments, Arch-Agent delivers leading performance in complex scenarios while maintaining reliability and precision across extended function call sequences. Key capabilities include:
18
+
19
+ - **Multi-Turn Function Calling**: Maintains contextual continuity across multiple dialogue turns, enabling natural, ongoing conversations with nested or evolving tool use.
20
+ - **Multi-Step Function Calling**: Plans and executes a sequence of function calls to complete complex tasks. Adapts dynamically based on intermediate results and decomposes goals into sub-tasks.
21
+ - **Agentic Capabilities**: Advanced decision-making and workflow management for complex agentic tasks with seamless tool coordination and error recovery.
22
+
23
+ For more details, including fine-tuning, inference, and deployment, please refer to our [Github](https://github.com/katanemo/Arch-Function).
24
+
25
+
26
+ ## Performance Benchmarks
27
+ We evaluate Katanemo Arch-Agent series on the [Berkeley Function-Calling Leaderboard (BFCL)](https://gorilla.cs.berkeley.edu/leaderboard.html#leaderboard). We compare with commonly-used models and the results (as of June 14th, 2025) are shown below.
28
+
29
+ <div align="center">
30
+ <img width="100%" height="auto" src="./assets/Arch-Agent-1.5B.png">
31
+ </div>
32
+
33
+ > [!NOTE]
34
+ > For evaluation, we use YaRN scaling to deploy the models for Multi-Turn evaluation, and all Arch-Agent models are evaluated with a context length of 64K.
35
+
36
+ ## Requirements
37
+ The code for Arch-Agent-1.5B has been included in the Hugging Face `transformers` library, and we recommend installing the latest version:
38
+ ```bash
39
+ pip install "transformers>=4.37.0"
40
+ ```
41
+
42
+
43
+ ## How to use
44
+ We use the following example to illustrate how to use our model to perform function calling tasks. Please note that our model works best with our provided prompt format. It allows us to extract JSON output that is similar to the [OpenAI's function calling](https://platform.openai.com/docs/guides/function-calling).
45
+
46
+
47
+ ### Quickstart
48
+ ````python
49
+ import json
50
+ from typing import Any, Dict, List
51
+ from transformers import AutoModelForCausalLM, AutoTokenizer
52
+
53
+ model_name = "katanemo/Arch-Agent-1.5B"
54
+ model = AutoModelForCausalLM.from_pretrained(
55
+ model_name, device_map="auto", torch_dtype="auto", trust_remote_code=True
56
+ )
57
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
58
+
59
+ # Please use our provided prompt for best performance
60
+ TASK_PROMPT = (
61
+ "You are a helpful assistant designed to assist with the user query by making one or more function calls if needed."
62
+ "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\n"
63
+ "You are provided with function signatures within <tools></tools> XML tags:\n<tools>{tool_text}"
64
+ "\n</tools>\n\nFor each function call, return a json object with function name and arguments within "
65
+ """<tool_call></tool_call> XML tags:\n<tool_call>\n{{"name": <function-name>, """
66
+ """"arguments": <args-json-object>}}\n</tool_call>"""
67
+ )
68
+
69
+ # Define available tools
70
+ tools = [
71
+ {
72
+ "type": "function",
73
+ "function": {
74
+ "name": "get_weather",
75
+ "description": "Get the current weather for a location",
76
+ "parameters": {
77
+ "type": "object",
78
+ "properties": {
79
+ "location": {
80
+ "type": "str",
81
+ "description": "The city and state, e.g. San Francisco, New York",
82
+ },
83
+ "unit": {
84
+ "type": "str",
85
+ "enum": ["celsius", "fahrenheit"],
86
+ "description": "The unit of temperature to return",
87
+ },
88
+ },
89
+ "required": ["location"],
90
+ },
91
+ },
92
+ }
93
+ ]
94
+
95
+
96
+ # Helper function to create the system prompt for our model
97
+ def format_prompt(tools: List[Dict[str, Any]]):
98
+ tool_text = "\n".join(
99
+ [json.dumps(tool["function"], ensure_ascii=False) for tool in tools]
100
+ )
101
+ return TASK_PROMPT.format(tool_text=tool_text)
102
+
103
+
104
+ system_prompt = format_prompt(tools)
105
+
106
+ messages = [
107
+ {"role": "system", "content": system_prompt},
108
+ {"role": "user", "content": "What is the weather in Seattle?"},
109
+ ]
110
+
111
+ model_inputs = tokenizer.apply_chat_template(
112
+ messages, add_generation_prompt=True, return_tensors="pt"
113
+ ).to(model.device)
114
+
115
+ generated_ids = model.generate(**model_inputs, max_new_tokens=32768)
116
+
117
+ generated_ids = [
118
+ output_ids[len(input_ids) :]
119
+ for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
120
+ ]
121
+
122
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
123
+ print(response)
124
+ ````
125
+
126
+ # License
127
+ The Arch-Agent collection is distributed under the [Katanemo license](https://huggingface.co/katanemo/Arch-Agent-1.5B/blob/main/LICENSE).
assets/Arch-Agent-1.5B.png ADDED

Git LFS Details

  • SHA256: b1baa8b30b8a768a5e0c84c42f6fe1e8bf5316f6ee61863697524df62e20add0
  • Pointer size: 131 Bytes
  • Size of remote file: 120 kB
config.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Qwen2ForCausalLM"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "bos_token_id": 151643,
7
+ "eos_token_id": 151645,
8
+ "hidden_act": "silu",
9
+ "hidden_size": 1536,
10
+ "initializer_range": 0.02,
11
+ "intermediate_size": 8960,
12
+ "max_position_embeddings": 32768,
13
+ "max_window_layers": 28,
14
+ "model_type": "qwen2",
15
+ "num_attention_heads": 12,
16
+ "num_hidden_layers": 28,
17
+ "num_key_value_heads": 2,
18
+ "rms_norm_eps": 1e-06,
19
+ "rope_scaling": null,
20
+ "rope_theta": 1000000.0,
21
+ "sliding_window": 32768,
22
+ "tie_word_embeddings": true,
23
+ "torch_dtype": "bfloat16",
24
+ "transformers_version": "4.52.3",
25
+ "use_cache": false,
26
+ "use_sliding_window": false,
27
+ "vocab_size": 151936
28
+ }
generation_config.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 151643,
3
+ "do_sample": true,
4
+ "eos_token_id": [
5
+ 151645,
6
+ 151643
7
+ ],
8
+ "pad_token_id": 151643,
9
+ "repetition_penalty": 1.1,
10
+ "temperature": 0.7,
11
+ "top_k": 20,
12
+ "top_p": 0.8,
13
+ "transformers_version": "4.52.3"
14
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25635b3de56b335cea2b6eee098562478eed32b966c760fcdd4b4faa35fca185
3
+ size 3087467144
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
3
+ size 11421896
tokenizer_config.json ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "clean_up_tokenization_spaces": false,
199
+ "eos_token": "<|im_end|>",
200
+ "errors": "replace",
201
+ "extra_special_tokens": {},
202
+ "model_max_length": 32768,
203
+ "pad_token": "<|endoftext|>",
204
+ "padding_side": "right",
205
+ "split_special_tokens": false,
206
+ "tokenizer_class": "Qwen2Tokenizer",
207
+ "unk_token": null
208
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff