itzmealvin committed
Commit 0e06e3f · verified · 1 Parent(s): ba2aefc

Upload folder using huggingface_hub

Files changed (5)
  1. .gitattributes +1 -0
  2. .gitignore +1 -0
  3. README.md +21 -3
  4. config.json +49 -0
  5. model.gguf +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ model.gguf filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+ .DS_Store
README.md CHANGED
@@ -1,3 +1,21 @@
- ---
- license: mit
- ---
+ ---
+ pipeline_tag: text-generation
+ base_model_relation: quantized
+ base_model: Qwen/Qwen3-4B-Instruct-2507
+ library_name: transformers
+ license: mit
+ license_link: https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507/blob/main/LICENSE
+ tags:
+ - qwen
+ - qwen3
+ - unsloth
+ - gguf
+ ---
+
+ # Qwen3 4B Instruct 2507 by Qwen
+
+ **Model creator:** [Qwen](https://huggingface.co/Qwen)
+ **Original model:** [Qwen3-4B-Instruct-2507](https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507)
+ **GGUF quantization:** provided by [unsloth](https://huggingface.co/unsloth)
+
+ [![Unsloth Logo](https://github.com/unslothai/unsloth/raw/main/images/unsloth%20new%20logo.png)](https://github.com/unslothai/unsloth/)
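
The new README stops at attribution, so as an illustration only: a minimal sketch of chatting with this single-file GGUF via llama-cpp-python (my choice of runtime, not one the card prescribes; any GGUF runner works). The n_ctx value mirrors the context_size added in config.json below.

```python
# Minimal sketch, assuming llama-cpp-python is installed (pip install llama-cpp-python).
# "model.gguf" is the quantized file added in this commit; adjust the path as needed.
from llama_cpp import Llama

llm = Llama(
    model_path="model.gguf",  # Q4_K_M quant of Qwen3-4B-Instruct-2507
    n_ctx=6144,               # matches "context_size" in config.json
)

# Recent llama-cpp-python versions pick up the chat template stored in the
# GGUF metadata when no explicit chat_format is given.
out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Give me a one-line summary of GGUF."}],
    max_tokens=128,
)
print(out["choices"][0]["message"]["content"])
```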
config.json ADDED
@@ -0,0 +1,49 @@
+ {
+ "format": "gguf",
+ "quantization": "IQ2_XXS/Q4_K_M",
+ "parameters": "4.02 B",
+ "architecture": "qwen3",
+ "size": "2.32 GiB",
+ "gguf": {
+ "general.architecture": "qwen3",
+ "general.base_model.0.name": "Qwen3 4B Instruct 2507",
+ "general.base_model.0.organization": "Qwen",
+ "general.base_model.0.repo_url": "https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507",
+ "general.base_model.0.version": "2507",
+ "general.base_model.count": "1",
+ "general.basename": "Qwen3-4B-Instruct-2507",
+ "general.file_type": "15",
+ "general.finetune": "Instruct",
+ "general.license": "apache-2.0",
+ "general.license.link": "https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507/blob/main/LICENSE",
+ "general.name": "Qwen3-4B-Instruct-2507",
+ "general.quantization_version": "2",
+ "general.quantized_by": "Unsloth",
+ "general.repo_url": "https://huggingface.co/unsloth",
+ "general.size_label": "4B",
+ "general.tags": "unsloth, text-generation",
+ "general.type": "model",
+ "general.version": "2507",
+ "quantize.imatrix.chunks_count": "79",
+ "quantize.imatrix.dataset": "unsloth_calibration_Qwen3-4B-Instruct-2507.txt",
+ "quantize.imatrix.entries_count": "252",
+ "quantize.imatrix.file": "Qwen3-4B-Instruct-2507-GGUF/imatrix_unsloth.gguf",
+ "qwen3.attention.head_count": "32",
+ "qwen3.attention.head_count_kv": "8",
+ "qwen3.attention.key_length": "128",
+ "qwen3.attention.layer_norm_rms_epsilon": "0.000001",
+ "qwen3.attention.value_length": "128",
+ "qwen3.block_count": "36",
+ "qwen3.context_length": "262144",
+ "qwen3.embedding_length": "2560",
+ "qwen3.feed_forward_length": "9728",
+ "qwen3.rope.freq_base": "5000000.000000",
+ "tokenizer.chat_template": "{%- if tools %}\n {{- '\u003c|im_start|\u003esystem\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within \u003ctools\u003e\u003c/tools\u003e XML tags:\\n\u003ctools\u003e\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\u003c/tools\u003e\\n\\nFor each function call, return a json object with function name and arguments within \u003ctool_call\u003e\u003c/tool_call\u003e XML tags:\\n\u003ctool_call\u003e\\n{\\\"name\\\": \u003cfunction-name\u003e, \\\"arguments\\\": \u003cargs-json-object\u003e}\\n\u003c/tool_call\u003e\u003c|im_end|\u003e\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '\u003c|im_start|\u003esystem\\n' + messages[0].content + '\u003c|im_end|\u003e\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and message.content is string and not(message.content.startswith('\u003ctool_response\u003e') and message.content.endswith('\u003c/tool_response\u003e')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- if message.content is string %}\n {%- set content = message.content %}\n {%- else %}\n {%- set content = '' %}\n {%- endif %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '\u003c|im_start|\u003e' + message.role + '\\n' + content + '\u003c|im_end|\u003e' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '\u003c/think\u003e' in content %}\n {%- set reasoning_content = ((content.split('\u003c/think\u003e')|first).rstrip('\\n').split('\u003cthink\u003e')|last).lstrip('\\n') %}\n {%- set content = (content.split('\u003c/think\u003e')|last).lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 \u003e ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '\u003c|im_start|\u003e' + message.role + '\\n\u003cthink\u003e\\n' + reasoning_content.strip('\\n') + '\\n\u003c/think\u003e\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '\u003c|im_start|\u003e' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '\u003c|im_start|\u003e' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\u003ctool_call\u003e\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n\u003c/tool_call\u003e' }}\n {%- endfor %}\n {%- endif %}\n {{- '\u003c|im_end|\u003e\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- 
'\u003c|im_start|\u003euser' }}\n {%- endif %}\n {{- '\\n\u003ctool_response\u003e\\n' }}\n {{- content }}\n {{- '\\n\u003c/tool_response\u003e' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '\u003c|im_end|\u003e\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '\u003c|im_start|\u003eassistant\\n' }}\n{%- endif %}",
+ "tokenizer.ggml.add_bos_token": "false",
+ "tokenizer.ggml.eos_token_id": "151645",
+ "tokenizer.ggml.model": "gpt2",
+ "tokenizer.ggml.padding_token_id": "151654",
+ "tokenizer.ggml.pre": "qwen2"
+ },
+ "context_size": 6144
+ }
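
The "gguf" section of config.json mirrors key/value pairs stored inside model.gguf itself. A minimal sketch, assuming the gguf package published alongside llama.cpp (pip install gguf), of reading a few of those fields straight from the file to cross-check them:

```python
# Sketch only: the gguf package is an assumption, not something this repo requires.
from gguf import GGUFReader, GGUFValueType

reader = GGUFReader("model.gguf")

def field_value(name):
    # Strings are stored as byte arrays in the last part of the field;
    # scalar types (ints/floats) are single-element arrays.
    field = reader.fields[name]
    if field.types[0] == GGUFValueType.STRING:
        return bytes(field.parts[-1]).decode("utf-8")
    return field.parts[-1][0]

for key in ("general.architecture", "qwen3.context_length", "general.file_type"):
    print(key, "=", field_value(key))
```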
model.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3605803b982cb64aead44f6c1b2ae36e3acdb41d8e46c8a94c6533bc4c67e597
+ size 2497281120
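
The pointer above records only the object hash and size; the ~2.5 GB payload lives in LFS storage. A minimal sketch of downloading it with huggingface_hub and checking it against the recorded sha256 (the repo_id is a placeholder, since the commit page does not show the full repository name):

```python
# Sketch: fetch the LFS-tracked weight file and verify it against the pointer's oid.
import hashlib
from huggingface_hub import hf_hub_download

EXPECTED_SHA256 = "3605803b982cb64aead44f6c1b2ae36e3acdb41d8e46c8a94c6533bc4c67e597"

path = hf_hub_download(repo_id="<user>/<repo>", filename="model.gguf")  # placeholder repo_id

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED_SHA256, "downloaded file does not match the LFS pointer"
print("OK:", path)
```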