{
  "format": "gguf",
  "quantization": "IQ2_XXS/Q4_K_M",
  "parameters": "4.02 B",
  "architecture": "qwen3",
  "size": "2.32 GiB",
  "gguf": {
    "general.architecture": "qwen3",
    "general.base_model.0.name": "Qwen3 4B Instruct 2507",
    "general.base_model.0.organization": "Qwen",
    "general.base_model.0.repo_url": "https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507",
    "general.base_model.0.version": "2507",
    "general.base_model.count": "1",
    "general.basename": "Qwen3-4B-Instruct-2507",
    "general.file_type": "15",
    "general.finetune": "Instruct",
    "general.license": "apache-2.0",
    "general.license.link": "https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507/blob/main/LICENSE",
    "general.name": "Qwen3-4B-Instruct-2507",
    "general.quantization_version": "2",
    "general.quantized_by": "Unsloth",
    "general.repo_url": "https://huggingface.co/unsloth",
    "general.size_label": "4B",
    "general.tags": "unsloth, text-generation",
    "general.type": "model",
    "general.version": "2507",
    "quantize.imatrix.chunks_count": "79",
    "quantize.imatrix.dataset": "unsloth_calibration_Qwen3-4B-Instruct-2507.txt",
    "quantize.imatrix.entries_count": "252",
    "quantize.imatrix.file": "Qwen3-4B-Instruct-2507-GGUF/imatrix_unsloth.gguf",
    "qwen3.attention.head_count": "32",
    "qwen3.attention.head_count_kv": "8",
    "qwen3.attention.key_length": "128",
    "qwen3.attention.layer_norm_rms_epsilon": "0.000001",
    "qwen3.attention.value_length": "128",
    "qwen3.block_count": "36",
    "qwen3.context_length": "262144",
    "qwen3.embedding_length": "2560",
    "qwen3.feed_forward_length": "9728",
    "qwen3.rope.freq_base": "5000000.000000",
"tokenizer.chat_template": "{%- if tools %}\n {{- '\u003c|im_start|\u003esystem\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within \u003ctools\u003e\u003c/tools\u003e XML tags:\\n\u003ctools\u003e\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\u003c/tools\u003e\\n\\nFor each function call, return a json object with function name and arguments within \u003ctool_call\u003e\u003c/tool_call\u003e XML tags:\\n\u003ctool_call\u003e\\n{\\\"name\\\": \u003cfunction-name\u003e, \\\"arguments\\\": \u003cargs-json-object\u003e}\\n\u003c/tool_call\u003e\u003c|im_end|\u003e\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '\u003c|im_start|\u003esystem\\n' + messages[0].content + '\u003c|im_end|\u003e\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and message.content is string and not(message.content.startswith('\u003ctool_response\u003e') and message.content.endswith('\u003c/tool_response\u003e')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- if message.content is string %}\n {%- set content = message.content %}\n {%- else %}\n {%- set content = '' %}\n {%- endif %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '\u003c|im_start|\u003e' + message.role + '\\n' + content + '\u003c|im_end|\u003e' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '\u003c/think\u003e' in content %}\n {%- set reasoning_content = ((content.split('\u003c/think\u003e')|first).rstrip('\\n').split('\u003cthink\u003e')|last).lstrip('\\n') %}\n {%- set content = (content.split('\u003c/think\u003e')|last).lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 \u003e ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '\u003c|im_start|\u003e' + message.role + '\\n\u003cthink\u003e\\n' + reasoning_content.strip('\\n') + '\\n\u003c/think\u003e\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '\u003c|im_start|\u003e' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '\u003c|im_start|\u003e' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\u003ctool_call\u003e\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n\u003c/tool_call\u003e' }}\n {%- endfor %}\n {%- endif %}\n {{- '\u003c|im_end|\u003e\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- 
'\u003c|im_start|\u003euser' }}\n {%- endif %}\n {{- '\\n\u003ctool_response\u003e\\n' }}\n {{- content }}\n {{- '\\n\u003c/tool_response\u003e' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '\u003c|im_end|\u003e\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '\u003c|im_start|\u003eassistant\\n' }}\n{%- endif %}", |
    "tokenizer.ggml.add_bos_token": "false",
    "tokenizer.ggml.eos_token_id": "151645",
    "tokenizer.ggml.model": "gpt2",
    "tokenizer.ggml.padding_token_id": "151654",
    "tokenizer.ggml.pre": "qwen2"
  },
  "context_size": 6144
}
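
The qwen3.attention.* keys describe grouped-query attention: 32 query heads share 8 KV heads of width 128 across 36 blocks, and the runtime context_size is set to 6144, far below the trained qwen3.context_length of 262144. A back-of-envelope sketch of the KV-cache footprint at that context size, assuming an unquantized f16 cache (2 bytes per element); actual runtimes may quantize or pad the cache differently:

# KV-cache estimate from the metadata above (assumes an f16 cache).
n_layers   = 36    # qwen3.block_count
n_kv_heads = 8     # qwen3.attention.head_count_kv
head_dim   = 128   # qwen3.attention.key_length / value_length
n_ctx      = 6144  # context_size
kv_bytes   = 2 * n_layers * n_kv_heads * head_dim * n_ctx * 2  # K and V, 2 bytes each
print(f"{kv_bytes / 2**20:.0f} MiB")  # -> 864 MiB

So roughly 0.85 GiB of cache on top of the 2.32 GiB of quantized weights at the configured context size.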
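The tokenizer.chat_template value is a Jinja template that converts an OpenAI-style message list into the ChatML prompt layout the model expects (im_start/im_end markers plus tool-call and tool-response blocks). A minimal rendering sketch, assuming the string has been JSON-decoded into a local file named chat_template.jinja (hypothetical path) and that jinja2 >= 2.9 is installed for the built-in tojson filter the template uses:

# Minimal sketch: render the embedded chat template with plain jinja2.
# chat_template.jinja is a hypothetical file holding the JSON-decoded template string.
from jinja2 import Environment

with open("chat_template.jinja") as f:
    template = Environment().from_string(f.read())

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is the capital of France?"},
]

# add_generation_prompt=True leaves the prompt open at an assistant turn.
print(template.render(messages=messages, add_generation_prompt=True))

With these two messages the rendered prompt is a system block, a user block, and an open <|im_start|>assistant header; the turn is closed by token 151645, the <|im_end|> listed above as tokenizer.ggml.eos_token_id.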