danielhanchen committed
Commit 89b4608 · verified · 1 Parent(s): c22f44e

Add files using upload-large-folder tool

.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ GLM-Z1-9B-0414-UD-IQ1_M.gguf filter=lfs diff=lfs merge=lfs -text
+ GLM-Z1-9B-0414-UD-IQ1_S.gguf filter=lfs diff=lfs merge=lfs -text
+ GLM-Z1-9B-0414-UD-IQ2_M.gguf filter=lfs diff=lfs merge=lfs -text
+ GLM-Z1-9B-0414-UD-IQ3_XXS.gguf filter=lfs diff=lfs merge=lfs -text
+ GLM-Z1-9B-0414-UD-Q2_K_XL.gguf filter=lfs diff=lfs merge=lfs -text
+ GLM-Z1-9B-0414-UD-Q4_K_XL.gguf filter=lfs diff=lfs merge=lfs -text
GLM-Z1-9B-0414-UD-IQ1_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a03e2cb37c99859a946b738d1a04f08d2f62d8e95e4e8ddc898fb3aa9e7f028f
+ size 3400595584
GLM-Z1-9B-0414-UD-IQ1_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68161f335a2e71a1bc3fbdcb009a3c98ac347b9505619b63f6b8553cf34661cb
+ size 3286644864
GLM-Z1-9B-0414-UD-IQ2_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6fdee7d2adb9f76083da078c40c941e148766c030864f04f18aa321b958b5ad
+ size 4074469504
GLM-Z1-9B-0414-UD-IQ3_XXS.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7e12844a0d7d36e11a4ef9b5de0df9c0deb9f971f1df0797ad13be58f6d5c6f
+ size 4317280384
GLM-Z1-9B-0414-UD-Q2_K_XL.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f73f47b60d511792772eb81e4a4118f629bf6dc3e61057420de56ad9ce0e38b5
+ size 4211062912
GLM-Z1-9B-0414-UD-Q4_K_XL.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af1bb6bdec4f2cc6ba5310b91623fa8c2afb78fb05f82ea89d90cb66ac0a1601
+ size 6208387200
README.md ADDED
@@ -0,0 +1,119 @@
+ ---
+ tags:
+ - unsloth
+ base_model:
+ - THUDM/GLM-Z1-9B-0414
+ license: mit
+ language:
+ - zh
+ - en
+ pipeline_tag: text-generation
+ library_name: transformers
+ ---
+
+ # GLM-4-Z1-9B-0414
+
+ ## Introduction
+
+ The GLM family welcomes a new generation of open-source models, the **GLM-4-32B-0414** series, featuring 32 billion parameters. Its performance is comparable to OpenAI's GPT series and DeepSeek's V3/R1 series, and it supports user-friendly local deployment. GLM-4-32B-Base-0414 was pre-trained on 15T of high-quality data, including a large amount of reasoning-oriented synthetic data, laying the foundation for subsequent reinforcement learning extensions. In the post-training stage, in addition to human preference alignment for dialogue scenarios, we also enhanced the model's performance in instruction following, engineering code, and function calling using techniques such as rejection sampling and reinforcement learning, strengthening the atomic capabilities required for agent tasks. GLM-4-32B-0414 achieves good results in engineering code, Artifact generation, function calling, search-based Q&A, and report generation. On some benchmarks it even rivals larger models such as GPT-4o and DeepSeek-V3-0324 (671B).
+
+ **GLM-Z1-32B-0414** is a reasoning model with **deep thinking capabilities**. It was developed from GLM-4-32B-0414 through cold start and extended reinforcement learning, with further training on tasks involving mathematics, code, and logic. Compared to the base model, GLM-Z1-32B-0414 significantly improves mathematical abilities and the capability to solve complex tasks. During training, we also introduced general reinforcement learning based on pairwise ranking feedback, further enhancing the model's general capabilities.
+
+ **GLM-Z1-Rumination-32B-0414** is a deep reasoning model with **rumination capabilities** (benchmarked against OpenAI's Deep Research). Unlike typical deep thinking models, the rumination model employs longer periods of deep thought to solve more open-ended and complex problems (e.g., writing a comparative analysis of AI development in two cities and their future development plans). It integrates search tools during its deep thinking process to handle complex tasks and is trained with multiple rule-based rewards to guide and extend end-to-end reinforcement learning. Z1-Rumination shows significant improvements in research-style writing and complex retrieval tasks.
+
+ Finally, **GLM-Z1-9B-0414** is a surprise. We employed the aforementioned series of techniques to train a 9B small-sized model that maintains the open-source tradition. Despite its smaller scale, GLM-Z1-9B-0414 still exhibits excellent capabilities in mathematical reasoning and general tasks, and its overall performance is at a leading level among open-source models of the same size. In resource-constrained scenarios especially, it strikes an excellent balance between efficiency and effectiveness, providing a powerful option for users seeking lightweight deployment.
+
+ ## Performance
+
+ <p align="center">
+ <img width="100%" src="https://raw.githubusercontent.com/THUDM/GLM-4/refs/heads/main/resources/Bench-Z1-32B.png">
+ </p>
+
+ <p align="center">
+ <img width="100%" src="https://raw.githubusercontent.com/THUDM/GLM-4/refs/heads/main/resources/Bench-Z1-9B.png">
+ </p>
+
+ ## Model Usage Guidelines
+
+ ### I. Sampling Parameters
+
+ | Parameter      | Recommended Value | Description                                          |
+ | -------------- | ----------------- | ---------------------------------------------------- |
+ | temperature    | **0.6**           | Balances creativity and stability                    |
+ | top_p          | **0.95**          | Cumulative probability threshold for sampling        |
+ | top_k          | **40**            | Filters out rare tokens while maintaining diversity  |
+ | max_new_tokens | **30000**         | Leaves enough tokens for thinking                    |
+
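+ As a rough sketch, these values map onto `transformers` `generate()` arguments as follows (assuming `model`, `tokenizer`, and tokenized `inputs` are prepared as in the Inference Code section below; `do_sample=True` is required for the sampling parameters to take effect):
+
+ ```python
+ # Sketch: the recommended sampling settings from the table above.
+ out = model.generate(
+     **inputs,
+     do_sample=True,        # enable sampling so temperature/top_p/top_k apply
+     temperature=0.6,       # balances creativity and stability
+     top_p=0.95,            # nucleus (cumulative probability) threshold
+     top_k=40,              # keep only the 40 most likely next tokens
+     max_new_tokens=30000,  # leave enough room for the thinking phase
+ )
+ ```
+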
+ ### II. Enforced Thinking
+
+ - Add \<think\>\n to the **first line**: ensures the model thinks before responding
+ - When using `chat_template.jinja`, the prompt is automatically injected to enforce this behavior (see the sketch below)
+
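+ If you construct prompts manually instead of relying on the bundled template, a minimal sketch of enforcing the think prefix could look like this (assumes `tokenizer`, `model`, and a `messages` list already exist; whether your template already injects the tag should be verified):
+
+ ```python
+ # Sketch: manually enforce the <think> prefix when not using chat_template.jinja.
+ prompt = tokenizer.apply_chat_template(
+     messages, add_generation_prompt=True, tokenize=False
+ )
+ # Only prepend the tag if the template has not already injected it.
+ if not prompt.rstrip().endswith("<think>"):
+     prompt += "<think>\n"
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+ ```
+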
+ ### III. Dialogue History Trimming
+
+ - Retain only the **final user-visible reply** in the saved history; hidden thinking content should **not** be stored, to reduce interference. This is already implemented in `chat_template.jinja` (see the sketch below)
+
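+ If you maintain the history yourself rather than through the template, a small sketch of stripping hidden thinking before saving a turn (assumes the `<think>...</think>` delimiters from Section II, and hypothetical `history` and `raw_reply` variables):
+
+ ```python
+ import re
+
+ # Sketch: keep only the user-visible part of an assistant reply.
+ def strip_thinking(reply: str) -> str:
+     # Drop everything inside <think>...</think>, keep the final answer.
+     return re.sub(r"<think>.*?</think>", "", reply, flags=re.DOTALL).strip()
+
+ history.append({"role": "assistant", "content": strip_thinking(raw_reply)})
+ ```
+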
+ ### IV. Handling Long Contexts (YaRN)
+
+ - When the input length exceeds **8,192 tokens**, consider enabling YaRN (RoPE scaling)
+ - In supported frameworks, add the following snippet to `config.json`:
+
+ ```json
+ "rope_scaling": {
+   "type": "yarn",
+   "factor": 4.0,
+   "original_max_position_embeddings": 32768
+ }
+ ```
+
+ - **Static YaRN** applies uniformly to all text and may slightly degrade performance on short texts, so enable it only as needed
+
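+ Alternatively, the same override can be applied at load time without editing `config.json`. The following is a sketch under the assumption that your `transformers` version forwards extra keyword arguments from `from_pretrained` into the model config; verify this against the version you use:
+
+ ```python
+ from transformers import AutoModelForCausalLM
+
+ # Sketch: enable YaRN via a config override instead of editing config.json.
+ model = AutoModelForCausalLM.from_pretrained(
+     "THUDM/GLM-Z1-9B-0414",
+     device_map="auto",
+     rope_scaling={
+         "type": "yarn",
+         "factor": 4.0,
+         "original_max_position_embeddings": 32768,
+     },
+ )
+ ```
+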
+ ## Inference Code
+
+ Make sure you are using `transformers>=4.51.3`.
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ MODEL_PATH = "THUDM/GLM-Z1-9B-0414"
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
+ model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")
+
+ message = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]
+
+ inputs = tokenizer.apply_chat_template(
+     message,
+     return_tensors="pt",
+     add_generation_prompt=True,
+     return_dict=True,
+ ).to(model.device)
+
+ generate_kwargs = {
+     "input_ids": inputs["input_ids"],
+     "attention_mask": inputs["attention_mask"],
+     "max_new_tokens": 4096,
+     "do_sample": False,  # greedy decoding; see Section I for the recommended sampling settings
+ }
+ out = model.generate(**generate_kwargs)
+ # Decode only the newly generated tokens, skipping the prompt.
+ print(tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
+ ```
+
+ ## Citations
+
+ If you find our work useful, please consider citing the following paper.
+
+ ```
+ @misc{glm2024chatglm,
+       title={ChatGLM: A Family of Large Language Models from GLM-130B to GLM-4 All Tools},
+       author={Team GLM and Aohan Zeng and Bin Xu and Bowen Wang and Chenhui Zhang and Da Yin and Diego Rojas and Guanyu Feng and Hanlin Zhao and Hanyu Lai and Hao Yu and Hongning Wang and Jiadai Sun and Jiajie Zhang and Jiale Cheng and Jiayi Gui and Jie Tang and Jing Zhang and Juanzi Li and Lei Zhao and Lindong Wu and Lucen Zhong and Mingdao Liu and Minlie Huang and Peng Zhang and Qinkai Zheng and Rui Lu and Shuaiqi Duan and Shudan Zhang and Shulin Cao and Shuxun Yang and Weng Lam Tam and Wenyi Zhao and Xiao Liu and Xiao Xia and Xiaohan Zhang and Xiaotao Gu and Xin Lv and Xinghan Liu and Xinyi Liu and Xinyue Yang and Xixuan Song and Xunkai Zhang and Yifan An and Yifan Xu and Yilin Niu and Yuantao Yang and Yueyan Li and Yushi Bai and Yuxiao Dong and Zehan Qi and Zhaoyu Wang and Zhen Yang and Zhengxiao Du and Zhenyu Hou and Zihan Wang},
+       year={2024},
+       eprint={2406.12793},
+       archivePrefix={arXiv},
+       primaryClass={cs.CL}
+ }
+ ```
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "architectures": [
+     "Glm4ForCausalLM"
+   ],
+   "attention_bias": true,
+   "attention_dropout": 0.0,
+   "eos_token_id": 151329,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 13696,
+   "max_position_embeddings": 32768,
+   "model_type": "glm4",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 40,
+   "num_key_value_heads": 2,
+   "pad_token_id": 151330,
+   "partial_rotary_factor": 0.5,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.52.0.dev0",
+   "unsloth_fixed": true,
+   "use_cache": true,
+   "vocab_size": 151552
+ }