fbaldassarri committed · verified
Commit 970aee9 · 1 Parent(s): 45294e0

Initial Upload

README.md ADDED
@@ -0,0 +1,92 @@

---
language:
- en
- es
- fr
- de
- pt
- ja
- it
- zh
- ko
- ar
- cs
- nl
pipeline_tag: text-generation
license: apache-2.0
library_name: transformers
tags:
- granite-3.3
- autoround
- auto-round
- intel-autoround
- intel
- woq
- pytorch
- ibm
- granite
- granite-3
model_name: Granite 3.3 2b instruct
base_model:
- ibm-granite/granite-3.3-2b-instruct
inference: false
model_creator: ibm-granite
prompt_template: '{prompt}'
quantized_by: fbaldassarri
---

## Model Information

Quantized version of [ibm-granite/granite-3.3-2b-instruct](https://huggingface.co/ibm-granite/granite-3.3-2b-instruct) using torch.float32 for quantization tuning.
- 8 bits (INT8)
- group size = 64
- symmetric quantization
- method: WoQ (AutoRound format)

Fast and low-memory: roughly a 2-3x speedup, with a slight accuracy drop at W8G64.

Quantization framework: [Intel AutoRound](https://github.com/intel/auto-round) v0.4.7

Note: this INT8 version of granite-3.3-2b-instruct has been quantized to run inference on CPU.
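
For a quick smoke test, the quantized checkpoint can be loaded through transformers. A minimal sketch, assuming the auto-round package is installed (its import registers the AutoRound quantization backend) and assuming the repo id of this upload; a local path to the quantized folder works the same way:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from auto_round import AutoRoundConfig  # noqa: F401 -- importing auto_round registers the backend

# Assumed repo id for this upload (not confirmed by the README)
model_id = "fbaldassarri/ibm-granite_granite-3.3-2b-instruct-autoround-int8-gs64-sym"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cpu")

inputs = tokenizer("What is weight-only quantization?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```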

## Replication Recipe

### Step 1: Install Requirements

I suggest installing the requirements into a dedicated Python virtualenv or conda environment, as sketched below.
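
For example, a throwaway virtualenv (the environment name is illustrative):

```bash
python -m venv autoround-env
source autoround-env/bin/activate
```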

Then download the AutoRound sources and install the CPU requirements:

```bash
wget https://github.com/intel/auto-round/archive/refs/tags/v0.4.7.tar.gz
tar -xvzf v0.4.7.tar.gz
cd auto-round-0.4.7
pip install -r requirements-cpu.txt --upgrade
```

### Step 2: Build Intel AutoRound wheel from sources

```bash
pip install -vvv --no-build-isolation -e .[cpu]
```
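
To confirm the build, a quick version check (it should report 0.4.7):

```bash
python -c "import auto_round; print(auto_round.__version__)"
```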

### Step 3: Script for Quantization

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from auto_round import AutoRound

# Load the full-precision base model and its tokenizer
model_name = "ibm-granite/granite-3.3-2b-instruct"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# W8G64 symmetric weight-only quantization, tuned on CPU
bits, group_size, sym, device = 8, 64, True, 'cpu'
autoround = AutoRound(model, tokenizer, nsamples=128, iters=200, seqlen=512, batch_size=4, bits=bits, group_size=group_size, sym=sym, device=device)
autoround.quantize()

# Export the quantized model in the AutoRound format
output_dir = "./AutoRound/ibm-granite_granite-3.3-2b-instruct-autoround-int8-gs64-sym"
autoround.save_quantized(output_dir, format='auto_round', inplace=True)
```
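
The directory written by save_quantized can then be loaded for CPU inference exactly as in the sketch under Model Information, by pointing from_pretrained at output_dir instead of the Hub repo id.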

## License

[Apache 2.0 License](https://choosealicense.com/licenses/apache-2.0/)

## Disclaimer

This quantized model comes with no warranty. It has been developed only for research purposes.
added_tokens.json ADDED
@@ -0,0 +1,9 @@

{
  "<|end_of_cite|>": 49156,
  "<|end_of_plugin|>": 49158,
  "<|end_of_role|>": 49153,
  "<|start_of_cite|>": 49155,
  "<|start_of_plugin|>": 49157,
  "<|start_of_role|>": 49152,
  "<|tool_call|>": 49154
}
config.json ADDED
@@ -0,0 +1,63 @@

{
  "architectures": [
    "GraniteForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "attention_multiplier": 0.015625,
  "bos_token_id": 0,
  "embedding_multiplier": 12.0,
  "eos_token_id": 0,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 8192,
  "logits_scaling": 8.0,
  "max_position_embeddings": 131072,
  "mlp_bias": false,
  "model_type": "granite",
  "num_attention_heads": 32,
  "num_hidden_layers": 40,
  "num_key_value_heads": 8,
  "pad_token_id": 0,
  "quantization_config": {
    "act_bits": 16,
    "act_data_type": "int",
    "act_dynamic": true,
    "act_group_size": 64,
    "act_sym": true,
    "amp": false,
    "autoround_version": "0.4.7",
    "backend": "auto_round:gptq",
    "batch_size": 4,
    "bits": 8,
    "data_type": "int",
    "dataset": "NeelNanda/pile-10k",
    "enable_minmax_tuning": true,
    "enable_norm_bias_tuning": false,
    "enable_quanted_input": true,
    "gradient_accumulate_steps": 1,
    "group_size": 64,
    "iters": 200,
    "low_gpu_mem_usage": false,
    "lr": 0.005,
    "minmax_lr": 0.005,
    "nsamples": 128,
    "quant_method": "intel/auto-round",
    "scale_dtype": "torch.float16",
    "seqlen": 512,
    "super_bits": null,
    "super_group_size": null,
    "sym": true,
    "to_quant_block_names": null
  },
  "residual_multiplier": 0.22,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000000.0,
  "tie_word_embeddings": true,
  "torch_dtype": "float16",
  "transformers_version": "4.51.3",
  "use_cache": true,
  "vocab_size": 49159
}
generation_config.json ADDED
@@ -0,0 +1,7 @@

{
  "_from_model_config": true,
  "bos_token_id": 0,
  "eos_token_id": 0,
  "pad_token_id": 0,
  "transformers_version": "4.51.3"
}
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:2e5b2acd825fa9a05821962078c5ffa5e9e0cf2b6ef1f7617dc00e389bf1092e
size 2953514696
quantization_config.json ADDED
@@ -0,0 +1,31 @@

{
  "bits": 8,
  "group_size": 64,
  "sym": true,
  "data_type": "int",
  "enable_quanted_input": true,
  "enable_minmax_tuning": true,
  "seqlen": 512,
  "batch_size": 4,
  "scale_dtype": "torch.float16",
  "lr": 0.005,
  "minmax_lr": 0.005,
  "gradient_accumulate_steps": 1,
  "iters": 200,
  "amp": false,
  "nsamples": 128,
  "low_gpu_mem_usage": false,
  "to_quant_block_names": null,
  "enable_norm_bias_tuning": false,
  "act_bits": 16,
  "act_group_size": 64,
  "act_sym": true,
  "act_dynamic": true,
  "act_data_type": "int",
  "super_bits": null,
  "super_group_size": null,
  "dataset": "NeelNanda/pile-10k",
  "autoround_version": "0.4.7",
  "quant_method": "intel/auto-round",
  "backend": "auto_round:gptq"
}
special_tokens_map.json ADDED
@@ -0,0 +1,39 @@

{
  "additional_special_tokens": [
    "<|start_of_role|>",
    "<|end_of_role|>",
    "<|tool_call|>",
    "<|start_of_cite|>",
    "<|end_of_cite|>",
    "<|start_of_plugin|>",
    "<|end_of_plugin|>"
  ],
  "bos_token": {
    "content": "<|end_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|end_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|end_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<|end_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,235 @@

{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {"content": "<|end_of_text|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "1": {"content": "<fim_prefix>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "2": {"content": "<fim_middle>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "3": {"content": "<fim_suffix>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "4": {"content": "<fim_pad>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "5": {"content": "<filename>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "6": {"content": "<gh_stars>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "7": {"content": "<issue_start>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "8": {"content": "<issue_comment>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "9": {"content": "<issue_closed>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "10": {"content": "<jupyter_start>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "11": {"content": "<jupyter_text>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "12": {"content": "<jupyter_code>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "13": {"content": "<jupyter_output>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "14": {"content": "<empty_output>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "15": {"content": "<commit_before>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "16": {"content": "<commit_msg>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "17": {"content": "<commit_after>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "18": {"content": "<reponame>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "49152": {"content": "<|start_of_role|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "49153": {"content": "<|end_of_role|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "49154": {"content": "<|tool_call|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "49155": {"content": "<|start_of_cite|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "49156": {"content": "<|end_of_cite|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "49157": {"content": "<|start_of_plugin|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "49158": {"content": "<|end_of_plugin|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}
  },
  "additional_special_tokens": [
    "<|start_of_role|>",
    "<|end_of_role|>",
    "<|tool_call|>",
    "<|start_of_cite|>",
    "<|end_of_cite|>",
    "<|start_of_plugin|>",
    "<|end_of_plugin|>"
  ],
  "bos_token": "<|end_of_text|>",
  "chat_template": "{# Alias tools -> available_tools #}\n{%- if tools and not available_tools -%}\n {%- set available_tools = tools -%}\n{%- endif -%}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_messages = messages[1:] %}\n {%- else %}\n {%- set system_message = \" Knowledge Cutoff Date: April 2024.\n Today's Date: \" + strftime_now('%B %d, %Y') + \". You are Granite, developed by IBM.\" %}\n {%- if available_tools and documents %}\n {%- set system_message = system_message + \" You are a helpful assistant with access to the following tools. When a tool is required to answer the user's query, respond only with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request. \nWrite the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data.\" %}\n {%- elif available_tools %}\n {%- set system_message = system_message + \" You are a helpful assistant with access to the following tools. When a tool is required to answer the user's query, respond only with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request.\" %}\n {%- elif documents %}\n {%- set system_message = system_message + \" Write the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data.\" %}\n {%- elif thinking %}\n {%- set system_message = system_message + \" You are a helpful AI assistant.\nRespond to every user query in a comprehensive and detailed way. You can write down your thoughts and reasoning process before responding. In the thought process, engage in a comprehensive cycle of analysis, summarization, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. In the response section, based on various attempts, explorations, and reflections from the thoughts section, systematically present the final solution that you deem correct. The response should summarize the thought process. Write your thoughts between <think></think> and write your response between <response></response> for each user query.\" %}\n {%- else %}\n {%- set system_message = system_message + \" You are a helpful AI assistant.\" %}\n {%- endif %}\n {%- if 'citations' in controls and documents %}\n {%- set system_message = system_message + ' \nUse the symbols <|start_of_cite|> and <|end_of_cite|> to indicate when a fact comes from a document in the search result, e.g <|start_of_cite|> {document_id: 1}my fact <|end_of_cite|> for a fact from document 1. Afterwards, list all the citations with their corresponding documents in an ordered list.' %}\n {%- endif %}\n {%- if 'hallucinations' in controls and documents %}\n {%- set system_message = system_message + ' \nFinally, after the response is written, include a numbered list of sentences from the response with a corresponding risk value that are hallucinated and not based in the documents.' %}\n {%- endif %}\n {%- set loop_messages = messages %}\n {%- endif %}\n {{- '<|start_of_role|>system<|end_of_role|>' + system_message + '<|end_of_text|>\n' }}\n {%- if available_tools %}\n {{- '<|start_of_role|>available_tools<|end_of_role|>' }}\n {{- available_tools | tojson(indent=4) }}\n {{- '<|end_of_text|>\n' }}\n {%- endif %}\n {%- if documents %}\n {%- for document in documents %}\n {{- '<|start_of_role|>document {\"document_id\": \"' + document['doc_id'] | string + '\"}<|end_of_role|>\n' }}\n {{- document['text'] }}\n {{- '<|end_of_text|>\n' }}\n {%- endfor %}\n {%- endif %}\n {%- for message in loop_messages %}\n {{- '<|start_of_role|>' + message['role'] + '<|end_of_role|>' + message['content'] + '<|end_of_text|>\n' }}\n {%- if loop.last and add_generation_prompt %}\n {{- '<|start_of_role|>assistant' }}\n {%- if controls %}\n {{- ' ' + controls | tojson()}}\n {%- endif %}\n {{- '<|end_of_role|>' }}\n {%- endif %}\n {%- endfor %}",
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|end_of_text|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 9223372036854775807,
  "pad_token": "<|end_of_text|>",
  "padding_side": "left",
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|end_of_text|>",
  "vocab_size": 49152
}
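
The chat_template above defines Granite's role-tagged prompt format (<|start_of_role|>…<|end_of_role|> around each turn). A minimal sketch of rendering it with transformers, assuming a local copy of this checkpoint at the quantization output path used in the README (the path and message content are illustrative):

```python
from transformers import AutoTokenizer

# Illustrative local path; any checkpoint carrying this tokenizer_config.json works
tokenizer = AutoTokenizer.from_pretrained("./AutoRound/ibm-granite_granite-3.3-2b-instruct-autoround-int8-gs64-sym")

messages = [{"role": "user", "content": "Hello!"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # system turn, then the user turn, then an open assistant role tag
```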
vocab.json ADDED
The diff for this file is too large to render. See raw diff