huseinzol05 committed on
Commit e185da9
1 Parent(s): c5f2032

Upload chat-7b-vs-chat-malaysian-llama2-7b.ipynb

chat-7b-vs-chat-malaysian-llama2-7b.ipynb ADDED
@@ -0,0 +1,587 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "66b70728",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Loading the tokenizer from the `special_tokens_map.json` and the `added_tokens.json` will be removed in `transformers 5`, it is kept for forward compatibility, but it is recommended to update your `tokenizer_config.json` by uploading it again. You will see the new `added_tokens_decoder` attribute that will store the relevant information.\n"
+ ]
+ }
+ ],
+ "source": [
+ "from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig\n",
+ "import torch\n",
+ "\n",
+ "tokenizer = AutoTokenizer.from_pretrained('meta-llama/Llama-2-13b-hf')"
+ ]
+ },
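Both models compared below are 7B, while the tokenizer above comes from the 13B repo; every Llama-2 checkpoint ships the same 32k-entry SentencePiece tokenizer, so either source yields identical tokenization. A minimal sanity check, assuming access to both gated meta-llama repos:

```python
from transformers import AutoTokenizer

# Hedged sanity check: the Llama-2 sizes share one tokenizer, so the 13B
# tokenizer loaded above should match the 7B chat one vocab-for-vocab.
tok_7b = AutoTokenizer.from_pretrained('meta-llama/Llama-2-7b-chat-hf')
assert tok_7b.get_vocab() == tokenizer.get_vocab()
```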
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "d608fb34",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "nf4_config = BitsAndBytesConfig(\n",
+ " load_in_4bit=True,\n",
+ " bnb_4bit_quant_type='nf4',\n",
+ " bnb_4bit_use_double_quant=True,\n",
+ " bnb_4bit_compute_dtype=torch.bfloat16\n",
+ ")"
+ ]
+ },
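`nf4_config` stores weights as 4-bit NF4, double-quantizes the quantization constants (the QLoRA paper puts that extra saving at roughly 0.37 bits per parameter), and runs compute in bfloat16. A back-of-envelope sketch of the weight memory this buys for a 7B model; measured footprints will differ once quantization constants, buffers, and the KV cache are counted:

```python
# Illustrative arithmetic only, not a measurement.
params = 7e9
print(f'fp16: {params * 2 / 1e9:.1f} GB')    # 2 bytes per param   -> ~14 GB
print(f'nf4:  {params * 0.5 / 1e9:.1f} GB')  # 0.5 bytes per param -> ~3.5 GB
```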
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "4d63a1ec",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "81183d23312f44c395ebc24c90b00b9a",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading (…)lve/main/config.json: 0%| | 0.00/614 [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[2023-09-28 13:30:57,403] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "2023-09-28 13:30:59.987982: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
+ "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
+ "2023-09-28 13:31:00.753839: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n"
+ ]
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "27cb7e0cab8f41e7821c3a3ec20775f4",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading (…)fetensors.index.json: 0%| | 0.00/26.8k [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "22046f04786f4fb2a0083b992ef15ed9",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading shards: 0%| | 0/2 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "d511e6240c8f4a38bbd50db32c7683ac",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading (…)of-00002.safetensors: 0%| | 0.00/9.98G [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "48040c6b453a4fdcafe4804a59cd2a5e",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading (…)of-00002.safetensors: 0%| | 0.00/3.50G [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "606b90aac60f4e929971c6a59e541d2d",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "cefa1eb5f2544e0d8680f49894327982",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading (…)neration_config.json: 0%| | 0.00/188 [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "base_model = AutoModelForCausalLM.from_pretrained('meta-llama/Llama-2-7b-chat-hf', quantization_config=nf4_config)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "5f302edc",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "941316733cfd4b1e91b6b0fde25b2e89",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading (…)lve/main/config.json: 0%| | 0.00/610 [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "274394cbf3294cbf99fca0a747d31e19",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading (…)fetensors.index.json: 0%| | 0.00/23.9k [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "a191ace377154ade90cd5d91115bfbe4",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading shards: 0%| | 0/2 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "00f8e9771a40482da7c0ebe0079bd71a",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading (…)of-00002.safetensors: 0%| | 0.00/9.98G [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "e25a10a036d845e2aef166ac8137c2b6",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading (…)of-00002.safetensors: 0%| | 0.00/3.50G [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "19a2f9dcdfcd46ac81247d3d9548b512",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "5b37822c7e16411c98a5bb6ab13a1e1c",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading (…)neration_config.json: 0%| | 0.00/183 [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "fpf_model = AutoModelForCausalLM.from_pretrained('mesolitica/malaysian-llama2-7b-32k-instructions', quantization_config=nf4_config)"
+ ]
+ },
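With both 7B checkpoints resident in NF4, `get_memory_footprint()` on any `PreTrainedModel` gives a quick confirmation that quantization took effect; a sketch, with figures that vary by transformers/bitsandbytes version:

```python
# Expect roughly 3.5-4 GB per model in NF4, versus ~13-14 GB in float16.
for name, m in [('Llama-2-7b-chat', base_model), ('malaysian-llama2-7b', fpf_model)]:
    print(name, round(m.get_memory_footprint() / 1e9, 2), 'GB')
```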
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "f48e5c55",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def parse_llama_chat(\n",
+ " messages,\n",
+ " keys={'role', 'content'},\n",
+ " roles={'system', 'user', 'assistant'},\n",
+ "):\n",
+ "\n",
+ " system = messages[0]['content']\n",
+ " user_query = messages[-1]['content']\n",
+ "\n",
+ " users, assistants = [], []\n",
+ " for q in messages[1:-1]:\n",
+ " if q['role'] == 'user':\n",
+ " users.append(q['content'])\n",
+ " elif q['role'] == 'assistant':\n",
+ " assistants.append(q['content'])\n",
+ "\n",
+ " if len(users) != len(assistants):\n",
+ " raise ValueError(\n",
+ " 'model only supports `system`, `user` and `assistant` roles, '\n",
+ " 'starting with `system`, then `user` and alternating (u/a/u/a/u...)'\n",
+ " )\n",
+ "\n",
+ " texts = [f'<s>[INST] <<SYS>>\\n{system}\\n<</SYS>>\\n\\n']\n",
+ " for u, a in zip(users, assistants):\n",
+ " texts.append(f'{u.strip()} [/INST] {a.strip()} </s><s>[INST] ')\n",
+ " texts.append(f'{user_query.strip()} [/INST]')\n",
+ " prompt = ''.join(texts).strip()\n",
+ " return prompt"
+ ]
+ },
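`parse_llama_chat` builds the standard Llama-2 chat template: the system prompt inside `<<SYS>>` markers, completed user/assistant turns closed with `</s><s>[INST]`, and the final user message left open after `[/INST]`. A quick check with a hypothetical two-turn history:

```python
# Illustrative history; parse_llama_chat is the function defined above.
history = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'user', 'content': 'hello'},
    {'role': 'assistant', 'content': 'hi there'},
    {'role': 'user', 'content': 'kwsp tu apa'},
]
print(parse_llama_chat(history))
# <s>[INST] <<SYS>>
# You are a helpful assistant.
# <</SYS>>
#
# hello [/INST] hi there </s><s>[INST] kwsp tu apa [/INST]
```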
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "af485f12",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import time\n",
+ "from tqdm import tqdm\n",
+ "\n",
+ "kwargs = {\n",
+ " 'temperature': 0.9,\n",
+ " 'max_new_tokens': 256,\n",
+ " 'top_p': 0.95,\n",
+ " 'repetition_penalty': 1.0,\n",
+ " 'do_sample': True,\n",
+ " 'num_beams': 1,\n",
+ "}"
+ ]
+ },
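These settings sample (temperature 0.9, top-p 0.95, no beam search), so every run of the comparison below produces different text. For a reproducible side-by-side run, a hypothetical greedy variant; `greedy_kwargs` is not part of the notebook:

```python
# Greedy decoding: deterministic output, at the cost of less varied text.
# temperature/top_p are reset so transformers does not warn about unused
# sampling parameters when do_sample=False.
greedy_kwargs = {**kwargs, 'do_sample': False, 'temperature': 1.0, 'top_p': 1.0}
```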
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "bde9c04f",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'<s>[INST] <<SYS>>\\nAnda adalah pembantu AI yang berguna dan mampu jawab segala soalan yang diberikan. Jawapan yang diberikan haruslah panjang dan tepat.\\n<</SYS>>\\n\\nkwsp tu apa [/INST]'"
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "messages = [\n",
+ " {'role': 'system', 'content': 'Anda adalah pembantu AI yang berguna dan mampu jawab segala soalan yang diberikan. Jawapan yang diberikan haruslah panjang dan tepat.'},\n",
+ " {'role': 'user', 'content': 'kwsp tu apa'}\n",
+ "]\n",
+ "prompt = parse_llama_chat(messages)\n",
+ "prompt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "id": "8209faad",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "inputs = tokenizer([prompt], return_tensors='pt').to('cuda')\n",
+ "generate_kwargs = dict(inputs)\n",
+ "generate_kwargs = {**generate_kwargs, **kwargs}"
+ ]
+ },
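The tokenize, generate, decode, and split-on-`[/INST]` steps are repeated below for each question and each model. A hypothetical helper (the name `chat` is illustrative; everything it calls is defined above) collapses that pattern:

```python
def chat(model, messages, **overrides):
    # Build the Llama-2 prompt, move tokens to GPU, generate, and keep only
    # the text after the last [/INST] marker, exactly as the cells below do.
    prompt = parse_llama_chat(messages)
    inputs = tokenizer([prompt], return_tensors='pt').to('cuda')
    out = model.generate(**inputs, **{**kwargs, **overrides})
    decoded = tokenizer.decode(out[0], skip_special_tokens=True)
    return decoded.split('[/INST]')[-1].strip()

# e.g. print(chat(fpf_model, messages))
```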
+ {
+ "cell_type": "markdown",
+ "id": "3330733b",
+ "metadata": {},
+ "source": [
+ "## Chat 7B"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "id": "09407603",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "o = base_model.generate(**generate_kwargs)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "id": "c76fc2b7",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "As an AI language model, I can assist with a wide range of questions and tasks. Here are some examples of what I can help with:\n",
+ "1. Answering questions: I can provide detailed and accurate answers to various questions on a variety of topics, including science, history, technology, and more.\n",
+ "2. Generating text: I can produce text on a given topic or subject, including articles, stories, and other types of writing.\n",
+ "3. Translation: I can translate text from one language to another, including popular languages such as Spanish, French, German, Chinese, and many more.\n",
+ "4. Summarization: I can summarize long pieces of text, such as articles or documents, into shorter, more digestible versions.\n",
+ "5. Creative writing: I can generate creative content, such as poetry or short stories, based on prompts or topics provided.\n",
+ "6. Conversation: I can engage in natural-sounding conversations, using context and understanding to respond to questions and statements.\n",
+ "7. Language understanding: I can understand and interpret natural language, including idioms, sarcasm, and other forms of nuanced communication.\n",
+ "8. Dialogue generation: I can generate\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(tokenizer.decode(o[0], skip_special_tokens=True).split('[/INST]')[-1].strip())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f48183e9",
+ "metadata": {},
+ "source": [
+ "## Malaysian Llama2 7B 32k"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "id": "0379d919",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "o = fpf_model.generate(**generate_kwargs)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "id": "c528ac10",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Kumpulan Wang Simpanan Pekerja (KWSP) ialah organisasi yang ditubuhkan di Malaysia untuk membantu rakyatnya menyimpan wang untuk persaraan. KWSP menawarkan beberapa pelan untuk memenuhi keperluan penyimpan berbeza, dengan pelaburan dalam dana, ekuiti, instrumen bon dan lain-lain. Keuntungan dan dividen daripada pelaburan tersebut digunakan untuk melindungi kepentingan simpanan persaraan penyimpan. KWSP juga menyediakan akses mudah kepada penyimpan untuk menyemak penyata dan akaun, serta untuk memindahkan dana ke pelaburan baharu atau membuat pengeluaran simpanan persaraan. Ini membolehkan penyimpan memperoleh pulangan yang dijangka di atas inflasi dan\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(tokenizer.decode(o[0], skip_special_tokens=True).split('[/INST]')[-1].strip())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "id": "a5ab5e88",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'<s>[INST] <<SYS>>\\nAnda adalah pembantu AI yang berguna dan mampu jawab segala soalan yang diberikan. Jawapan yang diberikan haruslah panjang dan tepat.\\n<</SYS>>\\n\\nawat malaysia ada lembaga koko, malaysia bukan buat keluaq koko banyak pun [/INST]'"
+ ]
+ },
+ "execution_count": 40,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "messages = [\n",
+ " {'role': 'system', 'content': 'Anda adalah pembantu AI yang berguna dan mampu jawab segala soalan yang diberikan. Jawapan yang diberikan haruslah panjang dan tepat.'},\n",
+ " {'role': 'user', 'content': 'awat malaysia ada lembaga koko, malaysia bukan buat keluaq koko banyak pun'}\n",
+ "]\n",
+ "prompt = parse_llama_chat(messages)\n",
+ "prompt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "id": "33bcbd70",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "inputs = tokenizer([prompt], return_tensors='pt').to('cuda')\n",
+ "generate_kwargs = dict(inputs)\n",
+ "generate_kwargs = {**generate_kwargs, **kwargs}"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "20594ee7",
+ "metadata": {},
+ "source": [
+ "## Chat 7B"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "id": "73773791",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "o = base_model.generate(**generate_kwargs)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "id": "8b173ddb",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "I apologize, but the statement \"Malaysia ada lembaga koko\" is not accurate. Malaysia does not have a chocolate industry. While Malaysia is known for its rich cultural heritage and diverse food scene, chocolate is not a significant part of the country's culinary traditions.\n",
+ "However, there are some chocolate brands in Malaysia that produce high-quality chocolate products, such as dark chocolate, milk chocolate, and white chocolate. These brands often source their cocoa beans from sustainable farms in West Africa, South America, and other parts of Southeast Asia.\n",
+ "Some popular chocolate brands in Malaysia include:\n",
+ "1. Cacao & Co.: This brand offers a range of premium chocolate products, including dark chocolate bars, truffles, and gift boxes.\n",
+ "2. The Chocolate Room: This boutique chocolate shop in Kuala Lumpur offers a wide range of artisanal chocolate products, including handcrafted truffles, chocolate bars, and gift boxes.\n",
+ "3. Chocolate Dreams: This\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(tokenizer.decode(o[0], skip_special_tokens=True).split('[/INST]')[-1].strip())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4e43759c",
+ "metadata": {},
+ "source": [
+ "## Malaysian Llama2 7B 32k"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 44,
+ "id": "5a0a6dbf",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "o = fpf_model.generate(**generate_kwargs)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "id": "a67751d8",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Lembaga Koko Malaysia ialah agensi kerajaan yang ditubuhkan untuk mengawal selia industri koko Malaysia. Lembaga Koko ditubuhkan pada tahun 1988 sebagai sebahagian daripada usaha untuk meningkatkan lagi industri koko Malaysia. Tujuan utama lembaga adalah untuk mempromosikan dan membangunkan industri koko Malaysia, termasuk menggalakkan penanaman dan pengeluaran koko, meningkatkan perdagangan koko dan produk koko dan menyediakan sokongan teknikal dan latihan untuk industri koko.\n",
+ "\n",
+ "Walaupun Malaysia tidak menanam banyak koko, ia masih merupakan salah satu negara pengeluar koko utama di dunia, dan pengeksport utama koko di rantau Asia. Koko menyumbang kira-kira 2.8% daripada jum\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(tokenizer.decode(o[0], skip_special_tokens=True).split('[/INST]')[-1].strip())"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }