CharlieFRuan committed on
Commit 17a79e3 · 1 Parent(s): 83a6366

Initial commit

Files changed (5)
  1. logs.txt +0 -0
  2. mlc-chat-config.json +40 -0
  3. tokenizer.json +0 -0
  4. tokenizer.model +3 -0
  5. tokenizer_config.json +35 -0
logs.txt ADDED
The diff for this file is too large to render. See raw diff
 
mlc-chat-config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "model_type": "llama",
+  "quantization": "q4f16_1",
+  "model_config": {
+    "hidden_size": 8192,
+    "intermediate_size": 28672,
+    "num_attention_heads": 64,
+    "num_hidden_layers": 80,
+    "rms_norm_eps": 1e-05,
+    "vocab_size": 32000,
+    "position_embedding_base": 10000,
+    "context_window_size": 4096,
+    "prefill_chunk_size": 4096,
+    "num_key_value_heads": 8,
+    "head_dim": 128,
+    "tensor_parallel_shards": 1
+  },
+  "vocab_size": 32000,
+  "context_window_size": 4096,
+  "sliding_window_size": -1,
+  "prefill_chunk_size": 4096,
+  "attention_sink_size": -1,
+  "tensor_parallel_shards": 1,
+  "mean_gen_len": 128,
+  "max_gen_len": 512,
+  "shift_fill_factor": 0.3,
+  "temperature": 0.6,
+  "repetition_penalty": 1.0,
+  "top_p": 0.9,
+  "conv_template": "llama-2",
+  "pad_token_id": 0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "tokenizer_files": [
+    "tokenizer.model",
+    "tokenizer.json",
+    "tokenizer_config.json"
+  ],
+  "version": "0.1.0"
+}
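
The config above describes a q4f16_1-quantized Llama model (8192 hidden size, 80 layers, grouped-query attention with 8 KV heads) together with MLC chat runtime defaults (llama-2 conversation template, temperature 0.6, top_p 0.9). As a quick consistency check of those shape fields, here is a minimal Python sketch, assuming the file has been downloaded locally under the same name:

import json

# Hypothetical local path to the file added in this commit.
with open("mlc-chat-config.json") as f:
    cfg = json.load(f)

mc = cfg["model_config"]

# head_dim should equal hidden_size / num_attention_heads: 8192 / 64 = 128.
assert mc["head_dim"] == mc["hidden_size"] // mc["num_attention_heads"]

# Grouped-query attention: 64 query heads share 8 KV heads, i.e. 8 queries per KV head.
print("GQA group size:", mc["num_attention_heads"] // mc["num_key_value_heads"])

# The top-level fields mirror the nested model_config values.
assert cfg["vocab_size"] == mc["vocab_size"] == 32000
assert cfg["context_window_size"] == mc["context_window_size"] == 4096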
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
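
tokenizer.model is checked in as a Git LFS pointer, so the repository only records the SHA-256 and size of the real SentencePiece model. A minimal sketch, assuming the actual 499723-byte file has already been fetched to a local path, for verifying it against the pointer above:

import hashlib

EXPECTED_OID = "9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347"
EXPECTED_SIZE = 499723

# Hypothetical local path to the resolved (non-pointer) tokenizer.model.
with open("tokenizer.model", "rb") as f:
    data = f.read()

assert len(data) == EXPECTED_SIZE, "size does not match the LFS pointer"
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID, "sha256 does not match the LFS pointer"
print("tokenizer.model matches the LFS pointer")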
tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "legacy": false,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
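
The tokenizer settings above are a standard LlamaTokenizer configuration: a BOS token is prepended on encode (add_bos_token: true), no EOS is appended (add_eos_token: false), and <s>, </s>, <unk> are declared as special tokens. A minimal sketch of loading it with the Hugging Face transformers AutoTokenizer, assuming transformers is installed and this repository has been cloned locally (the path is an assumption):

from transformers import AutoTokenizer

# Hypothetical local checkout of this repository.
tok = AutoTokenizer.from_pretrained("./path/to/this/repo")

ids = tok("Hello")["input_ids"]
# add_bos_token=true: the sequence starts with <s> (bos_token_id).
print(ids[0] == tok.bos_token_id)   # expected: True
# add_eos_token=false: no </s> is appended.
print(ids[-1] == tok.eos_token_id)  # expected: False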