Vectorrent committed
Commit 19697dd · verified · 1 Parent(s): 3151f99

Upload 3 files
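To work against exactly these files, downloads can be pinned to this revision. A minimal sketch using huggingface_hub (the repo id below is a placeholder, since this page does not name the repository; only the commit hash 19697dd is taken from it):

# Sketch: pin a download to the commit shown above.
# NOTE: "Vectorrent/tokenizer-repo" is a hypothetical repo id, not from this page.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="Vectorrent/tokenizer-repo",  # placeholder repo id
    filename="tokenizer_config.json",     # one of the three uploaded files
    revision="19697dd",                   # short hash of this commit
)
print(path)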

Files changed (3)
  1. special_tokens_map.json +3 -19
  2. tokenizer.json +0 -0
  3. tokenizer_config.json +7 -26
special_tokens_map.json CHANGED
@@ -1,36 +1,20 @@
 {
-  "additional_special_tokens": [
-    {
-      "content": "<|im_start|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    },
-    {
-      "content": "<|im_end|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    }
-  ],
   "bos_token": {
-    "content": "<|bos|>",
+    "content": "<|im_start|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
-    "content": "<|eos|>",
+    "content": "<|im_end|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "pad_token": {
-    "content": "<|pad|>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
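The tokenizer.json diff can't be rendered here, but the updated vocabulary can be checked locally with the tokenizers library once the raw file is downloaded. A short sketch (the expected IDs come from the added_tokens_decoder table in the tokenizer_config.json diff below):

# Sketch: inspect the unrendered tokenizer.json after downloading the raw file.
# Expected IDs are taken from added_tokens_decoder in tokenizer_config.json.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")
print(tok.token_to_id("<|endoftext|>"))  # expected: 0 (new pad token)
print(tok.token_to_id("<|im_start|>"))   # expected: 1 (new bos token)
print(tok.token_to_id("<|im_end|>"))     # expected: 2 (new eos token)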
 
tokenizer_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "added_tokens_decoder": {
     "0": {
-      "content": "<|pad|>",
+      "content": "<|endoftext|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -9,22 +9,6 @@
       "special": true
     },
     "1": {
-      "content": "<|bos|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "2": {
-      "content": "<|eos|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "3": {
       "content": "<|im_start|>",
       "lstrip": false,
       "normalized": false,
@@ -32,7 +16,7 @@
       "single_word": false,
       "special": true
     },
-    "4": {
+    "2": {
       "content": "<|im_end|>",
       "lstrip": false,
       "normalized": false,
@@ -41,14 +25,11 @@
       "special": true
     }
   },
-  "additional_special_tokens": [
-    "<|im_start|>",
-    "<|im_end|>"
-  ],
-  "bos_token": "<|bos|>",
-  "clean_up_tokenization_spaces": true,
-  "eos_token": "<|eos|>",
+  "bos_token": "<|im_start|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "extra_special_tokens": {},
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "<|pad|>",
+  "pad_token": "<|endoftext|>",
   "tokenizer_class": "PreTrainedTokenizerFast"
 }
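Taken together, the three files retag the ChatML delimiters as the primary special tokens: <|im_start|> becomes bos (id 1), <|im_end|> becomes eos (id 2), and <|endoftext|> becomes pad (id 0), replacing the old <|bos|>/<|eos|>/<|pad|> entries and the separate additional_special_tokens list. A minimal verification sketch with transformers (placeholder repo id again):

# Sketch: verify the remapped special tokens end to end.
# NOTE: "Vectorrent/tokenizer-repo" is a hypothetical repo id, not from this page.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "Vectorrent/tokenizer-repo", revision="19697dd"
)
assert tokenizer.bos_token == "<|im_start|>" and tokenizer.bos_token_id == 1
assert tokenizer.eos_token == "<|im_end|>" and tokenizer.eos_token_id == 2
assert tokenizer.pad_token == "<|endoftext|>" and tokenizer.pad_token_id == 0
print(tokenizer.special_tokens_map)  # no additional_special_tokens entry anymore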