Safetensors

Commit 88a9dcf · verified · Parent: 29dd51b
codebyzeb committed: Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes.

Files changed (50):
  1. Dutch/config.json +31 -0
  2. Dutch/generation_config.json +6 -0
  3. Dutch/model.safetensors +3 -0
  4. Dutch/special_tokens_map.json +30 -0
  5. Dutch/tokenizer.json +167 -0
  6. Dutch/tokenizer_config.json +44 -0
  7. Dutch/training_args.bin +3 -0
  8. Dutch/vocab.json +1 -0
  9. English/config.json +31 -0
  10. English/generation_config.json +6 -0
  11. English/model.safetensors +3 -0
  12. English/special_tokens_map.json +30 -0
  13. English/tokenizer.json +164 -0
  14. English/tokenizer_config.json +44 -0
  15. English/training_args.bin +3 -0
  16. English/vocab.json +1 -0
  17. EnglishUK/config.json +31 -0
  18. EnglishUK/generation_config.json +6 -0
  19. EnglishUK/model.safetensors +3 -0
  20. EnglishUK/special_tokens_map.json +30 -0
  21. EnglishUK/tokenizer.json +168 -0
  22. EnglishUK/tokenizer_config.json +44 -0
  23. EnglishUK/training_args.bin +3 -0
  24. EnglishUK/vocab.json +1 -0
  25. Estonian/config.json +31 -0
  26. Estonian/generation_config.json +6 -0
  27. Estonian/model.safetensors +3 -0
  28. Estonian/special_tokens_map.json +30 -0
  29. Estonian/tokenizer.json +185 -0
  30. Estonian/tokenizer_config.json +44 -0
  31. Estonian/training_args.bin +3 -0
  32. Estonian/vocab.json +1 -0
  33. French/config.json +31 -0
  34. French/generation_config.json +6 -0
  35. French/model.safetensors +3 -0
  36. French/special_tokens_map.json +30 -0
  37. French/tokenizer.json +156 -0
  38. French/tokenizer_config.json +44 -0
  39. French/training_args.bin +3 -0
  40. French/vocab.json +1 -0
  41. German/config.json +31 -0
  42. German/generation_config.json +6 -0
  43. German/model.safetensors +3 -0
  44. German/special_tokens_map.json +30 -0
  45. German/tokenizer.json +162 -0
  46. German/tokenizer_config.json +44 -0
  47. German/training_args.bin +3 -0
  48. German/vocab.json +1 -0
  49. Indonesian/config.json +31 -0
  50. Indonesian/generation_config.json +6 -0
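
Each language folder uploaded here is a self-contained checkpoint: a small GPT-2 phoneme-level language model (config.json, generation_config.json, model.safetensors, training_args.bin) plus its word-level phoneme tokenizer (tokenizer.json, tokenizer_config.json, special_tokens_map.json, vocab.json). As a minimal sketch of how one of these folders could be loaded with transformers; the repo id below is a hypothetical placeholder, not something stated in this diff:

    # Hypothetical usage sketch; REPO_ID is a placeholder for the actual Hub repo.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    REPO_ID = "user/phoneme-gpt2"  # placeholder, not from this diff

    # `subfolder` selects one per-language directory of this upload.
    tokenizer = AutoTokenizer.from_pretrained(REPO_ID, subfolder="Dutch")
    model = AutoModelForCausalLM.from_pretrained(REPO_ID, subfolder="Dutch")

    # Input is a space-separated IPA phoneme string; UTT_BOUNDARY (id 3)
    # serves as both BOS and EOS, per the configs below.
    inputs = tokenizer("d ɑ t", return_tensors="pt")
    output = model.generate(**inputs, max_new_tokens=10)
    print(tokenizer.decode(output[0]))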
Dutch/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.3,
+   "bos_token_id": 3,
+   "embd_pdrop": 0.3,
+   "eos_token_id": 3,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_embd": 256,
+   "n_head": 8,
+   "n_inner": 1024,
+   "n_layer": 6,
+   "n_positions": 256,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.3,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.2",
+   "use_cache": true,
+   "vocab_size": 50
+ }
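
These settings describe a deliberately tiny GPT-2: 6 layers, 8 heads, 256-dimensional embeddings, a 1024-dimensional MLP, and a 256-token context, over a 50-phoneme vocabulary. A sketch of rebuilding the architecture from the values above (transformers only; this mirrors the config rather than quoting any training code):

    from transformers import GPT2Config, GPT2LMHeadModel

    config = GPT2Config(
        vocab_size=50, n_positions=256, n_embd=256,
        n_layer=6, n_head=8, n_inner=1024,
        bos_token_id=3, eos_token_id=3,
        attn_pdrop=0.3, embd_pdrop=0.3, resid_pdrop=0.3,
    )
    model = GPT2LMHeadModel(config)
    # Roughly 4.8M parameters, consistent with the ~19.3 MB float32
    # model.safetensors file below (19,277,120 bytes / 4 bytes per weight).
    print(model.num_parameters())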
Dutch/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 3,
+   "eos_token_id": 3,
+   "transformers_version": "4.44.2"
+ }
Dutch/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb382725a383237baa18d293bec85426226eed75c349ecf65f51c75903cf475f
+ size 19277120
Dutch/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "UTT_BOUNDARY",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "UTT_BOUNDARY",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "PAD",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "UNK",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
Dutch/tokenizer.json ADDED
@@ -0,0 +1,167 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "UNK",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "PAD",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "WORD_BOUNDARY",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "UTT_BOUNDARY",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "Sequence",
+     "normalizers": [
+       {
+         "type": "Strip",
+         "strip_left": true,
+         "strip_right": true
+       }
+     ]
+   },
+   "pre_tokenizer": {
+     "type": "Whitespace"
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "UTT_BOUNDARY": {
+         "id": "UTT_BOUNDARY",
+         "ids": [
+           3
+         ],
+         "tokens": [
+           "UTT_BOUNDARY"
+         ]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "UNK": 0,
+       "PAD": 1,
+       "WORD_BOUNDARY": 2,
+       "UTT_BOUNDARY": 3,
+       "z": 4,
+       "oː": 5,
+       "j": 6,
+       "ãː": 7,
+       "ɦ": 8,
+       "ɾ": 9,
+       "d": 10,
+       "i": 11,
+       "ɛ": 12,
+       "p": 13,
+       "ɪ": 14,
+       "k": 15,
+       "ɑ": 16,
+       "l": 17,
+       "ɛː": 18,
+       "n": 19,
+       "s": 20,
+       "v": 21,
+       "ə": 22,
+       "ɛi": 23,
+       "ʋ": 24,
+       "t": 25,
+       "m": 26,
+       "ɣ": 27,
+       "ʏ": 28,
+       "ɔ": 29,
+       "x": 30,
+       "u": 31,
+       "f": 32,
+       "ŋ": 33,
+       "øː": 34,
+       "b": 35,
+       "ɔː": 36,
+       "ʌu": 37,
+       "y": 38,
+       "œy": 39,
+       "tʲ": 40,
+       "w": 41,
+       "ʃ": 42,
+       "t̠ʃ": 43,
+       "ɲ": 44,
+       "ʒ": 45,
+       "iː": 46,
+       "ɡ": 47,
+       "d̠ʒ": 48,
+       "ã": 49
+     },
+     "unk_token": "UNK"
+   }
+ }
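
The pipeline above is straightforward to check with the tokenizers library: Strip normalization, Whitespace pre-tokenization, a WordLevel model over the phoneme vocabulary, and a TemplateProcessing post-processor that prefixes UTT_BOUNDARY to every sequence. A small sketch, assuming the file has been downloaded locally:

    from tokenizers import Tokenizer

    tok = Tokenizer.from_file("Dutch/tokenizer.json")  # assumed local path
    enc = tok.encode("ɦ ɛ t")  # phonemes are space-separated
    print(enc.tokens)  # ['UTT_BOUNDARY', 'ɦ', 'ɛ', 't'], BOS added by the post-processor
    print(enc.ids)     # [3, 8, 12, 25]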
Dutch/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "UNK",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "PAD",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "WORD_BOUNDARY",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "UTT_BOUNDARY",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "UTT_BOUNDARY",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "UTT_BOUNDARY",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "PAD",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "UNK"
+ }
Dutch/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2c9ee60f7de58a12c627b43469ecaa3e60ff93c50c070b05a73528e3e59ac86
+ size 5368
Dutch/vocab.json ADDED
@@ -0,0 +1 @@
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"z":4,"oː":5,"j":6,"ãː":7,"ɦ":8,"ɾ":9,"d":10,"i":11,"ɛ":12,"p":13,"ɪ":14,"k":15,"ɑ":16,"l":17,"ɛː":18,"n":19,"s":20,"v":21,"ə":22,"ɛi":23,"ʋ":24,"t":25,"m":26,"ɣ":27,"ʏ":28,"ɔ":29,"x":30,"u":31,"f":32,"ŋ":33,"øː":34,"b":35,"ɔː":36,"ʌu":37,"y":38,"œy":39,"tʲ":40,"w":41,"ʃ":42,"t̠ʃ":43,"ɲ":44,"ʒ":45,"iː":46,"ɡ":47,"d̠ʒ":48,"ã":49}
English/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.3,
+   "bos_token_id": 3,
+   "embd_pdrop": 0.3,
+   "eos_token_id": 3,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_embd": 256,
+   "n_head": 8,
+   "n_inner": 1024,
+   "n_layer": 6,
+   "n_positions": 256,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.3,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.2",
+   "use_cache": true,
+   "vocab_size": 47
+ }
English/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 3,
+   "eos_token_id": 3,
+   "transformers_version": "4.44.2"
+ }
English/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c54b765e3060afc585e5502181ae99612b08693df1ac0f0601d50c91a3830cee
+ size 19274048
English/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "UTT_BOUNDARY",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "UTT_BOUNDARY",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "PAD",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "UNK",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
English/tokenizer.json ADDED
@@ -0,0 +1,164 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "UNK",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "PAD",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "WORD_BOUNDARY",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "UTT_BOUNDARY",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "Sequence",
+     "normalizers": [
+       {
+         "type": "Strip",
+         "strip_left": true,
+         "strip_right": true
+       }
+     ]
+   },
+   "pre_tokenizer": {
+     "type": "Whitespace"
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "UTT_BOUNDARY": {
+         "id": "UTT_BOUNDARY",
+         "ids": [
+           3
+         ],
+         "tokens": [
+           "UTT_BOUNDARY"
+         ]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "UNK": 0,
+       "PAD": 1,
+       "WORD_BOUNDARY": 2,
+       "UTT_BOUNDARY": 3,
+       "d̠ʒ": 4,
+       "ʌ": 5,
+       "s": 6,
+       "t": 7,
+       "l": 8,
+       "aɪ": 9,
+       "k": 10,
+       "j": 11,
+       "ʊ": 12,
+       "ɹ": 13,
+       "b": 14,
+       "æ": 15,
+       "h": 16,
+       "oʊ": 17,
+       "m": 18,
+       "iː": 19,
+       "ð": 20,
+       "ɛ": 21,
+       "z": 22,
+       "f": 23,
+       "eɪ": 24,
+       "w": 25,
+       "ɪ": 26,
+       "ɡ": 27,
+       "ɑ": 28,
+       "ə": 29,
+       "p": 30,
+       "uː": 31,
+       "i": 32,
+       "θ": 33,
+       "ŋ": 34,
+       "ɔ": 35,
+       "ɔɪ": 36,
+       "n": 37,
+       "d": 38,
+       "aʊ": 39,
+       "v": 40,
+       "ɜː": 41,
+       "t̠ʃ": 42,
+       "ʃ": 43,
+       "iə": 44,
+       "ʒ": 45,
+       "x": 46
+     },
+     "unk_token": "UNK"
+   }
+ }
English/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "UNK",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "PAD",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "WORD_BOUNDARY",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "UTT_BOUNDARY",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "UTT_BOUNDARY",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "UTT_BOUNDARY",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "PAD",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "UNK"
+ }
English/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5b88208cc696a256424f3bebcb6f4e5ad96d4bc49d36f2ed165f53a5054f166
+ size 5368
English/vocab.json ADDED
@@ -0,0 +1 @@
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"d̠ʒ":4,"ʌ":5,"s":6,"t":7,"l":8,"aɪ":9,"k":10,"j":11,"ʊ":12,"ɹ":13,"b":14,"æ":15,"h":16,"oʊ":17,"m":18,"iː":19,"ð":20,"ɛ":21,"z":22,"f":23,"eɪ":24,"w":25,"ɪ":26,"ɡ":27,"ɑ":28,"ə":29,"p":30,"uː":31,"i":32,"θ":33,"ŋ":34,"ɔ":35,"ɔɪ":36,"n":37,"d":38,"aʊ":39,"v":40,"ɜː":41,"t̠ʃ":42,"ʃ":43,"iə":44,"ʒ":45,"x":46}
EnglishUK/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.3,
+   "bos_token_id": 3,
+   "embd_pdrop": 0.3,
+   "eos_token_id": 3,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_embd": 256,
+   "n_head": 8,
+   "n_inner": 1024,
+   "n_layer": 6,
+   "n_positions": 256,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.3,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.2",
+   "use_cache": true,
+   "vocab_size": 51
+ }
EnglishUK/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 3,
+   "eos_token_id": 3,
+   "transformers_version": "4.44.2"
+ }
EnglishUK/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d7fd1d94d4cdf2ed713efb0d5efca1700bbea2090d49515025d0f20a92bb3e0
+ size 19278144
EnglishUK/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "UTT_BOUNDARY",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "UTT_BOUNDARY",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "PAD",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "UNK",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
EnglishUK/tokenizer.json ADDED
@@ -0,0 +1,168 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "UNK",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "PAD",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "WORD_BOUNDARY",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "UTT_BOUNDARY",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "Sequence",
+     "normalizers": [
+       {
+         "type": "Strip",
+         "strip_left": true,
+         "strip_right": true
+       }
+     ]
+   },
+   "pre_tokenizer": {
+     "type": "Whitespace"
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "UTT_BOUNDARY": {
+         "id": "UTT_BOUNDARY",
+         "ids": [
+           3
+         ],
+         "tokens": [
+           "UTT_BOUNDARY"
+         ]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "UNK": 0,
+       "PAD": 1,
+       "WORD_BOUNDARY": 2,
+       "UTT_BOUNDARY": 3,
+       "ð": 4,
+       "æ": 5,
+       "tʰ": 6,
+       "ɡ": 7,
+       "ʊ": 8,
+       "d": 9,
+       "ɑː": 10,
+       "l": 11,
+       "ɪ": 12,
+       "n": 13,
+       "eɪ": 14,
+       "t̠ʃ": 15,
+       "w": 16,
+       "ɒ": 17,
+       "ʌ": 18,
+       "z": 19,
+       "m": 20,
+       "iː": 21,
+       "aɪ": 22,
+       "h": 23,
+       "e": 24,
+       "kʰ": 25,
+       "s": 26,
+       "ə": 27,
+       "ɔː": 28,
+       "ɹ": 29,
+       "i": 30,
+       "əʊ": 31,
+       "uː": 32,
+       "j": 33,
+       "ɪə": 34,
+       "ɔɪ": 35,
+       "v": 36,
+       "f": 37,
+       "ɜː": 38,
+       "b": 39,
+       "pʰ": 40,
+       "d̠ʒ": 41,
+       "ɐ": 42,
+       "eə": 43,
+       "ʃ": 44,
+       "θ": 45,
+       "ŋ": 46,
+       "aʊ": 47,
+       "ʊə": 48,
+       "n̩": 49,
+       "ʒ": 50
+     },
+     "unk_token": "UNK"
+   }
+ }
EnglishUK/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "UNK",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "PAD",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "WORD_BOUNDARY",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "UTT_BOUNDARY",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "UTT_BOUNDARY",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "UTT_BOUNDARY",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "PAD",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "UNK"
+ }
EnglishUK/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d9d382e572636f068bff05dc72ed1291fe1d92a81f50d2339232a20ae283c45
+ size 5368
EnglishUK/vocab.json ADDED
@@ -0,0 +1 @@
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"ð":4,"æ":5,"tʰ":6,"ɡ":7,"ʊ":8,"d":9,"ɑː":10,"l":11,"ɪ":12,"n":13,"eɪ":14,"t̠ʃ":15,"w":16,"ɒ":17,"ʌ":18,"z":19,"m":20,"iː":21,"aɪ":22,"h":23,"e":24,"kʰ":25,"s":26,"ə":27,"ɔː":28,"ɹ":29,"i":30,"əʊ":31,"uː":32,"j":33,"ɪə":34,"ɔɪ":35,"v":36,"f":37,"ɜː":38,"b":39,"pʰ":40,"d̠ʒ":41,"ɐ":42,"eə":43,"ʃ":44,"θ":45,"ŋ":46,"aʊ":47,"ʊə":48,"n̩":49,"ʒ":50}
Estonian/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.3,
+   "bos_token_id": 3,
+   "embd_pdrop": 0.3,
+   "eos_token_id": 3,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_embd": 256,
+   "n_head": 8,
+   "n_inner": 1024,
+   "n_layer": 6,
+   "n_positions": 256,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.3,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.2",
+   "use_cache": true,
+   "vocab_size": 68
+ }
Estonian/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 3,
+   "eos_token_id": 3,
+   "transformers_version": "4.44.2"
+ }
Estonian/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78c66010ba3c8cc625dad3ad613e7d29b20590f75e414439d1070c5ce184b5c8
+ size 19295552
Estonian/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "UTT_BOUNDARY",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "UTT_BOUNDARY",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "PAD",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "UNK",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
Estonian/tokenizer.json ADDED
@@ -0,0 +1,185 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "UNK",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "PAD",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "WORD_BOUNDARY",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "UTT_BOUNDARY",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "Sequence",
+     "normalizers": [
+       {
+         "type": "Strip",
+         "strip_left": true,
+         "strip_right": true
+       }
+     ]
+   },
+   "pre_tokenizer": {
+     "type": "Whitespace"
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "UTT_BOUNDARY": {
+         "id": "UTT_BOUNDARY",
+         "ids": [
+           3
+         ],
+         "tokens": [
+           "UTT_BOUNDARY"
+         ]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "UNK": 0,
+       "PAD": 1,
+       "WORD_BOUNDARY": 2,
+       "UTT_BOUNDARY": 3,
+       "n": 4,
+       "o": 5,
+       "t": 6,
+       "ʃ": 7,
+       "a": 8,
+       "uː": 9,
+       "m": 10,
+       "u": 11,
+       "tʲ": 12,
+       "i": 13,
+       "s": 14,
+       "eː": 15,
+       "d": 16,
+       "iː": 17,
+       "k": 18,
+       "ɡ": 19,
+       "ɑ": 20,
+       "ɤ": 21,
+       "ʊ": 22,
+       "sʲ": 23,
+       "j": 24,
+       "aː": 25,
+       "h": 26,
+       "v": 27,
+       "æi": 28,
+       "kː": 29,
+       "e": 30,
+       "ɪ": 31,
+       "tː": 32,
+       "r": 33,
+       "ɛ": 34,
+       "mː": 35,
+       "p": 36,
+       "sː": 37,
+       "æ": 38,
+       "l": 39,
+       "pː": 40,
+       "yː": 41,
+       "æː": 42,
+       "b": 43,
+       "ɔ": 44,
+       "ɤː": 45,
+       "lː": 46,
+       "ø": 47,
+       "øː": 48,
+       "ŋ": 49,
+       "y": 50,
+       "oː": 51,
+       "rː": 52,
+       "ɲ": 53,
+       "nː": 54,
+       "w": 55,
+       "tʲː": 56,
+       "øɪ̯": 57,
+       "f": 58,
+       "dʲ": 59,
+       "sʲː": 60,
+       "t̠ʃ": 61,
+       "ʃː": 62,
+       "ʒ": 63,
+       "z": 64,
+       "fː": 65,
+       "dː": 66,
+       "yi": 67
+     },
+     "unk_token": "UNK"
+   }
+ }
Estonian/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "UNK",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "PAD",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "WORD_BOUNDARY",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "UTT_BOUNDARY",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "UTT_BOUNDARY",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "UTT_BOUNDARY",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "PAD",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "UNK"
+ }
Estonian/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b389f6834d44b7985861e48d2b947055cdcc9b0224cf54f56f1ed42aaa40321c
+ size 5368
Estonian/vocab.json ADDED
@@ -0,0 +1 @@
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"n":4,"o":5,"t":6,"ʃ":7,"a":8,"uː":9,"m":10,"u":11,"tʲ":12,"i":13,"s":14,"eː":15,"d":16,"iː":17,"k":18,"ɡ":19,"ɑ":20,"ɤ":21,"ʊ":22,"sʲ":23,"j":24,"aː":25,"h":26,"v":27,"æi":28,"kː":29,"e":30,"ɪ":31,"tː":32,"r":33,"ɛ":34,"mː":35,"p":36,"sː":37,"æ":38,"l":39,"pː":40,"yː":41,"æː":42,"b":43,"ɔ":44,"ɤː":45,"lː":46,"ø":47,"øː":48,"ŋ":49,"y":50,"oː":51,"rː":52,"ɲ":53,"nː":54,"w":55,"tʲː":56,"øɪ̯":57,"f":58,"dʲ":59,"sʲː":60,"t̠ʃ":61,"ʃː":62,"ʒ":63,"z":64,"fː":65,"dː":66,"yi":67}
French/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.3,
+   "bos_token_id": 3,
+   "embd_pdrop": 0.3,
+   "eos_token_id": 3,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_embd": 256,
+   "n_head": 8,
+   "n_inner": 1024,
+   "n_layer": 6,
+   "n_positions": 256,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.3,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.2",
+   "use_cache": true,
+   "vocab_size": 39
+ }
French/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 3,
+   "eos_token_id": 3,
+   "transformers_version": "4.44.2"
+ }
French/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe74d8ebbd538360b2944bffb5f8ee45e043767da7d4cb815304e298f35fcb49
+ size 19265856
French/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "UTT_BOUNDARY",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "UTT_BOUNDARY",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "PAD",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "UNK",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
French/tokenizer.json ADDED
@@ -0,0 +1,156 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "UNK",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "PAD",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "WORD_BOUNDARY",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "UTT_BOUNDARY",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "Sequence",
+     "normalizers": [
+       {
+         "type": "Strip",
+         "strip_left": true,
+         "strip_right": true
+       }
+     ]
+   },
+   "pre_tokenizer": {
+     "type": "Whitespace"
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "UTT_BOUNDARY": {
+         "id": "UTT_BOUNDARY",
+         "ids": [
+           3
+         ],
+         "tokens": [
+           "UTT_BOUNDARY"
+         ]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "UNK": 0,
+       "PAD": 1,
+       "WORD_BOUNDARY": 2,
+       "UTT_BOUNDARY": 3,
+       "m": 4,
+       "a": 5,
+       "ɑ̃": 6,
+       "d": 7,
+       "ɔ": 8,
+       "n": 9,
+       "b": 10,
+       "ʁ": 11,
+       "ə": 12,
+       "ɡ": 13,
+       "ʒ": 14,
+       "i": 15,
+       "v": 16,
+       "t": 17,
+       "k": 18,
+       "o": 19,
+       "ɛ̃": 20,
+       "w": 21,
+       "y": 22,
+       "j": 23,
+       "e": 24,
+       "ɔ̃": 25,
+       "p": 26,
+       "ɛ": 27,
+       "f": 28,
+       "s": 29,
+       "z": 30,
+       "l": 31,
+       "u": 32,
+       "ʃ": 33,
+       "œ": 34,
+       "ø": 35,
+       "ɲ": 36,
+       "t̠ʃ": 37,
+       "d̠ʒ": 38
+     },
+     "unk_token": "UNK"
+   }
+ }
French/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "UNK",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "PAD",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "WORD_BOUNDARY",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "UTT_BOUNDARY",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "UTT_BOUNDARY",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "UTT_BOUNDARY",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "PAD",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "UNK"
+ }
French/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c9f1aacd6bb9bca003a1e88dc2c07742287013b87e4849118db77562308dc4b
+ size 5368
French/vocab.json ADDED
@@ -0,0 +1 @@
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"m":4,"a":5,"ɑ̃":6,"d":7,"ɔ":8,"n":9,"b":10,"ʁ":11,"ə":12,"ɡ":13,"ʒ":14,"i":15,"v":16,"t":17,"k":18,"o":19,"ɛ̃":20,"w":21,"y":22,"j":23,"e":24,"ɔ̃":25,"p":26,"ɛ":27,"f":28,"s":29,"z":30,"l":31,"u":32,"ʃ":33,"œ":34,"ø":35,"ɲ":36,"t̠ʃ":37,"d̠ʒ":38}
German/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.3,
+   "bos_token_id": 3,
+   "embd_pdrop": 0.3,
+   "eos_token_id": 3,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_embd": 256,
+   "n_head": 8,
+   "n_inner": 1024,
+   "n_layer": 6,
+   "n_positions": 256,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.3,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.2",
+   "use_cache": true,
+   "vocab_size": 45
+ }
German/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 3,
+   "eos_token_id": 3,
+   "transformers_version": "4.44.2"
+ }
German/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e3c537e434f8ef587c16ed7d06e2c14cd2b3baa024b2a73eac0e57800f6cdb7
+ size 19272000
German/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "UTT_BOUNDARY",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "UTT_BOUNDARY",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "PAD",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "UNK",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
German/tokenizer.json ADDED
@@ -0,0 +1,162 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "UNK",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "PAD",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "WORD_BOUNDARY",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "UTT_BOUNDARY",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "Sequence",
+     "normalizers": [
+       {
+         "type": "Strip",
+         "strip_left": true,
+         "strip_right": true
+       }
+     ]
+   },
+   "pre_tokenizer": {
+     "type": "Whitespace"
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "UTT_BOUNDARY": {
+         "id": "UTT_BOUNDARY",
+         "ids": [
+           3
+         ],
+         "tokens": [
+           "UTT_BOUNDARY"
+         ]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "UNK": 0,
+       "PAD": 1,
+       "WORD_BOUNDARY": 2,
+       "UTT_BOUNDARY": 3,
+       "aː": 4,
+       "oː": 5,
+       "a": 6,
+       "b": 7,
+       "x": 8,
+       "v": 9,
+       "øː": 10,
+       "n": 11,
+       "ɛː": 12,
+       "f": 13,
+       "l": 14,
+       "iː": 15,
+       "yː": 16,
+       "j": 17,
+       "uː": 18,
+       "h": 19,
+       "ʊ": 20,
+       "m": 21,
+       "ɔ": 22,
+       "ɪ": 23,
+       "eː": 24,
+       "ə": 25,
+       "d̺": 26,
+       "t̺ʰ": 27,
+       "ɛ": 28,
+       "ŋ": 29,
+       "ç": 30,
+       "œ": 31,
+       "kʰ": 32,
+       "ʀ": 33,
+       "ɡ": 34,
+       "pʰ": 35,
+       "ʏ": 36,
+       "s": 37,
+       "z": 38,
+       "ts": 39,
+       "ʃ": 40,
+       "ɐ": 41,
+       "pf": 42,
+       "t̠ʃ": 43,
+       "d̠ʒ": 44
+     },
+     "unk_token": "UNK"
+   }
+ }
German/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "UNK",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "PAD",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "WORD_BOUNDARY",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "UTT_BOUNDARY",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "UTT_BOUNDARY",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "UTT_BOUNDARY",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "PAD",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "UNK"
+ }
German/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4ede9368d975ec1f90fbba3e66667a435f52789fa9eea3135eba089250d1163
+ size 5368
German/vocab.json ADDED
@@ -0,0 +1 @@
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"aː":4,"oː":5,"a":6,"b":7,"x":8,"v":9,"øː":10,"n":11,"ɛː":12,"f":13,"l":14,"iː":15,"yː":16,"j":17,"uː":18,"h":19,"ʊ":20,"m":21,"ɔ":22,"ɪ":23,"eː":24,"ə":25,"d̺":26,"t̺ʰ":27,"ɛ":28,"ŋ":29,"ç":30,"œ":31,"kʰ":32,"ʀ":33,"ɡ":34,"pʰ":35,"ʏ":36,"s":37,"z":38,"ts":39,"ʃ":40,"ɐ":41,"pf":42,"t̠ʃ":43,"d̠ʒ":44}
Indonesian/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.3,
+   "bos_token_id": 3,
+   "embd_pdrop": 0.3,
+   "eos_token_id": 3,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_embd": 256,
+   "n_head": 8,
+   "n_inner": 1024,
+   "n_layer": 6,
+   "n_positions": 256,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.3,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.2",
+   "use_cache": true,
+   "vocab_size": 31
+ }
Indonesian/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 3,
+   "eos_token_id": 3,
+   "transformers_version": "4.44.2"
+ }