Safetensors
codebyzeb committed on
Commit
a93858a
·
verified ·
1 Parent(s): d6f07d9

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. Cantonese/config.json +31 -0
  2. Cantonese/generation_config.json +6 -0
  3. Cantonese/model.safetensors +3 -0
  4. Cantonese/special_tokens_map.json +30 -0
  5. Cantonese/tokenizer.json +269 -0
  6. Cantonese/tokenizer_config.json +44 -0
  7. Cantonese/training_args.bin +3 -0
  8. Cantonese/vocab.json +1 -0
  9. Dutch/config.json +31 -0
  10. Dutch/generation_config.json +6 -0
  11. Dutch/model.safetensors +3 -0
  12. Dutch/special_tokens_map.json +30 -0
  13. Dutch/tokenizer.json +167 -0
  14. Dutch/tokenizer_config.json +44 -0
  15. Dutch/training_args.bin +3 -0
  16. Dutch/vocab.json +1 -0
  17. EnglishNA/config.json +31 -0
  18. EnglishNA/generation_config.json +6 -0
  19. EnglishNA/model.safetensors +3 -0
  20. EnglishNA/special_tokens_map.json +30 -0
  21. EnglishNA/tokenizer.json +164 -0
  22. EnglishNA/tokenizer_config.json +44 -0
  23. EnglishNA/training_args.bin +3 -0
  24. EnglishNA/vocab.json +1 -0
  25. EnglishUK/config.json +31 -0
  26. EnglishUK/generation_config.json +6 -0
  27. EnglishUK/model.safetensors +3 -0
  28. EnglishUK/special_tokens_map.json +30 -0
  29. EnglishUK/tokenizer.json +168 -0
  30. EnglishUK/tokenizer_config.json +44 -0
  31. EnglishUK/training_args.bin +3 -0
  32. EnglishUK/vocab.json +1 -0
  33. Estonian/config.json +31 -0
  34. Estonian/generation_config.json +6 -0
  35. Estonian/model.safetensors +3 -0
  36. Estonian/special_tokens_map.json +30 -0
  37. Estonian/tokenizer.json +185 -0
  38. Estonian/tokenizer_config.json +44 -0
  39. Estonian/training_args.bin +3 -0
  40. Estonian/vocab.json +1 -0
  41. French/config.json +31 -0
  42. French/generation_config.json +6 -0
  43. French/model.safetensors +3 -0
  44. French/special_tokens_map.json +30 -0
  45. French/tokenizer.json +156 -0
  46. French/tokenizer_config.json +44 -0
  47. French/training_args.bin +3 -0
  48. French/vocab.json +1 -0
  49. German/config.json +31 -0
  50. German/generation_config.json +6 -0
Cantonese/config.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation_function": "gelu_new",
3
+ "architectures": [
4
+ "GPT2LMHeadModel"
5
+ ],
6
+ "attn_pdrop": 0.3,
7
+ "bos_token_id": 3,
8
+ "embd_pdrop": 0.3,
9
+ "eos_token_id": 3,
10
+ "initializer_range": 0.02,
11
+ "layer_norm_epsilon": 1e-05,
12
+ "model_type": "gpt2",
13
+ "n_embd": 128,
14
+ "n_head": 4,
15
+ "n_inner": 512,
16
+ "n_layer": 4,
17
+ "n_positions": 256,
18
+ "reorder_and_upcast_attn": false,
19
+ "resid_pdrop": 0.3,
20
+ "scale_attn_by_inverse_layer_idx": false,
21
+ "scale_attn_weights": true,
22
+ "summary_activation": null,
23
+ "summary_first_dropout": 0.1,
24
+ "summary_proj_to_labels": true,
25
+ "summary_type": "cls_index",
26
+ "summary_use_proj": true,
27
+ "torch_dtype": "float32",
28
+ "transformers_version": "4.44.2",
29
+ "use_cache": true,
30
+ "vocab_size": 152
31
+ }
Cantonese/generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 3,
4
+ "eos_token_id": 3,
5
+ "transformers_version": "4.44.2"
6
+ }
Cantonese/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e80e5a76e879718c865ae642eac461076dbe1cd2c3245f386ffe6f9109d2a987
3
+ size 3387304
Cantonese/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "UTT_BOUNDARY",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "UTT_BOUNDARY",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "PAD",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "UNK",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
Cantonese/tokenizer.json ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.0",
3
+ "truncation": null,
4
+ "padding": null,
5
+ "added_tokens": [
6
+ {
7
+ "id": 0,
8
+ "content": "UNK",
9
+ "single_word": false,
10
+ "lstrip": false,
11
+ "rstrip": false,
12
+ "normalized": false,
13
+ "special": true
14
+ },
15
+ {
16
+ "id": 1,
17
+ "content": "PAD",
18
+ "single_word": false,
19
+ "lstrip": false,
20
+ "rstrip": false,
21
+ "normalized": false,
22
+ "special": true
23
+ },
24
+ {
25
+ "id": 2,
26
+ "content": "WORD_BOUNDARY",
27
+ "single_word": false,
28
+ "lstrip": false,
29
+ "rstrip": false,
30
+ "normalized": false,
31
+ "special": true
32
+ },
33
+ {
34
+ "id": 3,
35
+ "content": "UTT_BOUNDARY",
36
+ "single_word": false,
37
+ "lstrip": false,
38
+ "rstrip": false,
39
+ "normalized": false,
40
+ "special": true
41
+ }
42
+ ],
43
+ "normalizer": {
44
+ "type": "Sequence",
45
+ "normalizers": [
46
+ {
47
+ "type": "Strip",
48
+ "strip_left": true,
49
+ "strip_right": true
50
+ }
51
+ ]
52
+ },
53
+ "pre_tokenizer": {
54
+ "type": "WhitespaceSplit"
55
+ },
56
+ "post_processor": {
57
+ "type": "TemplateProcessing",
58
+ "single": [
59
+ {
60
+ "SpecialToken": {
61
+ "id": "UTT_BOUNDARY",
62
+ "type_id": 0
63
+ }
64
+ },
65
+ {
66
+ "Sequence": {
67
+ "id": "A",
68
+ "type_id": 0
69
+ }
70
+ }
71
+ ],
72
+ "pair": [
73
+ {
74
+ "SpecialToken": {
75
+ "id": "UTT_BOUNDARY",
76
+ "type_id": 0
77
+ }
78
+ },
79
+ {
80
+ "Sequence": {
81
+ "id": "A",
82
+ "type_id": 0
83
+ }
84
+ },
85
+ {
86
+ "SpecialToken": {
87
+ "id": "UTT_BOUNDARY",
88
+ "type_id": 0
89
+ }
90
+ },
91
+ {
92
+ "Sequence": {
93
+ "id": "B",
94
+ "type_id": 1
95
+ }
96
+ }
97
+ ],
98
+ "special_tokens": {
99
+ "UTT_BOUNDARY": {
100
+ "id": "UTT_BOUNDARY",
101
+ "ids": [
102
+ 3
103
+ ],
104
+ "tokens": [
105
+ "UTT_BOUNDARY"
106
+ ]
107
+ }
108
+ }
109
+ },
110
+ "decoder": null,
111
+ "model": {
112
+ "type": "WordLevel",
113
+ "vocab": {
114
+ "UNK": 0,
115
+ "PAD": 1,
116
+ "WORD_BOUNDARY": 2,
117
+ "UTT_BOUNDARY": 3,
118
+ "j": 4,
119
+ "ɐ˥": 5,
120
+ "t": 6,
121
+ "k": 7,
122
+ "ɐu˧˥": 8,
123
+ "i˨": 9,
124
+ "n": 10,
125
+ "i˧˩̰": 11,
126
+ "y˨": 12,
127
+ "s": 13,
128
+ "ɐ˨": 14,
129
+ "p": 15,
130
+ "ts": 16,
131
+ "ɐu˥": 17,
132
+ "ɪ̞˧˥": 18,
133
+ "ŋ": 19,
134
+ "ɵ˧": 20,
135
+ "a̞˧": 21,
136
+ "l": 22,
137
+ "ʊ̟˥": 23,
138
+ "a̞˧˩̰": 24,
139
+ "ɛ˥": 25,
140
+ "ei˩˧": 26,
141
+ "w": 27,
142
+ "a̞˨": 28,
143
+ "ɐi˧˥": 29,
144
+ "a̞˧˥": 30,
145
+ "m̩˧˥": 31,
146
+ "m": 32,
147
+ "ou˥": 33,
148
+ "ei˥": 34,
149
+ "i˧": 35,
150
+ "ɔ̽˧˥": 36,
151
+ "tʰ": 37,
152
+ "i˥": 38,
153
+ "f": 39,
154
+ "aːĭ˧": 40,
155
+ "h": 41,
156
+ "ɵy˧": 42,
157
+ "a̞˥": 43,
158
+ "ei˧˩̰": 44,
159
+ "ou˨": 45,
160
+ "ɔ̽˧": 46,
161
+ "ɐi˧˩̰": 47,
162
+ "u˧": 48,
163
+ "ɔːĭ˥": 49,
164
+ "ɐu˨": 50,
165
+ "ei˧˥": 51,
166
+ "ɐi˨": 52,
167
+ "ʊ̟˧˩̰": 53,
168
+ "ʊ̟˨": 54,
169
+ "a̞˩˧": 55,
170
+ "ou˧˥": 56,
171
+ "aːĭ˧˥": 57,
172
+ "ɔ̽˨": 58,
173
+ "ɛ˩˧": 59,
174
+ "ɪ̞˨": 60,
175
+ "iːŭ˧": 61,
176
+ "ɛ˧˩̰": 62,
177
+ "m̩˧˩̰": 63,
178
+ "ɵ˧˥": 64,
179
+ "ei˧": 65,
180
+ "ɐu˧˩̰": 66,
181
+ "m̩˧": 67,
182
+ "ɐ˧˥": 68,
183
+ "ɐu˩˧": 69,
184
+ "ɐi˥": 70,
185
+ "ɔ̽˥": 71,
186
+ "ɔ̽˧˩̰": 72,
187
+ "ɔːĭ˧": 73,
188
+ "ou˩˧": 74,
189
+ "m̩˥": 75,
190
+ "ɐ˧": 76,
191
+ "tsʰ": 77,
192
+ "ɛ˧˥": 78,
193
+ "i˧˥": 79,
194
+ "ɔ̽˩˧": 80,
195
+ "kʰ": 81,
196
+ "ɐ˧˩̰": 82,
197
+ "aːŭ˧˥": 83,
198
+ "pʰ": 84,
199
+ "aːĭ˧˩̰": 85,
200
+ "ɵy˩˧": 86,
201
+ "ɛ˧": 87,
202
+ "u˧˥": 88,
203
+ "ɛ˨": 89,
204
+ "ʊ̟˧": 90,
205
+ "u˥": 91,
206
+ "m̩˩˧": 92,
207
+ "aːŭ˧": 93,
208
+ "œ̞˩˧": 94,
209
+ "i˩˧": 95,
210
+ "ɪ̞˧˩̰": 96,
211
+ "u˨": 97,
212
+ "ɪ̞˥": 98,
213
+ "iːŭ˧˩̰": 99,
214
+ "œ̞˧˥": 100,
215
+ "y˧": 101,
216
+ "uːĭ˩˧": 102,
217
+ "uːĭ˥": 103,
218
+ "ɵy˧˥": 104,
219
+ "y˧˩̰": 105,
220
+ "ɔːĭ˧˥": 106,
221
+ "ɛ": 107,
222
+ "ou˧": 108,
223
+ "ei˨": 109,
224
+ "ɵ˥": 110,
225
+ "u˧˩̰": 111,
226
+ "y˥": 112,
227
+ "œ̞˥": 113,
228
+ "œ̞˧˩̰": 114,
229
+ "aːĭ˨": 115,
230
+ "ɐ˩˧": 116,
231
+ "œ̞˧": 117,
232
+ "uːĭ˧˥": 118,
233
+ "ɐu˧": 119,
234
+ "ɐi˩˧": 120,
235
+ "ɐi˧": 121,
236
+ "ou˧˩̰": 122,
237
+ "aːĭ˥": 123,
238
+ "aːŭ˥": 124,
239
+ "ŋ˩˧": 125,
240
+ "y˧˥": 126,
241
+ "iːŭ˥": 127,
242
+ "ɔːĭ˨": 128,
243
+ "ʊ̟˧˥": 129,
244
+ "iːŭ˧˥": 130,
245
+ "ɵy˥": 131,
246
+ "ɔːĭ˧˩̰": 132,
247
+ "uːĭ˧": 133,
248
+ "ɵy˧˩̰": 134,
249
+ "œ̞˨": 135,
250
+ "m̩˨": 136,
251
+ "aːŭ˧˩̰": 137,
252
+ "y˩˧": 138,
253
+ "aːŭ˩˧": 139,
254
+ "aːĭ˩˧": 140,
255
+ "uːĭ˨": 141,
256
+ "ɵy˨": 142,
257
+ "aːŭ˨": 143,
258
+ "ɪ̞˧": 144,
259
+ "ɵ˨": 145,
260
+ "iːŭ˩˧": 146,
261
+ "iːŭ˨": 147,
262
+ "ɵ˧˩̰": 148,
263
+ "uːĭ˧˩̰": 149,
264
+ "u˩˧": 150,
265
+ "ŋ˧˩̰": 151
266
+ },
267
+ "unk_token": "UNK"
268
+ }
269
+ }
Cantonese/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "UNK",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": "PAD",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "2": {
21
+ "content": "WORD_BOUNDARY",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "3": {
29
+ "content": "UTT_BOUNDARY",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ }
36
+ },
37
+ "bos_token": "UTT_BOUNDARY",
38
+ "clean_up_tokenization_spaces": true,
39
+ "eos_token": "UTT_BOUNDARY",
40
+ "model_max_length": 1000000000000000019884624838656,
41
+ "pad_token": "PAD",
42
+ "tokenizer_class": "GPT2Tokenizer",
43
+ "unk_token": "UNK"
44
+ }
Cantonese/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3b843b2d3e207797e1396680aa7485a6a0bdcef4077450466fa42f9c477b48e
3
+ size 5368
Cantonese/vocab.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"j":4,"ɐ˥":5,"t":6,"k":7,"ɐu˧˥":8,"i˨":9,"n":10,"i˧˩̰":11,"y˨":12,"s":13,"ɐ˨":14,"p":15,"ts":16,"ɐu˥":17,"ɪ̞˧˥":18,"ŋ":19,"ɵ˧":20,"a̞˧":21,"l":22,"ʊ̟˥":23,"a̞˧˩̰":24,"ɛ˥":25,"ei˩˧":26,"w":27,"a̞˨":28,"ɐi˧˥":29,"a̞˧˥":30,"m̩˧˥":31,"m":32,"ou˥":33,"ei˥":34,"i˧":35,"ɔ̽˧˥":36,"tʰ":37,"i˥":38,"f":39,"aːĭ˧":40,"h":41,"ɵy˧":42,"a̞˥":43,"ei˧˩̰":44,"ou˨":45,"ɔ̽˧":46,"ɐi˧˩̰":47,"u˧":48,"ɔːĭ˥":49,"ɐu˨":50,"ei˧˥":51,"ɐi˨":52,"ʊ̟˧˩̰":53,"ʊ̟˨":54,"a̞˩˧":55,"ou˧˥":56,"aːĭ˧˥":57,"ɔ̽˨":58,"ɛ˩˧":59,"ɪ̞˨":60,"iːŭ˧":61,"ɛ˧˩̰":62,"m̩˧˩̰":63,"ɵ˧˥":64,"ei˧":65,"ɐu˧˩̰":66,"m̩˧":67,"ɐ˧˥":68,"ɐu˩˧":69,"ɐi˥":70,"ɔ̽˥":71,"ɔ̽˧˩̰":72,"ɔːĭ˧":73,"ou˩˧":74,"m̩˥":75,"ɐ˧":76,"tsʰ":77,"ɛ˧˥":78,"i˧˥":79,"ɔ̽˩˧":80,"kʰ":81,"ɐ˧˩̰":82,"aːŭ˧˥":83,"pʰ":84,"aːĭ˧˩̰":85,"ɵy˩˧":86,"ɛ˧":87,"u˧˥":88,"ɛ˨":89,"ʊ̟˧":90,"u˥":91,"m̩˩˧":92,"aːŭ˧":93,"œ̞˩˧":94,"i˩˧":95,"ɪ̞˧˩̰":96,"u˨":97,"ɪ̞˥":98,"iːŭ˧˩̰":99,"œ̞˧˥":100,"y˧":101,"uːĭ˩˧":102,"uːĭ˥":103,"ɵy˧˥":104,"y˧˩̰":105,"ɔːĭ˧˥":106,"ɛ":107,"ou˧":108,"ei˨":109,"ɵ˥":110,"u˧˩̰":111,"y˥":112,"œ̞˥":113,"œ̞˧˩̰":114,"aːĭ˨":115,"ɐ˩˧":116,"œ̞˧":117,"uːĭ˧˥":118,"ɐu˧":119,"ɐi˩˧":120,"ɐi˧":121,"ou˧˩̰":122,"aːĭ˥":123,"aːŭ˥":124,"ŋ˩˧":125,"y˧˥":126,"iːŭ˥":127,"ɔːĭ˨":128,"ʊ̟˧˥":129,"iːŭ˧˥":130,"ɵy˥":131,"ɔːĭ˧˩̰":132,"uːĭ˧":133,"ɵy˧˩̰":134,"œ̞˨":135,"m̩˨":136,"aːŭ˧˩̰":137,"y˩˧":138,"aːŭ˩˧":139,"aːĭ˩˧":140,"uːĭ˨":141,"ɵy˨":142,"aːŭ˨":143,"ɪ̞˧":144,"ɵ˨":145,"iːŭ˩˧":146,"iːŭ˨":147,"ɵ˧˩̰":148,"uːĭ˧˩̰":149,"u˩˧":150,"ŋ˧˩̰":151}
Dutch/config.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation_function": "gelu_new",
3
+ "architectures": [
4
+ "GPT2LMHeadModel"
5
+ ],
6
+ "attn_pdrop": 0.3,
7
+ "bos_token_id": 3,
8
+ "embd_pdrop": 0.3,
9
+ "eos_token_id": 3,
10
+ "initializer_range": 0.02,
11
+ "layer_norm_epsilon": 1e-05,
12
+ "model_type": "gpt2",
13
+ "n_embd": 128,
14
+ "n_head": 4,
15
+ "n_inner": 512,
16
+ "n_layer": 4,
17
+ "n_positions": 256,
18
+ "reorder_and_upcast_attn": false,
19
+ "resid_pdrop": 0.3,
20
+ "scale_attn_by_inverse_layer_idx": false,
21
+ "scale_attn_weights": true,
22
+ "summary_activation": null,
23
+ "summary_first_dropout": 0.1,
24
+ "summary_proj_to_labels": true,
25
+ "summary_type": "cls_index",
26
+ "summary_use_proj": true,
27
+ "torch_dtype": "float32",
28
+ "transformers_version": "4.44.2",
29
+ "use_cache": true,
30
+ "vocab_size": 50
31
+ }
Dutch/generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 3,
4
+ "eos_token_id": 3,
5
+ "transformers_version": "4.44.2"
6
+ }
Dutch/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d997b37f113b5e7583ce581f013938c4cb1f2d94d081bc4d7d1cfca9f09b1acc
3
+ size 3335080
Dutch/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "UTT_BOUNDARY",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "UTT_BOUNDARY",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "PAD",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "UNK",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
Dutch/tokenizer.json ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.0",
3
+ "truncation": null,
4
+ "padding": null,
5
+ "added_tokens": [
6
+ {
7
+ "id": 0,
8
+ "content": "UNK",
9
+ "single_word": false,
10
+ "lstrip": false,
11
+ "rstrip": false,
12
+ "normalized": false,
13
+ "special": true
14
+ },
15
+ {
16
+ "id": 1,
17
+ "content": "PAD",
18
+ "single_word": false,
19
+ "lstrip": false,
20
+ "rstrip": false,
21
+ "normalized": false,
22
+ "special": true
23
+ },
24
+ {
25
+ "id": 2,
26
+ "content": "WORD_BOUNDARY",
27
+ "single_word": false,
28
+ "lstrip": false,
29
+ "rstrip": false,
30
+ "normalized": false,
31
+ "special": true
32
+ },
33
+ {
34
+ "id": 3,
35
+ "content": "UTT_BOUNDARY",
36
+ "single_word": false,
37
+ "lstrip": false,
38
+ "rstrip": false,
39
+ "normalized": false,
40
+ "special": true
41
+ }
42
+ ],
43
+ "normalizer": {
44
+ "type": "Sequence",
45
+ "normalizers": [
46
+ {
47
+ "type": "Strip",
48
+ "strip_left": true,
49
+ "strip_right": true
50
+ }
51
+ ]
52
+ },
53
+ "pre_tokenizer": {
54
+ "type": "Whitespace"
55
+ },
56
+ "post_processor": {
57
+ "type": "TemplateProcessing",
58
+ "single": [
59
+ {
60
+ "SpecialToken": {
61
+ "id": "UTT_BOUNDARY",
62
+ "type_id": 0
63
+ }
64
+ },
65
+ {
66
+ "Sequence": {
67
+ "id": "A",
68
+ "type_id": 0
69
+ }
70
+ }
71
+ ],
72
+ "pair": [
73
+ {
74
+ "SpecialToken": {
75
+ "id": "UTT_BOUNDARY",
76
+ "type_id": 0
77
+ }
78
+ },
79
+ {
80
+ "Sequence": {
81
+ "id": "A",
82
+ "type_id": 0
83
+ }
84
+ },
85
+ {
86
+ "SpecialToken": {
87
+ "id": "UTT_BOUNDARY",
88
+ "type_id": 0
89
+ }
90
+ },
91
+ {
92
+ "Sequence": {
93
+ "id": "B",
94
+ "type_id": 1
95
+ }
96
+ }
97
+ ],
98
+ "special_tokens": {
99
+ "UTT_BOUNDARY": {
100
+ "id": "UTT_BOUNDARY",
101
+ "ids": [
102
+ 3
103
+ ],
104
+ "tokens": [
105
+ "UTT_BOUNDARY"
106
+ ]
107
+ }
108
+ }
109
+ },
110
+ "decoder": null,
111
+ "model": {
112
+ "type": "WordLevel",
113
+ "vocab": {
114
+ "UNK": 0,
115
+ "PAD": 1,
116
+ "WORD_BOUNDARY": 2,
117
+ "UTT_BOUNDARY": 3,
118
+ "z": 4,
119
+ "oː": 5,
120
+ "j": 6,
121
+ "ãː": 7,
122
+ "ɦ": 8,
123
+ "ɾ": 9,
124
+ "d": 10,
125
+ "i": 11,
126
+ "ɛ": 12,
127
+ "p": 13,
128
+ "ɪ": 14,
129
+ "k": 15,
130
+ "ɑ": 16,
131
+ "l": 17,
132
+ "ɛː": 18,
133
+ "n": 19,
134
+ "s": 20,
135
+ "v": 21,
136
+ "ə": 22,
137
+ "ɛi": 23,
138
+ "ʋ": 24,
139
+ "t": 25,
140
+ "m": 26,
141
+ "ɣ": 27,
142
+ "ʏ": 28,
143
+ "ɔ": 29,
144
+ "x": 30,
145
+ "u": 31,
146
+ "f": 32,
147
+ "ŋ": 33,
148
+ "øː": 34,
149
+ "b": 35,
150
+ "ɔː": 36,
151
+ "ʌu": 37,
152
+ "y": 38,
153
+ "œy": 39,
154
+ "tʲ": 40,
155
+ "w": 41,
156
+ "ʃ": 42,
157
+ "t̠ʃ": 43,
158
+ "ɲ": 44,
159
+ "ʒ": 45,
160
+ "iː": 46,
161
+ "ɡ": 47,
162
+ "d̠ʒ": 48,
163
+ "ã": 49
164
+ },
165
+ "unk_token": "UNK"
166
+ }
167
+ }
Dutch/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "UNK",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": "PAD",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "2": {
21
+ "content": "WORD_BOUNDARY",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "3": {
29
+ "content": "UTT_BOUNDARY",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ }
36
+ },
37
+ "bos_token": "UTT_BOUNDARY",
38
+ "clean_up_tokenization_spaces": true,
39
+ "eos_token": "UTT_BOUNDARY",
40
+ "model_max_length": 1000000000000000019884624838656,
41
+ "pad_token": "PAD",
42
+ "tokenizer_class": "GPT2Tokenizer",
43
+ "unk_token": "UNK"
44
+ }
Dutch/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3079aa2abf5484337ccf8ff4e6bddef13c05a4e0558bf07f5790c65d85b7e185
3
+ size 5368
Dutch/vocab.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"z":4,"oː":5,"j":6,"ãː":7,"ɦ":8,"ɾ":9,"d":10,"i":11,"ɛ":12,"p":13,"ɪ":14,"k":15,"ɑ":16,"l":17,"ɛː":18,"n":19,"s":20,"v":21,"ə":22,"ɛi":23,"ʋ":24,"t":25,"m":26,"ɣ":27,"ʏ":28,"ɔ":29,"x":30,"u":31,"f":32,"ŋ":33,"øː":34,"b":35,"ɔː":36,"ʌu":37,"y":38,"œy":39,"tʲ":40,"w":41,"ʃ":42,"t̠ʃ":43,"ɲ":44,"ʒ":45,"iː":46,"ɡ":47,"d̠ʒ":48,"ã":49}
EnglishNA/config.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation_function": "gelu_new",
3
+ "architectures": [
4
+ "GPT2LMHeadModel"
5
+ ],
6
+ "attn_pdrop": 0.3,
7
+ "bos_token_id": 3,
8
+ "embd_pdrop": 0.3,
9
+ "eos_token_id": 3,
10
+ "initializer_range": 0.02,
11
+ "layer_norm_epsilon": 1e-05,
12
+ "model_type": "gpt2",
13
+ "n_embd": 128,
14
+ "n_head": 4,
15
+ "n_inner": 512,
16
+ "n_layer": 4,
17
+ "n_positions": 256,
18
+ "reorder_and_upcast_attn": false,
19
+ "resid_pdrop": 0.3,
20
+ "scale_attn_by_inverse_layer_idx": false,
21
+ "scale_attn_weights": true,
22
+ "summary_activation": null,
23
+ "summary_first_dropout": 0.1,
24
+ "summary_proj_to_labels": true,
25
+ "summary_type": "cls_index",
26
+ "summary_use_proj": true,
27
+ "torch_dtype": "float32",
28
+ "transformers_version": "4.44.2",
29
+ "use_cache": true,
30
+ "vocab_size": 47
31
+ }
EnglishNA/generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 3,
4
+ "eos_token_id": 3,
5
+ "transformers_version": "4.44.2"
6
+ }
EnglishNA/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:273fc2318bc0e4e81c4a414587514493ce5f67caf190c1c9cd3e8967a3eade00
3
+ size 3333544
EnglishNA/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "UTT_BOUNDARY",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "UTT_BOUNDARY",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "PAD",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "UNK",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
EnglishNA/tokenizer.json ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.0",
3
+ "truncation": null,
4
+ "padding": null,
5
+ "added_tokens": [
6
+ {
7
+ "id": 0,
8
+ "content": "UNK",
9
+ "single_word": false,
10
+ "lstrip": false,
11
+ "rstrip": false,
12
+ "normalized": false,
13
+ "special": true
14
+ },
15
+ {
16
+ "id": 1,
17
+ "content": "PAD",
18
+ "single_word": false,
19
+ "lstrip": false,
20
+ "rstrip": false,
21
+ "normalized": false,
22
+ "special": true
23
+ },
24
+ {
25
+ "id": 2,
26
+ "content": "WORD_BOUNDARY",
27
+ "single_word": false,
28
+ "lstrip": false,
29
+ "rstrip": false,
30
+ "normalized": false,
31
+ "special": true
32
+ },
33
+ {
34
+ "id": 3,
35
+ "content": "UTT_BOUNDARY",
36
+ "single_word": false,
37
+ "lstrip": false,
38
+ "rstrip": false,
39
+ "normalized": false,
40
+ "special": true
41
+ }
42
+ ],
43
+ "normalizer": {
44
+ "type": "Sequence",
45
+ "normalizers": [
46
+ {
47
+ "type": "Strip",
48
+ "strip_left": true,
49
+ "strip_right": true
50
+ }
51
+ ]
52
+ },
53
+ "pre_tokenizer": {
54
+ "type": "Whitespace"
55
+ },
56
+ "post_processor": {
57
+ "type": "TemplateProcessing",
58
+ "single": [
59
+ {
60
+ "SpecialToken": {
61
+ "id": "UTT_BOUNDARY",
62
+ "type_id": 0
63
+ }
64
+ },
65
+ {
66
+ "Sequence": {
67
+ "id": "A",
68
+ "type_id": 0
69
+ }
70
+ }
71
+ ],
72
+ "pair": [
73
+ {
74
+ "SpecialToken": {
75
+ "id": "UTT_BOUNDARY",
76
+ "type_id": 0
77
+ }
78
+ },
79
+ {
80
+ "Sequence": {
81
+ "id": "A",
82
+ "type_id": 0
83
+ }
84
+ },
85
+ {
86
+ "SpecialToken": {
87
+ "id": "UTT_BOUNDARY",
88
+ "type_id": 0
89
+ }
90
+ },
91
+ {
92
+ "Sequence": {
93
+ "id": "B",
94
+ "type_id": 1
95
+ }
96
+ }
97
+ ],
98
+ "special_tokens": {
99
+ "UTT_BOUNDARY": {
100
+ "id": "UTT_BOUNDARY",
101
+ "ids": [
102
+ 3
103
+ ],
104
+ "tokens": [
105
+ "UTT_BOUNDARY"
106
+ ]
107
+ }
108
+ }
109
+ },
110
+ "decoder": null,
111
+ "model": {
112
+ "type": "WordLevel",
113
+ "vocab": {
114
+ "UNK": 0,
115
+ "PAD": 1,
116
+ "WORD_BOUNDARY": 2,
117
+ "UTT_BOUNDARY": 3,
118
+ "d̠ʒ": 4,
119
+ "ʌ": 5,
120
+ "s": 6,
121
+ "t": 7,
122
+ "l": 8,
123
+ "aɪ": 9,
124
+ "k": 10,
125
+ "j": 11,
126
+ "ʊ": 12,
127
+ "ɹ": 13,
128
+ "b": 14,
129
+ "æ": 15,
130
+ "h": 16,
131
+ "oʊ": 17,
132
+ "m": 18,
133
+ "iː": 19,
134
+ "ð": 20,
135
+ "ɛ": 21,
136
+ "z": 22,
137
+ "f": 23,
138
+ "eɪ": 24,
139
+ "w": 25,
140
+ "ɪ": 26,
141
+ "ɡ": 27,
142
+ "ɑ": 28,
143
+ "ə": 29,
144
+ "p": 30,
145
+ "uː": 31,
146
+ "i": 32,
147
+ "θ": 33,
148
+ "ŋ": 34,
149
+ "ɔ": 35,
150
+ "ɔɪ": 36,
151
+ "n": 37,
152
+ "d": 38,
153
+ "aʊ": 39,
154
+ "v": 40,
155
+ "ɜː": 41,
156
+ "t̠ʃ": 42,
157
+ "ʃ": 43,
158
+ "iə": 44,
159
+ "ʒ": 45,
160
+ "x": 46
161
+ },
162
+ "unk_token": "UNK"
163
+ }
164
+ }
EnglishNA/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "UNK",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": "PAD",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "2": {
21
+ "content": "WORD_BOUNDARY",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "3": {
29
+ "content": "UTT_BOUNDARY",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ }
36
+ },
37
+ "bos_token": "UTT_BOUNDARY",
38
+ "clean_up_tokenization_spaces": true,
39
+ "eos_token": "UTT_BOUNDARY",
40
+ "model_max_length": 1000000000000000019884624838656,
41
+ "pad_token": "PAD",
42
+ "tokenizer_class": "GPT2Tokenizer",
43
+ "unk_token": "UNK"
44
+ }
EnglishNA/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ee949648c57d5e1171f9887784b0ac9d35ee1311228155f45271dc332aaa7df
3
+ size 5368
EnglishNA/vocab.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"d̠ʒ":4,"ʌ":5,"s":6,"t":7,"l":8,"aɪ":9,"k":10,"j":11,"ʊ":12,"ɹ":13,"b":14,"æ":15,"h":16,"oʊ":17,"m":18,"iː":19,"ð":20,"ɛ":21,"z":22,"f":23,"eɪ":24,"w":25,"ɪ":26,"ɡ":27,"ɑ":28,"ə":29,"p":30,"uː":31,"i":32,"θ":33,"ŋ":34,"ɔ":35,"ɔɪ":36,"n":37,"d":38,"aʊ":39,"v":40,"ɜː":41,"t̠ʃ":42,"ʃ":43,"iə":44,"ʒ":45,"x":46}
EnglishUK/config.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation_function": "gelu_new",
3
+ "architectures": [
4
+ "GPT2LMHeadModel"
5
+ ],
6
+ "attn_pdrop": 0.3,
7
+ "bos_token_id": 3,
8
+ "embd_pdrop": 0.3,
9
+ "eos_token_id": 3,
10
+ "initializer_range": 0.02,
11
+ "layer_norm_epsilon": 1e-05,
12
+ "model_type": "gpt2",
13
+ "n_embd": 128,
14
+ "n_head": 4,
15
+ "n_inner": 512,
16
+ "n_layer": 4,
17
+ "n_positions": 256,
18
+ "reorder_and_upcast_attn": false,
19
+ "resid_pdrop": 0.3,
20
+ "scale_attn_by_inverse_layer_idx": false,
21
+ "scale_attn_weights": true,
22
+ "summary_activation": null,
23
+ "summary_first_dropout": 0.1,
24
+ "summary_proj_to_labels": true,
25
+ "summary_type": "cls_index",
26
+ "summary_use_proj": true,
27
+ "torch_dtype": "float32",
28
+ "transformers_version": "4.44.2",
29
+ "use_cache": true,
30
+ "vocab_size": 51
31
+ }
EnglishUK/generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 3,
4
+ "eos_token_id": 3,
5
+ "transformers_version": "4.44.2"
6
+ }
EnglishUK/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e4c9057fadb4c7c4b6f32215fb526998cb95f5fd1011bd83a683f6f8a103cca
3
+ size 3335592
EnglishUK/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "UTT_BOUNDARY",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "UTT_BOUNDARY",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "PAD",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "UNK",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
EnglishUK/tokenizer.json ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.0",
3
+ "truncation": null,
4
+ "padding": null,
5
+ "added_tokens": [
6
+ {
7
+ "id": 0,
8
+ "content": "UNK",
9
+ "single_word": false,
10
+ "lstrip": false,
11
+ "rstrip": false,
12
+ "normalized": false,
13
+ "special": true
14
+ },
15
+ {
16
+ "id": 1,
17
+ "content": "PAD",
18
+ "single_word": false,
19
+ "lstrip": false,
20
+ "rstrip": false,
21
+ "normalized": false,
22
+ "special": true
23
+ },
24
+ {
25
+ "id": 2,
26
+ "content": "WORD_BOUNDARY",
27
+ "single_word": false,
28
+ "lstrip": false,
29
+ "rstrip": false,
30
+ "normalized": false,
31
+ "special": true
32
+ },
33
+ {
34
+ "id": 3,
35
+ "content": "UTT_BOUNDARY",
36
+ "single_word": false,
37
+ "lstrip": false,
38
+ "rstrip": false,
39
+ "normalized": false,
40
+ "special": true
41
+ }
42
+ ],
43
+ "normalizer": {
44
+ "type": "Sequence",
45
+ "normalizers": [
46
+ {
47
+ "type": "Strip",
48
+ "strip_left": true,
49
+ "strip_right": true
50
+ }
51
+ ]
52
+ },
53
+ "pre_tokenizer": {
54
+ "type": "Whitespace"
55
+ },
56
+ "post_processor": {
57
+ "type": "TemplateProcessing",
58
+ "single": [
59
+ {
60
+ "SpecialToken": {
61
+ "id": "UTT_BOUNDARY",
62
+ "type_id": 0
63
+ }
64
+ },
65
+ {
66
+ "Sequence": {
67
+ "id": "A",
68
+ "type_id": 0
69
+ }
70
+ }
71
+ ],
72
+ "pair": [
73
+ {
74
+ "SpecialToken": {
75
+ "id": "UTT_BOUNDARY",
76
+ "type_id": 0
77
+ }
78
+ },
79
+ {
80
+ "Sequence": {
81
+ "id": "A",
82
+ "type_id": 0
83
+ }
84
+ },
85
+ {
86
+ "SpecialToken": {
87
+ "id": "UTT_BOUNDARY",
88
+ "type_id": 0
89
+ }
90
+ },
91
+ {
92
+ "Sequence": {
93
+ "id": "B",
94
+ "type_id": 1
95
+ }
96
+ }
97
+ ],
98
+ "special_tokens": {
99
+ "UTT_BOUNDARY": {
100
+ "id": "UTT_BOUNDARY",
101
+ "ids": [
102
+ 3
103
+ ],
104
+ "tokens": [
105
+ "UTT_BOUNDARY"
106
+ ]
107
+ }
108
+ }
109
+ },
110
+ "decoder": null,
111
+ "model": {
112
+ "type": "WordLevel",
113
+ "vocab": {
114
+ "UNK": 0,
115
+ "PAD": 1,
116
+ "WORD_BOUNDARY": 2,
117
+ "UTT_BOUNDARY": 3,
118
+ "ð": 4,
119
+ "æ": 5,
120
+ "tʰ": 6,
121
+ "ɡ": 7,
122
+ "ʊ": 8,
123
+ "d": 9,
124
+ "ɑː": 10,
125
+ "l": 11,
126
+ "ɪ": 12,
127
+ "n": 13,
128
+ "eɪ": 14,
129
+ "t̠ʃ": 15,
130
+ "w": 16,
131
+ "ɒ": 17,
132
+ "ʌ": 18,
133
+ "z": 19,
134
+ "m": 20,
135
+ "iː": 21,
136
+ "aɪ": 22,
137
+ "h": 23,
138
+ "e": 24,
139
+ "kʰ": 25,
140
+ "s": 26,
141
+ "ə": 27,
142
+ "ɔː": 28,
143
+ "ɹ": 29,
144
+ "i": 30,
145
+ "əʊ": 31,
146
+ "uː": 32,
147
+ "j": 33,
148
+ "ɪə": 34,
149
+ "ɔɪ": 35,
150
+ "v": 36,
151
+ "f": 37,
152
+ "ɜː": 38,
153
+ "b": 39,
154
+ "pʰ": 40,
155
+ "d̠ʒ": 41,
156
+ "ɐ": 42,
157
+ "eə": 43,
158
+ "ʃ": 44,
159
+ "θ": 45,
160
+ "ŋ": 46,
161
+ "aʊ": 47,
162
+ "ʊə": 48,
163
+ "n̩": 49,
164
+ "ʒ": 50
165
+ },
166
+ "unk_token": "UNK"
167
+ }
168
+ }
EnglishUK/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "UNK",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": "PAD",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "2": {
21
+ "content": "WORD_BOUNDARY",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "3": {
29
+ "content": "UTT_BOUNDARY",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ }
36
+ },
37
+ "bos_token": "UTT_BOUNDARY",
38
+ "clean_up_tokenization_spaces": true,
39
+ "eos_token": "UTT_BOUNDARY",
40
+ "model_max_length": 1000000000000000019884624838656,
41
+ "pad_token": "PAD",
42
+ "tokenizer_class": "GPT2Tokenizer",
43
+ "unk_token": "UNK"
44
+ }
EnglishUK/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:383bf2e423c7742c010ffe46113f10f008fc82558484de49464279c0dcf24a23
3
+ size 5368
EnglishUK/vocab.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"ð":4,"æ":5,"tʰ":6,"ɡ":7,"ʊ":8,"d":9,"ɑː":10,"l":11,"ɪ":12,"n":13,"eɪ":14,"t̠ʃ":15,"w":16,"ɒ":17,"ʌ":18,"z":19,"m":20,"iː":21,"aɪ":22,"h":23,"e":24,"kʰ":25,"s":26,"ə":27,"ɔː":28,"ɹ":29,"i":30,"əʊ":31,"uː":32,"j":33,"ɪə":34,"ɔɪ":35,"v":36,"f":37,"ɜː":38,"b":39,"pʰ":40,"d̠ʒ":41,"ɐ":42,"eə":43,"ʃ":44,"θ":45,"ŋ":46,"aʊ":47,"ʊə":48,"n̩":49,"ʒ":50}
Estonian/config.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation_function": "gelu_new",
3
+ "architectures": [
4
+ "GPT2LMHeadModel"
5
+ ],
6
+ "attn_pdrop": 0.3,
7
+ "bos_token_id": 3,
8
+ "embd_pdrop": 0.3,
9
+ "eos_token_id": 3,
10
+ "initializer_range": 0.02,
11
+ "layer_norm_epsilon": 1e-05,
12
+ "model_type": "gpt2",
13
+ "n_embd": 128,
14
+ "n_head": 4,
15
+ "n_inner": 512,
16
+ "n_layer": 4,
17
+ "n_positions": 256,
18
+ "reorder_and_upcast_attn": false,
19
+ "resid_pdrop": 0.3,
20
+ "scale_attn_by_inverse_layer_idx": false,
21
+ "scale_attn_weights": true,
22
+ "summary_activation": null,
23
+ "summary_first_dropout": 0.1,
24
+ "summary_proj_to_labels": true,
25
+ "summary_type": "cls_index",
26
+ "summary_use_proj": true,
27
+ "torch_dtype": "float32",
28
+ "transformers_version": "4.44.2",
29
+ "use_cache": true,
30
+ "vocab_size": 68
31
+ }
Estonian/generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 3,
4
+ "eos_token_id": 3,
5
+ "transformers_version": "4.44.2"
6
+ }
Estonian/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afee2c8ad18b21e611292f658db3801ecafae03015168233fc9a039f2175e0c8
3
+ size 3344296
Estonian/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "UTT_BOUNDARY",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "UTT_BOUNDARY",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "PAD",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "UNK",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
Estonian/tokenizer.json ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.0",
3
+ "truncation": null,
4
+ "padding": null,
5
+ "added_tokens": [
6
+ {
7
+ "id": 0,
8
+ "content": "UNK",
9
+ "single_word": false,
10
+ "lstrip": false,
11
+ "rstrip": false,
12
+ "normalized": false,
13
+ "special": true
14
+ },
15
+ {
16
+ "id": 1,
17
+ "content": "PAD",
18
+ "single_word": false,
19
+ "lstrip": false,
20
+ "rstrip": false,
21
+ "normalized": false,
22
+ "special": true
23
+ },
24
+ {
25
+ "id": 2,
26
+ "content": "WORD_BOUNDARY",
27
+ "single_word": false,
28
+ "lstrip": false,
29
+ "rstrip": false,
30
+ "normalized": false,
31
+ "special": true
32
+ },
33
+ {
34
+ "id": 3,
35
+ "content": "UTT_BOUNDARY",
36
+ "single_word": false,
37
+ "lstrip": false,
38
+ "rstrip": false,
39
+ "normalized": false,
40
+ "special": true
41
+ }
42
+ ],
43
+ "normalizer": {
44
+ "type": "Sequence",
45
+ "normalizers": [
46
+ {
47
+ "type": "Strip",
48
+ "strip_left": true,
49
+ "strip_right": true
50
+ }
51
+ ]
52
+ },
53
+ "pre_tokenizer": {
54
+ "type": "Whitespace"
55
+ },
56
+ "post_processor": {
57
+ "type": "TemplateProcessing",
58
+ "single": [
59
+ {
60
+ "SpecialToken": {
61
+ "id": "UTT_BOUNDARY",
62
+ "type_id": 0
63
+ }
64
+ },
65
+ {
66
+ "Sequence": {
67
+ "id": "A",
68
+ "type_id": 0
69
+ }
70
+ }
71
+ ],
72
+ "pair": [
73
+ {
74
+ "SpecialToken": {
75
+ "id": "UTT_BOUNDARY",
76
+ "type_id": 0
77
+ }
78
+ },
79
+ {
80
+ "Sequence": {
81
+ "id": "A",
82
+ "type_id": 0
83
+ }
84
+ },
85
+ {
86
+ "SpecialToken": {
87
+ "id": "UTT_BOUNDARY",
88
+ "type_id": 0
89
+ }
90
+ },
91
+ {
92
+ "Sequence": {
93
+ "id": "B",
94
+ "type_id": 1
95
+ }
96
+ }
97
+ ],
98
+ "special_tokens": {
99
+ "UTT_BOUNDARY": {
100
+ "id": "UTT_BOUNDARY",
101
+ "ids": [
102
+ 3
103
+ ],
104
+ "tokens": [
105
+ "UTT_BOUNDARY"
106
+ ]
107
+ }
108
+ }
109
+ },
110
+ "decoder": null,
111
+ "model": {
112
+ "type": "WordLevel",
113
+ "vocab": {
114
+ "UNK": 0,
115
+ "PAD": 1,
116
+ "WORD_BOUNDARY": 2,
117
+ "UTT_BOUNDARY": 3,
118
+ "n": 4,
119
+ "o": 5,
120
+ "t": 6,
121
+ "ʃ": 7,
122
+ "a": 8,
123
+ "uː": 9,
124
+ "m": 10,
125
+ "u": 11,
126
+ "tʲ": 12,
127
+ "i": 13,
128
+ "s": 14,
129
+ "eː": 15,
130
+ "d": 16,
131
+ "iː": 17,
132
+ "k": 18,
133
+ "ɡ": 19,
134
+ "ɑ": 20,
135
+ "ɤ": 21,
136
+ "ʊ": 22,
137
+ "sʲ": 23,
138
+ "j": 24,
139
+ "aː": 25,
140
+ "h": 26,
141
+ "v": 27,
142
+ "æi": 28,
143
+ "kː": 29,
144
+ "e": 30,
145
+ "ɪ": 31,
146
+ "tː": 32,
147
+ "r": 33,
148
+ "ɛ": 34,
149
+ "mː": 35,
150
+ "p": 36,
151
+ "sː": 37,
152
+ "æ": 38,
153
+ "l": 39,
154
+ "pː": 40,
155
+ "yː": 41,
156
+ "æː": 42,
157
+ "b": 43,
158
+ "ɔ": 44,
159
+ "ɤː": 45,
160
+ "lː": 46,
161
+ "ø": 47,
162
+ "øː": 48,
163
+ "ŋ": 49,
164
+ "y": 50,
165
+ "oː": 51,
166
+ "rː": 52,
167
+ "ɲ": 53,
168
+ "nː": 54,
169
+ "w": 55,
170
+ "tʲː": 56,
171
+ "øɪ̯": 57,
172
+ "f": 58,
173
+ "dʲ": 59,
174
+ "sʲː": 60,
175
+ "t̠ʃ": 61,
176
+ "ʃː": 62,
177
+ "ʒ": 63,
178
+ "z": 64,
179
+ "fː": 65,
180
+ "dː": 66,
181
+ "yi": 67
182
+ },
183
+ "unk_token": "UNK"
184
+ }
185
+ }
Estonian/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "UNK",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": "PAD",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "2": {
21
+ "content": "WORD_BOUNDARY",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "3": {
29
+ "content": "UTT_BOUNDARY",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ }
36
+ },
37
+ "bos_token": "UTT_BOUNDARY",
38
+ "clean_up_tokenization_spaces": true,
39
+ "eos_token": "UTT_BOUNDARY",
40
+ "model_max_length": 1000000000000000019884624838656,
41
+ "pad_token": "PAD",
42
+ "tokenizer_class": "GPT2Tokenizer",
43
+ "unk_token": "UNK"
44
+ }
Estonian/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d97e5ae5878d9838daf01a6d58ddfa4f068f3912a2fe181a8f6bf2e3d1465a3
3
+ size 5368
Estonian/vocab.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"n":4,"o":5,"t":6,"ʃ":7,"a":8,"uː":9,"m":10,"u":11,"tʲ":12,"i":13,"s":14,"eː":15,"d":16,"iː":17,"k":18,"ɡ":19,"ɑ":20,"ɤ":21,"ʊ":22,"sʲ":23,"j":24,"aː":25,"h":26,"v":27,"æi":28,"kː":29,"e":30,"ɪ":31,"tː":32,"r":33,"ɛ":34,"mː":35,"p":36,"sː":37,"æ":38,"l":39,"pː":40,"yː":41,"æː":42,"b":43,"ɔ":44,"ɤː":45,"lː":46,"ø":47,"øː":48,"ŋ":49,"y":50,"oː":51,"rː":52,"ɲ":53,"nː":54,"w":55,"tʲː":56,"øɪ̯":57,"f":58,"dʲ":59,"sʲː":60,"t̠ʃ":61,"ʃː":62,"ʒ":63,"z":64,"fː":65,"dː":66,"yi":67}
French/config.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation_function": "gelu_new",
3
+ "architectures": [
4
+ "GPT2LMHeadModel"
5
+ ],
6
+ "attn_pdrop": 0.3,
7
+ "bos_token_id": 3,
8
+ "embd_pdrop": 0.3,
9
+ "eos_token_id": 3,
10
+ "initializer_range": 0.02,
11
+ "layer_norm_epsilon": 1e-05,
12
+ "model_type": "gpt2",
13
+ "n_embd": 128,
14
+ "n_head": 4,
15
+ "n_inner": 512,
16
+ "n_layer": 4,
17
+ "n_positions": 256,
18
+ "reorder_and_upcast_attn": false,
19
+ "resid_pdrop": 0.3,
20
+ "scale_attn_by_inverse_layer_idx": false,
21
+ "scale_attn_weights": true,
22
+ "summary_activation": null,
23
+ "summary_first_dropout": 0.1,
24
+ "summary_proj_to_labels": true,
25
+ "summary_type": "cls_index",
26
+ "summary_use_proj": true,
27
+ "torch_dtype": "float32",
28
+ "transformers_version": "4.44.2",
29
+ "use_cache": true,
30
+ "vocab_size": 39
31
+ }
French/generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 3,
4
+ "eos_token_id": 3,
5
+ "transformers_version": "4.44.2"
6
+ }
French/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6990ba97ea1ba903cc98ed8d71dcbf40cb7016357e45128a75f085d79837922a
3
+ size 3329448
French/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "UTT_BOUNDARY",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "UTT_BOUNDARY",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "PAD",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "UNK",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
French/tokenizer.json ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.0",
3
+ "truncation": null,
4
+ "padding": null,
5
+ "added_tokens": [
6
+ {
7
+ "id": 0,
8
+ "content": "UNK",
9
+ "single_word": false,
10
+ "lstrip": false,
11
+ "rstrip": false,
12
+ "normalized": false,
13
+ "special": true
14
+ },
15
+ {
16
+ "id": 1,
17
+ "content": "PAD",
18
+ "single_word": false,
19
+ "lstrip": false,
20
+ "rstrip": false,
21
+ "normalized": false,
22
+ "special": true
23
+ },
24
+ {
25
+ "id": 2,
26
+ "content": "WORD_BOUNDARY",
27
+ "single_word": false,
28
+ "lstrip": false,
29
+ "rstrip": false,
30
+ "normalized": false,
31
+ "special": true
32
+ },
33
+ {
34
+ "id": 3,
35
+ "content": "UTT_BOUNDARY",
36
+ "single_word": false,
37
+ "lstrip": false,
38
+ "rstrip": false,
39
+ "normalized": false,
40
+ "special": true
41
+ }
42
+ ],
43
+ "normalizer": {
44
+ "type": "Sequence",
45
+ "normalizers": [
46
+ {
47
+ "type": "Strip",
48
+ "strip_left": true,
49
+ "strip_right": true
50
+ }
51
+ ]
52
+ },
53
+ "pre_tokenizer": {
54
+ "type": "Whitespace"
55
+ },
56
+ "post_processor": {
57
+ "type": "TemplateProcessing",
58
+ "single": [
59
+ {
60
+ "SpecialToken": {
61
+ "id": "UTT_BOUNDARY",
62
+ "type_id": 0
63
+ }
64
+ },
65
+ {
66
+ "Sequence": {
67
+ "id": "A",
68
+ "type_id": 0
69
+ }
70
+ }
71
+ ],
72
+ "pair": [
73
+ {
74
+ "SpecialToken": {
75
+ "id": "UTT_BOUNDARY",
76
+ "type_id": 0
77
+ }
78
+ },
79
+ {
80
+ "Sequence": {
81
+ "id": "A",
82
+ "type_id": 0
83
+ }
84
+ },
85
+ {
86
+ "SpecialToken": {
87
+ "id": "UTT_BOUNDARY",
88
+ "type_id": 0
89
+ }
90
+ },
91
+ {
92
+ "Sequence": {
93
+ "id": "B",
94
+ "type_id": 1
95
+ }
96
+ }
97
+ ],
98
+ "special_tokens": {
99
+ "UTT_BOUNDARY": {
100
+ "id": "UTT_BOUNDARY",
101
+ "ids": [
102
+ 3
103
+ ],
104
+ "tokens": [
105
+ "UTT_BOUNDARY"
106
+ ]
107
+ }
108
+ }
109
+ },
110
+ "decoder": null,
111
+ "model": {
112
+ "type": "WordLevel",
113
+ "vocab": {
114
+ "UNK": 0,
115
+ "PAD": 1,
116
+ "WORD_BOUNDARY": 2,
117
+ "UTT_BOUNDARY": 3,
118
+ "m": 4,
119
+ "a": 5,
120
+ "ɑ̃": 6,
121
+ "d": 7,
122
+ "ɔ": 8,
123
+ "n": 9,
124
+ "b": 10,
125
+ "ʁ": 11,
126
+ "ə": 12,
127
+ "ɡ": 13,
128
+ "ʒ": 14,
129
+ "i": 15,
130
+ "v": 16,
131
+ "t": 17,
132
+ "k": 18,
133
+ "o": 19,
134
+ "ɛ̃": 20,
135
+ "w": 21,
136
+ "y": 22,
137
+ "j": 23,
138
+ "e": 24,
139
+ "ɔ̃": 25,
140
+ "p": 26,
141
+ "ɛ": 27,
142
+ "f": 28,
143
+ "s": 29,
144
+ "z": 30,
145
+ "l": 31,
146
+ "u": 32,
147
+ "ʃ": 33,
148
+ "œ": 34,
149
+ "ø": 35,
150
+ "ɲ": 36,
151
+ "t̠ʃ": 37,
152
+ "d̠ʒ": 38
153
+ },
154
+ "unk_token": "UNK"
155
+ }
156
+ }
French/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "UNK",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": "PAD",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "2": {
21
+ "content": "WORD_BOUNDARY",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "3": {
29
+ "content": "UTT_BOUNDARY",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ }
36
+ },
37
+ "bos_token": "UTT_BOUNDARY",
38
+ "clean_up_tokenization_spaces": true,
39
+ "eos_token": "UTT_BOUNDARY",
40
+ "model_max_length": 1000000000000000019884624838656,
41
+ "pad_token": "PAD",
42
+ "tokenizer_class": "GPT2Tokenizer",
43
+ "unk_token": "UNK"
44
+ }
French/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:19db843d5235caa94bee0fa15a7d2f0b4c32bb2fdb0e11a2352df8221d9016b9
3
+ size 5368
French/vocab.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"m":4,"a":5,"ɑ̃":6,"d":7,"ɔ":8,"n":9,"b":10,"ʁ":11,"ə":12,"ɡ":13,"ʒ":14,"i":15,"v":16,"t":17,"k":18,"o":19,"ɛ̃":20,"w":21,"y":22,"j":23,"e":24,"ɔ̃":25,"p":26,"ɛ":27,"f":28,"s":29,"z":30,"l":31,"u":32,"ʃ":33,"œ":34,"ø":35,"ɲ":36,"t̠ʃ":37,"d̠ʒ":38}
German/config.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation_function": "gelu_new",
3
+ "architectures": [
4
+ "GPT2LMHeadModel"
5
+ ],
6
+ "attn_pdrop": 0.3,
7
+ "bos_token_id": 3,
8
+ "embd_pdrop": 0.3,
9
+ "eos_token_id": 3,
10
+ "initializer_range": 0.02,
11
+ "layer_norm_epsilon": 1e-05,
12
+ "model_type": "gpt2",
13
+ "n_embd": 128,
14
+ "n_head": 4,
15
+ "n_inner": 512,
16
+ "n_layer": 4,
17
+ "n_positions": 256,
18
+ "reorder_and_upcast_attn": false,
19
+ "resid_pdrop": 0.3,
20
+ "scale_attn_by_inverse_layer_idx": false,
21
+ "scale_attn_weights": true,
22
+ "summary_activation": null,
23
+ "summary_first_dropout": 0.1,
24
+ "summary_proj_to_labels": true,
25
+ "summary_type": "cls_index",
26
+ "summary_use_proj": true,
27
+ "torch_dtype": "float32",
28
+ "transformers_version": "4.44.2",
29
+ "use_cache": true,
30
+ "vocab_size": 45
31
+ }
German/generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 3,
4
+ "eos_token_id": 3,
5
+ "transformers_version": "4.44.2"
6
+ }