Safetensors
codebyzeb committed
Commit 9e9685b · verified · 1 parent: 4270ad1

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. Basque/config.json +31 -0
  2. Basque/generation_config.json +6 -0
  3. Basque/model.safetensors +3 -0
  4. Basque/special_tokens_map.json +30 -0
  5. Basque/tokenizer.json +159 -0
  6. Basque/tokenizer_config.json +44 -0
  7. Basque/training_args.bin +3 -0
  8. Basque/vocab.json +1 -0
  9. Cantonese/config.json +31 -0
  10. Cantonese/generation_config.json +6 -0
  11. Cantonese/model.safetensors +3 -0
  12. Cantonese/special_tokens_map.json +30 -0
  13. Cantonese/tokenizer.json +269 -0
  14. Cantonese/tokenizer_config.json +44 -0
  15. Cantonese/training_args.bin +3 -0
  16. Cantonese/vocab.json +1 -0
  17. Catalan/config.json +31 -0
  18. Catalan/generation_config.json +6 -0
  19. Catalan/model.safetensors +3 -0
  20. Catalan/special_tokens_map.json +30 -0
  21. Catalan/tokenizer.json +157 -0
  22. Catalan/tokenizer_config.json +44 -0
  23. Catalan/training_args.bin +3 -0
  24. Catalan/vocab.json +1 -0
  25. Croatian/config.json +31 -0
  26. Croatian/generation_config.json +6 -0
  27. Croatian/model.safetensors +3 -0
  28. Croatian/special_tokens_map.json +30 -0
  29. Croatian/tokenizer.json +152 -0
  30. Croatian/tokenizer_config.json +44 -0
  31. Croatian/training_args.bin +3 -0
  32. Croatian/vocab.json +1 -0
  33. Danish/config.json +31 -0
  34. Danish/generation_config.json +6 -0
  35. Danish/model.safetensors +3 -0
  36. Danish/special_tokens_map.json +30 -0
  37. Danish/tokenizer.json +172 -0
  38. Danish/tokenizer_config.json +44 -0
  39. Danish/training_args.bin +3 -0
  40. Danish/vocab.json +1 -0
  41. Dutch/config.json +31 -0
  42. Dutch/generation_config.json +6 -0
  43. Dutch/model.safetensors +3 -0
  44. Dutch/special_tokens_map.json +30 -0
  45. Dutch/tokenizer.json +167 -0
  46. Dutch/tokenizer_config.json +44 -0
  47. Dutch/training_args.bin +3 -0
  48. Dutch/vocab.json +1 -0
  49. EnglishNA/config.json +31 -0
  50. EnglishNA/generation_config.json +6 -0
Basque/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.3,
+ "bos_token_id": 3,
+ "embd_pdrop": 0.3,
+ "eos_token_id": 3,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 128,
+ "n_head": 4,
+ "n_inner": 512,
+ "n_layer": 3,
+ "n_positions": 256,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.3,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.44.2",
+ "use_cache": true,
+ "vocab_size": 42
+ }
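Each per-language folder defines the same small GPT-2 architecture; only vocab_size differs. A minimal sketch, assuming the Hugging Face transformers library and a local clone of this repository (so that "Basque/" resolves as a local path), of instantiating the model this config describes:

from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("Basque")  # reads Basque/config.json
print(config.n_layer, config.n_embd, config.vocab_size)  # 3 128 42

# Build a freshly initialised model with this architecture; the trained
# weights live separately in Basque/model.safetensors.
model = AutoModelForCausalLM.from_config(config)
print(sum(p.numel() for p in model.parameters()), "parameters")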
Basque/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 3,
+ "eos_token_id": 3,
+ "transformers_version": "4.44.2"
+ }
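A short sketch (same assumptions as above): generation_config.json supplies the default token ids used by model.generate(); bos and eos are both id 3, which the tokenizer maps to UTT_BOUNDARY.

from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("Basque")
print(gen_config.bos_token_id, gen_config.eos_token_id)  # 3 3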
Basque/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce322b9b6b1180c639fcb02a177395ce5a1bcde446a9dbe983a98dfe3821ca93
+ size 2536728
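The three lines above are a Git LFS pointer, not the weights themselves. A hedged sketch for checking that a fully downloaded Basque/model.safetensors matches the size and sha256 oid recorded in the pointer:

import hashlib
import os

path = "Basque/model.safetensors"
assert os.path.getsize(path) == 2536728  # size from the pointer
digest = hashlib.sha256(open(path, "rb").read()).hexdigest()
assert digest == "ce322b9b6b1180c639fcb02a177395ce5a1bcde446a9dbe983a98dfe3821ca93"
print("LFS pointer verified")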
Basque/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "PAD",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "UNK",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
Basque/tokenizer.json ADDED
@@ -0,0 +1,159 @@
+ {
+ "version": "1.0",
+ "truncation": null,
+ "padding": null,
+ "added_tokens": [
+ {
+ "id": 0,
+ "content": "UNK",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 1,
+ "content": "PAD",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 2,
+ "content": "WORD_BOUNDARY",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 3,
+ "content": "UTT_BOUNDARY",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ }
+ ],
+ "normalizer": {
+ "type": "Sequence",
+ "normalizers": [
+ {
+ "type": "Strip",
+ "strip_left": true,
+ "strip_right": true
+ }
+ ]
+ },
+ "pre_tokenizer": {
+ "type": "Whitespace"
+ },
+ "post_processor": {
+ "type": "TemplateProcessing",
+ "single": [
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ }
+ ],
+ "pair": [
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ },
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "B",
+ "type_id": 1
+ }
+ }
+ ],
+ "special_tokens": {
+ "UTT_BOUNDARY": {
+ "id": "UTT_BOUNDARY",
+ "ids": [
+ 3
+ ],
+ "tokens": [
+ "UTT_BOUNDARY"
+ ]
+ }
+ }
+ },
+ "decoder": null,
+ "model": {
+ "type": "WordLevel",
+ "vocab": {
+ "UNK": 0,
+ "PAD": 1,
+ "WORD_BOUNDARY": 2,
+ "UTT_BOUNDARY": 3,
+ "b": 4,
+ "ai̯": 5,
+ "e": 6,
+ "s̪̻": 7,
+ "ɟ": 8,
+ "ei̯": 9,
+ "t̺s̺": 10,
+ "i": 11,
+ "oi̯": 12,
+ "a": 13,
+ "ɾ": 14,
+ "k": 15,
+ "t̠ʃ": 16,
+ "s̺": 17,
+ "l": 18,
+ "p": 19,
+ "o": 20,
+ "r": 21,
+ "t̪": 22,
+ "u": 23,
+ "n": 24,
+ "m": 25,
+ "ð": 26,
+ "t̪̻s̪̻": 27,
+ "β": 28,
+ "ʎ": 29,
+ "ɡ": 30,
+ "ɣ": 31,
+ "au̯": 32,
+ "c": 33,
+ "j": 34,
+ "d̪": 35,
+ "ʃ": 36,
+ "ɲ": 37,
+ "f": 38,
+ "eu̯": 39,
+ "θ": 40,
+ "x": 41
+ },
+ "unk_token": "UNK"
+ }
+ }
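A sketch (assuming the tokenizers package and a local clone): the WordLevel model maps each whitespace-separated phoneme to an id, and the TemplateProcessing post-processor prepends UTT_BOUNDARY (id 3) to every encoded sequence.

from tokenizers import Tokenizer

tok = Tokenizer.from_file("Basque/tokenizer.json")
enc = tok.encode("e a o")  # a toy phoneme string; symbols taken from the vocab above
print(enc.tokens)  # ['UTT_BOUNDARY', 'e', 'a', 'o']
print(enc.ids)     # [3, 6, 13, 20]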
Basque/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "UNK",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "PAD",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "WORD_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "UTT_BOUNDARY",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "UTT_BOUNDARY",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "PAD",
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "UNK"
+ }
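Note that tokenizer_class names GPT2Tokenizer, but the vocabulary is the WordLevel phoneme inventory above; a hedged sketch, assuming transformers picks up the fast tokenizer defined by tokenizer.json in this folder:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Basque")
print(tok.bos_token, tok.bos_token_id)  # UTT_BOUNDARY 3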
Basque/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:743e25492f2a0dd2aac1435f2a19a6367f3618135945a12fa336ec4c809cb4e8
+ size 5368
Basque/vocab.json ADDED
@@ -0,0 +1 @@
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"b":4,"ai̯":5,"e":6,"s̪̻":7,"ɟ":8,"ei̯":9,"t̺s̺":10,"i":11,"oi̯":12,"a":13,"ɾ":14,"k":15,"t̠ʃ":16,"s̺":17,"l":18,"p":19,"o":20,"r":21,"t̪":22,"u":23,"n":24,"m":25,"ð":26,"t̪̻s̪̻":27,"β":28,"ʎ":29,"ɡ":30,"ɣ":31,"au̯":32,"c":33,"j":34,"d̪":35,"ʃ":36,"ɲ":37,"f":38,"eu̯":39,"θ":40,"x":41}
Cantonese/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.3,
+ "bos_token_id": 3,
+ "embd_pdrop": 0.3,
+ "eos_token_id": 3,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 128,
+ "n_head": 4,
+ "n_inner": 512,
+ "n_layer": 3,
+ "n_positions": 256,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.3,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.44.2",
+ "use_cache": true,
+ "vocab_size": 152
+ }
Cantonese/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 3,
+ "eos_token_id": 3,
+ "transformers_version": "4.44.2"
+ }
Cantonese/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b48efa4f503665cc6436b384127aac0bef14d1cc791bd59884828eb52fe72b57
+ size 2593048
Cantonese/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "PAD",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "UNK",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
Cantonese/tokenizer.json ADDED
@@ -0,0 +1,269 @@
+ {
+ "version": "1.0",
+ "truncation": null,
+ "padding": null,
+ "added_tokens": [
+ {
+ "id": 0,
+ "content": "UNK",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 1,
+ "content": "PAD",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 2,
+ "content": "WORD_BOUNDARY",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 3,
+ "content": "UTT_BOUNDARY",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ }
+ ],
+ "normalizer": {
+ "type": "Sequence",
+ "normalizers": [
+ {
+ "type": "Strip",
+ "strip_left": true,
+ "strip_right": true
+ }
+ ]
+ },
+ "pre_tokenizer": {
+ "type": "WhitespaceSplit"
+ },
+ "post_processor": {
+ "type": "TemplateProcessing",
+ "single": [
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ }
+ ],
+ "pair": [
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ },
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "B",
+ "type_id": 1
+ }
+ }
+ ],
+ "special_tokens": {
+ "UTT_BOUNDARY": {
+ "id": "UTT_BOUNDARY",
+ "ids": [
+ 3
+ ],
+ "tokens": [
+ "UTT_BOUNDARY"
+ ]
+ }
+ }
+ },
+ "decoder": null,
+ "model": {
+ "type": "WordLevel",
+ "vocab": {
+ "UNK": 0,
+ "PAD": 1,
+ "WORD_BOUNDARY": 2,
+ "UTT_BOUNDARY": 3,
+ "j": 4,
+ "ɐ˥": 5,
+ "t": 6,
+ "k": 7,
+ "ɐu˧˥": 8,
+ "i˨": 9,
+ "n": 10,
+ "i˧˩̰": 11,
+ "y˨": 12,
+ "s": 13,
+ "ɐ˨": 14,
+ "p": 15,
+ "ts": 16,
+ "ɐu˥": 17,
+ "ɪ̞˧˥": 18,
+ "ŋ": 19,
+ "ɵ˧": 20,
+ "a̞˧": 21,
+ "l": 22,
+ "ʊ̟˥": 23,
+ "a̞˧˩̰": 24,
+ "ɛ˥": 25,
+ "ei˩˧": 26,
+ "w": 27,
+ "a̞˨": 28,
+ "ɐi˧˥": 29,
+ "a̞˧˥": 30,
+ "m̩˧˥": 31,
+ "m": 32,
+ "ou˥": 33,
+ "ei˥": 34,
+ "i˧": 35,
+ "ɔ̽˧˥": 36,
+ "tʰ": 37,
+ "i˥": 38,
+ "f": 39,
+ "aːĭ˧": 40,
+ "h": 41,
+ "ɵy˧": 42,
+ "a̞˥": 43,
+ "ei˧˩̰": 44,
+ "ou˨": 45,
+ "ɔ̽˧": 46,
+ "ɐi˧˩̰": 47,
+ "u˧": 48,
+ "ɔːĭ˥": 49,
+ "ɐu˨": 50,
+ "ei˧˥": 51,
+ "ɐi˨": 52,
+ "ʊ̟˧˩̰": 53,
+ "ʊ̟˨": 54,
+ "a̞˩˧": 55,
+ "ou˧˥": 56,
+ "aːĭ˧˥": 57,
+ "ɔ̽˨": 58,
+ "ɛ˩˧": 59,
+ "ɪ̞˨": 60,
+ "iːŭ˧": 61,
+ "ɛ˧˩̰": 62,
+ "m̩˧˩̰": 63,
+ "ɵ˧˥": 64,
+ "ei˧": 65,
+ "ɐu˧˩̰": 66,
+ "m̩˧": 67,
+ "ɐ˧˥": 68,
+ "ɐu˩˧": 69,
+ "ɐi˥": 70,
+ "ɔ̽˥": 71,
+ "ɔ̽˧˩̰": 72,
+ "ɔːĭ˧": 73,
+ "ou˩˧": 74,
+ "m̩˥": 75,
+ "ɐ˧": 76,
+ "tsʰ": 77,
+ "ɛ˧˥": 78,
+ "i˧˥": 79,
+ "ɔ̽˩˧": 80,
+ "kʰ": 81,
+ "ɐ˧˩̰": 82,
+ "aːŭ˧˥": 83,
+ "pʰ": 84,
+ "aːĭ˧˩̰": 85,
+ "ɵy˩˧": 86,
+ "ɛ˧": 87,
+ "u˧˥": 88,
+ "ɛ˨": 89,
+ "ʊ̟˧": 90,
+ "u˥": 91,
+ "m̩˩˧": 92,
+ "aːŭ˧": 93,
+ "œ̞˩˧": 94,
+ "i˩˧": 95,
+ "ɪ̞˧˩̰": 96,
+ "u˨": 97,
+ "ɪ̞˥": 98,
+ "iːŭ˧˩̰": 99,
+ "œ̞˧˥": 100,
+ "y˧": 101,
+ "uːĭ˩˧": 102,
+ "uːĭ˥": 103,
+ "ɵy˧˥": 104,
+ "y˧˩̰": 105,
+ "ɔːĭ˧˥": 106,
+ "ɛ": 107,
+ "ou˧": 108,
+ "ei˨": 109,
+ "ɵ˥": 110,
+ "u˧˩̰": 111,
+ "y˥": 112,
+ "œ̞˥": 113,
+ "œ̞˧˩̰": 114,
+ "aːĭ˨": 115,
+ "ɐ˩˧": 116,
+ "œ̞˧": 117,
+ "uːĭ˧˥": 118,
+ "ɐu˧": 119,
+ "ɐi˩˧": 120,
+ "ɐi˧": 121,
+ "ou˧˩̰": 122,
+ "aːĭ˥": 123,
+ "aːŭ˥": 124,
+ "ŋ˩˧": 125,
+ "y˧˥": 126,
+ "iːŭ˥": 127,
+ "ɔːĭ˨": 128,
+ "ʊ̟˧˥": 129,
+ "iːŭ˧˥": 130,
+ "ɵy˥": 131,
+ "ɔːĭ˧˩̰": 132,
+ "uːĭ˧": 133,
+ "ɵy˧˩̰": 134,
+ "œ̞˨": 135,
+ "m̩˨": 136,
+ "aːŭ˧˩̰": 137,
+ "y˩˧": 138,
+ "aːŭ˩˧": 139,
+ "aːĭ˩˧": 140,
+ "uːĭ˨": 141,
+ "ɵy˨": 142,
+ "aːŭ˨": 143,
+ "ɪ̞˧": 144,
+ "ɵ˨": 145,
+ "iːŭ˩˧": 146,
+ "iːŭ˨": 147,
+ "ɵ˧˩̰": 148,
+ "uːĭ˧˩̰": 149,
+ "u˩˧": 150,
+ "ŋ˧˩̰": 151
+ },
+ "unk_token": "UNK"
+ }
+ }
Cantonese/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "UNK",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "PAD",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "WORD_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "UTT_BOUNDARY",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "UTT_BOUNDARY",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "PAD",
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "UNK"
+ }
Cantonese/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c92167da0e935ca89f7238c7dc0210aedb733dd0e02ec79d330b22ab9ee613d4
+ size 5368
Cantonese/vocab.json ADDED
@@ -0,0 +1 @@
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"j":4,"ɐ˥":5,"t":6,"k":7,"ɐu˧˥":8,"i˨":9,"n":10,"i˧˩̰":11,"y˨":12,"s":13,"ɐ˨":14,"p":15,"ts":16,"ɐu˥":17,"ɪ̞˧˥":18,"ŋ":19,"ɵ˧":20,"a̞˧":21,"l":22,"ʊ̟˥":23,"a̞˧˩̰":24,"ɛ˥":25,"ei˩˧":26,"w":27,"a̞˨":28,"ɐi˧˥":29,"a̞˧˥":30,"m̩˧˥":31,"m":32,"ou˥":33,"ei˥":34,"i˧":35,"ɔ̽˧˥":36,"tʰ":37,"i˥":38,"f":39,"aːĭ˧":40,"h":41,"ɵy˧":42,"a̞˥":43,"ei˧˩̰":44,"ou˨":45,"ɔ̽˧":46,"ɐi˧˩̰":47,"u˧":48,"ɔːĭ˥":49,"ɐu˨":50,"ei˧˥":51,"ɐi˨":52,"ʊ̟˧˩̰":53,"ʊ̟˨":54,"a̞˩˧":55,"ou˧˥":56,"aːĭ˧˥":57,"ɔ̽˨":58,"ɛ˩˧":59,"ɪ̞˨":60,"iːŭ˧":61,"ɛ˧˩̰":62,"m̩˧˩̰":63,"ɵ˧˥":64,"ei˧":65,"ɐu˧˩̰":66,"m̩˧":67,"ɐ˧˥":68,"ɐu˩˧":69,"ɐi˥":70,"ɔ̽˥":71,"ɔ̽˧˩̰":72,"ɔːĭ˧":73,"ou˩˧":74,"m̩˥":75,"ɐ˧":76,"tsʰ":77,"ɛ˧˥":78,"i˧˥":79,"ɔ̽˩˧":80,"kʰ":81,"ɐ˧˩̰":82,"aːŭ˧˥":83,"pʰ":84,"aːĭ˧˩̰":85,"ɵy˩˧":86,"ɛ˧":87,"u˧˥":88,"ɛ˨":89,"ʊ̟˧":90,"u˥":91,"m̩˩˧":92,"aːŭ˧":93,"œ̞˩˧":94,"i˩˧":95,"ɪ̞˧˩̰":96,"u˨":97,"ɪ̞˥":98,"iːŭ˧˩̰":99,"œ̞˧˥":100,"y˧":101,"uːĭ˩˧":102,"uːĭ˥":103,"ɵy˧˥":104,"y˧˩̰":105,"ɔːĭ˧˥":106,"ɛ":107,"ou˧":108,"ei˨":109,"ɵ˥":110,"u˧˩̰":111,"y˥":112,"œ̞˥":113,"œ̞˧˩̰":114,"aːĭ˨":115,"ɐ˩˧":116,"œ̞˧":117,"uːĭ˧˥":118,"ɐu˧":119,"ɐi˩˧":120,"ɐi˧":121,"ou˧˩̰":122,"aːĭ˥":123,"aːŭ˥":124,"ŋ˩˧":125,"y˧˥":126,"iːŭ˥":127,"ɔːĭ˨":128,"ʊ̟˧˥":129,"iːŭ˧˥":130,"ɵy˥":131,"ɔːĭ˧˩̰":132,"uːĭ˧":133,"ɵy˧˩̰":134,"œ̞˨":135,"m̩˨":136,"aːŭ˧˩̰":137,"y˩˧":138,"aːŭ˩˧":139,"aːĭ˩˧":140,"uːĭ˨":141,"ɵy˨":142,"aːŭ˨":143,"ɪ̞˧":144,"ɵ˨":145,"iːŭ˩˧":146,"iːŭ˨":147,"ɵ˧˩̰":148,"uːĭ˧˩̰":149,"u˩˧":150,"ŋ˧˩̰":151}
Catalan/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.3,
+ "bos_token_id": 3,
+ "embd_pdrop": 0.3,
+ "eos_token_id": 3,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 128,
+ "n_head": 4,
+ "n_inner": 512,
+ "n_layer": 3,
+ "n_positions": 256,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.3,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.44.2",
+ "use_cache": true,
+ "vocab_size": 40
+ }
Catalan/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 3,
+ "eos_token_id": 3,
+ "transformers_version": "4.44.2"
+ }
Catalan/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bdfc9c6f2507119bf6a15f3d6a7b45bf7adfa72e1b40d075f112cd19fd6f449e
+ size 2535704
Catalan/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "PAD",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "UNK",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
Catalan/tokenizer.json ADDED
@@ -0,0 +1,157 @@
+ {
+ "version": "1.0",
+ "truncation": null,
+ "padding": null,
+ "added_tokens": [
+ {
+ "id": 0,
+ "content": "UNK",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 1,
+ "content": "PAD",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 2,
+ "content": "WORD_BOUNDARY",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 3,
+ "content": "UTT_BOUNDARY",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ }
+ ],
+ "normalizer": {
+ "type": "Sequence",
+ "normalizers": [
+ {
+ "type": "Strip",
+ "strip_left": true,
+ "strip_right": true
+ }
+ ]
+ },
+ "pre_tokenizer": {
+ "type": "Whitespace"
+ },
+ "post_processor": {
+ "type": "TemplateProcessing",
+ "single": [
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ }
+ ],
+ "pair": [
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ },
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "B",
+ "type_id": 1
+ }
+ }
+ ],
+ "special_tokens": {
+ "UTT_BOUNDARY": {
+ "id": "UTT_BOUNDARY",
+ "ids": [
+ 3
+ ],
+ "tokens": [
+ "UTT_BOUNDARY"
+ ]
+ }
+ }
+ },
+ "decoder": null,
+ "model": {
+ "type": "WordLevel",
+ "vocab": {
+ "UNK": 0,
+ "PAD": 1,
+ "WORD_BOUNDARY": 2,
+ "UTT_BOUNDARY": 3,
+ "a": 4,
+ "w": 5,
+ "ɛ": 6,
+ "ə": 7,
+ "ð": 8,
+ "t̪": 9,
+ "j": 10,
+ "i": 11,
+ "ɔ": 12,
+ "n̺": 13,
+ "z̺": 14,
+ "d̪": 15,
+ "s̺": 16,
+ "β": 17,
+ "m": 18,
+ "e": 19,
+ "f": 20,
+ "ɾ̺": 21,
+ "r̺": 22,
+ "u̯": 23,
+ "k": 24,
+ "u": 25,
+ "b": 26,
+ "p": 27,
+ "ɣ": 28,
+ "ɡ": 29,
+ "ŋ": 30,
+ "o": 31,
+ "ɫ̺": 32,
+ "ɲ̟": 33,
+ "ʒ": 34,
+ "ʃ": 35,
+ "ʎ̟": 36,
+ "t̠ʃ": 37,
+ "d̠ʒ": 38,
+ "ts̺": 39
+ },
+ "unk_token": "UNK"
+ }
+ }
Catalan/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "UNK",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "PAD",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "WORD_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "UTT_BOUNDARY",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "UTT_BOUNDARY",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "PAD",
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "UNK"
+ }
Catalan/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3a7040f7ec89ba614817a534ae93366548bd38a7b2b5590c13fa574346d1c17
+ size 5368
Catalan/vocab.json ADDED
@@ -0,0 +1 @@
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"a":4,"w":5,"ɛ":6,"ə":7,"ð":8,"t̪":9,"j":10,"i":11,"ɔ":12,"n̺":13,"z̺":14,"d̪":15,"s̺":16,"β":17,"m":18,"e":19,"f":20,"ɾ̺":21,"r̺":22,"u̯":23,"k":24,"u":25,"b":26,"p":27,"ɣ":28,"ɡ":29,"ŋ":30,"o":31,"ɫ̺":32,"ɲ̟":33,"ʒ":34,"ʃ":35,"ʎ̟":36,"t̠ʃ":37,"d̠ʒ":38,"ts̺":39}
Croatian/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.3,
+ "bos_token_id": 3,
+ "embd_pdrop": 0.3,
+ "eos_token_id": 3,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 128,
+ "n_head": 4,
+ "n_inner": 512,
+ "n_layer": 3,
+ "n_positions": 256,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.3,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.44.2",
+ "use_cache": true,
+ "vocab_size": 35
+ }
Croatian/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 3,
+ "eos_token_id": 3,
+ "transformers_version": "4.44.2"
+ }
Croatian/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed0b9958f82c31f0e4c77719708ab94a772c58997d927716c023ee288b52869a
+ size 2533144
Croatian/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "PAD",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "UNK",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
Croatian/tokenizer.json ADDED
@@ -0,0 +1,152 @@
+ {
+ "version": "1.0",
+ "truncation": null,
+ "padding": null,
+ "added_tokens": [
+ {
+ "id": 0,
+ "content": "UNK",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 1,
+ "content": "PAD",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 2,
+ "content": "WORD_BOUNDARY",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 3,
+ "content": "UTT_BOUNDARY",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ }
+ ],
+ "normalizer": {
+ "type": "Sequence",
+ "normalizers": [
+ {
+ "type": "Strip",
+ "strip_left": true,
+ "strip_right": true
+ }
+ ]
+ },
+ "pre_tokenizer": {
+ "type": "Whitespace"
+ },
+ "post_processor": {
+ "type": "TemplateProcessing",
+ "single": [
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ }
+ ],
+ "pair": [
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ },
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "B",
+ "type_id": 1
+ }
+ }
+ ],
+ "special_tokens": {
+ "UTT_BOUNDARY": {
+ "id": "UTT_BOUNDARY",
+ "ids": [
+ 3
+ ],
+ "tokens": [
+ "UTT_BOUNDARY"
+ ]
+ }
+ }
+ },
+ "decoder": null,
+ "model": {
+ "type": "WordLevel",
+ "vocab": {
+ "UNK": 0,
+ "PAD": 1,
+ "WORD_BOUNDARY": 2,
+ "UTT_BOUNDARY": 3,
+ "e": 4,
+ "a": 5,
+ "u": 6,
+ "x": 7,
+ "k": 8,
+ "t̪": 9,
+ "n": 10,
+ "o": 11,
+ "d̪": 12,
+ "i": 13,
+ "r": 14,
+ "m": 15,
+ "ʃ": 16,
+ "p": 17,
+ "s": 18,
+ "ʋ": 19,
+ "j": 20,
+ "t̠ʃ": 21,
+ "l": 22,
+ "ɡ": 23,
+ "ʒ": 24,
+ "b": 25,
+ "t̪s": 26,
+ "z": 27,
+ "d̠ʒ": 28,
+ "ʎ": 29,
+ "f": 30,
+ "ɲ": 31,
+ "y": 32,
+ "q": 33,
+ "w": 34
+ },
+ "unk_token": "UNK"
+ }
+ }
Croatian/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "UNK",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "PAD",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "WORD_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "UTT_BOUNDARY",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "UTT_BOUNDARY",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "PAD",
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "UNK"
+ }
Croatian/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbd3f3a66db05b16ce892b0eaac9d0e8dd0d693f3f5c25936e5ac1b8c9dc2a52
+ size 5368
Croatian/vocab.json ADDED
@@ -0,0 +1 @@
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"e":4,"a":5,"u":6,"x":7,"k":8,"t̪":9,"n":10,"o":11,"d̪":12,"i":13,"r":14,"m":15,"ʃ":16,"p":17,"s":18,"ʋ":19,"j":20,"t̠ʃ":21,"l":22,"ɡ":23,"ʒ":24,"b":25,"t̪s":26,"z":27,"d̠ʒ":28,"ʎ":29,"f":30,"ɲ":31,"y":32,"q":33,"w":34}
Danish/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.3,
+ "bos_token_id": 3,
+ "embd_pdrop": 0.3,
+ "eos_token_id": 3,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 128,
+ "n_head": 4,
+ "n_inner": 512,
+ "n_layer": 3,
+ "n_positions": 256,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.3,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.44.2",
+ "use_cache": true,
+ "vocab_size": 55
+ }
Danish/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 3,
+ "eos_token_id": 3,
+ "transformers_version": "4.44.2"
+ }
Danish/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b4099c3bb9b42c44e7c514877643d08a7244e54e9e5e09f72ffaa87ac9da850
+ size 2543384
Danish/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "PAD",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "UNK",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
Danish/tokenizer.json ADDED
@@ -0,0 +1,172 @@
+ {
+ "version": "1.0",
+ "truncation": null,
+ "padding": null,
+ "added_tokens": [
+ {
+ "id": 0,
+ "content": "UNK",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 1,
+ "content": "PAD",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 2,
+ "content": "WORD_BOUNDARY",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 3,
+ "content": "UTT_BOUNDARY",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ }
+ ],
+ "normalizer": {
+ "type": "Sequence",
+ "normalizers": [
+ {
+ "type": "Strip",
+ "strip_left": true,
+ "strip_right": true
+ }
+ ]
+ },
+ "pre_tokenizer": {
+ "type": "Whitespace"
+ },
+ "post_processor": {
+ "type": "TemplateProcessing",
+ "single": [
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ }
+ ],
+ "pair": [
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ },
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "B",
+ "type_id": 1
+ }
+ }
+ ],
+ "special_tokens": {
+ "UTT_BOUNDARY": {
+ "id": "UTT_BOUNDARY",
+ "ids": [
+ 3
+ ],
+ "tokens": [
+ "UTT_BOUNDARY"
+ ]
+ }
+ }
+ },
+ "decoder": null,
+ "model": {
+ "type": "WordLevel",
+ "vocab": {
+ "UNK": 0,
+ "PAD": 1,
+ "WORD_BOUNDARY": 2,
+ "UTT_BOUNDARY": 3,
+ "n": 4,
+ "oˤ": 5,
+ "t": 6,
+ "y": 7,
+ "ə": 8,
+ "ð": 9,
+ "ʁ": 10,
+ "ɑˤː": 11,
+ "s": 12,
+ "k": 13,
+ "i": 14,
+ "b": 15,
+ "eˤ": 16,
+ "t̠ʃ": 17,
+ "a": 18,
+ "l": 19,
+ "d": 20,
+ "ɡ": 21,
+ "f": 22,
+ "e": 23,
+ "ɛ": 24,
+ "r": 25,
+ "ɔ": 26,
+ "w": 27,
+ "ɔˤ": 28,
+ "m": 29,
+ "uˤ": 30,
+ "j": 31,
+ "ɑ": 32,
+ "u": 33,
+ "ɒ": 34,
+ "iˤ": 35,
+ "ʋ": 36,
+ "h": 37,
+ "œ": 38,
+ "p": 39,
+ "ɕ": 40,
+ "o": 41,
+ "ŋ": 42,
+ "ɒː": 43,
+ "aˤ": 44,
+ "ɜ": 45,
+ "œː": 46,
+ "eː": 47,
+ "aː": 48,
+ "d̠ʒ": 49,
+ "uː": 50,
+ "ɔː": 51,
+ "oː": 52,
+ "iː": 53,
+ "yː": 54
+ },
+ "unk_token": "UNK"
+ }
+ }
Danish/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "UNK",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "PAD",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "WORD_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "UTT_BOUNDARY",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "UTT_BOUNDARY",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "PAD",
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "UNK"
+ }
Danish/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd62a528ea96d86ad343aab5d79a6f3e218353244a0be4c92de82947b82c1fe9
+ size 5368
Danish/vocab.json ADDED
@@ -0,0 +1 @@
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"n":4,"oˤ":5,"t":6,"y":7,"ə":8,"ð":9,"ʁ":10,"ɑˤː":11,"s":12,"k":13,"i":14,"b":15,"eˤ":16,"t̠ʃ":17,"a":18,"l":19,"d":20,"ɡ":21,"f":22,"e":23,"ɛ":24,"r":25,"ɔ":26,"w":27,"ɔˤ":28,"m":29,"uˤ":30,"j":31,"ɑ":32,"u":33,"ɒ":34,"iˤ":35,"ʋ":36,"h":37,"œ":38,"p":39,"ɕ":40,"o":41,"ŋ":42,"ɒː":43,"aˤ":44,"ɜ":45,"œː":46,"eː":47,"aː":48,"d̠ʒ":49,"uː":50,"ɔː":51,"oː":52,"iː":53,"yː":54}
Dutch/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.3,
+ "bos_token_id": 3,
+ "embd_pdrop": 0.3,
+ "eos_token_id": 3,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 128,
+ "n_head": 4,
+ "n_inner": 512,
+ "n_layer": 3,
+ "n_positions": 256,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.3,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.44.2",
+ "use_cache": true,
+ "vocab_size": 50
+ }
Dutch/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 3,
+ "eos_token_id": 3,
+ "transformers_version": "4.44.2"
+ }
Dutch/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2419c1d9f086c904f711146ca854e725f4afa8b229953ec297133263c26d1b8d
+ size 2540824
Dutch/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "PAD",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "UNK",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
Dutch/tokenizer.json ADDED
@@ -0,0 +1,167 @@
+ {
+ "version": "1.0",
+ "truncation": null,
+ "padding": null,
+ "added_tokens": [
+ {
+ "id": 0,
+ "content": "UNK",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 1,
+ "content": "PAD",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 2,
+ "content": "WORD_BOUNDARY",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 3,
+ "content": "UTT_BOUNDARY",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ }
+ ],
+ "normalizer": {
+ "type": "Sequence",
+ "normalizers": [
+ {
+ "type": "Strip",
+ "strip_left": true,
+ "strip_right": true
+ }
+ ]
+ },
+ "pre_tokenizer": {
+ "type": "Whitespace"
+ },
+ "post_processor": {
+ "type": "TemplateProcessing",
+ "single": [
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ }
+ ],
+ "pair": [
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ },
+ {
+ "SpecialToken": {
+ "id": "UTT_BOUNDARY",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "B",
+ "type_id": 1
+ }
+ }
+ ],
+ "special_tokens": {
+ "UTT_BOUNDARY": {
+ "id": "UTT_BOUNDARY",
+ "ids": [
+ 3
+ ],
+ "tokens": [
+ "UTT_BOUNDARY"
+ ]
+ }
+ }
+ },
+ "decoder": null,
+ "model": {
+ "type": "WordLevel",
+ "vocab": {
+ "UNK": 0,
+ "PAD": 1,
+ "WORD_BOUNDARY": 2,
+ "UTT_BOUNDARY": 3,
+ "z": 4,
+ "oː": 5,
+ "j": 6,
+ "ãː": 7,
+ "ɦ": 8,
+ "ɾ": 9,
+ "d": 10,
+ "i": 11,
+ "ɛ": 12,
+ "p": 13,
+ "ɪ": 14,
+ "k": 15,
+ "ɑ": 16,
+ "l": 17,
+ "ɛː": 18,
+ "n": 19,
+ "s": 20,
+ "v": 21,
+ "ə": 22,
+ "ɛi": 23,
+ "ʋ": 24,
+ "t": 25,
+ "m": 26,
+ "ɣ": 27,
+ "ʏ": 28,
+ "ɔ": 29,
+ "x": 30,
+ "u": 31,
+ "f": 32,
+ "ŋ": 33,
+ "øː": 34,
+ "b": 35,
+ "ɔː": 36,
+ "ʌu": 37,
+ "y": 38,
+ "œy": 39,
+ "tʲ": 40,
+ "w": 41,
+ "ʃ": 42,
+ "t̠ʃ": 43,
+ "ɲ": 44,
+ "ʒ": 45,
+ "iː": 46,
+ "ɡ": 47,
+ "d̠ʒ": 48,
+ "ã": 49
+ },
+ "unk_token": "UNK"
+ }
+ }
Dutch/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "UNK",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "PAD",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "WORD_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "UTT_BOUNDARY",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "UTT_BOUNDARY",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "UTT_BOUNDARY",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "PAD",
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "UNK"
+ }
Dutch/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:051d8089b9b2a56e1beb0ffb7cb9d9effdd39e932954aebd1b6feb1c099a56c7
+ size 5368
Dutch/vocab.json ADDED
@@ -0,0 +1 @@
+ {"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"z":4,"oː":5,"j":6,"ãː":7,"ɦ":8,"ɾ":9,"d":10,"i":11,"ɛ":12,"p":13,"ɪ":14,"k":15,"ɑ":16,"l":17,"ɛː":18,"n":19,"s":20,"v":21,"ə":22,"ɛi":23,"ʋ":24,"t":25,"m":26,"ɣ":27,"ʏ":28,"ɔ":29,"x":30,"u":31,"f":32,"ŋ":33,"øː":34,"b":35,"ɔː":36,"ʌu":37,"y":38,"œy":39,"tʲ":40,"w":41,"ʃ":42,"t̠ʃ":43,"ɲ":44,"ʒ":45,"iː":46,"ɡ":47,"d̠ʒ":48,"ã":49}
EnglishNA/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.3,
+ "bos_token_id": 3,
+ "embd_pdrop": 0.3,
+ "eos_token_id": 3,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 128,
+ "n_head": 4,
+ "n_inner": 512,
+ "n_layer": 3,
+ "n_positions": 256,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.3,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.44.2",
+ "use_cache": true,
+ "vocab_size": 47
+ }
EnglishNA/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 3,
+ "eos_token_id": 3,
+ "transformers_version": "4.44.2"
+ }