Simon Clematide committed
Commit 9a7c6be · 1 Parent(s): 758fd01

Initial model upload

README.md CHANGED
@@ -1,3 +1,5 @@
+ # German GBERT LARGE Trained on Archival Data Containing Local History Facts
+
  ---
- license: cc-by-sa-4.0
- ---
+
+ ## license: cc-by-sa-4.0
config.json ADDED
@@ -0,0 +1,71 @@
+ {
+   "_name_or_path": "deepset/gbert-large",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "O",
+     "1": "B-REGISTER",
+     "2": "I-REGISTER",
+     "3": "B-ZEIT",
+     "4": "I-ZEIT",
+     "5": "B-ORT",
+     "6": "I-ORT",
+     "7": "B-QUELLE",
+     "8": "I-QUELLE",
+     "9": "B-HAUS",
+     "10": "I-HAUS",
+     "11": "B-BERUF",
+     "12": "I-BERUF",
+     "13": "B-POI",
+     "14": "I-POI",
+     "15": "B-PERS",
+     "16": "I-PERS",
+     "17": "B-FIRMA",
+     "18": "I-FIRMA",
+     "19": "B-STRASSE",
+     "20": "I-STRASSE"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "B-BERUF": 11,
+     "B-FIRMA": 17,
+     "B-HAUS": 9,
+     "B-ORT": 5,
+     "B-PERS": 15,
+     "B-POI": 13,
+     "B-QUELLE": 7,
+     "B-REGISTER": 1,
+     "B-STRASSE": 19,
+     "B-ZEIT": 3,
+     "I-BERUF": 12,
+     "I-FIRMA": 18,
+     "I-HAUS": 10,
+     "I-ORT": 6,
+     "I-PERS": 16,
+     "I-POI": 14,
+     "I-QUELLE": 8,
+     "I-REGISTER": 2,
+     "I-STRASSE": 20,
+     "I-ZEIT": 4,
+     "O": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.48.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 31102
+ }
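
The configuration above registers the checkpoint as a `BertForTokenClassification` head on top of `deepset/gbert-large`, with a 21-tag BIO label set covering places, times, persons, streets, houses, occupations, companies, sources, registers and points of interest. A minimal inference sketch using the standard `transformers` token-classification pipeline; the model path and the example sentence are placeholders, not taken from this commit:

```python
# Sketch only: load this checkpoint for NER inference with transformers.
# "path/to/this-checkpoint" is a placeholder for the actual repo id or a
# local clone of the files added in this commit.
from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline

model_path = "path/to/this-checkpoint"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForTokenClassification.from_pretrained(model_path)

# "simple" aggregation merges B-/I- subword predictions into whole entity spans.
ner = pipeline("token-classification", model=model, tokenizer=tokenizer,
               aggregation_strategy="simple")

# Invented archival-style sentence, for illustration only.
print(ner("Das Haus zum Rech an der Neumarktgasse wurde 1657 von Hans Müller bewohnt."))
```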
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:592635d4fab5ae641981f0cc5a2fe748ecf99b47a93575fafa17f586e5ba1f9e
+ size 1338877748
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1bb5772ca8991d038ef4b954f83f57713eab658c380c09d1b6361c7f3c1549f3
+ size 2677978087
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:013d13e4b269a8648da58d2f40bf48e004388ea0631771b8c8e588ab75375092
+ size 13990
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa9d0002a7093b476e437f7fc9349f4919b728c176911d2ad82b8fe71e4a24ef
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "104": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "max_len": 512,
+   "model_max_length": 512,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": false,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
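
The tokenizer configuration above specifies a cased German `BertTokenizer` (`do_lower_case: false`, `strip_accents: false`) with a `model_max_length` of 512, matching the 31,102-entry vocabulary shipped in `vocab.txt`. A small illustrative sketch of how word-level BIO tags are usually aligned to WordPiece tokens for this kind of token classifier; the sentence, tags and alignment rule are assumptions, not taken from the actual training setup:

```python
# Illustrative sketch: align word-level BIO tags with WordPiece tokens, as is
# commonly done when fine-tuning BERT-style token classifiers. The words and
# tags below are invented, not training data from this repository.
from transformers import AutoTokenizer

# Base tokenizer family; the fine-tuned repo ships the same vocabulary.
tokenizer = AutoTokenizer.from_pretrained("deepset/gbert-large")

words = ["Hans", "Müller", ",", "Schuhmacher", "an", "der", "Kirchgasse"]
tags = ["B-PERS", "I-PERS", "O", "B-BERUF", "O", "O", "B-STRASSE"]

enc = tokenizer(words, is_split_into_words=True, truncation=True, max_length=512)

# word_ids() maps each subword back to its source word; special tokens map to
# None. In training, continuation pieces are typically masked out (label -100).
for tok, wid in zip(tokenizer.convert_ids_to_tokens(enc["input_ids"]), enc.word_ids()):
    print(f"{tok:15s} {tags[wid] if wid is not None else 'IGNORE'}")
```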
trainer_state.json ADDED
@@ -0,0 +1,88 @@
+ {
+   "best_metric": 0.2930234372615814,
+   "best_model_checkpoint": "hf-ner-sample_004-gbert-large.model/checkpoint-115",
+   "epoch": 5.0,
+   "eval_steps": 500,
+   "global_step": 115,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_f1": 0.3486529318541997,
+       "eval_loss": 0.8595609068870544,
+       "eval_precision": 0.32934131736526945,
+       "eval_recall": 0.37037037037037035,
+       "eval_runtime": 1.0185,
+       "eval_samples_per_second": 57.93,
+       "eval_steps_per_second": 2.946,
+       "step": 23
+     },
+     {
+       "epoch": 2.0,
+       "eval_f1": 0.6369230769230769,
+       "eval_loss": 0.47271719574928284,
+       "eval_precision": 0.5864022662889519,
+       "eval_recall": 0.696969696969697,
+       "eval_runtime": 0.9355,
+       "eval_samples_per_second": 63.071,
+       "eval_steps_per_second": 3.207,
+       "step": 46
+     },
+     {
+       "epoch": 3.0,
+       "eval_f1": 0.7248764415156508,
+       "eval_loss": 0.33701092004776,
+       "eval_precision": 0.7096774193548387,
+       "eval_recall": 0.7407407407407407,
+       "eval_runtime": 0.9711,
+       "eval_samples_per_second": 60.758,
+       "eval_steps_per_second": 3.089,
+       "step": 69
+     },
+     {
+       "epoch": 4.0,
+       "eval_f1": 0.765472312703583,
+       "eval_loss": 0.296609491109848,
+       "eval_precision": 0.7413249211356467,
+       "eval_recall": 0.7912457912457912,
+       "eval_runtime": 0.9576,
+       "eval_samples_per_second": 61.61,
+       "eval_steps_per_second": 3.133,
+       "step": 92
+     },
+     {
+       "epoch": 5.0,
+       "eval_f1": 0.7568438003220611,
+       "eval_loss": 0.2930234372615814,
+       "eval_precision": 0.7253086419753086,
+       "eval_recall": 0.7912457912457912,
+       "eval_runtime": 0.9995,
+       "eval_samples_per_second": 59.03,
+       "eval_steps_per_second": 3.002,
+       "step": 115
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 230,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 555656517635304.0,
+   "train_batch_size": 24,
+   "trial_name": null,
+   "trial_params": null
+ }
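
The trainer state above snapshots the run at epoch 5 of a planned 10 (global step 115 of max_steps 230, i.e. 23 optimizer steps per epoch) with a train batch size of 24. The best checkpoint appears to be tracked by evaluation loss, since best_metric equals the epoch-5 eval_loss, while the dev F1 peaks at about 0.765 at epoch 4. A hedged reconstruction of TrainingArguments consistent with these logged values; the output directory is taken from best_model_checkpoint, and anything not logged (learning rate, warmup, weight decay) is an assumption rather than a readout of training_args.bin:

```python
# Reconstruction sketch of TrainingArguments consistent with trainer_state.json.
# Only values visible in the logged state are asserted; the rest are guesses.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="hf-ner-sample_004-gbert-large.model",  # from best_model_checkpoint
    num_train_epochs=10,                # logged: "num_train_epochs": 10
    per_device_train_batch_size=24,     # logged: "train_batch_size": 24
    eval_strategy="epoch",              # metrics are logged once per epoch
    save_strategy="epoch",              # checkpoints land on epoch boundaries
    load_best_model_at_end=True,        # a best checkpoint is tracked
    metric_for_best_model="eval_loss",  # best_metric matches the epoch-5 eval_loss
    greater_is_better=False,
    learning_rate=5e-5,                 # assumption: library default, not logged
)
```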
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da153a41811865f9d39826675d0707e386cba820ad73e3cf4b1f8b57adcd9dda
+ size 5368
vocab.txt ADDED
The diff for this file is too large to render. See raw diff