Fahmid committed
Commit 51439a1 · Parent: cd6a891
Initial model upload
Files changed:
- .gitattributes +1 -32
- data/test.conll +0 -0
- data/train.conll +0 -0
- eval_ner.py +153 -0
- outputs/bert-base-cased-timeNER/$null +1 -0
- outputs/bert-base-cased-timeNER/.gitattributes +3 -0
- outputs/bert-base-cased-timeNER/config.json +35 -0
- outputs/bert-base-cased-timeNER/model.safetensors +3 -0
- outputs/bert-base-cased-timeNER/special_tokens_map.json +7 -0
- outputs/bert-base-cased-timeNER/tokenizer.json +0 -0
- outputs/bert-base-cased-timeNER/tokenizer_config.json +56 -0
- outputs/bert-base-cased-timeNER/training_args.bin +3 -0
- outputs/bert-base-cased-timeNER/vocab.txt +0 -0
- requirements.txt +4 -0
- train.py +175 -0
- upload_model.py +45 -0
.gitattributes
CHANGED
@@ -1,35 +1,4 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
-*.
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
data/test.conll
ADDED
The diff for this file is too large to render. See raw diff.

data/train.conll
ADDED
The diff for this file is too large to render. See raw diff.
eval_ner.py
ADDED
@@ -0,0 +1,153 @@
import argparse
import os
from typing import List, Tuple

import torch
import numpy as np
from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForTokenClassification
from seqeval.metrics import precision_score, recall_score, f1_score, classification_report


def read_conll_2col(path: str) -> Tuple[List[List[str]], List[List[str]]]:
    """Reads 2-column CoNLL (TOKEN TAG) with blank lines between sentences."""
    toks, labs = [], []
    all_toks, all_labs = [], []
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            if not line:
                if toks:
                    all_toks.append(toks)
                    all_labs.append(labs)
                    toks, labs = [], []
                continue
            parts = line.split()
            if len(parts) < 2:
                # tolerate malformed lines
                continue
            tok, tag = parts[0], parts[-1]
            toks.append(tok)
            labs.append(tag)
    if toks:
        all_toks.append(toks)
        all_labs.append(labs)
    return all_toks, all_labs


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_dir", type=str, default="outputs/bert-base-cased-timeNER",
                        help="Path to the fine-tuned model directory (with config.json, tokenizer files, weights).")
    parser.add_argument("--test_path", type=str, default="data/test.conll",
                        help="Path to 2-column CoNLL test file.")
    parser.add_argument("--batch_size", type=int, default=16)
    parser.add_argument("--max_length", type=int, default=256)
    args = parser.parse_args()

    assert os.path.exists(args.model_dir), f"Model dir not found: {args.model_dir}"
    assert os.path.exists(args.test_path), f"Test file not found: {args.test_path}"

    # Load tokenizer & model
    print(f"Loading model from: {args.model_dir}")
    tokenizer = AutoTokenizer.from_pretrained(args.model_dir, use_fast=True)
    model = AutoModelForTokenClassification.from_pretrained(args.model_dir)
    model.eval()

    # Labels
    id2label = model.config.id2label
    label2id = model.config.label2id
    labels_sorted = [id2label[i] for i in range(len(id2label))]
    print(f"Model labels: {labels_sorted}")

    # Read test set
    print(f"Reading test set: {args.test_path}")
    tokens_list, tags_list = read_conll_2col(args.test_path)
    num_sents = len(tokens_list)
    num_tokens = sum(len(s) for s in tokens_list)
    print(f"Loaded {num_sents} sentences / {num_tokens} tokens")

    # Sanity check: test labels should be subset of model labels
    uniq_test_labels = sorted({t for seq in tags_list for t in seq})
    missing = [t for t in uniq_test_labels if t not in label2id]
    if missing:
        print(f"⚠️ Warning: test labels not in model: {missing}")

    # Build a simple dataset for batching convenience
    ds = Dataset.from_dict({"tokens": tokens_list, "ner_tags": tags_list})

    # Device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    # Evaluation loop (word-piece alignment; label only first subword)
    all_preds: List[List[str]] = []
    all_refs: List[List[str]] = []

    # iterate in batches
    for start in range(0, len(ds), args.batch_size):
        batch = ds[start : start + args.batch_size]
        batch_tokens = batch["tokens"]  # List[List[str]]
        batch_refs = batch["ner_tags"]  # List[List[str]]

        # Tokenize as split words so we can map back using fast tokenizer encodings
        encodings = tokenizer(
            batch_tokens,
            is_split_into_words=True,
            truncation=True,
            max_length=args.max_length,
            return_tensors="pt",
            padding=True,
        )

        with torch.no_grad():
            logits = model(
                input_ids=encodings["input_ids"].to(device),
                attention_mask=encodings["attention_mask"].to(device),
                token_type_ids=encodings["token_type_ids"].to(device) if "token_type_ids" in encodings else None,
            ).logits  # (bsz, seq_len, num_labels)

        pred_ids = logits.argmax(dim=-1).cpu().numpy()  # (bsz, seq_len)

        # Recover word-level predictions using encodings.word_ids()
        for i, word_labels in enumerate(batch_refs):
            encoding = encodings.encodings[i]  # fast tokenizer encoding
            word_ids = encoding.word_ids  # list[Optional[int]] aligned to tokens
            seq_pred_ids = pred_ids[i]

            word_level_preds: List[str] = []
            seen_word = None
            for tok_idx, wid in enumerate(word_ids):
                if wid is None:
                    continue
                if wid != seen_word:
                    # first subword of this word → take prediction
                    label_id = int(seq_pred_ids[tok_idx])
                    word_level_preds.append(id2label[label_id])
                    seen_word = wid
                else:
                    # subsequent subwords → skip (standard NER eval)
                    continue

            # Trim to same length as references (in case of truncation)
            L = min(len(word_labels), len(word_level_preds))
            all_refs.append(word_labels[:L])
            all_preds.append(word_level_preds[:L])

    # Metrics
    p = precision_score(all_refs, all_preds)
    r = recall_score(all_refs, all_preds)
    f1 = f1_score(all_refs, all_preds)

    print("\nResults on test set")
    print(f"Precision: {p:.4f}")
    print(f"Recall   : {r:.4f}")
    print(f"F1       : {f1:.4f}")

    print("\nSeqeval classification report")
    print(classification_report(all_refs, all_preds, digits=4))


if __name__ == "__main__":
    main()
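For quick spot checks outside this batched evaluation, the same checkpoint can also be queried through the transformers token-classification pipeline. This is a minimal sketch rather than part of the committed scripts: it assumes the outputs/bert-base-cased-timeNER directory is available locally, and the input sentence is made up.

# Minimal inference sketch (assumes the fine-tuned checkpoint is present locally).
from transformers import pipeline

time_tagger = pipeline(
    "token-classification",
    model="outputs/bert-base-cased-timeNER",
    tokenizer="outputs/bert-base-cased-timeNER",
    aggregation_strategy="simple",  # merge B-T/I-T word pieces into whole spans
)

# Hypothetical input; results are dicts with entity_group "T", a score, and character offsets.
print(time_tagger("The review meeting moved from Monday morning to 3 pm on June 5, 2020."))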
outputs/bert-base-cased-timeNER/$null
ADDED
@@ -0,0 +1 @@
Could Not Find C:\Users\ASUS_GAMER\Desktop\time-ner\outputs\bert-base-cased-timeNER\scheduler.pt
outputs/bert-base-cased-timeNER/.gitattributes
ADDED
@@ -0,0 +1,3 @@
*.bin filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
outputs/bert-base-cased-timeNER/config.json
ADDED
@@ -0,0 +1,35 @@
{
  "architectures": [
    "BertForTokenClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "O",
    "1": "B-T",
    "2": "I-T"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "B-T": 1,
    "I-T": 2,
    "O": 0
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.55.2",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 28996
}
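The id2label/label2id blocks record the tag set the classification head was trained with: a single entity type T in BIO format (O, B-T, I-T), presumably time expressions given the timeNER naming. As a small sanity-check sketch (assuming the folder has been pulled locally), the mapping can be read back with AutoConfig:

from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("outputs/bert-base-cased-timeNER")
print(cfg.num_labels)  # 3
print(cfg.id2label)    # {0: 'O', 1: 'B-T', 2: 'I-T'}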
outputs/bert-base-cased-timeNER/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf94fabfbf186a48a92b67db00d0d3ed563161882a28173d297c898b386d0644
size 430911284
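The LFS pointer size is consistent with the float32 BERT-base checkpoint described in config.json; a rough back-of-the-envelope check (roughly 4 bytes per float32 parameter, bert-base-cased having on the order of 108M parameters):

# ~430.9 MB of float32 weights ≈ 108M parameters, as expected for bert-base-cased.
print(430911284 / 4 / 1e6)  # ≈ 107.7 (million parameters)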
outputs/bert-base-cased-timeNER/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
{
  "cls_token": "[CLS]",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}
outputs/bert-base-cased-timeNER/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
outputs/bert-base-cased-timeNER/tokenizer_config.json
ADDED
@@ -0,0 +1,56 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": false,
  "cls_token": "[CLS]",
  "do_lower_case": false,
  "extra_special_tokens": {},
  "mask_token": "[MASK]",
  "model_max_length": 512,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
outputs/bert-base-cased-timeNER/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:294714db0ea3f4506c5f8a1f7cbff32000dc44762fc37d2c21d7f5de1fc2136c
size 5368
outputs/bert-base-cased-timeNER/vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.
requirements.txt
ADDED
@@ -0,0 +1,4 @@
transformers>=4.42.0
datasets>=2.19.0
torch>=2.1.0
accelerate>=0.31.0
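Note that eval_ner.py also imports seqeval and upload_model.py imports huggingface_hub, neither of which is pinned here, so they must be installed separately. A quick sketch to check which of the packages used by the scripts in this commit are present in the current environment:

import importlib.util

# transformers/datasets/torch/accelerate come from requirements.txt;
# seqeval and huggingface_hub are imported by eval_ner.py / upload_model.py.
for pkg in ["transformers", "datasets", "torch", "accelerate", "seqeval", "huggingface_hub"]:
    status = "OK" if importlib.util.find_spec(pkg) else "MISSING"
    print(f"{pkg:16s} {status}")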
train.py
ADDED
@@ -0,0 +1,175 @@
import os
import argparse
import random
from typing import List, Tuple

import numpy as np
import torch
from datasets import Dataset
from transformers import (
    AutoTokenizer,
    AutoModelForTokenClassification,
    DataCollatorForTokenClassification,
    TrainingArguments,
    Trainer,
)


def set_seed(seed: int = 42):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


def read_conll_2col(path: str) -> Tuple[List[List[str]], List[List[str]]]:
    """
    Reads a 2-column CoNLL file:
        TOKEN TAG
    Blank line separates sentences.
    Returns (tokens_per_sentence, tags_per_sentence).
    """
    toks, labs = [], []
    all_toks, all_labs = [], []
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            if not line:
                if toks:
                    all_toks.append(toks)
                    all_labs.append(labs)
                    toks, labs = [], []
                continue
            parts = line.split()
            if len(parts) < 2:
                # tolerate malformed lines
                continue
            tok, tag = parts[0], parts[-1]
            toks.append(tok)
            labs.append(tag)
    if toks:
        all_toks.append(toks)
        all_labs.append(labs)
    return all_toks, all_labs


def build_label_maps(tags: List[List[str]]):
    """
    Build label list & maps from training tags only.
    Ensures 'O' is index 0 for convenience.
    """
    uniq = set()
    for seq in tags:
        uniq.update(seq)
    labels = ["O"] + sorted([x for x in uniq if x != "O"])
    label2id = {l: i for i, l in enumerate(labels)}
    id2label = {i: l for l, i in label2id.items()}
    return labels, label2id, id2label


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_path", type=str, default="data/train.conll")
    parser.add_argument("--output_dir", type=str, default="outputs/bert-base-cased-timeNER")
    parser.add_argument("--model_name", type=str, default="bert-base-cased",
                        help="Backbone (paper used BERT base cased).")
    parser.add_argument("--epochs", type=int, default=3)
    parser.add_argument("--lr", type=float, default=5e-5)
    parser.add_argument("--batch_size", type=int, default=16)
    parser.add_argument("--max_length", type=int, default=256,
                        help="Increase to 512 for longer sentences if needed.")
    parser.add_argument("--label_all_tokens", action="store_true",
                        help="If set, label all wordpieces; default labels only first subword.")
    parser.add_argument("--seed", type=int, default=42)
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)
    set_seed(args.seed)

    # 1) Load training data
    tokens, tags = read_conll_2col(args.train_path)
    labels, label2id, id2label = build_label_maps(tags)
    print(f"Labels: {labels}")

    # 2) Hugging Face tokenizer/model
    tokenizer = AutoTokenizer.from_pretrained(args.model_name, use_fast=True)
    model = AutoModelForTokenClassification.from_pretrained(
        args.model_name,
        num_labels=len(labels),
        id2label=id2label,
        label2id=label2id,
    )

    # 3) Build Dataset
    ds_train = Dataset.from_dict({"tokens": tokens, "ner_tags": tags})

    def encode_batch(batch):
        tokenized = tokenizer(
            batch["tokens"],
            is_split_into_words=True,
            truncation=True,
            max_length=args.max_length,
        )
        aligned_labels = []
        for i, word_labels in enumerate(batch["ner_tags"]):
            word_ids = tokenized.word_ids(batch_index=i)
            prev_word = None
            label_ids = []
            for wid in word_ids:
                if wid is None:
                    label_ids.append(-100)  # ignore in loss
                elif wid != prev_word:
                    label_ids.append(label2id[word_labels[wid]])
                else:
                    # subword piece
                    if args.label_all_tokens:
                        # Optional: convert B- to I- for subsequent pieces if using BIO
                        lab = word_labels[wid]
                        if lab.startswith("B-"):
                            lab = "I-" + lab[2:]
                        label_ids.append(label2id.get(lab, label2id[word_labels[wid]]))
                    else:
                        label_ids.append(-100)
                prev_word = wid
            aligned_labels.append(label_ids)
        tokenized["labels"] = aligned_labels
        return tokenized

    ds_train = ds_train.map(encode_batch, batched=True, remove_columns=["tokens", "ner_tags"])

    # 4) TrainingArguments (no eval)
    training_args = TrainingArguments(
        output_dir=args.output_dir,
        num_train_epochs=args.epochs,
        learning_rate=args.lr,
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        warmup_ratio=0.06,
        weight_decay=0.01,
        logging_steps=50,
        save_steps=1000,  # adjust to your dataset size
        save_total_limit=2,
        fp16=torch.cuda.is_available(),
        report_to="none",
        gradient_accumulation_steps=1,
        seed=args.seed,
    )

    data_collator = DataCollatorForTokenClassification(tokenizer)

    # 5) Train
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds_train,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    trainer.train()

    # 6) Save final model (for reuse in attacks)
    trainer.save_model(args.output_dir)
    tokenizer.save_pretrained(args.output_dir)


if __name__ == "__main__":
    main()
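The heart of encode_batch is the word-piece/label alignment: special tokens receive -100 (ignored by the cross-entropy loss), the first sub-token of each word receives that word's tag id, and the remaining sub-tokens are masked out unless --label_all_tokens is passed. A self-contained sketch of that behaviour on a made-up sentence (the tokens and tags below are illustrative only, not from the dataset):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)
words = ["Checkup", "on", "15th", "October", "2021"]
tags = ["O", "O", "B-T", "I-T", "I-T"]
label2id = {"O": 0, "B-T": 1, "I-T": 2}

enc = tokenizer(words, is_split_into_words=True)
prev, label_ids = None, []
for wid in enc.word_ids():
    if wid is None:              # [CLS] / [SEP]
        label_ids.append(-100)
    elif wid != prev:            # first sub-token of a word gets the word's tag
        label_ids.append(label2id[tags[wid]])
    else:                        # later sub-tokens are masked out of the loss
        label_ids.append(-100)
    prev = wid

print(list(zip(enc.tokens(), label_ids)))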
upload_model.py
ADDED
@@ -0,0 +1,45 @@
# pip install --upgrade huggingface_hub
from huggingface_hub import login, create_repo, upload_folder, HfApi

# === EDIT THESE IF NEEDED ===
REPO_ID = "mdg-nlp/updated-time-ner-bert-base-cased"  # target HF repo
LOCAL_OUTPUTS = r"C:\Users\ASUS_GAMER\Desktop\time-ner\updated-time-ner-bert-base-cased\outputs"  # local folder to upload
USE_DATASETS_SECTION = False  # set True if this repo should be under "Datasets"
# ============================

repo_type = "dataset" if USE_DATASETS_SECTION else "model"

# 1) Auth (or use: login(token="hf_..."))
login()

# 2) Ensure repo exists
create_repo(repo_id=REPO_ID, repo_type=repo_type, exist_ok=True)

# 3) Force re-upload LOCAL_OUTPUTS -> repo path "outputs"
#    delete_patterns="*" clears remote files under "outputs" first so it exactly mirrors your local folder.
print("Uploading. This may take a while for large .safetensors/.bin files...")
upload_folder(
    folder_path=LOCAL_OUTPUTS,
    repo_id=REPO_ID,
    repo_type=repo_type,
    path_in_repo="outputs",
    commit_message="Force re-upload of outputs (weights + tokenizer)",
    delete_patterns="*",
    # If you want to limit what gets uploaded, uncomment:
    # allow_patterns=["*.safetensors", "*.bin", "*.json", "*.txt", "*.model", "*.vocab", "*.merges"],
    # ignore_patterns=["checkpoint*/", "runs/", "logs/", "*.tmp"],
)

print("✅ Upload complete.")

# 4) Verify by listing files now on the repo under /outputs
api = HfApi()
files = api.list_repo_files(REPO_ID, repo_type=repo_type, revision="main")
outputs_files = [f for f in files if f.startswith("outputs/")]

print("\nRemote files under /outputs:")
for f in outputs_files:
    print(" -", f)

if not outputs_files:
    print("⚠️ No files found under /outputs on the remote. Double-check LOCAL_OUTPUTS path and permissions.")
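Because upload_folder pushes the contents of LOCAL_OUTPUTS into the repo path outputs/, the checkpoint does not sit at the repo root; in this repository it lives under outputs/bert-base-cased-timeNER/. A hedged loading sketch for the uploaded copy (the repo id is REPO_ID from the script above; the exact subfolder value depends on what LOCAL_OUTPUTS contains, so verify it against the remote file listing printed in step 4):

from transformers import AutoTokenizer, AutoModelForTokenClassification

repo_id = "mdg-nlp/updated-time-ner-bert-base-cased"
subfolder = "outputs/bert-base-cased-timeNER"  # assumed layout; confirm via the file listing

tokenizer = AutoTokenizer.from_pretrained(repo_id, subfolder=subfolder)
model = AutoModelForTokenClassification.from_pretrained(repo_id, subfolder=subfolder)
print(model.config.id2label)  # expect {0: 'O', 1: 'B-T', 2: 'I-T'}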