Upload utils.py
utils.py (ADDED)
import os
import zipfile

import findfile
import requests
import torch
from omnigenbench import (
    ClassificationMetric,
    OmniTokenizer,
    OmniModelForSequenceClassification,
    OmniDatasetForSequenceClassification,
    Trainer,
)
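# The imports above assume these distributions are installed, e.g.
#   pip install omnigenbench findfile requests torch
# (distribution names are assumed to match the import names).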

def download_te_dataset(local_dir):
    """Download and extract the translation-efficiency dataset into
    `local_dir`, skipping any step whose output already exists."""
    if not findfile.find_cwd_dir(local_dir, disable_alert=True):
        os.makedirs(local_dir, exist_ok=True)
        url_to_download = "https://huggingface.co/datasets/yangheng/translation_efficiency_prediction/resolve/main/translation_efficiency_prediction.zip"
        zip_path = os.path.join(local_dir, "te_rice_dataset.zip")
        if not os.path.exists(zip_path):
            print(f"Downloading te_rice_dataset.zip from {url_to_download}...")
            response = requests.get(url_to_download, stream=True)
            response.raise_for_status()

            with open(zip_path, "wb") as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
            print(f"Downloaded {zip_path}")

        # Unzip the dataset if the zip file exists
        ZIP_DATASET = findfile.find_cwd_file("te_rice_dataset.zip")
        if ZIP_DATASET:
            with zipfile.ZipFile(ZIP_DATASET, "r") as zip_ref:
                zip_ref.extractall(local_dir)
            print(f"Extracted te_rice_dataset.zip into {local_dir}")
            os.remove(ZIP_DATASET)
        else:
            print("te_rice_dataset.zip not found. Skipping extraction.")
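# Note: the extracted archive is assumed to contain train/valid/test split
# files inside `local_dir`; the exact filenames depend on the dataset release
# and are passed explicitly to run_finetuning() below.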

class TEClassificationDataset(OmniDatasetForSequenceClassification):
    def __init__(self, data_source, tokenizer, max_length, **kwargs):
        super().__init__(data_source, tokenizer, max_length, **kwargs)

    def prepare_input(self, instance, **kwargs):
        sequence, labels = instance["sequence"], instance["label"]

        tokenized_inputs = self.tokenizer(
            sequence,
            padding=True,
            truncation=True,
            max_length=self.max_length,
            return_tensors="pt",
            **kwargs,
        )
        # Remove the batch dimension that gets added by return_tensors="pt"
        for col in tokenized_inputs:
            tokenized_inputs[col] = tokenized_inputs[col].squeeze(0)

        if labels is not None:
            # Map the raw label through label2id; unknown labels fall back to
            # -100 so the loss and the metric ignore them.
            label_id = self.label2id.get(str(labels), -100)
            tokenized_inputs["labels"] = torch.tensor(label_id, dtype=torch.long)

        return tokenized_inputs
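# A raw instance is expected to look like {"sequence": "ACGU...", "label": "1"}
# (inferred from prepare_input above), so label2id must be keyed by the string
# form of each label, e.g. {"0": 0, "1": 1}.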

def run_finetuning(
    model_name,
    train_file,
    valid_file,
    test_file,
    label2id,
    epochs,
    learning_rate,
    weight_decay,
    batch_size,
    max_length,
    seed,
):
    """
    Runs the full TE classification analysis pipeline.
    """
    # 1. Model & Tokenizer Initialization
    tokenizer = OmniTokenizer.from_pretrained(model_name, trust_remote_code=True)
    ssp_model = OmniModelForSequenceClassification(
        model_name,
        tokenizer=tokenizer,
        label2id=label2id,
        trust_remote_code=True,
    )
    print(f"Model '{model_name}' and tokenizer loaded successfully.")

    # 2. Data Loading & Preparation
    train_set = TEClassificationDataset(
        data_source=train_file, tokenizer=tokenizer, label2id=label2id, max_length=max_length
    )
    valid_set = TEClassificationDataset(
        data_source=valid_file, tokenizer=tokenizer, label2id=label2id, max_length=max_length
    )
    test_set = TEClassificationDataset(
        data_source=test_file, tokenizer=tokenizer, label2id=label2id, max_length=max_length
    )

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size)
    print("Datasets and DataLoaders created.")

    # 3. Training & Evaluation Setup
    compute_metrics = [ClassificationMetric(ignore_y=-100, average="macro").f1_score]
    optimizer = torch.optim.AdamW(ssp_model.parameters(), lr=learning_rate, weight_decay=weight_decay)

    trainer = Trainer(
        model=ssp_model,
        train_loader=train_loader,
        eval_loader=valid_loader,
        test_loader=test_loader,
        batch_size=batch_size,
        epochs=epochs,
        optimizer=optimizer,
        compute_metrics=compute_metrics,
        seeds=seed,
    )

    # 4. Run Training
    metrics = trainer.train()
    trainer.save_model("finetuned_te_model")
    print("Training completed!")

    return metrics
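
# ---------------------------------------------------------------------------
# Minimal usage sketch. The checkpoint name, split filenames, and
# hyperparameters below are illustrative placeholders, not values shipped with
# this file; substitute the actual checkpoint and the files found in the
# extracted dataset directory.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    download_te_dataset("translation_efficiency_prediction")
    metrics = run_finetuning(
        model_name="yangheng/OmniGenome-52M",  # placeholder checkpoint name
        train_file="translation_efficiency_prediction/train.json",  # placeholder path
        valid_file="translation_efficiency_prediction/valid.json",  # placeholder path
        test_file="translation_efficiency_prediction/test.json",  # placeholder path
        label2id={"0": 0, "1": 1},  # string-keyed, as prepare_input expects
        epochs=3,
        learning_rate=2e-5,
        weight_decay=0.01,
        batch_size=8,
        max_length=512,
        seed=42,
    )
    print(metrics)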