import os
import zipfile

import findfile
import requests
import torch
from omnigenbench import (
    ClassificationMetric,
    OmniTokenizer,
    OmniModelForSequenceClassification,
    OmniDatasetForSequenceClassification,
    Trainer,
)


def download_te_dataset(local_dir):
    """Download and extract the translation efficiency dataset into ``local_dir``."""
    if not findfile.find_cwd_dir(local_dir, disable_alert=True):
        os.makedirs(local_dir, exist_ok=True)
    url_to_download = "https://huggingface.co/datasets/yangheng/translation_efficiency_prediction/resolve/main/translation_efficiency_prediction.zip"
    zip_path = os.path.join(local_dir, "te_rice_dataset.zip")
    if not os.path.exists(zip_path):
        print(f"Downloading te_rice_dataset.zip from {url_to_download}...")
        response = requests.get(url_to_download, stream=True)
        response.raise_for_status()

        with open(zip_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
        print(f"Downloaded {zip_path}")

    # Unzip the dataset if the zip file exists
    ZIP_DATASET = findfile.find_cwd_file("te_rice_dataset.zip")
    if ZIP_DATASET:
        with zipfile.ZipFile(ZIP_DATASET, 'r') as zip_ref:
            zip_ref.extractall(local_dir)
        print(f"Extracted te_rice_dataset.zip into {local_dir}")
        os.remove(ZIP_DATASET)
    else:
        print("te_rice_dataset.zip not found. Skipping extraction.")


class TEClassificationDataset(OmniDatasetForSequenceClassification):
    def __init__(self, data_source, tokenizer, max_length, **kwargs):
        super().__init__(data_source, tokenizer, max_length, **kwargs)

    def prepare_input(self, instance, **kwargs):
        sequence, labels = instance["sequence"], instance["label"]

        tokenized_inputs = self.tokenizer(
            sequence,
            padding=True,
            truncation=True,
            max_length=self.max_length,
            return_tensors="pt",
            **kwargs
        )
        tokenized_inputs["labels"] = torch.tensor(int(labels), dtype=torch.long)
        # Remove the batch dimension that gets added by return_tensors="pt"
        for col in tokenized_inputs:
            tokenized_inputs[col] = tokenized_inputs[col].squeeze(0)

        if labels is not None:
            label_id = self.label2id.get(str(labels), -100)
            tokenized_inputs["labels"] = torch.tensor(label_id, dtype=torch.long)

        return tokenized_inputs


def run_finetuning(
    model_name,
    train_file,
    valid_file,
    test_file,
    label2id,
    epochs,
    learning_rate,
    weight_decay,
    batch_size,
    max_length,
    seed,
):
    """Runs the full TE classification analysis pipeline."""
    # 1. Model & Tokenizer Initialization
    tokenizer = OmniTokenizer.from_pretrained(model_name, trust_remote_code=True)
    ssp_model = OmniModelForSequenceClassification(
        model_name,
        tokenizer=tokenizer,
        label2id=label2id,
        trust_remote_code=True,
    )
    print(f"Model '{model_name}' and tokenizer loaded successfully.")

    # 2. Data Loading & Preparation
    train_set = TEClassificationDataset(data_source=train_file, tokenizer=tokenizer, label2id=label2id, max_length=max_length)
    valid_set = TEClassificationDataset(data_source=valid_file, tokenizer=tokenizer, label2id=label2id, max_length=max_length)
    test_set = TEClassificationDataset(data_source=test_file, tokenizer=tokenizer, label2id=label2id, max_length=max_length)

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size)
    print("Datasets and DataLoaders created.")

    # 3. Training & Evaluation Setup
    compute_metrics = [ClassificationMetric(ignore_y=-100, average="macro").f1_score]
    optimizer = torch.optim.AdamW(ssp_model.parameters(), lr=learning_rate, weight_decay=weight_decay)

    trainer = Trainer(
        model=ssp_model,
        train_loader=train_loader,
        eval_loader=valid_loader,
        test_loader=test_loader,
        batch_size=batch_size,
        epochs=epochs,
        optimizer=optimizer,
        compute_metrics=compute_metrics,
        seeds=seed,
    )

    # 4. Run Training
    metrics = trainer.train()
    trainer.save_model("finetuned_te_model")
    print("Training completed!")

    return metrics
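

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original script): the checkpoint name,
    # split file names, label map, and hyperparameters below are illustrative
    # assumptions only; adjust them to the extracted dataset layout and your
    # compute budget.
    local_dir = "translation_efficiency_prediction"
    download_te_dataset(local_dir)

    metrics = run_finetuning(
        model_name="yangheng/OmniGenome-52M",  # assumed checkpoint; any compatible model works
        train_file=os.path.join(local_dir, "train.json"),  # assumed split file names
        valid_file=os.path.join(local_dir, "valid.json"),
        test_file=os.path.join(local_dir, "test.json"),
        label2id={"0": 0, "1": 1},  # assumed binary low/high TE labels
        epochs=3,
        learning_rate=2e-5,
        weight_decay=0.01,
        batch_size=8,
        max_length=512,
        seed=42,
    )
    print(metrics)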