Dataset metadata: Modalities: Image, Text · Format: text · Size: < 1K · Libraries: Datasets
Firoj112 committed (verified) · Commit 042770c · 1 Parent(s): 15e1ea1

files update

Files changed (2):
  1. gem_trainer.py +248 -0
  2. requirements.txt +6 -0
gem_trainer.py ADDED
@@ -0,0 +1,248 @@
+ # ================================================
+ # GEM Model Trainer & Evaluator - Callable Version
+ # ================================================
+
+ import torch
+ import torch.quantization
+ from transformers import AutoTokenizer, AutoModel, AutoModelForSequenceClassification, get_linear_schedule_with_warmup
+ from sklearn.cluster import MiniBatchKMeans
+ from torch.utils.data import DataLoader
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from tqdm import tqdm
+ import numpy as np
+
+ def run_gem_pipeline(
+     dataset,
+     model_name="bert-base-uncased",
+     num_classes=77,
+     num_epochs=3,
+     batch_size=16,
+     learning_rate=2e-5,
+     max_seq_length=128,
+     gradient_accum_steps=2,
+     cluster_size=256,
+     threshold=0.65
+ ):
+     """
+     Runs the GEM model training & evaluation pipeline on a custom dataset.
+
+     Args:
+         dataset: HuggingFace DatasetDict or custom dataset (must have 'train' and 'test').
+         model_name: Name of the transformer model.
+         num_classes: Number of output classes.
+         num_epochs: Training epochs.
+         batch_size: Batch size for dataloaders.
+         learning_rate: Learning rate for optimizer.
+         max_seq_length: Max sequence length for tokenizer.
+         gradient_accum_steps: Gradient accumulation steps.
+         cluster_size: Number of clusters for routing.
+         threshold: Routing threshold.
+
+     Returns:
+         dict with 'accuracy': final evaluation accuracy on the test set (percent),
+         and 'average_loss': average training loss over the last epoch.
+     """
+
+     # ========================
+     # Config
+     # ========================
+     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+     hidden_size = 768
+     num_heads = 12
+
+     # ========================
+     # Tokenizer & Dataloaders
+     # ========================
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+     def tokenize_fn(examples):
+         return tokenizer(
+             examples['text'],
+             padding='max_length',
+             truncation=True,
+             max_length=max_seq_length
+         )
+
+     dataset = dataset.map(tokenize_fn, batched=True)
+
+     def collate_fn(batch):
+         return {
+             'input_ids': torch.stack([torch.tensor(x['input_ids']) for x in batch]),
+             'attention_mask': torch.stack([torch.tensor(x['attention_mask']) for x in batch]),
+             'labels': torch.tensor([x['label'] for x in batch])
+         }
+
+     train_loader = DataLoader(
+         dataset['train'],
+         batch_size=batch_size,
+         shuffle=True,
+         collate_fn=collate_fn
+     )
+
+     test_loader = DataLoader(
+         dataset['test'],
+         batch_size=batch_size,
+         collate_fn=collate_fn
+     )
+
+     # ========================
+     # GEM Model (Modular)
+     # ========================
+     class QuantizedBERT(nn.Module):  # BERT encoder wrapped with quantization stubs
+         def __init__(self):
+             super().__init__()
+             self.bert = AutoModel.from_pretrained(model_name)
+             self.quant = torch.quantization.QuantStub()
+             self.dequant = torch.quantization.DeQuantStub()
+
+         def forward(self, input_ids, attention_mask=None):
+             outputs = self.bert(input_ids, attention_mask=attention_mask)
+             return self.dequant(self.quant(outputs.last_hidden_state))
+
+     class TokenRouter(nn.Module):  # clusters token embeddings and gates them by domain confidence
+         def __init__(self):
+             super().__init__()
+             self.clusterer = MiniBatchKMeans(n_clusters=cluster_size)
+             self.W_r = nn.Parameter(torch.randn(num_classes, hidden_size))
+             self.threshold = threshold
+
+         def forward(self, x):
+             cluster_input = x.detach().cpu().numpy().reshape(-1, x.shape[-1])
+             cluster_ids = self.clusterer.fit_predict(cluster_input)
+             cluster_ids = torch.tensor(cluster_ids, device=device).reshape(x.shape[:2])
+
+             domain_logits = torch.einsum('bsh,nh->bsn', x, self.W_r.to(x.device))
+             domain_probs = F.softmax(domain_logits, dim=-1)
+             routing_mask = (domain_probs.max(-1).values > self.threshold).long()
+
+             return domain_probs, routing_mask, cluster_ids
+
+     class SCAR(nn.Module):  # multi-head attention masked by shared cluster / routing assignments
+         def __init__(self):
+             super().__init__()
+             self.num_heads = num_heads
+             self.head_dim = hidden_size // num_heads
+             self.qkv = nn.Linear(hidden_size, 3 * hidden_size)
+             self.out = nn.Linear(hidden_size, hidden_size)
+
+         def create_mask(self, cluster_ids, routing_mask):
+             cluster_mask = (cluster_ids.unsqueeze(-1) == cluster_ids.unsqueeze(-2))
+             domain_mask = (routing_mask.unsqueeze(-1) == routing_mask.unsqueeze(-2))
+             return cluster_mask | domain_mask
+
+         def forward(self, x, cluster_ids, routing_mask):
+             B, N, _ = x.shape
+             qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
+             q, k, v = qkv[0], qkv[1], qkv[2]
+
+             attn = (q @ k.transpose(-2, -1)) / np.sqrt(self.head_dim)
+             mask = self.create_mask(cluster_ids, routing_mask).unsqueeze(1)
+             attn = attn.masked_fill(~mask, -1e9)
+
+             attn = F.softmax(attn, dim=-1)
+             x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
+             return self.out(x)
+
+     class GEM(nn.Module):  # full model: quantized encoder -> router -> masked attention -> classifier
+         def __init__(self):
+             super().__init__()
+             self.bert = QuantizedBERT()
+             self.router = TokenRouter()
+             self.scar = SCAR()
+             self.classifier = nn.Linear(hidden_size, num_classes)
+
+             self.teacher = AutoModelForSequenceClassification.from_pretrained(
+                 model_name, num_labels=num_classes
+             ).eval().to(device).requires_grad_(False)
+
+         def forward(self, input_ids, attention_mask=None):
+             x = self.bert(input_ids, attention_mask)
+             domain_probs, routing_mask, cluster_ids = self.router(x)
+             x = self.scar(x, cluster_ids, routing_mask)
+             return self.classifier(x[:, 0, :])
+
+         def qakp_loss(self, outputs, labels, input_ids):  # task CE + quantization error + distillation from the teacher
+             task_loss = F.cross_entropy(outputs, labels)
+             quant_error = F.mse_loss(self.bert.quant(self.bert.dequant(outputs)), outputs)
+
+             with torch.no_grad():
+                 teacher_logits = self.teacher(input_ids).logits
+
+             kd_loss = F.kl_div(
+                 F.log_softmax(outputs, dim=-1),
+                 F.softmax(teacher_logits, dim=-1),
+                 reduction='batchmean'
+             )
+
+             return task_loss + 0.3 * quant_error + 0.7 * kd_loss
+
+     # ========================
+     # Training Setup
+     # ========================
+     model = GEM().to(device)
+
+     if torch.cuda.device_count() > 1:
+         model = nn.DataParallel(model, device_ids=list(range(torch.cuda.device_count())))
+
+     optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
+     scheduler = get_linear_schedule_with_warmup(
+         optimizer,
+         num_warmup_steps=100,
+         num_training_steps=len(train_loader) * num_epochs
+     )
+
+     # ========================
+     # Training Loop
+     # ========================
+     model.train()
+     avg_loss = 0
+
+     for epoch in range(num_epochs):
+         total_loss = 0
+
+         for step, batch in enumerate(tqdm(train_loader)):
+             input_ids = batch['input_ids'].to(device)
+             attention_mask = batch['attention_mask'].to(device)
+             labels = batch['labels'].to(device)
+
+             outputs = model(input_ids, attention_mask)
+             loss = model.module.qakp_loss(outputs, labels, input_ids) if hasattr(model, 'module') else model.qakp_loss(outputs, labels, input_ids)
+
+             (loss / gradient_accum_steps).backward()  # scale the loss so accumulated gradients average over the effective batch
+
+             if (step + 1) % gradient_accum_steps == 0:
+                 optimizer.step()
+                 scheduler.step()
+                 optimizer.zero_grad()
+
+             total_loss += loss.item()
+
+         avg_loss = total_loss / len(train_loader)
+         print(f"Epoch {epoch+1}/{num_epochs} | Avg Loss: {avg_loss:.4f}")
+
+     # ========================
+     # Evaluation Loop
+     # ========================
+     model.eval()
+     correct = total = 0
+
+     with torch.no_grad():
+         for batch in tqdm(test_loader):
+             input_ids = batch['input_ids'].to(device)
+             attention_mask = batch['attention_mask'].to(device)
+             labels = batch['labels'].to(device)
+
+             outputs = model(input_ids, attention_mask)
+             preds = outputs.argmax(dim=-1)
+
+             correct += (preds == labels).sum().item()
+             total += labels.size(0)
+
+     final_accuracy = 100 * correct / total
+     print(f"Final Accuracy: {final_accuracy:.2f}%")
+
+     return {
+         'accuracy': final_accuracy,
+         'average_loss': avg_loss
+     }
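Example usage (illustrative, not part of this commit): a minimal sketch of calling run_gem_pipeline on a Hugging Face DatasetDict. The banking77 dataset is assumed here only because its 77 intent labels and 'text'/'label' columns line up with the function's defaults; any DatasetDict with 'train' and 'test' splits and those columns should work.

from datasets import load_dataset
from gem_trainer import run_gem_pipeline

# Assumed example dataset: banking77 (77 intent classes, 'text'/'label' columns, train/test splits).
dataset = load_dataset("banking77")

# Runs tokenization, training, and evaluation; returns {'accuracy': ..., 'average_loss': ...}.
results = run_gem_pipeline(
    dataset,
    model_name="bert-base-uncased",
    num_classes=77,
    num_epochs=3,
    batch_size=16,
)
print(results)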
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ torch
+ transformers
+ scikit-learn
+ tqdm
+ numpy
+ datasets
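Setup note (an assumption, not documented in the commit): the dependencies are unpinned, so a typical install is pip install -r requirements.txt, with a CUDA-enabled torch build if the GPU/DataParallel path in gem_trainer.py is to be used.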