Delete app.py
app.py
DELETED
@@ -1,532 +0,0 @@
#!/usr/bin/env python3
"""
app.py – Quranic Data Training Pipeline Endpoint for ZeroGPU Spaces
--------------------------------------------------------------------
This script integrates a full Quranic data processing and training pipeline
into a Gradio interface endpoint. It is optimized for CPU-based training on
Hugging Face ZeroGPU (using the Gradio SDK) and uses chunked incremental
training, memory management, and gradient checkpointing to efficiently update
Google's Gemma 2 2B model with Quranic data.

Requirements:
  - Hugging Face Transformers, Gradio, PyTorch, psutil
  - Hugging Face PRO account with ZeroGPU enabled (add your HF token as a
    secret named HF_TOKEN in your Space settings)
  - A Linux (Ubuntu) CPU environment with access to ZeroGPU hardware via Spaces
  - Source data in the "source_files" directory
  - Sufficient storage in "working_directory"

Author: [M-Saddam Hussain]
Date: February 2025
Data References: [Tanzil.net, IslamSource, QuranicCorpus]
"""

import json
import logging
import os
import sys
import traceback
import gc
import time
import psutil
import math
from datetime import datetime
from typing import Dict, List, Optional
from dataclasses import dataclass, asdict

import torch
# Limit PyTorch threads for CPU stability.
torch.set_num_threads(8)

from torch.utils.data import Dataset
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling
)
from threading import Lock

# Import Gradio and the spaces module for ZeroGPU.
import gradio as gr
import spaces

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('pipeline.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

def manage_memory(threshold_percent: int = 90, min_available_mb: int = 500, sleep_duration: int = 10):
    """
    Check memory usage; if usage is high or available memory is low,
    force garbage collection and sleep briefly.
    """
    vm = psutil.virtual_memory()
    used_percent = vm.percent
    available_mb = vm.available / (1024 * 1024)
    logger.info(f"Memory usage: {used_percent}% used, {available_mb:.2f} MB available")
    if used_percent > threshold_percent or available_mb < min_available_mb:
        logger.warning("High memory usage detected, forcing garbage collection and sleeping...")
        gc.collect()
        time.sleep(sleep_duration)

@dataclass
class WordAnalysis:
    """Structured representation of word-level analysis."""
    arabic: str
    translation: str
    position: str
    morphology: Dict
    features: List[str]
    root: str
    location: str
    metadata: Dict

@dataclass
class VerseData:
    """Structured representation of verse-level data."""
    chapter: int
    verse: int
    arabic_text: str
    translation: str
    words: List[WordAnalysis]
    metadata: Dict

class QuranicDataset(Dataset):
    """Custom dataset for Quranic text training."""
    def __init__(self, processed_data: List[Dict], tokenizer):
        self.examples = []
        self.tokenizer = tokenizer
        for verse_data in processed_data:
            self.examples.extend(self._create_training_examples(verse_data))

    def _create_training_examples(self, verse_data: Dict) -> List[Dict]:
        examples = []
        text_block = (
            f"[VERSE {verse_data['chapter']}:{verse_data['verse']}]\n"
            f"Arabic: {verse_data['arabic_text']}\n"
            f"Translation: {verse_data['translation']}\n"
            "Morphological Analysis:\n"
        )
        for word in verse_data['words']:
            text_block += (
                f"[WORD] {word['arabic']}\n"
                f"Root: {word['root']}\n"
                f"Features: {', '.join(word['features'])}\n"
            )
        examples.append(self._format_example(text_block))
        return examples

    def _format_example(self, text: str) -> Dict:
        encodings = self.tokenizer(
            text,
            truncation=True,
            max_length=64,  # Reduced length to lower memory usage.
            padding="max_length",
            return_tensors="pt"
        )
        return {
            "input_ids": encodings["input_ids"][0],
            "attention_mask": encodings["attention_mask"][0]
        }

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, idx):
        return self.examples[idx]

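# A minimal illustration (placeholder values, inferred from the formatting
# logic above) of the single text block _create_training_examples serializes
# for each verse before tokenization:
#
#   [VERSE 1:1]
#   Arabic: <verse text>
#   Translation: <verse translation>
#   Morphological Analysis:
#   [WORD] <word form>
#   Root: <root>
#   Features: <feature 1>, <feature 2>, ...
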
class QuranicDataProcessor:
    """Processes Quranic data into structured training examples."""
    def __init__(self, source_dir: str, output_dir: str):
        self.source_dir = source_dir
        self.output_dir = output_dir
        self.morphological_data: Dict[str, Dict] = {}
        self.word_by_word_data: Dict[str, List[str]] = {}
        self.translation_data: Dict[str, str] = {}
        self.processed_verses = set()
        self.processing_lock = Lock()
        os.makedirs(output_dir, exist_ok=True)
        os.makedirs(os.path.join(output_dir, 'json'), exist_ok=True)
        os.makedirs(os.path.join(output_dir, 'txt'), exist_ok=True)
        os.makedirs(os.path.join(output_dir, 'checkpoints'), exist_ok=True)
        logger.info(f"Initialized processor with source dir: {source_dir}")

    def load_source_files(self) -> bool:
        """Loads morphological, translation, and word-by-word data."""
        try:
            logger.info("Loading morphological data...")
            morph_path = os.path.join(self.source_dir, 'quranic-corpus-morphology-0.4.txt')
            with open(morph_path, 'r', encoding='utf-8') as f:
                next(f)  # Skip the header line.
                for line in f:
                    if line.strip() and not line.startswith('#'):
                        parts = line.strip().split('\t')
                        if len(parts) >= 4:
                            location = parts[0].strip('()')
                            self.morphological_data[location] = {
                                'form': parts[1],
                                'tag': parts[2],
                                'features': parts[3]
                            }
            logger.info(f"Loaded {len(self.morphological_data)} morphological entries")
            logger.info("Loading translation data...")
            trans_path = os.path.join(self.source_dir, 'en.sample.quran-maududi.txt')
            with open(trans_path, 'r', encoding='utf-8') as f:
                next(f)  # Skip the header line.
                for line in f:
                    if line.strip():
                        parts = line.strip().split('|')
                        if len(parts) >= 3:
                            key = f"{parts[0]}:{parts[1]}"
                            self.translation_data[key] = parts[2].strip()
            logger.info(f"Loaded {len(self.translation_data)} verse translations")
            logger.info("Loading word-by-word data...")
            word_path = os.path.join(self.source_dir, 'en.w4w.qurandev.txt')
            with open(word_path, 'r', encoding='utf-8-sig') as f:
                lines = [line.strip() for line in f if line.strip()]
            sorted_keys = sorted(self.translation_data.keys(), key=lambda x: (int(x.split(':')[0]), int(x.split(':')[1])))
            if len(lines) != len(sorted_keys):
                logger.warning("Mismatch between word-by-word file and translation data")
            for i, verse_key in enumerate(sorted_keys):
                if i < len(lines):
                    words = [w.strip() for w in lines[i].split('|') if w.strip()]
                    self.word_by_word_data[verse_key] = words
            logger.info(f"Loaded word-by-word data for {len(self.word_by_word_data)} verses")
            return True
        except Exception as e:
            logger.error(f"Error loading source files: {str(e)}")
            logger.error(traceback.format_exc())
            return False

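    # Expected source line shapes, inferred from the parsing above
    # (illustrative, abbreviated):
    #   quranic-corpus-morphology-0.4.txt -> "(1:1:1:1)<TAB>form<TAB>tag<TAB>feat1|feat2|..."
    #   en.sample.quran-maududi.txt       -> "chapter|verse|translation text"
    #   en.w4w.qurandev.txt               -> "word1|word2|..." (one line per verse)
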
    def process_verse(self, chapter: int, verse: int) -> Optional[VerseData]:
        """Processes a single verse into structured format."""
        try:
            verse_ref = f"{chapter}:{verse}"
            logger.info(f"Processing verse {verse_ref}")
            translation = self.translation_data.get(verse_ref)
            if not translation:
                logger.warning(f"No translation for verse {verse_ref}")
                return None
            verse_word_list = self.word_by_word_data.get(verse_ref, [])
            if not verse_word_list:
                logger.warning(f"No word-by-word data for verse {verse_ref}")
                return None
            verse_words: List[WordAnalysis] = []
            arabic_text = ""
            for pos in range(1, len(verse_word_list) + 1):
                # Morphology locations are "chapter:verse:word:segment"; the
                # trailing colon matches every segment of the current word.
                pattern = f"{chapter}:{verse}:{pos}:"
                matching_entries = [data for loc, data in self.morphological_data.items() if loc.startswith(pattern)]
                if not matching_entries:
                    logger.debug(f"No morphological data for {pattern}")
                    continue
                combined_form = " ".join(entry['form'] for entry in matching_entries)
                combined_features = []
                root = ""
                for entry in matching_entries:
                    features = entry['features'].split('|')
                    combined_features.extend(features)
                    if not root:
                        for f in features:
                            if 'ROOT:' in f:
                                root = f.split('ROOT:')[1]
                                break
                word_translation = verse_word_list[pos - 1]
                word = WordAnalysis(
                    arabic=combined_form,
                    translation=word_translation,
                    position=str(pos),
                    morphology=matching_entries[0],
                    features=combined_features,
                    root=root,
                    location=f"{chapter}:{verse}:{pos}",
                    metadata={}
                )
                verse_words.append(word)
                arabic_text += f" {combined_form}"
            verse_data = VerseData(
                chapter=chapter,
                verse=verse,
                arabic_text=arabic_text.strip(),
                translation=translation,
                words=verse_words,
                metadata={
                    "processed_timestamp": datetime.now().isoformat(),
                    "word_count": len(verse_words)
                }
            )
            self._save_verse_data(verse_data)
            return verse_data
        except Exception as e:
            logger.error(f"Error processing verse {chapter}:{verse}: {str(e)}")
            logger.error(traceback.format_exc())
            return None

    def _save_verse_data(self, verse_data: VerseData):
        """Saves processed verse data as JSON and TXT."""
        try:
            verse_ref = f"{verse_data.chapter}:{verse_data.verse}"
            json_path = os.path.join(self.output_dir, 'json', f'verse_{verse_ref.replace(":", "_")}.json')
            with open(json_path, 'w', encoding='utf-8') as f:
                json.dump(asdict(verse_data), f, ensure_ascii=False, indent=2)
            txt_path = os.path.join(self.output_dir, 'txt', f'verse_{verse_ref.replace(":", "_")}.txt')
            with open(txt_path, 'w', encoding='utf-8') as f:
                f.write(f"=== Verse {verse_ref} ===\n\n")
                f.write(f"Arabic Text:\n{verse_data.arabic_text}\n\n")
                f.write(f"Translation:\n{verse_data.translation}\n\n")
                f.write("Word Analysis:\n")
                for i, word in enumerate(verse_data.words, 1):
                    f.write(f"\nWord {i}:\n")
                    f.write(f"  Arabic: {word.arabic}\n")
                    f.write(f"  Translation: {word.translation}\n")
                    f.write(f"  Root: {word.root}\n")
                    f.write("  Features:\n")
                    for feature in word.features:
                        f.write(f"    - {feature}\n")
                    f.write("\n")
            logger.info(f"Saved verse data to {json_path} and {txt_path}")
        except Exception as e:
            logger.error(f"Error saving verse data: {str(e)}")
            logger.error(traceback.format_exc())

class QuranicModelTrainer:
    """Trains the Gemma 2 2B model on Quranic data using chunked incremental updates."""
    def __init__(self,
                 model_name: str = "google/gemma-2-2b",
                 processed_data_dir: str = "processed_data",
                 checkpoint_dir: str = "checkpoints"):
        self.processed_data_dir = processed_data_dir
        self.checkpoint_dir = checkpoint_dir
        self.device = "cpu"  # Training on CPU; ZeroGPU will handle GPU access.
        logger.info("Loading tokenizer and model...")
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            use_auth_token=os.environ.get("HF_TOKEN"),
            additional_special_tokens=["[VERSE]", "[WORD]", "[ROOT]", "[FEATURES]"],
            trust_remote_code=True
        )
        self.tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            use_auth_token=os.environ.get("HF_TOKEN"),
            torch_dtype=torch.float32,
            low_cpu_mem_usage=True,
            trust_remote_code=True
        )
        # Resize embeddings to cover the newly added special tokens.
        self.model.resize_token_embeddings(len(self.tokenizer))
        self.model.train()
        # Disable caching and enable gradient checkpointing for memory savings.
        self.model.config.use_cache = False
        self.model.gradient_checkpointing_enable()

    def prepare_training_data(self, chapter_data: List[Dict]) -> Dataset:
        """Creates a QuranicDataset from processed chapter data."""
        return QuranicDataset(chapter_data, self.tokenizer)

    def train_chapter(self,
                      chapter_num: int,
                      processed_verses: List[Dict],
                      chunk_size: int = 10,
                      num_train_epochs: int = 10,
                      per_device_train_batch_size: int = 1,
                      learning_rate: float = 3e-5,
                      weight_decay: float = 0.01,
                      gradient_accumulation_steps: int = 64) -> bool:
        """
        Splits chapter data into chunks and trains incrementally to reduce memory usage.
        """
        try:
            total_examples = len(processed_verses)
            total_chunks = math.ceil(total_examples / chunk_size)
            logger.info(f"Chapter {chapter_num}: {total_examples} examples, {total_chunks} chunks.")
            for chunk_index in range(total_chunks):
                chunk_data = processed_verses[chunk_index * chunk_size: (chunk_index + 1) * chunk_size]
                dataset = self.prepare_training_data(chunk_data)
                chunk_output_dir = os.path.join(self.checkpoint_dir, f"chapter_{chapter_num}", f"chunk_{chunk_index}")
                os.makedirs(chunk_output_dir, exist_ok=True)
                training_args = TrainingArguments(
                    output_dir=chunk_output_dir,
                    overwrite_output_dir=True,
                    num_train_epochs=num_train_epochs,
                    per_device_train_batch_size=per_device_train_batch_size,
                    learning_rate=learning_rate,
                    weight_decay=weight_decay,
                    gradient_accumulation_steps=gradient_accumulation_steps,
                    fp16=False,
                    remove_unused_columns=False,
                    logging_steps=50,
                    report_to="none",
                    evaluation_strategy="no",
                    no_cuda=True,
                    dataloader_num_workers=0,
                    dataloader_pin_memory=False
                )
                data_collator = DataCollatorForLanguageModeling(
                    tokenizer=self.tokenizer,
                    mlm=False
                )
                trainer = Trainer(
                    model=self.model,
                    args=training_args,
                    train_dataset=dataset,
                    tokenizer=self.tokenizer,
                    data_collator=data_collator
                )
                logger.info(f"Training chunk {chunk_index + 1}/{total_chunks} for Chapter {chapter_num}...")
                trainer.train()
                trainer.save_model(chunk_output_dir)
                del trainer, dataset
                gc.collect()
                manage_memory()
            logger.info(f"Completed training for Chapter {chapter_num}")
            return True
        except Exception as e:
            logger.error(f"Error training chapter {chapter_num}: {str(e)}")
            logger.error(traceback.format_exc())
            return False

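# Note on the defaults above: with per_device_train_batch_size=1 and
# gradient_accumulation_steps=64, gradients from up to 64 examples are
# accumulated before each optimizer step; since a default chunk holds only
# ~10 examples, the epoch ends first and the weights update roughly once per
# epoch, while peak memory stays near that of a single example.
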
class QuranicPipeline:
    """Integrates data processing and incremental model training for all chapters."""
    def __init__(self,
                 source_dir: str = os.path.abspath(os.path.dirname(__file__)),
                 working_dir: str = "working_directory",
                 start_chapter: int = 1,
                 end_chapter: int = 114):
        self.source_dir = source_dir
        self.working_dir = working_dir
        self.start_chapter = start_chapter
        self.end_chapter = end_chapter
        self.setup_directories()
        global logger
        logger = logging.getLogger(__name__)
        self.state = {
            "last_processed_chapter": 0,
            "last_trained_chapter": 0,
            "current_state": "initialized",
            "errors": [],
            "start_time": datetime.now().isoformat()
        }
        self.load_state()
        try:
            logger.info("Initializing Quranic Data Processor...")
            self.processor = QuranicDataProcessor(
                source_dir=self.source_dir,
                output_dir=os.path.join(self.working_dir, "processed_data")
            )
            logger.info("Initializing Quranic Model Trainer...")
            self.trainer = QuranicModelTrainer(
                model_name="google/gemma-2-2b",
                processed_data_dir=os.path.join(self.working_dir, "processed_data"),
                checkpoint_dir=os.path.join(self.working_dir, "checkpoints")
            )
            self.state["current_state"] = "ready"
            self.save_state()
        except Exception as e:
            self.handle_error("Initialization failed", e)
            raise

    def setup_directories(self):
        dirs = [
            self.working_dir,
            os.path.join(self.working_dir, "processed_data"),
            os.path.join(self.working_dir, "checkpoints"),
            os.path.join(self.working_dir, "logs"),
            os.path.join(self.working_dir, "state")
        ]
        for d in dirs:
            os.makedirs(d, exist_ok=True)

    def load_state(self):
        state_file = os.path.join(self.working_dir, "state", "pipeline_state.json")
        if os.path.exists(state_file):
            try:
                with open(state_file, 'r') as f:
                    saved_state = json.load(f)
                self.state.update(saved_state)
                logger.info(f"Loaded previous state: last processed chapter {self.state.get('last_processed_chapter')}, "
                            f"last trained chapter {self.state.get('last_trained_chapter')}")
            except Exception as e:
                logger.warning(f"Could not load previous state: {str(e)}")

    def save_state(self):
        state_file = os.path.join(self.working_dir, "state", "pipeline_state.json")
        with open(state_file, 'w') as f:
            json.dump(self.state, f, indent=2)

    def handle_error(self, context: str, error: Exception):
        error_detail = {
            "timestamp": datetime.now().isoformat(),
            "context": context,
            "error": str(error),
            "traceback": traceback.format_exc()
        }
        self.state.setdefault("errors", []).append(error_detail)
        logger.error(f"{context}: {str(error)}")
        self.save_state()

    def run_pipeline(self):
        """Runs processing and training for chapters sequentially."""
        logger.info("Starting pipeline execution")
        try:
            if not self.processor.load_source_files():
                raise Exception("Failed to load source files")
            for chapter in range(self.start_chapter, self.end_chapter + 1):
                logger.info(f"=== Processing Chapter {chapter} ===")
                processed_chapter_data = []
                verse = 1
                # Process verses sequentially; the first missing verse ends the chapter.
                while True:
                    verse_data = self.processor.process_verse(chapter, verse)
                    if verse_data is None:
                        break
                    processed_chapter_data.append(asdict(verse_data))
                    verse += 1
                if processed_chapter_data:
                    success = self.trainer.train_chapter(chapter, processed_chapter_data)
                    if not success:
                        logger.error(f"Training failed for Chapter {chapter}. Stopping pipeline.")
                        break
                    self.state["last_trained_chapter"] = chapter
                    self.save_state()
                else:
                    logger.warning(f"No processed data for Chapter {chapter}")
                self.state["last_processed_chapter"] = chapter
                self.save_state()
                manage_memory()
            logger.info("Pipeline execution completed")
        except Exception as e:
            self.handle_error("Pipeline execution failed", e)
            raise

# Define a Gradio endpoint function that triggers the training pipeline.
@spaces.GPU()  # Request ZeroGPU hardware.
def start_pipeline():
    try:
        pipeline = QuranicPipeline(
            source_dir="source_files",
            working_dir="working_directory",
            start_chapter=1,
            end_chapter=114
        )
        pipeline.run_pipeline()
        return "Pipeline execution completed successfully."
    except Exception as e:
        return f"Pipeline execution failed: {str(e)}"

# Create a Gradio Interface with no inputs and a text output.
iface = gr.Interface(
    fn=start_pipeline,
    inputs=[],
    outputs="text",
    title="Quranic Training Pipeline",
    description="Click 'Submit' to trigger the Quranic data processing and training pipeline on ZeroGPU."
)

if __name__ == "__main__":
    iface.launch()
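# A minimal sketch of triggering the endpoint remotely with gradio_client,
# assuming a hypothetical Space id; "/predict" is the default api_name for a
# single-function gr.Interface:
#
#   from gradio_client import Client
#   client = Client("username/quranic-training-pipeline")  # hypothetical Space id
#   print(client.predict(api_name="/predict"))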