satyaiyer committed on
Commit
4b313d4
·
verified ·
1 Parent(s): 4e71c8f

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -57
app.py DELETED
@@ -1,57 +0,0 @@
1
# Module-level setup for the translation-QE Gradio demo.
import os
import pandas as pd
import gradio as gr
from llama_cpp import Llama

# Path to GGUF file (quantized LLaMA-3-8B-Instruct weights on local disk).
MODEL_PATH = "./models/llama-3-8b-instruct.Q4_K_M.gguf"

# Load quantized model with llama-cpp.
# NOTE(review): runs at import time — the process fails to start if the
# GGUF file is missing at MODEL_PATH; verify deployment bundles the model.
llm = Llama(
    model_path=MODEL_PATH,
    n_ctx=2048,      # context window, in tokens
    n_threads=8,     # CPU threads used for inference
    verbose=False
)
16
-
17
def build_prompt(source, translation):
    """Assemble the LLaMA-3 chat prompt asking the model to rate a translation.

    Args:
        source: Original-language sentence.
        translation: Candidate translation to be scored.

    Returns:
        The full prompt string, ending with the assistant turn marker so the
        model's next tokens are the score itself.
    """
    header = "<|begin_of_text|><|system|>\n"
    system_turn = (
        "You are a helpful assistant that evaluates translation quality. "
        "Score the quality from 0 (worst) to 1 (best).\n"
    )
    user_turn = (
        "<|user|>\n"
        f"Original: {source}\nTranslation: {translation}\n"
        "How good is the translation?\n<|assistant|>"
    )
    return header + system_turn + user_turn
26
-
27
def estimate_score(source, translation):
    """Query the local LLM for a translation-quality score.

    Args:
        source: Original-language sentence.
        translation: Candidate translation to score.

    Returns:
        The last numeric token of the model's reply as a float rounded to
        3 decimals, or the string "N/A" when the reply contains no number.
    """
    prompt = build_prompt(source, translation)
    # Short completion: we only need a number back.
    output = llm(prompt, max_tokens=10, stop=["</s>", "\n"])
    text = output["choices"][0]["text"].strip()

    # Pull the last numeric-looking token (handles replies like "Score: 0.8").
    numeric_tokens = [s for s in text.split() if s.replace('.', '', 1).isdigit()]
    try:
        score = round(float(numeric_tokens[-1]), 3)
    except (IndexError, ValueError):
        # No parsable number — keep the original "N/A" sentinel rather than
        # aborting the batch. (Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; now only parse failures are caught.)
        score = "N/A"
    return score
38
-
39
def process_file(file):
    """Score every row of an uploaded TSV and append a `predicted_score` column.

    Args:
        file: Uploaded file — either a tempfile-like object with a `.name`
            path attribute (older Gradio) or a plain filepath string
            (newer Gradio). Must be a TSV with 'original' and
            'translation' columns.

    Returns:
        The input DataFrame with one extra column, `predicted_score`
        (float or "N/A" per row).
    """
    # Accept both Gradio file-object and plain-path calling conventions.
    path = file if isinstance(file, str) else file.name
    df = pd.read_csv(path, sep="\t")
    # One model call per row; comprehension replaces the manual append loop.
    df["predicted_score"] = [
        estimate_score(row["original"], row["translation"])
        for _, row in df.iterrows()
    ]
    return df
47
-
48
# Gradio UI: upload a TSV, get back the same table with predicted scores appended.
demo = gr.Interface(
    fn=process_file,
    inputs=gr.File(label="Upload dev.tsv with 'original' and 'translation' columns"),
    outputs=gr.Dataframe(),
    title="LLaMA 3 8B (Quantized) MT QE",
    description="Translation Quality Estimation using LLaMA-3-8B GGUF via llama-cpp-python on CPU"
)

# Launch the web server only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()