satyaiyer committed on
Commit c58c20a · verified · 1 Parent(s): 13edc57

Delete app.py

Files changed (1)
  1. app.py +0 -61
app.py DELETED
@@ -1,61 +0,0 @@
- import os
- import pandas as pd
- import gradio as gr
- from llama_cpp import Llama
-
- # Path to GGUF file
- MODEL_PATH = "./models/llama-3-8b-instruct.Q4_K_M.gguf"
-
- os.makedirs("models", exist_ok=True)
- with open("models/.keep", "w") as f:
-     f.write("placeholder")
-
- # Load quantized model with llama-cpp
- llm = Llama(
-     model_path=MODEL_PATH,
-     n_ctx=2048,
-     n_threads=8,
-     verbose=False
- )
-
- def build_prompt(source, translation):
-     return (
-         f"<|begin_of_text|><|system|>\n"
-         "You are a helpful assistant that evaluates translation quality. "
-         "Score the quality from 0 (worst) to 1 (best).\n"
-         "<|user|>\n"
-         f"Original: {source}\nTranslation: {translation}\n"
-         "How good is the translation?\n<|assistant|>"
-     )
-
- def estimate_score(source, translation):
-     prompt = build_prompt(source, translation)
-     output = llm(prompt, max_tokens=10, stop=["</s>", "\n"])
-     text = output["choices"][0]["text"].strip()
-
-     try:
-         score = float([s for s in text.split() if s.replace('.', '', 1).isdigit()][-1])
-         score = round(score, 3)
-     except:
-         score = "N/A"
-     return score
-
- def process_file(file):
-     df = pd.read_csv(file.name, sep="\t")
-     scores = []
-     for _, row in df.iterrows():
-         score = estimate_score(row["original"], row["translation"])
-         scores.append(score)
-     df["predicted_score"] = scores
-     return df
-
- demo = gr.Interface(
-     fn=process_file,
-     inputs=gr.File(label="Upload dev.tsv with 'original' and 'translation' columns"),
-     outputs=gr.Dataframe(),
-     title="LLaMA 3 8B (Quantized) MT QE",
-     description="Translation Quality Estimation using LLaMA-3-8B GGUF via llama-cpp-python on CPU"
- )
-
- if __name__ == "__main__":
-     demo.launch()
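
For reference, the deleted app expected a tab-separated dev.tsv with 'original' and 'translation' columns (see the gr.File label and process_file above). A minimal sketch of producing such a file with pandas; the example sentences are illustrative and not taken from this repo:

import pandas as pd

# Illustrative rows; the deleted app accepted any TSV with these two columns.
df = pd.DataFrame({
    "original": ["Das Wetter ist heute schön.", "Ich habe keine Zeit."],
    "translation": ["The weather is nice today.", "I have no time."],
})

# process_file() read the upload with pd.read_csv(file.name, sep="\t"),
# so the file must be tab-separated.
df.to_csv("dev.tsv", sep="\t", index=False)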