satyaiyer committed on
Commit
7eecac8
·
verified ·
1 Parent(s): c58c20a

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +63 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import pandas as pd
import gradio as gr
from llama_cpp import Llama

# Path to the quantized Mistral model expected on disk (GGUF format).
MODEL_PATH = "./models/mistral-7b-instruct-v0.1.Q2_K.gguf"

# Create the models folder and a placeholder file so the directory exists
# (and persists in storage) even before a model has been placed in it.
os.makedirs("models", exist_ok=True)
with open("models/.keep", "w") as f:
    f.write("placeholder")

# Fail fast with a clear message: the lines above only create a placeholder,
# they do NOT download the model, so Llama() would otherwise die with a
# cryptic native loader error when the .gguf file is absent.
if not os.path.isfile(MODEL_PATH):
    raise FileNotFoundError(
        f"Model file not found at {MODEL_PATH}; "
        "download the GGUF file into ./models before starting the app."
    )

# Load the quantized Mistral model for CPU inference.
llm = Llama(
    model_path=MODEL_PATH,
    n_ctx=2048,      # context window size in tokens
    n_threads=8,     # CPU threads used for inference
    verbose=False,
)
20
+
21
def build_prompt(source, translation):
    """Build a Mistral-instruct style prompt asking for a 0-1 quality score.

    Args:
        source: The original (source-language) sentence.
        translation: The candidate translation to evaluate.

    Returns:
        The full prompt string, ending with the ``<|assistant|>`` tag so the
        model continues with its answer.
    """
    # Newlines are explicit \n escapes; the scraped original had raw line
    # breaks inside the string literals, which is invalid Python.
    return (
        "<|system|>\n"
        "You are a helpful assistant that evaluates translation quality. "
        "Score the quality from 0 (worst) to 1 (best).\n"
        "<|user|>\n"
        f"Original: {source}\nTranslation: {translation}\n"
        "How good is the translation?\n<|assistant|>"
    )
33
+
34
def estimate_score(source, translation):
    """Ask the LLM to rate a translation; return a float or "N/A".

    Args:
        source: Original sentence.
        translation: Candidate translation to score.

    Returns:
        The last numeric token found in the model's reply, rounded to three
        decimals, or the string "N/A" when no number could be parsed.
    """
    prompt = build_prompt(source, translation)
    output = llm(prompt, max_tokens=10, stop=["</s>", "\n"])
    text = output["choices"][0]["text"].strip()
    try:
        # Keep whitespace-separated tokens that look like a number (at most
        # one decimal point, e.g. "0.85"), and take the last one.
        numbers = [tok for tok in text.split() if tok.replace(".", "", 1).isdigit()]
        return round(float(numbers[-1]), 3)
    except (IndexError, ValueError):
        # The original bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; only parsing failures should fall back to "N/A".
        return "N/A"
44
+
45
def process_file(file):
    """Score every row of an uploaded TSV file.

    Args:
        file: Uploaded file object (gradio File); its ``.name`` is a path to
            a tab-separated file with 'original' and 'translation' columns.

    Returns:
        The DataFrame with an added 'predicted_score' column.
    """
    df = pd.read_csv(file.name, sep="\t")
    # Comprehension replaces the original append loop: same row order, same
    # values, one fewer mutable accumulator.
    df["predicted_score"] = [
        estimate_score(row["original"], row["translation"])
        for _, row in df.iterrows()
    ]
    return df
53
+
54
# Gradio UI: a single file-upload input wired to the scoring pipeline,
# rendered back as a table.
_file_input = gr.File(
    label="Upload dev.tsv with 'original' and 'translation' columns"
)

demo = gr.Interface(
    fn=process_file,
    inputs=_file_input,
    outputs=gr.Dataframe(),
    title="Mistral 7B Q2_K MT QE",
    description=(
        "Translation Quality Estimation using Mistral-7B-Instruct Q2_K GGUF "
        "via llama-cpp-python on CPU"
    ),
)

# Start the web server only when executed as a script, not when imported.
if __name__ == "__main__":
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ llama-cpp-python==0.2.56
2
+ pandas
3
+ gradio