satyaiyer committed on
Commit
0526c20
·
verified ·
1 Parent(s): 99d205d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -1
app.py CHANGED
@@ -3,6 +3,8 @@ import gradio as gr
3
  from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
4
  import torch
5
  import os
 
 
6
# Load model with 4-bit quantization (keeps the 8B model within memory limits).
bnb_config = BitsAndBytesConfig(load_in_4bit=True)
model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
# Gated model: the access token must be supplied via the HF_TOKEN secret.
hf_token = os.environ.get("HF_TOKEN")
tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=hf_token)
# BUG FIX: the original passed `model.to("cpu"),` as an argument to
# from_pretrained(), which is a positional-argument-after-keyword syntax
# error and references `model` before it is bound. Construct first, then
# move the model.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    use_auth_token=hf_token,
)
model.to("cpu")  # move to CPU (if not running on a GPU Space)
 
17
 
 
18
def generate_prompt(original, translation):
    """Build the quality-estimation prompt for one source/translation pair.

    The model is prompted to answer with a score in the range 0-1.
    """
    header = "### Task: Machine Translation Quality Estimation"
    pair = f"Source: {original}\nTranslation: {translation}"
    return f"{header}\n\n{pair}\n\nScore (0-1):"
20
 
 
21
  def predict_scores(file):
22
  df = pd.read_csv(file.name, sep="\t")
23
  scores = []
@@ -40,6 +44,7 @@ def predict_scores(file):
40
  df["predicted_score"] = scores
41
  return df
42
 
 
43
  iface = gr.Interface(
44
  fn=predict_scores,
45
  inputs=gr.File(label="Upload dev.tsv"),
 
3
  from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
4
  import torch
5
  import os
6
+
7
+ # Load model
8
# Load model
# 4-bit quantization via bitsandbytes to keep the 8B model's footprint small.
bnb_config = BitsAndBytesConfig(load_in_4bit=True)
model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
# Gated model: the access token must be supplied via the HF_TOKEN secret.
hf_token = os.environ.get("HF_TOKEN")
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    use_auth_token=hf_token
)
# NOTE(review): calling .to("cpu") on a 4-bit bitsandbytes-quantized model
# may be unsupported/ignored — confirm against the transformers version used.
model.to("cpu")  # move model to CPU (if not using GPU Space)
19
 
20
# Prompt generator
def generate_prompt(original, translation):
    """Return the quality-estimation prompt for one source/translation pair.

    The model is asked to emit a quality score in the range 0-1.
    """
    return f"### Task: Machine Translation Quality Estimation\n\nSource: {original}\nTranslation: {translation}\n\nScore (0-1):"
23
 
24
+ # Prediction function
25
  def predict_scores(file):
26
  df = pd.read_csv(file.name, sep="\t")
27
  scores = []
 
44
  df["predicted_score"] = scores
45
  return df
46
 
47
+ # Gradio app
48
  iface = gr.Interface(
49
  fn=predict_scores,
50
  inputs=gr.File(label="Upload dev.tsv"),