satyaiyer committed on
Commit
850e11d
·
verified ·
1 Parent(s): 932fc71

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -8
app.py CHANGED
@@ -3,14 +3,17 @@ import gradio as gr
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
  import torch
5
 
6
# Load the model and tokenizer once at module import time so every Gradio
# request reuses the same weights.
# NOTE(review): the original id "meta-llama/Meta-Llama-3.2-3B-Instruct" does not
# exist on the Hub — Llama 3.2 instruct checkpoints are published as
# "meta-llama/Llama-3.2-3B-Instruct"; from_pretrained would raise on the old id.
model_name = "meta-llama/Llama-3.2-3B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    # fp16 halves memory on GPU; fall back to fp32 on CPU, where fp16 is
    # poorly supported.
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",  # lets accelerate place layers on available devices
)
 
 
 
14
 
15
def generate_prompt(original, translation):
    """Build the quality-estimation prompt for one source/translation pair.

    Returns the exact prompt string the model is scored with: a task header,
    the source sentence, the candidate translation, and a trailing score cue.
    """
    header = "### Task: Machine Translation Quality Estimation"
    body = f"Source: {original}\nTranslation: {translation}"
    return f"{header}\n\n{body}\n\nScore (0-1):"
 
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
  import torch
5
 
6
import os  # required for os.environ below; `os` is never imported in the visible file

# Gated/private Hub repos require an access token; read it from the
# environment rather than hard-coding a secret in the source.
model_name = "your-private-or-protected-model-id"
hf_token = os.environ.get("HF_TOKEN")  # None if unset; from_pretrained then falls back to anonymous access

# NOTE(review): `use_auth_token` is deprecated in recent transformers
# releases — `token` is the supported parameter name.
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
# The earlier fp16/device_map loading variant was removed here; restore
# torch_dtype=torch.float16 and device_map="auto" if GPU inference is needed.
17
 
18
def generate_prompt(original, translation):
    """Return the MT quality-estimation prompt for a source/translation pair.

    The output format is fixed: task header, source line, translation line,
    and a "Score (0-1):" cue for the model to complete.
    """
    parts = (
        "### Task: Machine Translation Quality Estimation",
        f"Source: {original}\nTranslation: {translation}",
        "Score (0-1):",
    )
    return "\n\n".join(parts)