SameerJugno committed on
Commit
8ceda8f
·
verified ·
1 Parent(s): 771fb48

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -0
app.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel, PeftConfig

# Tokenizer is loaded from the repo root ("."), i.e. shipped alongside this app.
tokenizer = AutoTokenizer.from_pretrained(".")

# Base model, quantized to 4-bit so the 8B model fits on a modest GPU.
quant_cfg = BitsAndBytesConfig(load_in_4bit=True)
base_model = AutoModelForCausalLM.from_pretrained(
    "unsloth/Meta-Llama-3.1-8B-bnb-4bit",  # same base you fine-tuned
    quantization_config=quant_cfg,
    device_map="auto",
)

# Attach the fine-tuned LoRA adapters stored in the repo root.
model = PeftModel.from_pretrained(base_model, ".")
18
+
19
def generate_response(prompt):
    """Generate a completion for *prompt* using the fine-tuned model.

    Parameters
    ----------
    prompt : str
        Instruction text entered by the user in the UI.

    Returns
    -------
    str
        Only the newly generated text. ``model.generate`` returns the
        prompt tokens followed by the continuation, so the prompt-length
        prefix is sliced off before decoding — previously the user's
        prompt was echoed back inside the "Model response" box.
    """
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,   # sampling: responses vary between calls
        temperature=0.7,
    )
    # Keep only the tokens generated after the prompt.
    prompt_len = inputs["input_ids"].shape[1]
    new_tokens = outputs[0][prompt_len:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
24
+
25
# Wire the generator into a one-textbox-in, one-textbox-out web UI and serve it.
demo = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(label="Enter your instruction"),
    outputs=gr.Textbox(label="Model response"),
    title="LLaMA 3 - Fine-tuned Model",
)
demo.launch()