# Model-QV4 / app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
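# Load the fine-tuned model and its tokenizer from the Hugging Face Hub.
# device_map="auto" places the weights on a GPU when one is available and
# falls back to CPU otherwise.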
model_id = "Abdul-Basit123/llama-3-8b-Instruct-bnb-4bit-finetuned"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.float16)
def generate_response(prompt):
    # Tokenize the prompt and move it to the same device as the model
    # (hard-coding "cuda" would crash when the model loads on CPU).
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=200)
    # Decode only the newly generated tokens, skipping the echoed prompt.
    return tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
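# Wire the generation function into a minimal text-in, text-out Gradio UI.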
iface = gr.Interface(fn=generate_response, inputs="text", outputs="text", title="Model-Q Demo")
iface.launch()