import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
# Fine-tuned model to load from the Hugging Face Hub
base_model = "stevenArtificial/Babaru-Llama-3.2-1B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(base_model)
# 4-bit NF4 quantization with double quantization to cut memory usage
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)
# Load the causal LM with the quantization config; device_map="auto" places layers on the available GPU/CPU
model = AutoModelForCausalLM.from_pretrained(
    base_model,
    quantization_config=quantization_config,
    device_map="auto",
)

def generate(prompt):
    # Tokenize the prompt and move the tensors to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Sample up to 256 new tokens with mild temperature for varied responses
    outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.7)
    # Decode the full sequence (prompt + completion) without special tokens
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Simple Gradio UI: one text box in, one text box out
iface = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(lines=4, label="Prompt"),
    outputs=gr.Textbox(label="Response"),
    title="Babaru LLaMA-3.2-1B-Instruct Chatbot",
    description="Interactive demo of the Babaru fine-tuned LLaMA-3.2-1B-Instruct model by stevenArtificial.",
)

iface.launch()