import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

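# Optional helper (not part of the original app flow): generate_text below
# reloads the tokenizer and model on every request so the dropdown can switch
# models freely. If reload latency becomes a problem, a cached loader along
# these lines could be swapped in. This is only a sketch and assumes the same
# Hugging Face token is reused across calls (it is part of the cache key).
from functools import lru_cache


@lru_cache(maxsize=3)
def load_model_cached(model_name, token):
    """Load and cache a (tokenizer, model) pair for a given model name."""
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
    model = AutoModelForCausalLM.from_pretrained(model_name, token=token)
    return tokenizer, model
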
def generate_text(model_name, text, num_beams, max_length, top_p, temperature,
                  repetition_penalty, no_repeat_ngram_size, token):
    # The tokenizer and model are reloaded on every call so the user can switch
    # models from the dropdown (see the optional cached loader above).
    # `token=` replaces the deprecated `use_auth_token=` argument.
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
    model = AutoModelForCausalLM.from_pretrained(model_name, token=token)

    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

    generated_text = pipe(
        text,
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,  # required for top_p/temperature to take effect
        num_beams=num_beams,
        max_length=max_length,
        top_p=top_p,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
        no_repeat_ngram_size=no_repeat_ngram_size,
        truncation=True,
    )[0]["generated_text"]

    return generated_text

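# Illustrative direct call (kept commented out because it needs a valid
# Hugging Face token with access to the selected model; all values are
# examples only):
#
# print(generate_text(
#     "riotu-lab/ArabianGPT-01B", "مرحبا", num_beams=1, max_length=50,
#     top_p=0.9, temperature=0.7, repetition_penalty=1.5,
#     no_repeat_ngram_size=3, token="hf_xxx",  # hf_xxx is a placeholder
# ))
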
model_options = ["riotu-lab/ArabianGPT-01B", "riotu-lab/ArabianGPT-03B", "riotu-lab/ArabianGPT-08B-V2"]

# Input widgets, listed in the same order as the parameters of generate_text.
# Sliders without an explicit value= start at their minimum.
inputs_component = [
    gr.Dropdown(choices=model_options, label="Select Model"),
    gr.Textbox(lines=2, placeholder="Enter your text here...", label="Input Text"),
    gr.Slider(minimum=1, maximum=10, step=1, label="Num Beams"),
    gr.Slider(minimum=50, maximum=300, step=10, label="Max Length"),
    gr.Slider(minimum=0.1, maximum=1.0, step=0.1, label="Top p"),
    gr.Slider(minimum=0.1, maximum=1.0, step=0.1, label="Temperature"),
    gr.Slider(minimum=1.0, maximum=5.0, step=0.5, label="Repetition Penalty"),
    gr.Slider(minimum=2, maximum=5, step=1, label="No Repeat Ngram Size"),
    gr.Textbox(placeholder="Enter your Hugging Face token here...", label="Hugging Face Token", type="password"),
]

iface = gr.Interface(
    fn=generate_text,
    inputs=inputs_component,
    outputs="text",
    title="ArabianGPT Playground",
    description="Explore the capabilities of ArabianGPT models. Adjust the hyperparameters to see how they affect text generation.",
    live=False,  # generation runs only when the user clicks Submit
)

iface.launch()
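# Note: launch() also accepts standard Gradio options such as share=True (to
# expose a temporary public link) or server_name="0.0.0.0" (to listen on all
# interfaces), e.g.:
# iface.launch(share=True)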