# NOTE(review): the three lines below are page-scrape residue from the
# Hugging Face commit header (author avatar alt-text, commit message,
# commit hash) — not Python. Commented out so the file parses.
# Kongfha's picture
# debug int temperature
# 135eff8
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
# Hugging Face Hub model ID: a GPT-2 model fine-tuned to compose Thai verse
# in the style of the epic poem "Phra Aphai Manee".
model_name = "Kongfha/PhraAphaiManee-LM"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Module-level text-generation pipeline, shared by generate() below.
# Loading happens once at startup so each request only runs inference.
nlp = pipeline("text-generation",
model=model,
tokenizer=tokenizer)
def generate(input_sentence, top_k=50, temperature=1.0, max_length=140):
    """Continue *input_sentence* with model-generated Thai verse.

    Args:
        input_sentence: Seed text (typically a poem opening) to extend.
        top_k: Sampling pool size; coerced to int (UI may pass a float).
        temperature: Sampling temperature; coerced to float.
        max_length: Total token budget (prompt included); coerced to int.

    Returns:
        The generated text (prompt plus continuation) as a single string.
    """
    # The Gradio Number/Slider widgets can deliver floats or strings, so every
    # numeric knob is explicitly converted before reaching the pipeline.
    sampling_options = {
        "max_length": int(max_length),
        "do_sample": True,
        "top_k": int(top_k),
        "temperature": float(temperature),
    }
    candidates = nlp(input_sentence, **sampling_options)
    return candidates[0]["generated_text"]
# UI widgets for the demo. The original code used the ``gr.inputs`` /
# ``gr.outputs`` namespaces and the ``default=`` keyword, which were
# deprecated in Gradio 2.x and removed in Gradio 3.0+; the top-level
# classes with ``value=`` are the supported equivalents.
inputs = [
    gr.Textbox(label="Input Sentence"),
    gr.Number(value=50, label="Top K"),
    gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Temperature"),
    gr.Number(value=140, label="Max Length"),
]
outputs = gr.Textbox(label="Generated Text")
# Example prompts shown in the UI. Columns match the ``inputs`` order:
# [input_sentence, top_k, temperature, max_length]. Each seed begins with
# "๏", the Thai character that marks the start of a stanza.
examples = [
["๏ เรือล่อง", 50, 1.0, 60],
["๏ แม้นชีวี", 30, 0.8, 60],
["๏ หากวันใด", 50, 1.0, 60],
["๏ หากจำเป็น", 70, 1.5, 60]
]
# Wire the generate() function to the widgets above and serve the web demo.
iface = gr.Interface(
fn=generate,
inputs=inputs,
outputs=outputs,
examples=examples,
title="PhraAphaiManee-LM (แต่งกลอนสไตล์พระอภัยมณี ด้วย GPT-2)",
description="โมเดลนี้เป็นโมเดล GPT-2 ที่ถูกเทรนบนชุดข้อมูลพระอภัยมณี"
)
# Blocks and serves the app (on Spaces this binds to the platform's port).
iface.launch()