# Gradio demo: quote generation with a GPT-2 base model and the
# "clemsadand/quote_generator" PEFT adapter from the Hugging Face Hub.
import gradio as gr
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the GPT-2 base model, attach the fine-tuned PEFT adapter weights,
# and load the matching tokenizer. All of these download from the Hugging
# Face Hub on first run.
# Fix: the original also fetched PeftConfig.from_pretrained(...) into an
# unused `config` variable — an extra Hub round-trip with no effect.
base_model = AutoModelForCausalLM.from_pretrained("gpt2")
model = PeftModel.from_pretrained(base_model, "clemsadand/quote_generator")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
def generate_quote(input_text):
    """Generate a short quote from a keyword prompt using the PEFT model.

    Args:
        input_text: Prompt string, e.g.
            "Generate a quote about kindness with the keywords compassion, ...".

    Returns:
        The generated quote as a single cleaned-up string ending with ".".
        Output is non-deterministic because sampling is enabled.
    """
    input_tensor = tokenizer(input_text, return_tensors="pt")
    output = model.generate(
        input_tensor["input_ids"],
        attention_mask=input_tensor["attention_mask"],
        max_length=64,
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True,
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,
        temperature=0.7,
    )
    # Fix: the keyword was misspelled "ski_special_tokens"; decode() silently
    # ignored it, so special tokens such as <|endoftext|> leaked into the text.
    text = tokenizer.decode(
        output[0], skip_special_tokens=True, clean_up_tokenization_spaces=True
    )
    # Drop everything before the first "." (the echoed prompt) and after the
    # last "." (a possibly truncated fragment), then strip newlines and the
    # "->:" prompt-separator artifact, and re-terminate with a period.
    return ".".join(text.split(".")[1:-1]).replace("\n", "").replace("->:", "") + "."
# --- Gradio UI --------------------------------------------------------------
# (Removed: an unused example `input_text` variable and a commented-out
# print() call — dead code left over from manual testing.)
input_textbox = gr.Textbox(
    label="Prompt",
    placeholder=(
        "Generate a quote about kindness with the keywords "
        "compassion, empathy, help, generosity, care"
    ),
    lines=2,
)
output_textbox = gr.Textbox(label="Generated quote", placeholder="", lines=4)

# One-input/one-output interface wired straight to the generator function.
demo = gr.Interface(fn=generate_quote, inputs=input_textbox, outputs=output_textbox)

if __name__ == "__main__":
    # Guard the launch so importing this module (e.g. from tests or from a
    # Spaces runner that calls launch itself) does not start the web server.
    demo.launch()