ananddey committed on
Commit f58072b · verified · 1 Parent(s): 63a78fa

Delete app.py

Files changed (1)
  app.py +0 -30
app.py DELETED
@@ -1,30 +0,0 @@
- import gradio as gr
- from transformers import AutoTokenizer, AutoModelForCausalLM
-
- # Load model and tokenizer
- model_id = "ananddey/gemma-3-ad-finetuned"
- tokenizer = AutoTokenizer.from_pretrained(model_id)
- model = AutoModelForCausalLM.from_pretrained(model_id)
-
- def generate_text(prompt, max_length=100, temperature=0.7):
-     inputs = tokenizer(prompt, return_tensors="pt")
-     outputs = model.generate(
-         inputs.input_ids,
-         max_length=max_length,
-         temperature=temperature,
-         do_sample=True,
-     )
-     return tokenizer.decode(outputs[0], skip_special_tokens=True)
-
- # Create Gradio interface
- demo = gr.Interface(
-     fn=generate_text,
-     inputs=[
-         gr.Textbox(lines=5, placeholder="Enter your prompt here..."),
-         gr.Slider(minimum=10, maximum=500, value=100, label="Max Length"),
-         gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature")
-     ],
-     outputs="text"
- )
-
- demo.launch(share=True)