Spaces: Runtime error
Update app.py
app.py
CHANGED
@@ -1,39 +1,44 @@
 from transformers import AutoModelForCausalLM, AutoTokenizer, GemmaTokenizer
 import torch
 import os
+import gradio as gr

-
-
-
-
-
+try:
+    # Load the model and tokenizer
+    model_id = "kimhyunwoo/gemma2-ko-dialogue-lora-fp16"
+    model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
+    # Load GemmaTokenizer directly instead of AutoTokenizer.
+    tokenizer = GemmaTokenizer.from_pretrained(model_id, trust_remote_code=True)

-# When loading and using the model in a CPU environment, torch.float32 can be used explicitly.
-model = model.to(torch.float32)
-model.eval()
+    # When loading and using the model in a CPU environment, torch.float32 can be used explicitly.
+    model = model.to(torch.float32)
+    model.eval()

-# Text generation function
-def generate_text(text, max_length=50, do_sample=True, temperature=1.0):
-
-
-
-
-
+    # Text generation function
+    def generate_text(text, max_length=50, do_sample=True, temperature=1.0):
+        inputs = tokenizer(text, return_tensors="pt")
+        with torch.no_grad():
+            outputs = model.generate(**inputs, max_length=max_length, do_sample=do_sample, temperature=temperature)
+        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        return generated_text

-# Create the Gradio interface
-
-
-
-
-
-
-
-
-
-
-
-
-)
+    # Create the Gradio interface
+    iface = gr.Interface(
+        fn=generate_text,
+        inputs=[
+            gr.Textbox(lines=5, placeholder="Enter text..."),
+            gr.Slider(minimum=10, maximum=200, step=1, value=50, label="Max length"),
+            gr.Checkbox(label="Sampling"),
+            gr.Slider(minimum=0.1, maximum=2.0, step=0.1, value=1.0, label="Temperature"),
+        ],
+        outputs=gr.Textbox(lines=5, label="Generated text"),
+        title="Gemma 2 Text Generator",
+        description="Generates text using the fine-tuned Gemma 2 model.",
+    )

-if __name__ == "__main__":
-
+    if __name__ == "__main__":
+        iface.launch(server_name="0.0.0.0", server_port=7860)
+except ImportError as e:
+    print(f"ImportError occurred: {e}")
+except Exception as e:
+    print(f"Unexpected error occurred: {e}")