import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch


MODEL_NAMES = {
    "DeepSeek-V3": "deepseek-ai/DeepSeek-V3",
    "DeepSeek-R1": "deepseek-ai/DeepSeek-R1",
}
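# Note: the full V3/R1 checkpoints are hundreds of GB. Smaller distilled
# variants (e.g. "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B" on the Hub) are a
# practical stand-in when only a single GPU is available.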


def load_model(model_name):
    """Load the Hugging Face model and tokenizer for the selected name."""
    model_path = MODEL_NAMES.get(model_name, "deepseek-ai/DeepSeek-V3")
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForCausalLM.from_pretrained(
        model_path, torch_dtype=torch.float16
    ).cuda()
    return model, tokenizer


# Track the loaded model by name so the chat handler can detect a change
# of dropdown selection.
current_model_name = "DeepSeek-V3"
current_model, current_tokenizer = load_model(current_model_name)
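

# A sketch (not called above) for hardware that cannot hold the model on one
# GPU: `device_map="auto"` asks `accelerate` to shard the fp16 weights across
# whatever devices are available. The helper name is illustrative only.
def load_model_sharded(model_name):
    """Sketch: load the model sharded across available GPUs/CPU."""
    model_path = MODEL_NAMES.get(model_name, "deepseek-ai/DeepSeek-V3")
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float16,
        device_map="auto",  # requires `pip install accelerate`
    )
    return model, tokenizer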


def chat(message, history, model_name):
    """Handle a chat message, reloading the model if the selection changed."""
    global current_model, current_tokenizer, current_model_name

    # Compare names, not the model object, when checking the dropdown value.
    if model_name != current_model_name:
        current_model, current_tokenizer = load_model(model_name)
        current_model_name = model_name

    # Tokenize the raw message (no chat template or history) and generate.
    inputs = current_tokenizer(message, return_tensors="pt").to("cuda")
    outputs = current_model.generate(**inputs, max_new_tokens=1024)
    # Decode only the newly generated tokens, not the echoed prompt.
    response = current_tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
    )

    return response
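

# Sketch of a streaming, history-aware variant (this helper is not in the
# original code). gr.ChatInterface streams automatically when its fn is a
# generator; `apply_chat_template` formats the running conversation, assuming
# the interface is built with type="messages" so `history` is a list of
# {"role": ..., "content": ...} dicts.
from threading import Thread
from transformers import TextIteratorStreamer


def chat_streaming(message, history, model_name):
    """Sketch: yield partial responses so tokens render as they arrive."""
    # (Model switching omitted for brevity; see chat() above.)
    messages = list(history) + [{"role": "user", "content": message}]
    prompt = current_tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = current_tokenizer(prompt, return_tensors="pt").to("cuda")
    streamer = TextIteratorStreamer(
        current_tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    # Run generation in a background thread; the streamer yields decoded text.
    Thread(
        target=current_model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=1024),
    ).start()
    partial = ""
    for chunk in streamer:
        partial += chunk
        yield partial  # ChatInterface displays the latest yielded string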


with gr.Blocks() as app:
    gr.Markdown("## Chatbot with DeepSeek Models")

    with gr.Row():
        model_selector = gr.Dropdown(
            choices=list(MODEL_NAMES.keys()),
            value="DeepSeek-V3",
            label="Select Model",
        )

    # Feed the dropdown to the chat function as an extra argument. ChatInterface
    # has no `streaming` flag or `.append()` method; streaming comes from using
    # a generator fn (see chat_streaming above).
    chat_interface = gr.ChatInterface(
        chat,
        type="messages",
        additional_inputs=[model_selector],
        save_history=True,
    )

app.launch()