unausagi committed on
Commit
3eecdbe
·
verified ·
1 Parent(s): b025d20

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -1
app.py CHANGED
@@ -17,10 +17,13 @@ def load_model(model_path):
17
  model_path,
18
  trust_remote_code=True,
19
  token=HF_TOKEN,
 
20
  device_map="auto", # 讓 transformers 自動決定使用 CPU/GPU
 
21
  )
22
  return model, tokenizer
23
 
 
24
  # 預設載入 DeepSeek-V3
25
  current_model, current_tokenizer = load_model("deepseek-ai/DeepSeek-V3")
26
 
@@ -33,7 +36,8 @@ def chat(message, history, model_name):
33
  if model_name != current_model:
34
  current_model, current_tokenizer = load_model(model_name)
35
 
36
- inputs = current_tokenizer(message, return_tensors="pt").to("cuda")
 
37
  outputs = current_model.generate(**inputs, max_length=1024)
38
  response = current_tokenizer.decode(outputs[0], skip_special_tokens=True)
39
 
 
17
  model_path,
18
  trust_remote_code=True,
19
  token=HF_TOKEN,
20
+ torch_dtype=torch.float16, # 強制 FP16,避免 FP8 問題
21
  device_map="auto", # 讓 transformers 自動決定使用 CPU/GPU
22
+ revision="main"
23
  )
24
  return model, tokenizer
25
 
26
+
27
  # 預設載入 DeepSeek-V3
28
  current_model, current_tokenizer = load_model("deepseek-ai/DeepSeek-V3")
29
 
 
36
  if model_name != current_model:
37
  current_model, current_tokenizer = load_model(model_name)
38
 
39
+ device = "cuda" if torch.cuda.is_available() else "cpu"
40
+ inputs = current_tokenizer(message, return_tensors="pt").to(device)
41
  outputs = current_model.generate(**inputs, max_length=1024)
42
  response = current_tokenizer.decode(outputs[0], skip_special_tokens=True)
43