Daemontatox committed on
Commit
f97b7b0
·
verified ·
1 Parent(s): 24c1dce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -5,14 +5,14 @@ import torch
5
  from threading import Thread
6
  import re
7
 
8
- phi4_model_path = "Daemontatox/Grifflet-0.6B"
9
 
10
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
11
 
12
  phi4_model = AutoModelForCausalLM.from_pretrained(phi4_model_path, device_map="auto", torch_dtype="auto")
13
  phi4_tokenizer = AutoTokenizer.from_pretrained(phi4_model_path)
14
 
15
- @spaces.GPU(duration=60)
16
  def generate_response(user_message, max_tokens, temperature, top_k, top_p, repetition_penalty, history_state):
17
  if not user_message.strip():
18
  return history_state, history_state
 
5
  from threading import Thread
6
  import re
7
 
8
+ phi4_model_path = "Daemontatox/Qwen3-14B-Griffon"
9
 
10
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
11
 
12
  phi4_model = AutoModelForCausalLM.from_pretrained(phi4_model_path, device_map="auto", torch_dtype="auto")
13
  phi4_tokenizer = AutoTokenizer.from_pretrained(phi4_model_path)
14
 
15
+ @spaces.GPU(duration=120)
16
  def generate_response(user_message, max_tokens, temperature, top_k, top_p, repetition_penalty, history_state):
17
  if not user_message.strip():
18
  return history_state, history_state