XA-vito committed on
Commit
b16edf6
·
verified ·
1 Parent(s): 1808b93

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -5
app.py CHANGED
@@ -2,8 +2,7 @@
2
 
3
 
4
 
5
-
6
-
7
  import gradio as gr
8
  import joblib
9
  import numpy as np
@@ -18,14 +17,17 @@ MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.3"
18
  device = "cuda" if torch.cuda.is_available() else "cpu"
19
  HF_TOKEN = os.getenv("HF_TOKEN") # Obtiene el token de la variable de entorno
20
 
 
 
 
 
21
  print("🔄 Cargando modelo de lenguaje...")
22
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME,
23
- use_auth_token=HF_TOKEN )
24
  model = AutoModelForCausalLM.from_pretrained(
25
  MODEL_NAME,
26
  torch_dtype=torch.float16 if device == "cuda" else torch.float32,
27
  device_map="auto",
28
- use_auth_token=HF_TOKEN
29
  ).to(device)
30
 
31
  # Memoria conversacional
@@ -111,3 +113,4 @@ iface = gr.Interface(
111
 
112
  iface.launch()
113
 
 
 
2
 
3
 
4
 
5
+ import accelerate
 
6
  import gradio as gr
7
  import joblib
8
  import numpy as np
 
17
  device = "cuda" if torch.cuda.is_available() else "cpu"
18
  HF_TOKEN = os.getenv("HF_TOKEN") # Obtiene el token de la variable de entorno
19
 
20
+ # Verificación de token
21
+ if not HF_TOKEN:
22
+ raise ValueError("❌ ERROR: No se encontró HF_TOKEN. Asegúrate de definirlo en las variables de entorno.")
23
+
24
  print("🔄 Cargando modelo de lenguaje...")
25
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
 
26
  model = AutoModelForCausalLM.from_pretrained(
27
  MODEL_NAME,
28
  torch_dtype=torch.float16 if device == "cuda" else torch.float32,
29
  device_map="auto",
30
+ token=HF_TOKEN
31
  ).to(device)
32
 
33
  # Memoria conversacional
 
113
 
114
  iface.launch()
115
 
116
+