Kal1510 committed (verified)
Commit 3c00b89 · 1 Parent(s): d3a157d

Update app.py

Files changed (1):
  app.py  +3 -3
app.py CHANGED
@@ -35,8 +35,8 @@ import subprocess
 
 subprocess.run([
     "huggingface-cli", "download",
-    "TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
-    "mistral-7b-instruct-v0.1.Q2_K.gguf",
+    "microsoft/Phi-3-mini-4k-instruct-gguf",
+    "Phi-3-mini-4k-instruct-gguf",
     "--local-dir", "./models",
     "--local-dir-use-symlinks", "False"
 ], check=True)
@@ -58,7 +58,7 @@ embeddings = HuggingFaceEmbeddings(
 # Load Mistral GGUF via llama.cpp (CPU optimized)
 # ------------------------------
 llm_cpp = Llama(
-    model_path="./models/mistral-7b-instruct-v0.1.Q2_K.gguf",
+    model_path="./models/Phi-3-mini-4k-instruct-gguf",
     n_ctx=2048,
     n_threads=4,      # Adjust based on your CPU cores
     n_gpu_layers=0,   # Force CPU-only
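
For orientation, below is a minimal sketch of the download-and-load path as it stands after this commit. It is illustrative only: the diff passes the repo name ("Phi-3-mini-4k-instruct-gguf") where huggingface-cli expects a concrete file name, so the sketch substitutes a hypothetical quantized file name, "Phi-3-mini-4k-instruct-q4.gguf", to show the intended shape; the actual file name in the repo may differ.

import subprocess
from llama_cpp import Llama

MODEL_REPO = "microsoft/Phi-3-mini-4k-instruct-gguf"
MODEL_FILE = "Phi-3-mini-4k-instruct-q4.gguf"  # hypothetical quant file name, not taken from the diff

# Fetch the GGUF weights into ./models without symlinks
subprocess.run([
    "huggingface-cli", "download",
    MODEL_REPO, MODEL_FILE,
    "--local-dir", "./models",
    "--local-dir-use-symlinks", "False",
], check=True)

# Load the model with the llama.cpp bindings, CPU-only
llm_cpp = Llama(
    model_path=f"./models/{MODEL_FILE}",
    n_ctx=2048,       # context window kept from the diff
    n_threads=4,      # adjust to the number of CPU cores
    n_gpu_layers=0,   # force CPU-only inference
)

Keeping n_gpu_layers=0 and a modest n_ctx preserves the "CPU optimized" intent of the original code; only the model repo and file names change with this commit.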