satyaiyer committed
Commit 803b570 · verified · 1 Parent(s): 325323e

Update app.py

Files changed (1)
  1. app.py +14 -6
app.py CHANGED
@@ -2,15 +2,23 @@ import os
 import pandas as pd
 import gradio as gr
 from llama_cpp import Llama
+import os
+import subprocess
 
-# Model path
-MODEL_PATH = "models/mistral-7b-instruct-v0.1.Q2_K.gguf"
-
-# Create a models directory with a placeholder
+# Create models directory if not exists
 os.makedirs("models", exist_ok=True)
-with open("models/.keep", "w") as f:
-    f.write("placeholder")
 
+# Path to model
+model_path = "models/mistral-7b-instruct-v0.1.Q2_K.gguf"
+
+# Download model if it doesn't exist
+if not os.path.exists(model_path):
+    print("Downloading GGUF model...")
+    subprocess.run([
+        "wget", "-O", model_path,
+        "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q2_K.gguf?download=true"
+    ], check=True)
+
 # Load the quantized model (CPU)
 llm = Llama(
     model_path=MODEL_PATH,
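
For context, a minimal standalone sketch of the download-if-missing-then-load flow that the updated app.py follows. This is not the committed code: it swaps the wget subprocess for urllib.request from the standard library, uses a single MODEL_PATH name throughout (in the diff above the new code defines model_path while the unchanged Llama(...) call still reads MODEL_PATH), and the n_ctx, n_threads, and prompt values are illustrative assumptions, since the hunk is truncated after model_path=MODEL_PATH,.

import os
import urllib.request

from llama_cpp import Llama

# Hypothetical, self-contained sketch (not the committed app.py).
MODEL_URL = (
    "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/"
    "resolve/main/mistral-7b-instruct-v0.1.Q2_K.gguf?download=true"
)
MODEL_PATH = "models/mistral-7b-instruct-v0.1.Q2_K.gguf"

# Make sure the target directory exists before downloading into it.
os.makedirs("models", exist_ok=True)

# Fetch the quantized GGUF file only if it is not already on disk
# (stdlib download here; the commit itself shells out to wget).
if not os.path.exists(MODEL_PATH):
    print("Downloading GGUF model...")
    urllib.request.urlretrieve(MODEL_URL, MODEL_PATH)

# Load the quantized model on CPU. n_ctx and n_threads are assumed
# values; the diff is cut off before the real arguments appear.
llm = Llama(
    model_path=MODEL_PATH,
    n_ctx=2048,
    n_threads=4,
)

# Quick smoke test: run a short completion to confirm the model loads.
out = llm("Q: Name one quantization format for llama.cpp. A:", max_tokens=16)
print(out["choices"][0]["text"])

Compared with shelling out to wget, the stdlib download avoids depending on the wget binary being available in the runtime container; either way, the GGUF file is fetched once and reused on later starts.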