Update app.py
app.py CHANGED
@@ -2,23 +2,15 @@ import os
 import pandas as pd
 import gradio as gr
 from llama_cpp import Llama
-import os
-import subprocess
 
-#
-
+# Define the path for the model
+MODEL_PATH = "models/mistral-7b-instruct-v0.1.Q2_K.gguf"
 
-#
-
+# Create the models directory if it doesn't exist
+os.makedirs("models", exist_ok=True)
+with open("models/.keep", "w") as f:
+    f.write("placeholder")
 
-# Download model if it doesn't exist
-if not os.path.exists(model_path):
-    print("Downloading GGUF model...")
-    subprocess.run([
-        "wget", "-O", model_path,
-        "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q2_K.gguf?download=true"
-    ], check=True)
-
 # Load the quantized model (CPU)
 llm = Llama(
     model_path=MODEL_PATH,
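For context, here is a minimal sketch, not part of this commit, of how the GGUF file expected at MODEL_PATH could be fetched at startup with huggingface_hub instead of the removed wget call. The repo id and filename are taken from the old download URL; n_ctx and n_threads are assumed values, not settings from this app.

import os

from huggingface_hub import hf_hub_download
from llama_cpp import Llama

MODEL_PATH = "models/mistral-7b-instruct-v0.1.Q2_K.gguf"
os.makedirs("models", exist_ok=True)

# Download the quantized model on first run (hypothetical replacement for the wget call).
if not os.path.exists(MODEL_PATH):
    hf_hub_download(
        repo_id="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
        filename="mistral-7b-instruct-v0.1.Q2_K.gguf",
        local_dir="models",
    )

# Load the quantized model on CPU; n_ctx and n_threads are assumptions.
llm = Llama(
    model_path=MODEL_PATH,
    n_ctx=2048,
    n_threads=os.cpu_count(),
)

The existence check mirrors the logic removed in this commit, so the file is only fetched on the first start of the Space.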