Update app.py
app.py CHANGED
@@ -3,13 +3,17 @@ import pandas as pd
 import gradio as gr
 from llama_cpp import Llama
 
-# Define the path
+# Define the model path
 MODEL_PATH = "models/mistral-7b-instruct-v0.1.Q2_K.gguf"
 
-# Create
+# Create models directory if not exists
 os.makedirs("models", exist_ok=True)
-
-
+
+# If the model file doesn't exist, you can download it.
+# (This download should ideally be done manually or during local testing, not in the Hugging Face Space runtime)
+if not os.path.exists(MODEL_PATH):
+    print("Downloading GGUF model...")
+    # Use subprocess or any method to download the model file here if needed
 
 # Load the quantized model (CPU)
 llm = Llama(
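The added block leaves the download step as a placeholder comment. A minimal sketch of one way to fill it in, assuming the model is fetched with the huggingface_hub package and that "TheBloke/Mistral-7B-Instruct-v0.1-GGUF" is the source repository (the repo ID is an assumption, not part of this commit):

import os
from huggingface_hub import hf_hub_download  # pip install huggingface_hub

MODEL_PATH = "models/mistral-7b-instruct-v0.1.Q2_K.gguf"
os.makedirs("models", exist_ok=True)

if not os.path.exists(MODEL_PATH):
    print("Downloading GGUF model...")
    # Assumed repo_id: TheBloke's GGUF mirror of Mistral-7B-Instruct-v0.1; adjust as needed.
    hf_hub_download(
        repo_id="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
        filename="mistral-7b-instruct-v0.1.Q2_K.gguf",
        local_dir="models",
    )

Downloading at Space startup adds a one-time delay and disk usage, which is why the commit notes that doing it manually or during local testing is preferable.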