Update app.py
app.py
CHANGED
@@ -15,6 +15,11 @@ from transformers import AutoProcessor, AutoModelForCausalLM, AutoTokenizer
 from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
 from PIL import Image
 
+# os.makedirs("./hf_cache", exist_ok=True)
+# os.environ["HF_HOME"] = "./hf_cache"
+# os.environ["TRANSFORMERS_CACHE"] = "./hf_cache/transformers"
+# os.environ["HUGGINGFACE_HUB_CACHE"] = "./hf_cache/hub"
+
 # Add GPU decorator for Hugging Face Spaces
 try:
     from spaces import GPU
@@ -36,6 +41,11 @@ llava_model = None
 llava_processor = None
 stable_diffusion_pipeline = None
 
+# Set up the model directory
+MODEL_DIR = "./model"
+os.makedirs(MODEL_DIR, exist_ok=True)
+
+# Update the model loading functions
 def load_llava_model():
     """Load LLaVA model for image captioning"""
     global llava_model, llava_processor
@@ -44,13 +54,16 @@ def load_llava_model():
     print("Loading LLaVA model for image analysis...")
     model_id = "llava-hf/llava-1.5-7b-hf"
 
-    # Load processor and model
-    llava_processor = AutoProcessor.from_pretrained(
+    # Load processor and model with explicit cache directory
+    llava_processor = AutoProcessor.from_pretrained(
+        model_id,
+        cache_dir=os.path.join(MODEL_DIR, "llava_processor")
+    )
     llava_model = AutoModelForCausalLM.from_pretrained(
         model_id,
         torch_dtype=torch.float16,
         device_map="auto",
-        cache_dir="
+        cache_dir=os.path.join(MODEL_DIR, "llava_model")
     )
 
     return llava_model, llava_processor
@@ -67,8 +80,10 @@ def load_stable_diffusion_model():
     stable_diffusion_pipeline = StableDiffusionPipeline.from_pretrained(
         model_id,
         torch_dtype=torch.float16,
-        safety_checker=None  # Disable safety checker for performance
+        safety_checker=None,  # Disable safety checker for performance
+        cache_dir=os.path.join(MODEL_DIR, "stable_diffusion")
     )
+
 
     # Move to GPU if available
     if torch.cuda.is_available():
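The commented-out block at the top of the diff keeps an alternative approach around: redirecting the whole Hugging Face cache through environment variables instead of passing cache_dir to every call. A minimal sketch of that route, assuming the same ./hf_cache layout as the commented lines; the variables must be set before transformers, diffusers, or huggingface_hub are imported, or they are read too late:

import os

# Redirect all Hugging Face caches; huggingface_hub reads these variables
# at import time, so this must run before any HF library import.
os.makedirs("./hf_cache", exist_ok=True)
os.environ["HF_HOME"] = "./hf_cache"                    # root for all HF caches
os.environ["HUGGINGFACE_HUB_CACHE"] = "./hf_cache/hub"  # downloaded snapshots

from transformers import AutoProcessor  # imported only after the env vars are set

Recent transformers releases derive their cache location from HF_HOME and deprecate TRANSFORMERS_CACHE, which is why the sketch leaves that variable out.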
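With every from_pretrained call now pointing at a subdirectory of MODEL_DIR, the weights can also be fetched ahead of time so the first request does not block on a multi-gigabyte download. A sketch using huggingface_hub.snapshot_download; the Stable Diffusion repo id is not visible in this diff, so the one below is a stand-in:

import os
from huggingface_hub import snapshot_download

MODEL_DIR = "./model"
os.makedirs(MODEL_DIR, exist_ok=True)

# Warm the same cache locations the loading functions use, so that
# from_pretrained(..., cache_dir=...) finds the files already on disk.
snapshot_download(
    "llava-hf/llava-1.5-7b-hf",
    cache_dir=os.path.join(MODEL_DIR, "llava_model"),
)
snapshot_download(
    "runwayml/stable-diffusion-v1-5",  # stand-in; use the id app.py actually loads
    cache_dir=os.path.join(MODEL_DIR, "stable_diffusion"),
)

On Spaces, ./model sits on ephemeral disk unless persistent storage is attached, so the download still happens once per restart.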
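One thing this commit leaves untouched: the checkpoint is still loaded through AutoModelForCausalLM, which may reject LLaVA's multimodal configuration on recent transformers releases ("Unrecognized configuration class LlavaConfig ..."). If the Space's runtime error persists, a sketch of loading the same checkpoint through the dedicated class that the llava-hf model card uses:

import os
import torch
from transformers import AutoProcessor, LlavaForConditionalGeneration

MODEL_DIR = "./model"
model_id = "llava-hf/llava-1.5-7b-hf"

llava_processor = AutoProcessor.from_pretrained(
    model_id,
    cache_dir=os.path.join(MODEL_DIR, "llava_processor"),
)
# LLaVA is an image-text-to-text architecture; the dedicated class avoids
# the auto-class mapping problem while keeping the same dtype/device settings.
llava_model = LlavaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="auto",
    cache_dir=os.path.join(MODEL_DIR, "llava_model"),
)

Pointing both calls at one shared cache_dir would also work and keeps a single snapshot tree per repo instead of two.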
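Finally, DPMSolverMultistepScheduler is imported at the top of app.py but never attached in the hunks shown here. If the faster solver was the intent, the usual diffusers pattern is sketched below; the repo id is again a stand-in for whatever model_id the file defines:

import os
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

MODEL_DIR = "./model"
model_id = "runwayml/stable-diffusion-v1-5"  # stand-in

stable_diffusion_pipeline = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    safety_checker=None,  # disabled for performance, as in the diff
    cache_dir=os.path.join(MODEL_DIR, "stable_diffusion"),
)
# Swap the default scheduler for the multistep DPM solver, reusing its config.
stable_diffusion_pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
    stable_diffusion_pipeline.scheduler.config
)
if torch.cuda.is_available():
    stable_diffusion_pipeline = stable_diffusion_pipeline.to("cuda")

Note that the trailing comma added after safety_checker=None in the diff is what makes room for the new cache_dir argument.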