RajatMalviya committed
Commit bc2361e (verified)
Parent: e14f55f

Update app.py

Files changed (1):
  app.py +55 -12
app.py CHANGED
@@ -14,12 +14,41 @@ from dotenv import load_dotenv
 from transformers import AutoProcessor, AutoModelForCausalLM, AutoTokenizer
 from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
 from PIL import Image
+import tempfile
+
 
 # os.makedirs("./hf_cache", exist_ok=True)
 # os.environ["HF_HOME"] = "./hf_cache"
 # os.environ["TRANSFORMERS_CACHE"] = "./hf_cache/transformers"
 # os.environ["HUGGINGFACE_HUB_CACHE"] = "./hf_cache/hub"
 
+# Use system temp directories which should be writable
+TMP_DIR = tempfile.gettempdir()
+MODEL_DIR = os.path.join(TMP_DIR, "hf_models")
+
+# Set environment variables to use these directories
+os.environ["TRANSFORMERS_CACHE"] = os.path.join(TMP_DIR, "transformers_cache")
+os.environ["HF_HOME"] = os.path.join(TMP_DIR, "hf_home")
+os.environ["HUGGINGFACE_HUB_CACHE"] = os.path.join(TMP_DIR, "hf_hub_cache")
+
+# Helper function to safely create directories
+def safe_makedirs(directory):
+    try:
+        os.makedirs(directory, exist_ok=True)
+        return True
+    except (PermissionError, OSError) as e:
+        print(f"Warning: Could not create directory {directory}: {e}")
+        return False
+
+# Create necessary directories
+for directory in [MODEL_DIR, os.environ["TRANSFORMERS_CACHE"],
+                  os.environ["HF_HOME"], os.environ["HUGGINGFACE_HUB_CACHE"]]:
+    safe_makedirs(directory)
+
+
+
+
+
 # Add GPU decorator for Hugging Face Spaces
 try:
     from spaces import GPU
@@ -45,7 +74,6 @@ stable_diffusion_pipeline = None
 MODEL_DIR = "./model"
 os.makedirs(MODEL_DIR, exist_ok=True)
 
-# Update the model loading functions
 def load_llava_model():
     """Load LLaVA model for image captioning"""
     global llava_model, llava_processor
@@ -54,20 +82,35 @@ def load_llava_model():
     print("Loading LLaVA model for image analysis...")
     model_id = "llava-hf/llava-1.5-7b-hf"
 
-    # Load processor and model with explicit cache directory
-    llava_processor = AutoProcessor.from_pretrained(
-        model_id,
-        cache_dir=os.path.join(MODEL_DIR, "llava_processor")
-    )
-    llava_model = AutoModelForCausalLM.from_pretrained(
-        model_id,
-        torch_dtype=torch.float16,
-        device_map="auto",
-        cache_dir=os.path.join(MODEL_DIR, "llava_model")
-    )
+    try:
+        # Load processor and model with system temp directory
+        llava_processor = AutoProcessor.from_pretrained(
+            model_id,
+            local_files_only=False
+        )
+        llava_model = AutoModelForCausalLM.from_pretrained(
+            model_id,
+            torch_dtype=torch.float16,
+            device_map="auto",
+            local_files_only=False
+        )
+    except Exception as e:
+        print(f"Error loading LLaVA model: {e}")
+        raise
 
     return llava_model, llava_processor
 
+
+# In the stylize_video function, replace:
+os.makedirs("outputs", exist_ok=True)
+persistent_output = os.path.join("outputs", f"stylized_{uuid.uuid4()}.mp4")
+
+# With:
+outputs_dir = os.path.join(TMP_DIR, "outputs")
+safe_makedirs(outputs_dir)
+persistent_output = os.path.join(outputs_dir, f"stylized_{uuid.uuid4()}.mp4")
+
+
 def load_stable_diffusion_model():
     """Load Stable Diffusion model for Ghibli-style image generation"""
     global stable_diffusion_pipeline
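
The cache redirection in the first hunk sets TRANSFORMERS_CACHE, HF_HOME, and HUGGINGFACE_HUB_CACHE after the transformers and diffusers imports at the top of app.py have already run. Depending on the transformers/huggingface_hub version, the default cache paths are resolved when those modules are imported, so values assigned afterwards may not be picked up. A minimal sketch of the safer ordering, with the environment variables exported before the heavyweight imports:

import os
import tempfile

# Point every Hugging Face cache at a writable temp location *before* the
# libraries that read these variables are imported.
TMP_DIR = tempfile.gettempdir()
os.environ.setdefault("HF_HOME", os.path.join(TMP_DIR, "hf_home"))
os.environ.setdefault("TRANSFORMERS_CACHE", os.path.join(TMP_DIR, "transformers_cache"))
os.environ.setdefault("HUGGINGFACE_HUB_CACHE", os.path.join(TMP_DIR, "hf_hub_cache"))

from transformers import AutoProcessor, AutoModelForCausalLM  # noqa: E402
from diffusers import StableDiffusionPipeline                 # noqa: E402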
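
The second hunk leaves MODEL_DIR = "./model" and a bare os.makedirs(MODEL_DIR, exist_ok=True) in place (old line 45 / new line 74), which overrides the temp-based MODEL_DIR defined in the first hunk and repeats the unguarded directory creation the commit is working around. A short sketch of that block reusing the temp path and the guarded helper; it assumes the TMP_DIR and safe_makedirs definitions from the first hunk are in scope:

# Keep the temp-based model directory from the first hunk instead of
# re-assigning MODEL_DIR to a relative "./model" path.
MODEL_DIR = os.path.join(TMP_DIR, "hf_models")
safe_makedirs(MODEL_DIR)  # guarded replacement for os.makedirs(MODEL_DIR, exist_ok=True)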
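
In the model-loading hunk, local_files_only=False is already the default for from_pretrained, so the effective change is dropping the explicit cache_dir arguments and letting the cache environment variables decide where downloads land. Separately, depending on the transformers version, the llava-1.5 checkpoint may not load through AutoModelForCausalLM; the architecture is exposed as LlavaForConditionalGeneration, so a variant along these lines may be needed (a sketch, not the committed code):

import torch
from transformers import AutoProcessor, LlavaForConditionalGeneration

model_id = "llava-hf/llava-1.5-7b-hf"
processor = AutoProcessor.from_pretrained(model_id)
# Load with the dedicated LLaVA class in case the Auto* causal-LM mapping
# rejects the llava config on this transformers version.
model = LlavaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="auto",
)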
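
Finally, the block added after return llava_model, llava_processor ("In the stylize_video function, replace: ... With:") sits at module level in app.py, so as committed both the old os.makedirs("outputs", exist_ok=True) line and the two persistent_output assignments run at import time and require uuid to be imported. If the intent is only to change the output path inside stylize_video, the replacement lines belong inside that function. A sketch under that assumption; the real body of stylize_video is not shown in this diff, so everything other than the three replacement lines and the function name is hypothetical, and TMP_DIR / safe_makedirs are assumed from the first hunk:

import os
import uuid

def stylize_video(input_video_path):
    # ... frame extraction and Ghibli-style stylization (not shown in this diff) ...
    outputs_dir = os.path.join(TMP_DIR, "outputs")   # was: "outputs"
    safe_makedirs(outputs_dir)                       # was: os.makedirs("outputs", exist_ok=True)
    persistent_output = os.path.join(outputs_dir, f"stylized_{uuid.uuid4()}.mp4")
    # ... write the stylized video to persistent_output ...
    return persistent_output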