Update app.py
app.py CHANGED
@@ -5,17 +5,18 @@ import torch
 from PIL import Image
 from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, FluxPipeline, DiffusionPipeline, DPMSolverMultistepScheduler
 from diffusers.utils import export_to_video
-from transformers import pipeline as transformers_pipeline, TrainingArguments, Trainer
+from transformers import pipeline as transformers_pipeline, AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer
 from audiocraft.models import MusicGen
 import gradio as gr
-from huggingface_hub import HfFolder
+from huggingface_hub import snapshot_download, HfApi, HfFolder
 import multiprocessing
 import io
 import time
 
+# Get the environment variables
 hf_token = os.getenv("HF_TOKEN")
 redis_host = os.getenv("REDIS_HOST")
-redis_port = int(os.getenv("REDIS_PORT", 6379))
+redis_port = int(os.getenv("REDIS_PORT", 6379))  # Default value if none is provided
 redis_password = os.getenv("REDIS_PASSWORD")
 
 HfFolder.save_token(hf_token)
@@ -24,15 +25,18 @@ def connect_to_redis():
     while True:
         try:
             redis_client = redis.Redis(host=redis_host, port=redis_port, password=redis_password)
-            redis_client.ping()
+            redis_client.ping()  # Check that the connection is alive
+            print("Connected to Redis successfully.")
             return redis_client
         except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError, BrokenPipeError) as e:
+            print(f"Connection to Redis failed: {e}. Retrying in 1 second...")
             time.sleep(1)
 
 def reconnect_if_needed(redis_client):
     try:
         redis_client.ping()
     except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError, BrokenPipeError):
+        print("Reconnecting to Redis...")
         return connect_to_redis()
     return redis_client
 
@@ -43,6 +47,7 @@ def load_object_from_redis(key):
         obj_data = redis_client.get(key)
         return pickle.loads(obj_data) if obj_data else None
     except (pickle.PickleError, redis.exceptions.RedisError) as e:
+        print(f"Failed to load object from Redis: {e}")
         return None
 
 def save_object_to_redis(key, obj):
@@ -50,17 +55,21 @@ def save_object_to_redis(key, obj):
     redis_client = reconnect_if_needed(redis_client)
     try:
         redis_client.set(key, pickle.dumps(obj))
+        print(f"Object saved to Redis: {key}")
     except redis.exceptions.RedisError as e:
         print(f"Failed to save object to Redis: {e}")
 
 def get_model_or_download(model_id, redis_key, loader_func):
     model = load_object_from_redis(redis_key)
     if model:
+        print(f"Model loaded from Redis: {redis_key}")
         return model
     try:
         model = loader_func(model_id, torch_dtype=torch.float16)
         save_object_to_redis(redis_key, model)
+        print(f"Model downloaded and saved to Redis: {redis_key}")
     except Exception as e:
+        print(f"Failed to load or save model: {e}")
         return None
 
 def generate_image(prompt):
@@ -71,6 +80,7 @@ def generate_image(prompt):
         image = text_to_image_pipeline(prompt).images[0]
         save_object_to_redis(redis_key, image)
     except Exception as e:
+        print(f"Failed to generate image: {e}")
         return None
     return image
 
@@ -82,6 +92,7 @@ def edit_image_with_prompt(image, prompt, strength=0.75):
         edited_image = img2img_pipeline(prompt=prompt, init_image=image.convert("RGB"), strength=strength).images[0]
         save_object_to_redis(redis_key, edited_image)
     except Exception as e:
+        print(f"Failed to edit image: {e}")
         return None
     return edited_image
 
@@ -93,6 +104,7 @@ def generate_song(prompt, duration=10):
         song = music_gen.generate(prompt, duration=duration)
         save_object_to_redis(redis_key, song)
     except Exception as e:
+        print(f"Failed to generate song: {e}")
         return None
     return song
 
@@ -101,10 +113,10 @@ def generate_text(prompt):
     text = load_object_from_redis(redis_key)
     if not text:
         try:
-            # Replace "bigcode/starcoder" with another text generation model
             text = text_gen_pipeline([{"role": "user", "content": prompt}], max_new_tokens=256)[0]["generated_text"].strip()
             save_object_to_redis(redis_key, text)
         except Exception as e:
+            print(f"Failed to generate text: {e}")
             return None
     return text
 
@@ -122,6 +134,7 @@ def generate_flux_image(prompt):
         ).images[0]
         save_object_to_redis(redis_key, flux_image)
     except Exception as e:
+        print(f"Failed to generate flux image: {e}")
         return None
     return flux_image
 
@@ -135,6 +148,7 @@ def generate_code(prompt):
         code = starcoder_tokenizer.decode(outputs[0])
         save_object_to_redis(redis_key, code)
     except Exception as e:
+        print(f"Failed to generate code: {e}")
         return None
     return code
 
@@ -149,6 +163,7 @@ def generate_video(prompt):
         video = export_to_video(pipe(prompt, num_inference_steps=25).frames)
         save_object_to_redis(redis_key, video)
     except Exception as e:
+        print(f"Failed to generate video: {e}")
         return None
     return video
 
@@ -164,6 +179,7 @@ def test_model_meta_llama():
         response = meta_llama_pipeline(messages, max_new_tokens=256)[0]["generated_text"].strip()
         save_object_to_redis(redis_key, response)
     except Exception as e:
+        print(f"Failed to test Meta-Llama: {e}")
         return None
     return response
 
@@ -205,13 +221,15 @@ for _ in range(num_processes):
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
+# Load the models
 text_to_image_pipeline = get_model_or_download("stabilityai/stable-diffusion-2", "text_to_image_model", StableDiffusionPipeline.from_pretrained)
 img2img_pipeline = get_model_or_download("CompVis/stable-diffusion-v1-4", "img2img_model", StableDiffusionImg2ImgPipeline.from_pretrained)
 flux_pipeline = get_model_or_download("black-forest-labs/FLUX.1-schnell", "flux_model", FluxPipeline.from_pretrained)
-text_gen_pipeline = transformers_pipeline("text-generation", model="google/
+text_gen_pipeline = transformers_pipeline("text-generation", model="google/gemma-2-9b", tokenizer="google/gemma-2-9b", device=0)
 music_gen = load_object_from_redis("music_gen") or MusicGen.from_pretrained('melody')
 meta_llama_pipeline = get_model_or_download("meta-llama/Meta-Llama-3.1-8B-Instruct", "meta_llama_model", transformers_pipeline)
 
+# Define the user interfaces
 gen_image_tab = gr.Interface(generate_image, gr.inputs.Textbox(label="Prompt:"), gr.outputs.Image(type="pil"), title="Generate Image")
 edit_image_tab = gr.Interface(edit_image_with_prompt, [gr.inputs.Image(type="pil", label="Image:"), gr.inputs.Textbox(label="Prompt:"), gr.inputs.Slider(0.1, 1.0, 0.75, step=0.05, label="Strength:")], gr.outputs.Image(type="pil"), title="Edit Image")
 generate_song_tab = gr.Interface(generate_song, [gr.inputs.Textbox(label="Prompt:"), gr.inputs.Slider(5, 60, 10, step=1, label="Duration (s):")], gr.outputs.Audio(type="numpy"), title="Generate Songs")
@@ -229,4 +247,4 @@ app.launch(share=True)
 for _ in range(num_processes):
     task_queue.put(None)
 for p in processes:
-    p.join()
+    p.join()
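
For reference, the Redis caching added in this change is a plain pickle round-trip against a single client. Below is a minimal standalone sketch of that pattern, not part of the commit, assuming the same REDIS_HOST / REDIS_PORT / REDIS_PASSWORD environment variables and a reachable Redis server; the key name is invented for the example.

import os
import pickle
import redis

# Build a client from the same environment variables app.py reads.
redis_client = redis.Redis(
    host=os.getenv("REDIS_HOST"),
    port=int(os.getenv("REDIS_PORT", 6379)),
    password=os.getenv("REDIS_PASSWORD"),
)

# Round-trip an arbitrary picklable object, mirroring save_object_to_redis /
# load_object_from_redis in app.py. "demo_object" is a made-up key.
redis_client.set("demo_object", pickle.dumps({"prompt": "a watercolor fox"}))
restored = pickle.loads(redis_client.get("demo_object"))
print(restored)  # {'prompt': 'a watercolor fox'}

Note that large GPU-resident pipelines may not pickle cleanly, so treating a cache miss as the normal path, as get_model_or_download already does, is the safer assumption.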
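The last hunk shuts the worker pool down by pushing one None sentinel per process before joining. Below is a minimal self-contained sketch of that shutdown pattern; the worker body and task values are invented for illustration, since the actual worker function is not shown in this diff.

import multiprocessing

def worker(task_queue):
    # Keep pulling tasks until a None sentinel arrives, then exit.
    while True:
        task = task_queue.get()
        if task is None:
            break
        print(f"processing {task}")

if __name__ == "__main__":
    num_processes = 2
    task_queue = multiprocessing.Queue()
    processes = [multiprocessing.Process(target=worker, args=(task_queue,)) for _ in range(num_processes)]
    for p in processes:
        p.start()
    for task in ["a", "b", "c"]:
        task_queue.put(task)
    # One sentinel per worker, then wait for all of them to finish.
    for _ in range(num_processes):
        task_queue.put(None)
    for p in processes:
        p.join()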