Update app.py
app.py CHANGED
@@ -1,4 +1,4 @@
-
+
 import gradio as gr
 import os
 from random import randint
@@ -246,171 +246,4 @@ demo.launch(max_threads=200)
 
 #load_fn(models)
 #num_models = len(models)
-#default_models = models[:num_models]
-'''
-
-import gradio as gr
-import os
-from random import randint
-from all_models import models
-from datetime import datetime
-from concurrent.futures import TimeoutError, ThreadPoolExecutor
-import logging
-import traceback
-
-# Disable GPU usage
-os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
-
-# Logging setup
-logging.basicConfig(level=logging.WARNING)
-now2 = 0
-index_tracker = 0
-model_scores = {model: 0 for model in models}
-processed_models_count = 0
-kii = "blonde mohawk femboy playing game with self at computer with programmer socks on, still a wip"
-combined_prompt = ""
-
-# Function to get current timestamp
-def get_current_time():
-    now = datetime.now()
-    current_time = now.strftime("%Y-%m-%d %H:%M:%S")
-    return f'{kii} {current_time}'
-
-# Function to sanitize file names
-def sanitize_file_name(file_name, max_length=100):
-    return file_name[:max_length].replace(" ", "_").replace("/", "_")
-
-# Load models
-def load_fn(models):
-    global models_load
-    models_load = {}
-    for model in models:
-        try:
-            m = gr.load(f'models/{model}')
-            if m is not None:
-                models_load[model] = m
-                print(f"Loaded model: {model}")
-            else:
-                print(f"Warning: Model {model} could not be loaded.")
-                models_load[model] = None
-        except Exception as error:
-            print(f"Error loading model {model}: {error}")
-            models_load[model] = None
-
-load_fn(models)
-num_models = len(models)
-default_models = models[:num_models]
-
-# Extend choices to match model count
-def extend_choices(choices):
-    return choices + (num_models - len(choices)) * ['NA']
-
-def update_imgbox(choices):
-    choices_plus = extend_choices(choices)
-    return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]
-
-executor = ThreadPoolExecutor(max_workers=num_models)
-
-def gen_fn(model_str, prompt):
-    global index_tracker, model_scores, processed_models_count
-    if model_str == 'NA' or models_load.get(model_str) is None:
-        return None  # Avoid calling an undefined function
-
-    try:
-        index_tracker = (index_tracker + 1) % len(models)
-        current_model_index = index_tracker
-        current_model_name = models[current_model_index]
-
-        max_prompt_length = 100
-        truncated_prompt = sanitize_file_name(prompt[:max_prompt_length])
-        combined_prompt = f"{truncated_prompt}_{randint(0, 9999)}"
-
-        # Execute model with timeout
-        future = executor.submit(models_load[model_str], f"{combined_prompt}")
-        response = future.result(timeout=150)
-
-        if isinstance(response, gr.Image):
-            return response
-        elif isinstance(response, str):
-            model_scores[current_model_name] += 1
-            processed_models_count += 1
-            if processed_models_count == len(models):
-                processed_models_count = 0
-            return response
-
-    except TimeoutError:
-        processed_models_count += 1
-        if processed_models_count == len(models):
-            processed_models_count = 0
-        return None
-
-    except Exception as e:
-        processed_models_count += 1
-        if processed_models_count == len(models):
-            processed_models_count = 0
-        return None
-
-def make_me():
-    with gr.Row():
-        txt_input = gr.Textbox(lines=2, value=kii, label=None)
-        gen_button = gr.Button('Generate images')
-        stop_button = gr.Button('Stop', variant='secondary', interactive=False)
-        # gen_button.click(lambda _: gr.update(interactive=True), None, stop_button)
-        gen_button.click(lambda s: gr.update(interactive=True), inputs=[], outputs=[stop_button])
-
-    with gr.Row():
-        output = [gr.Image(label=m) for m in default_models]
-        current_models = [gr.Textbox(m, visible=False) for m in default_models]
-        for m, o in zip(current_models, output):
-            print(f"Connecting {m} to gen_fn")  # Debugging output
-            gen_event = gen_button.click(gen_fn, [m, txt_input], o)
-            stop_button.click(lambda s: gr.update(interactive=False), inputs=[], outputs=[stop_button], cancels=[gen_event])
-
-
-    with gr.Accordion('Model selection', visible=False):
-        model_choice = gr.CheckboxGroup(models, label=f'{num_models} models selected', value=default_models, interactive=True)
-        model_choice.change(update_imgbox, model_choice, output)
-        model_choice.change(extend_choices, model_choice, current_models)
-
-# JavaScript to fix scrolling issue
-js_code = """
-<script>
-const originalScroll = window.scrollTo;
-gradio.Toast.show = function() {
-    originalShowToast.apply(this, arguments);
-    window.scrollTo = function() {};
-};
-setTimeout(() => {
-    window.scrollTo = originalScroll;
-}, 1000);
-</script>
-"""
-
-# Launch Gradio App
-with gr.Blocks(css="""
-textarea:hover { background:#55555555; }
-textarea { font-size: 1.5em; letter-spacing: 3px; color: limegreen; }
-""") as demo:
-    gr.Markdown("<script>" + js_code + "</script>")
-    make_me()
-
-demo.queue()
-demo.config["queue"] = False
-
-if __name__ == "__main__":
-    print("Launching Gradio app...")
-    demo.launch(debug=True, share=False, max_threads=200)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+#default_models = models[:num_models]
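For context on the block this commit deletes: its gen_fn guards each model call with a thread pool and a hard deadline, so a slow backend cannot block the Gradio event handler forever. Below is a minimal standalone sketch of that pattern; fake_model and generate_with_timeout are hypothetical stand-ins for the callable returned by gr.load() and for gen_fn itself, not names from the original app.

from concurrent.futures import ThreadPoolExecutor, TimeoutError
import time

def fake_model(prompt: str) -> str:
    time.sleep(1)  # stand-in for a slow remote inference call
    return f"image-for-{prompt}.png"

executor = ThreadPoolExecutor(max_workers=4)

def generate_with_timeout(prompt: str, timeout_s: float = 150.0):
    # Submit the call to the pool and give up after timeout_s seconds,
    # mirroring the removed code's future.result(timeout=150).
    future = executor.submit(fake_model, prompt)
    try:
        return future.result(timeout=timeout_s)
    except TimeoutError:
        return None  # the UI treats None as "no image produced"

print(generate_with_timeout("test prompt"))

Note that future.result(timeout=...) abandons the worker thread rather than killing it, which is why the removed code also caps the pool at one worker per model.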
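The deleted load_fn uses a second pattern worth noting: each gr.load() call is wrapped in try/except so that one bad model id degrades to a None entry instead of aborting startup. A sketch of that loop, with a hypothetical stub_load in place of gr.load(f'models/{model}') so the example runs offline:

models = ["owner/model-a", "owner/model-b", "owner/broken-model"]

def stub_load(name: str):
    # Hypothetical stand-in for gr.load(f'models/{name}'), which needs network access.
    if "broken" in name:
        raise RuntimeError(f"cannot load {name}")
    return lambda prompt: f"{name} rendered: {prompt}"

models_load = {}
for model in models:
    try:
        models_load[model] = stub_load(model)
        print(f"Loaded model: {model}")
    except Exception as error:
        # Record the failure instead of aborting startup;
        # the generation path skips None entries.
        print(f"Error loading model {model}: {error}")
        models_load[model] = None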