charliebaby2023 committed
Commit 19c8bea · verified · 1 Parent(s): 72a8b28

Update app.py

Files changed (1): app.py +52 -66
app.py CHANGED
@@ -2,18 +2,17 @@ import gradio as gr
 from random import randint
 from all_models import models
 from datetime import datetime
-from concurrent.futures import ThreadPoolExecutor, TimeoutError
+from concurrent.futures import TimeoutError, ThreadPoolExecutor
 import numpy as np
 import time
 import requests
+import logging
+logging.basicConfig(level=logging.WARNING)
 now2 = 0
 index_tracker = 0 # Index tracker for the current model
 model_scores = {model: 0 for model in models} # Dictionary to track scores for each model
 processed_models_count = 0
-kii="femboy race car driver, mohawk, still a wip"
+kii=" this is your prompt input window still a wip"
 combined_prompt = ""
 
 def get_current_time():
@@ -22,6 +21,18 @@ def get_current_time():
     current_time = now2.strftime("%Y-%m-%d %H:%M:%S")
     ki = f'{kii} {current_time}'
     return ki
+
+# Sanitize file names and truncate them
+def sanitize_file_name(file_name, max_length=100):
+    """Shortens and removes unsafe characters from file name."""
+    file_name = file_name[:max_length]
+    return file_name.replace(" ", "_").replace("/", "_")
+
 def load_fn(models):
     global models_load
     models_load = {}
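
Note: a quick, hypothetical sanity check of the new `sanitize_file_name` helper (not part of the commit), illustrating the truncate-then-replace behavior:

```python
# Keeps at most max_length characters, then swaps spaces and slashes
# for underscores so the result is safe to use as a file name.
print(sanitize_file_name("a cat / a dog"))           # -> a_cat___a_dog
print(sanitize_file_name("x" * 200, max_length=10))  # -> xxxxxxxxxx
```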
@@ -32,7 +43,7 @@ def load_fn(models):
             print(f"{m}\n");
         except Exception as error:
             print(f"Error loading model {model}: {error}\n")
-            m = gr.Interface(lambda _: None, inputs=gr.Textbox(), outputs=gr.Image(), enable_queue=False)
+            m = gr.Interface(lambda _: None, inputs=gr.Textbox(), outputs=gr.Image(), queue=False)
         models_load.update({model: m})
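
Note: the body of `load_fn`'s try block is elided in this diff. A minimal sketch of the overall pattern, assuming the elided code wraps each Hugging Face model with `gr.load` (the usual Gradio idiom for hosted models; the exact call is not shown in the commit):

```python
import gradio as gr

def load_fn(models):
    global models_load
    models_load = {}
    for model in models:
        try:
            # Assumed: wrap the hosted model as a callable interface.
            m = gr.load(f"models/{model}")
            print(f"{m}\n")
        except Exception as error:
            print(f"Error loading model {model}: {error}\n")
            # Fall back to a do-nothing placeholder so the UI still renders.
            m = gr.Interface(lambda _: None, inputs=gr.Textbox(), outputs=gr.Image())
        models_load.update({model: m})
```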
 
 
@@ -44,38 +55,35 @@ def extend_choices(choices):
 def update_imgbox(choices):
     choices_plus = extend_choices(choices)
     return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]
-executor = ThreadPoolExecutor(max_workers=num_models)
 
+executor = ThreadPoolExecutor(max_workers=num_models)
 
 def gen_fn(model_str, prompt):
-    global model_scores, processed_models_count, index_tracker # Declare global variables
+    global index_tracker, model_scores, processed_models_count
     if model_str == 'NA':
         return None
-    # model_name = None # Initialize `model_name` to avoid reference errors
-    # current_model_name = None # Initialize `current_model_name` to avoid reference errors
-    # current_model_index = None # Initialize `current_model_index` to avoid reference errors
+
     try:
         index_tracker = (index_tracker + 1) % len(models)
         current_model_index = index_tracker
         current_model_name = models[current_model_index]
-        combined_prompt = f"{prompt} {randint(0, 9999)}"
-        response = models_load[model_str](f"{combined_prompt}")
+
+        max_prompt_length = 100
+        truncated_prompt = sanitize_file_name(prompt[:max_prompt_length])
+        combined_prompt = f"{truncated_prompt}_{randint(0, 9999)}"
+
+        # Execute the model call with a timeout; timeout_limit is also
+        # referenced by the TimeoutError handler below.
+        timeout_limit = 100
+        future = executor.submit(models_load[model_str], f"{combined_prompt}")
+        response = future.result(timeout=timeout_limit) # Wait for result with timeout
+
         if isinstance(response, gr.Image):
             return response
         elif isinstance(response, tuple):
             return None
-        elif isinstance(response, str): # If the response is a path or URL, pass it as a string
+        elif isinstance(response, str):
             if processed_models_count == 0:
                 print(f"**************")
                 print(f"{prompt}")
@@ -91,27 +99,32 @@ def gen_fn(model_str, prompt):
             processed_models_count = 0
         return response
 
-
+    except TimeoutError:
+        print(f"TimeoutError: Model '{model_str}' did not respond within {timeout_limit} seconds.")
+        processed_models_count += 1
+        if processed_models_count == len(models):
+            print("\nCycle Complete! Updated Scores:")
+            print(model_scores)
+            processed_models_count = 0
+        return None
+
     except Exception as e:
         if processed_models_count == 0:
-            print(f"**************")
-            print(f"{prompt}")
-            print(f"{prompt}")
-            print(f"{prompt}")
-            print(f"**************")
+            print(f"**************")
+            print(f"{prompt}")
+            print(f"{prompt}")
+            print(f"{prompt}")
+            print(f"**************")
             print(f"--- n:{processed_models_count} x:{current_model_index} r[{model_scores[current_model_name]}] {model_str}")
         processed_models_count += 1
         if processed_models_count == len(models):
-            print("\nCycle Complete! Updated Scores:")
-            print(model_scores)
-            processed_models_count = 0
+            print("\nCycle Complete! Updated Scores:")
+            print(model_scores)
+            processed_models_count = 0
         return None
 
 
 def make_me():
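
Note (not in the commit): `gen_fn` mutates module-level counters (`index_tracker`, `processed_models_count`, `model_scores`) while calls may run concurrently through the executor, so the bookkeeping can race. One way to make the updates atomic, sketched with a hypothetical helper and stand-in globals:

```python
import threading

# Stand-ins for the app's globals (same names as in app.py).
models = ["model_a", "model_b"]
model_scores = {m: 0 for m in models}
processed_models_count = 0

score_lock = threading.Lock()  # guards the shared counters above

def record_result(model_name, success):
    """Hypothetical helper: atomically update the per-cycle bookkeeping."""
    global processed_models_count
    with score_lock:
        if success:
            model_scores[model_name] += 1
        processed_models_count += 1
        if processed_models_count == len(models):
            print("\nCycle Complete! Updated Scores:")
            print(model_scores)
            processed_models_count = 0
```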
@@ -129,7 +142,7 @@ def make_me():
     current_models = [gr.Textbox(m, visible=False) for m in default_models]
     for m, o in zip(current_models, output):
         gen_event = gen_button.click(gen_fn, [m, txt_input], o, queue=False)
-        # stop_button.click(lambda _: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
+        stop_button.click(lambda _: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
 
 
     with gr.Accordion('Model selection', visible=False):
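
Note: un-commenting `stop_button.click(..., cancels=[gen_event])` uses Gradio's event cancellation: events passed in `cancels=` have their in-flight runs aborted when the stop button fires. A toy example of the mechanism (assuming Gradio's documented behavior; to my reading, cancellation is serviced by the queue, so events bound with `queue=False`, as in this app, may not actually be cancellable):

```python
import time
import gradio as gr

with gr.Blocks() as demo:
    out = gr.Textbox()
    gen = gr.Button("generate")
    stop = gr.Button("stop")
    # Toy long-running handler; clicking "stop" cancels its in-flight run.
    gen_event = gen.click(lambda: time.sleep(30) or "done", None, out)
    stop.click(lambda: None, None, None, cancels=[gen_event])

demo.queue()  # cancellation relies on the queue being enabled
demo.launch()
```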
@@ -160,35 +173,8 @@ textarea{ position: absolute; font-size: 1em !important; padding: 4px;
 gr.Markdown("<script>" + js_code + "</script>")
 make_me()
 
 demo.queue()
 demo.queue = False
 demo.config["queue"] = False
-demo.launch(max_threads=400)
+demo.launch(max_threads=200)