Update app.py
app.py
CHANGED
@@ -75,67 +75,42 @@ def gen_fn(model_str, prompt):
         response = models_load[model_str](f'{combined_prompt}')
         return model_name, response
 
-
-
-
-
-
-
-
-
-    print(f"85 this should pin the image ")
-    print(f"86{response}")
-    if processed_models_count == 0:
-        print(f"**************")
-        print(f"{prompt}")
-        print(f"{prompt}")
-        print(f"{prompt}")
-        print(f"**************")
-    #model_scores[current_model_name] += 1
-    #print(f"OOO n:{processed_models_count} x:{current_model_index} r[{model_scores[current_model_name]}] {model_str}")
-    #processed_models_count += 1
-    if processed_models_count == len(models):
-        print(f" **** ME facepalm")
-        # print("\nCycle Complete! Updated Scores:")
-        # print(model_scores)
-        processed_models_count = 0
-    print(f"100 RIIIIIIIght cheer ")
+    try:
+        index_tracker = (index_tracker + 1) % len(models)
+        current_model_index = index_tracker
+        current_model_name = models[current_model_index]
+        combined_prompt = f"{prompt} {randint(0, 9999)}"
+        response = models_load[model_str](f"{combined_prompt}")
+
+        if isinstance(response, gr.Image):
             return response
-
-    else:
-        print(f"elsed out")
-        print(f"hang da bish")
-        processed_models_count += 1
+        elif isinstance(response, tuple):
             return None
-
-
-    if processed_models_count == 0:
+        elif isinstance(response, str):  # If the response is a path or URL, pass it as a string
+            if processed_models_count == 0:
                 print(f"**************")
                 print(f"{prompt}")
                 print(f"{prompt}")
                 print(f"{prompt}")
                 print(f"**************")
-
-
-
-
-    if processed_models_count == len(models):
+            model_scores[current_model_name] += 1
+            print(f"OOO n:{processed_models_count} x:{current_model_index} r[{model_scores[current_model_name]}] {model_str}")
+            processed_models_count += 1
+            if processed_models_count == len(models):
                 print("\nCycle Complete! Updated Scores:")
                 print(model_scores)
                 processed_models_count = 0
-
+            return response
 
+
     except Exception as e:
-        print(f"{e}")
         if processed_models_count == 0:
             print(f"**************")
             print(f"{prompt}")
             print(f"{prompt}")
             print(f"{prompt}")
             print(f"**************")
-
-        model_score = model_scores.get(current_model_name, 0)
-        print(f"--- n:{processed_models_count} x:{index_tracker} r[{model_score}] {model_str}")
+        print(f"--- n:{processed_models_count} x:{current_model_index} r[{model_scores[current_model_name]}] {model_str}")
         processed_models_count += 1
         if processed_models_count == len(models):
             print("\nCycle Complete! Updated Scores:")
@@ -146,10 +121,6 @@ def gen_fn(model_str, prompt):
 
 
 
-
-
-
-
 
 def make_me():
     with gr.Row():
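The hunks above show only the body of gen_fn, so the module-level state it relies on is easy to miss. Below is a minimal, runnable sketch of the new control flow, not the Space's actual app.py: models, models_load, model_scores, processed_models_count and index_tracker are assumed module-level names defined elsewhere in the file; models_load is stubbed with callables that return a path string so the isinstance(response, str) branch can be exercised; the gr.Image branch is dropped so the sketch needs no Gradio install; and a global statement is added because gen_fn assigns to the two counters.

# Minimal sketch of the reworked gen_fn control flow (assumed setup, not the
# Space's real app.py). models_load is stubbed with callables that return a
# file-path string so the isinstance(response, str) branch runs.
from random import randint

models = ["model-a", "model-b"]                                   # assumed model list
models_load = {m: (lambda prompt: f"/tmp/{abs(hash(prompt))}.png") for m in models}  # stub loaders
model_scores = {m: 0 for m in models}
processed_models_count = 0
index_tracker = 0


def gen_fn(model_str, prompt):
    global index_tracker, processed_models_count                  # counters persist across calls
    try:
        # Advance the round-robin tracker and pick the model it points at.
        index_tracker = (index_tracker + 1) % len(models)
        current_model_index = index_tracker
        current_model_name = models[current_model_index]

        # Salt the prompt so repeated requests do not hit a cached result.
        combined_prompt = f"{prompt} {randint(0, 9999)}"
        response = models_load[model_str](f"{combined_prompt}")

        if isinstance(response, tuple):
            return None
        elif isinstance(response, str):                            # path or URL: count it as a success
            model_scores[current_model_name] += 1
            processed_models_count += 1
            if processed_models_count == len(models):
                print("\nCycle Complete! Updated Scores:")
                print(model_scores)
                processed_models_count = 0
            return response
    except Exception:
        # A failed generation still advances the cycle counter.
        processed_models_count += 1
        if processed_models_count == len(models):
            print("\nCycle Complete! Updated Scores:")
            print(model_scores)
            processed_models_count = 0
        return None


# One pass over the stubbed models:
for name in models:
    print(name, "->", gen_fn(name, "a watercolor fox"))
print(model_scores)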