charliebaby2023 committed
Commit 9a27085 · verified · 1 parent: c1cc5d9

Update app.py

Files changed (1)
  1. app.py +77 -138
app.py CHANGED
@@ -3,33 +3,31 @@ from random import randint
 from all_models import models
 from datetime import datetime
 from concurrent.futures import ThreadPoolExecutor
+import numpy as np
 import time
 import requests
-import logging
-
-# Set the logging level to WARNING or higher
-#attempting ignore user input
-logging.basicConfig(level=logging.WARNING)
-
+# Attempting to remove logged user input, out of respect for users' privacy on Hugging Face.
+# That's right: logging happens here, and prompt input is visible live (it may simply be
+# intrinsic to the Hugging Face network). I'm not interested in anyone's input, and I'm not
+# here to judge; I'm trying my best to help protect our privacy. Please bear with me, and if
+# you have questions, suggestions, help, or even just a comment, please let me know (keep in
+# mind I'm about a 0.32 on a 1-10 scale of Python literacy). Also keep in mind that this data
+# helps me select the very best and most stable models for us all to use; that is my overall
+# goal. The good news: I have no idea who or where you are, only what is currently being
+# prompted and which models succeeded. Just try and play nice.
 now2 = 0
+index_tracker = 0  # Index tracker for the current model
+model_scores = {model: 0 for model in models}  # Dictionary to track scores for each model
+processed_models_count = 0
 kii=" this is your prompt input window still a wip"
 combined_prompt = ""
 
-
-
-
-
-
-
-
 def get_current_time():
     now = datetime.now()
     now2 = now
     current_time = now2.strftime("%Y-%m-%d %H:%M:%S")
     ki = f'{kii} {current_time}'
     return ki
-
-
 def load_fn(models):
     global models_load
     models_load = {}
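Note: the deleted lines above used Python's standard logging module to keep request details out of the console. A minimal sketch of that approach, assuming the noise comes from ordinary Python loggers (the httpx and urllib3 logger names are illustrative guesses, not taken from this repo):

import logging

# Raise the root threshold so INFO-level records (which can carry request
# details such as prompts) are no longer emitted.
logging.basicConfig(level=logging.WARNING)

# Optionally silence specific chatty loggers; these names are assumptions.
for name in ("httpx", "urllib3"):
    logging.getLogger(name).setLevel(logging.WARNING)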
@@ -37,79 +35,88 @@ def load_fn(models):
         if model not in models_load.keys():
             try:
                 m = gr.load(f'models/{model}')
-                print(f"{m}");
+                print(f"{m}\n");
             except Exception as error:
-                print(f"Error loading model {model}: {error}")
+                print(f"Error loading model {model}: {error}\n")
                 m = gr.Interface(lambda _: None, inputs=gr.Textbox(), outputs=gr.Image(), enable_queue=False)
             models_load.update({model: m})
 
 
 load_fn(models)
-
 num_models = len(models)
 default_models = models[:num_models]
-
-
 def extend_choices(choices):
     return choices + (num_models - len(choices)) * ['NA']
-
-
 def update_imgbox(choices):
     choices_plus = extend_choices(choices)
     return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]
-
-
 executor = ThreadPoolExecutor(max_workers=num_models)
 
 
 def gen_fn(model_str, prompt):
+    global index_tracker, model_scores, processed_models_count  # Declare all global variables
     if model_str == 'NA':
         return None
-
-    noise = str(randint(0, 9999))
-    combined_prompt = f'{prompt}'
-    print(f"Generating with prompt: {combined_prompt}")
-
     try:
-        image_response = models_load[model_str](f'{prompt} {noise}')
-        # print(f"77 {models_load[model_str](f'{combined_prompt}')}")
-        # image_response = models_load[model_str](f'{combined_prompt}')
-        # Ensure the response is an image or image-like object
-        if isinstance(image_response, gr.Image):
-            return image_response
-        elif isinstance(image_response, str):  # If the response is a path or URL, pass it as a string
-            return gr.Image(image_response)  # You can handle it based on your model's return type
-        else:
-            print(f"Unexpected response type: {type(image_response)}")
+        index_tracker = (index_tracker + 1) % len(models)
+        current_model_index = index_tracker
+        current_model_name = models[current_model_index]
+        combined_prompt = f"{prompt} {randint(0, 9999)}"
+        response = models_load[model_str](f"{combined_prompt}")
+
+        if isinstance(response, gr.Image):
+            return response
+        elif isinstance(response, tuple):
             return None
+        elif isinstance(response, str):  # If the response is a path or URL, pass it as a string
+            if processed_models_count == 0:
+                print(f"**************")
+                print(f"{prompt}")
+                print(f"{prompt}")
+                print(f"{prompt}")
+                print(f"**************")
+            model_scores[current_model_name] += 1
+            print(f"OOO n:{processed_models_count} x:{current_model_index} r[{model_scores[current_model_name]}] {model_str}")
+            processed_models_count += 1
+            if processed_models_count == len(models):
+                print("\nCycle Complete! Updated Scores:")
+                print(model_scores)
+                processed_models_count = 0
+            return response
+
+
     except Exception as e:
-        print(f"Error occurred: {e}")
+        if processed_models_count == 0:
+            print(f"**************")
+            print(f"{prompt}")
+            print(f"{prompt}")
+            print(f"{prompt}")
+            print(f"**************")
+        print(f"--- n:{processed_models_count} x:{current_model_index} r[{model_scores[current_model_name]}] {model_str}")
+        processed_models_count += 1
+        if processed_models_count == len(models):
+            print("\nCycle Complete! Updated Scores:")
+            print(model_scores)
+            processed_models_count = 0
         return None
-
-
-
+
 def make_me():
     with gr.Row():
         txt_input = gr.Textbox(lines=2, value=kii, label=None)
         gen_button = gr.Button('Generate images')
-        # stop_button = gr.Button('Stop', variant='secondary', interactive=False)
+        stop_button = gr.Button('Stop', variant='secondary', interactive=False)
 
-        #gen_button.click(lambda _: gr.update(interactive=True), None, stop_button)
+        gen_button.click(lambda _: gr.update(interactive=True), None, stop_button)
         gen_button.click(lambda _: gr.update(interactive=True), None)
-
-
-        gr.HTML("""
-        <div style="text-align: center; max-width: 100%; margin: 0 auto;">
-        <body>
-        </body>
-        </div>
-        """)
+        gr.HTML(""" <div style="text-align: center; max-width: 100%; margin: 0 auto;"> <body> </body> </div> """)
 
     with gr.Row():
         output = [gr.Image(label=m) for m in default_models]
         current_models = [gr.Textbox(m, visible=False) for m in default_models]
         for m, o in zip(current_models, output):
             gen_event = gen_button.click(gen_fn, [m, txt_input], o, queue=False)
+        # stop_button.click(lambda _: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
+
 
     with gr.Accordion('Model selection', visible=False):
         model_choice = gr.CheckboxGroup(models, label=f' {num_models} different models selected', value=default_models, interactive=True)
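Note: the rewritten gen_fn advances index_tracker once per call, in whatever order Gradio fires the per-image click events, so current_model_name is not guaranteed to match model_str, the model that actually generated; a score can be credited to the wrong model when calls interleave. A standalone sketch of the intended bookkeeping, with the Gradio call replaced by a stub (the model list and the success rate below are illustrative assumptions):

from random import random

models = ["model_a", "model_b", "model_c"]  # hypothetical model list
model_scores = {m: 0 for m in models}
processed_models_count = 0

def record_result(model_name, succeeded):
    """Count one generation attempt and print the table after a full cycle."""
    global processed_models_count
    if succeeded:
        model_scores[model_name] += 1
    processed_models_count += 1
    if processed_models_count == len(models):
        print("Cycle Complete! Updated Scores:", model_scores)
        processed_models_count = 0

for m in models:  # one simulated cycle
    record_result(m, random() > 0.3)  # stub: roughly 70% success rate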
@@ -117,93 +124,25 @@ def make_me():
         model_choice.change(extend_choices, model_choice, current_models)
 
 
-js_code = """
-<script>
-const originalScroll = window.scrollTo;
-const originalShowToast = gradio.Toast.show;
-gradio.Toast.show = function() {
-    originalShowToast.apply(this, arguments);
-    window.scrollTo = function() {};};
-setTimeout(() => {
-    window.scrollTo = originalScroll;
-}, 1000); // Restore scroll function after 3 seconds
-</script>
-"""
-
-
-
+js_code = """<script>const originalScroll = window.scrollTo; const originalShowToast = gradio.Toast.show;
+gradio.Toast.show = function() { originalShowToast.apply(this, arguments); window.scrollTo = function() {};};
+setTimeout(() => { window.scrollTo = originalScroll; }, 1000); // Restore scroll function after 1 second</script>"""
 
 with gr.Blocks(css="""
-label.float.svelte-i3tvor { top:auto!important; bottom: 0; position: absolute; background: rgba(0,0,0,0.0); left: var(--block-label-margin); color: rgba(200,200,200,.7);}
-.genbut { max-width: 50px; max-height: 30px; width:150px; height:30px}
-.stopbut { max-width: 50px; max-height: 30px; width:150px; height:30px}
-.float.svelte-1mwvhlq { position: absolute; top: var(--block-label-margin); left: var(--block-label-margin); background: none; border: none;}
-textarea:hover { background:#55555555;}
-textarea { overflow-y: scroll; top:0px; width: 100%; height:100%!important;
-
-    font-size: 1.5em;
-    letter-spacing: 3px;
-    color: limegreen;
-
-    border: none!important;
-    background: none;
-    outline: none !important; }
-
-
-
-.form.svelte-633qhp{ flex-grow: 1;
-    position: absolute;
-    right: 0px;
-    border-radius: 6px;
-    z-index: 400000;
-    resize: both;
-    left: 52%;
-    background: rgba(103, 103, 114, 0.35);
-    height: 46px;
-    width: 48%!important;
-}
-
-
-
-label.svelte-173056l.svelte-173056l {
-    display: block;
-    width: 100%;
-    height: 100%;
-}
-.input-container.svelte-173056l.svelte-173056l {
-    /* display: flex; */
-    position: absolute;
-    border: 1px solid;
-    padding: 0px;
-    /* height: calc(100% - 32px); */
-    /* align-items: flex-end; */
-    border-radius: 6px;
-    margin: 0px;
-    top: 0px;
-    left: 0px;
-    /* bottom: -16px; */
-    width: 100%;
-    min-height: 100%;
-}
-textarea{
-    position: absolute;
-    font-size: 1em !important;
-    padding: 4px;
-    background: none;
-    height: 100% !important;
-    height: 100%;}
-
-.svelte-11xb1hd.padded{background:none;}
-
-span.svelte-1gfkn6j:not(.has-info) {
-    margin-bottom: var(--spacing-lg);
-    display: none;
-}
-
-
-}
-
-""") as demo:
+label.float.svelte-i3tvor { top:auto!important; bottom: 0; position: absolute; background: rgba(0,0,0,0.0); left: var(--block-label-margin); color: rgba(200,200,200,.7);}
+.genbut { max-width: 50px; max-height: 30px; width:150px; height:30px}
+.stopbut { max-width: 50px; max-height: 30px; width:150px; height:30px}
+.float.svelte-1mwvhlq { position: absolute; top: var(--block-label-margin); left: var(--block-label-margin); background: none; border: none;}
+textarea:hover { background:#55555555;}
+textarea { overflow-y: scroll; top:0px; width: 100%; height:100%!important; font-size: 1.5em; letter-spacing: 3px; color: limegreen; border: none!important; background: none; outline: none !important; }
+.form.svelte-633qhp{ flex-grow: 1; position: absolute; right: 0px; border-radius: 6px; z-index: 400000; resize: both; left: 52%; background: rgba(103, 103, 114, 0.35); height: 46px; width: 48%!important;}
+label.svelte-173056l.svelte-173056l { display: block; width: 100%; height: 100%;}
+.input-container.svelte-173056l.svelte-173056l { /* display: flex; */ position: absolute; border: 1px solid; padding: 0px; /* height: calc(100% - 32px); */ /* align-items: flex-end; */ border-radius: 6px; margin: 0px; top: 0px; left: 0px; /* bottom: -16px; */ width: 100%; min-height: 100%;}
+textarea{ position: absolute; font-size: 1em !important; padding: 4px; background: none; height: 100% !important; height: 100%;}
+.svelte-11xb1hd.padded{background:none;} span.svelte-1gfkn6j:not(.has-info) { margin-bottom: var(--spacing-lg); display: none;}
+.lg.secondary{ min-width:20%!important; width: 150px !important; flex: none !important;}
+.unpadded_box.svelte-1oiin9d { margin-top: 0; margin-left: auto!important; max-height: 134px!important; min-height: 156px!important; margin-right: auto!important; min-width: 133px !important;}
+""") as demo:
     gr.Markdown("<script>" + js_code + "</script>")
     make_me()
 
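Note: js_code stubs out window.scrollTo whenever a Gradio toast appears, so the page does not jump, then restores the original function after one second. Be aware that gr.Markdown sanitizes HTML in recent Gradio releases, so the <script> tag passed through it at the end of the diff is likely stripped rather than executed. A hedged sketch of a more reliable route, assuming a Gradio 4.x install where gr.Blocks accepts a js function that runs on page load:

import gradio as gr

# One JS function, executed once when the page loads (Gradio 4.x `js` hook).
freeze_scroll_js = """
() => {
    const originalScroll = window.scrollTo;
    window.scrollTo = () => {};  // swallow programmatic scroll jumps
    setTimeout(() => { window.scrollTo = originalScroll; }, 1000);
}
"""

with gr.Blocks(js=freeze_scroll_js) as demo:
    gr.Markdown("scrolling is frozen for the first second after load")

demo.launch()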
 
 
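Note: executor = ThreadPoolExecutor(max_workers=num_models) is created but never used; each image box is driven by its own click event instead. A hedged sketch of how the pool could fan one prompt out across every model concurrently (generate_all is a hypothetical helper, not part of app.py, and the 60-second timeout is an assumption):

from concurrent.futures import ThreadPoolExecutor, as_completed

def generate_all(prompt, models, gen_fn, timeout=60):
    """Run gen_fn once per model in parallel and collect the results."""
    results = {}
    with ThreadPoolExecutor(max_workers=len(models)) as pool:
        futures = {pool.submit(gen_fn, model, prompt): model for model in models}
        for future in as_completed(futures, timeout=timeout):
            results[futures[future]] = future.result()  # gen_fn returns None on failure
    return results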