cvips committed on
Commit
596cc4a
·
1 Parent(s): 535d21c

front end updated

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. app.py +55 -41
  2. inference_utils/__pycache__/__init__.cpython-39.pyc +0 -0
  3. inference_utils/__pycache__/inference.cpython-39.pyc +0 -0
  4. inference_utils/__pycache__/output_processing.cpython-39.pyc +0 -0
  5. inference_utils/__pycache__/processing_utils.cpython-39.pyc +0 -0
  6. modeling/__pycache__/BaseModel.cpython-39.pyc +0 -0
  7. modeling/__pycache__/__init__.cpython-39.pyc +0 -0
  8. modeling/architectures/__pycache__/__init__.cpython-39.pyc +0 -0
  9. modeling/architectures/__pycache__/build.cpython-39.pyc +0 -0
  10. modeling/architectures/__pycache__/seem_model_demo.cpython-39.pyc +0 -0
  11. modeling/architectures/__pycache__/seem_model_v0.cpython-39.pyc +0 -0
  12. modeling/architectures/__pycache__/seem_model_v1.cpython-39.pyc +0 -0
  13. modeling/architectures/__pycache__/xdecoder_model.cpython-39.pyc +0 -0
  14. modeling/body/__pycache__/__init__.cpython-39.pyc +0 -0
  15. modeling/body/__pycache__/build.cpython-39.pyc +0 -0
  16. modeling/body/__pycache__/xdecoder_head.cpython-39.pyc +0 -0
  17. modeling/interface/__pycache__/__init__.cpython-39.pyc +0 -0
  18. modeling/interface/__pycache__/build.cpython-39.pyc +0 -0
  19. modeling/interface/__pycache__/modules.cpython-39.pyc +0 -0
  20. modeling/interface/__pycache__/seem_demo.cpython-39.pyc +0 -0
  21. modeling/interface/__pycache__/seem_v0.cpython-39.pyc +0 -0
  22. modeling/interface/__pycache__/seem_v1.cpython-39.pyc +0 -0
  23. modeling/interface/__pycache__/xdecoder.cpython-39.pyc +0 -0
  24. modeling/interface/prototype/__pycache__/__init__.cpython-39.pyc +0 -0
  25. modeling/interface/prototype/__pycache__/attention_data_struct_seemdemo.cpython-39.pyc +0 -0
  26. modeling/interface/prototype/__pycache__/attention_data_struct_seemv0.cpython-39.pyc +0 -0
  27. modeling/interface/prototype/__pycache__/attention_data_struct_seemv1.cpython-39.pyc +0 -0
  28. modeling/language/LangEncoder/__pycache__/__init__.cpython-39.pyc +0 -0
  29. modeling/language/LangEncoder/__pycache__/build.cpython-39.pyc +0 -0
  30. modeling/language/LangEncoder/__pycache__/transformer.cpython-39.pyc +0 -0
  31. modeling/language/__pycache__/__init__.cpython-39.pyc +0 -0
  32. modeling/language/__pycache__/build.cpython-39.pyc +0 -0
  33. modeling/language/__pycache__/loss.cpython-39.pyc +0 -0
  34. modeling/language/__pycache__/vlpencoder.cpython-39.pyc +0 -0
  35. modeling/modules/__pycache__/__init__.cpython-39.pyc +0 -0
  36. modeling/modules/__pycache__/attention.cpython-39.pyc +0 -0
  37. modeling/modules/__pycache__/criterion.cpython-39.pyc +0 -0
  38. modeling/modules/__pycache__/matcher.cpython-39.pyc +0 -0
  39. modeling/modules/__pycache__/point_features.cpython-39.pyc +0 -0
  40. modeling/modules/__pycache__/position_encoding.cpython-39.pyc +0 -0
  41. modeling/modules/__pycache__/postprocessing.cpython-39.pyc +0 -0
  42. modeling/utils/__pycache__/__init__.cpython-39.pyc +0 -0
  43. modeling/utils/__pycache__/attention.cpython-39.pyc +0 -0
  44. modeling/utils/__pycache__/box_ops.cpython-39.pyc +0 -0
  45. modeling/utils/__pycache__/config.cpython-39.pyc +0 -0
  46. modeling/utils/__pycache__/interactive.cpython-39.pyc +0 -0
  47. modeling/utils/__pycache__/misc.cpython-39.pyc +0 -0
  48. modeling/vision/backbone/__pycache__/__init__.cpython-39.pyc +0 -0
  49. modeling/vision/backbone/__pycache__/backbone.cpython-39.pyc +0 -0
  50. modeling/vision/backbone/__pycache__/build.cpython-39.pyc +0 -0
app.py CHANGED
@@ -180,29 +180,49 @@ model = initialize_model()
180
  @torch.inference_mode()
181
  @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
182
  def process_image(image_path, text_prompts, modality):
183
- image = read_rgb(image_path)
184
- text_prompts = [prompt.strip() for prompt in text_prompts.split(',')]
185
-
186
- # Run inference
187
- pred_masks = interactive_infer_image(model, Image.fromarray(image), text_prompts)
188
-
189
- # Prepare outputs
190
- results = []
191
- dice_scores = []
192
- p_values = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
193
 
194
- for i, prompt in enumerate(text_prompts):
195
- # Calculate p-value for the selected modality
196
- print("PROMPT: ", prompt, flush=True)
197
- p_value = check_mask_stats(image, pred_masks[i] * 255, modality, prompt)
198
- p_values.append(f"P-value for '{prompt}' ({modality}): {p_value:.4f}")
199
 
200
- # Overlay predictions on the image
201
- overlay_image = image.copy()
202
- overlay_image[pred_masks[i] > 0.5] = [255, 0, 0] # Highlight predictions in red
203
- results.append(overlay_image)
204
 
205
- return results, p_values
 
 
 
 
 
 
 
 
 
 
206
 
207
  # Define Gradio interface
208
  with gr.Blocks() as demo:
@@ -210,7 +230,11 @@ with gr.Blocks() as demo:
210
  with gr.Row():
211
  with gr.Column():
212
  image_input = gr.Image(type="filepath", label="Input Image")
213
- prompts_input = gr.Textbox(lines=2, placeholder="Enter prompts separated by commas...", label="Prompts")
 
 
 
 
214
  modality_dropdown = gr.Dropdown(
215
  choices=list(BIOMEDPARSE_MODES.keys()),
216
  value=list(BIOMEDPARSE_MODES.keys())[0],
@@ -219,28 +243,18 @@ with gr.Blocks() as demo:
219
  submit_btn = gr.Button("Submit")
220
  with gr.Column():
221
  output_gallery = gr.Gallery(label="Findings")
222
- pvalue_output = gr.Textbox(label="Confidence (P-values)", interactive=False)
 
 
 
 
223
 
 
224
  submit_btn.click(
225
- process_image,
226
  inputs=[image_input, prompts_input, modality_dropdown],
227
- outputs=[output_gallery, pvalue_output]
 
228
  )
229
- # with gr.Row():
230
- # gr.Examples(
231
- # fn=process_image,
232
- # examples=IMAGE_PROCESSING_EXAMPLES,
233
- # inputs=[
234
- # image_processing_mode_dropdown_component,
235
- # image_processing_image_input_component,
236
- # image_processing_text_input_component
237
- # ],
238
- # outputs=[
239
- # image_processing_image_output_component,
240
- # image_processing_text_output_component
241
- # ],
242
- # run_on_click=True
243
- # )
244
-
245
- # Launch the app
246
  demo.launch()
 
180
  @torch.inference_mode()
181
  @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
182
  def process_image(image_path, text_prompts, modality):
183
+ try:
184
+ # Input validation
185
+ if not image_path:
186
+ raise ValueError("Please upload an image")
187
+ if not text_prompts or text_prompts.strip() == "":
188
+ raise ValueError("Please enter prompts for analysis")
189
+ if not modality:
190
+ raise ValueError("Please select a modality")
191
+
192
+ image = read_rgb(image_path)
193
+ text_prompts = [prompt.strip() for prompt in text_prompts.split(',')]
194
+
195
+ # Run inference
196
+ pred_masks = interactive_infer_image(model, Image.fromarray(image), text_prompts)
197
+
198
+ # Prepare outputs
199
+ results = []
200
+ p_values = []
201
+
202
+ for i, prompt in enumerate(text_prompts):
203
+ # Calculate p-value for the selected modality
204
+ print("PROMPT: ", prompt, flush=True)
205
+ p_value = check_mask_stats(image, pred_masks[i] * 255, modality, prompt)
206
+ p_values.append(f"P-value for '{prompt}' ({modality}): {p_value:.4f}")
207
 
208
+ # Overlay predictions on the image
209
+ overlay_image = image.copy()
210
+ overlay_image[pred_masks[i] > 0.5] = [255, 0, 0] # Highlight predictions in red
211
+ results.append(overlay_image)
 
212
 
213
+ return results, "\n".join(p_values)
 
 
 
214
 
215
+ except ValueError as ve:
216
+ # Handle validation errors
217
+ return None, f"⚠️ Input Error: {str(ve)}"
218
+ except torch.cuda.OutOfMemoryError:
219
+ # Handle CUDA out of memory errors
220
+ return None, "⚠️ Error: GPU memory exceeded. Please try with a smaller image."
221
+ except Exception as e:
222
+ # Handle all other errors
223
+ error_msg = f"⚠️ An error occurred: {str(e)}"
224
+ print(f"Error details: {str(e)}", flush=True) # For logging
225
+ return None, error_msg
226
 
227
  # Define Gradio interface
228
  with gr.Blocks() as demo:
 
230
  with gr.Row():
231
  with gr.Column():
232
  image_input = gr.Image(type="filepath", label="Input Image")
233
+ prompts_input = gr.Textbox(
234
+ lines=2,
235
+ placeholder="Enter prompts separated by commas...",
236
+ label="Prompts"
237
+ )
238
  modality_dropdown = gr.Dropdown(
239
  choices=list(BIOMEDPARSE_MODES.keys()),
240
  value=list(BIOMEDPARSE_MODES.keys())[0],
 
243
  submit_btn = gr.Button("Submit")
244
  with gr.Column():
245
  output_gallery = gr.Gallery(label="Findings")
246
+ pvalue_output = gr.Textbox(
247
+ label="Results",
248
+ interactive=False,
249
+ show_label=True
250
+ )
251
 
252
+ # Add error handling for the submit button
253
  submit_btn.click(
254
+ fn=process_image,
255
  inputs=[image_input, prompts_input, modality_dropdown],
256
+ outputs=[output_gallery, pvalue_output],
257
+ api_name="process"
258
  )
259
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
260
  demo.launch()
inference_utils/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (161 Bytes). View file
 
inference_utils/__pycache__/inference.cpython-39.pyc ADDED
Binary file (2.42 kB). View file
 
inference_utils/__pycache__/output_processing.cpython-39.pyc ADDED
Binary file (3.33 kB). View file
 
inference_utils/__pycache__/processing_utils.cpython-39.pyc ADDED
Binary file (3.52 kB). View file
 
modeling/__pycache__/BaseModel.cpython-39.pyc ADDED
Binary file (1.95 kB). View file
 
modeling/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (203 Bytes). View file
 
modeling/architectures/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (317 Bytes). View file
 
modeling/architectures/__pycache__/build.cpython-39.pyc ADDED
Binary file (847 Bytes). View file
 
modeling/architectures/__pycache__/seem_model_demo.cpython-39.pyc ADDED
Binary file (25.4 kB). View file
 
modeling/architectures/__pycache__/seem_model_v0.cpython-39.pyc ADDED
Binary file (33.7 kB). View file
 
modeling/architectures/__pycache__/seem_model_v1.cpython-39.pyc ADDED
Binary file (34.3 kB). View file
 
modeling/architectures/__pycache__/xdecoder_model.cpython-39.pyc ADDED
Binary file (27.7 kB). View file
 
modeling/body/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (492 Bytes). View file
 
modeling/body/__pycache__/build.cpython-39.pyc ADDED
Binary file (609 Bytes). View file
 
modeling/body/__pycache__/xdecoder_head.cpython-39.pyc ADDED
Binary file (4.27 kB). View file
 
modeling/interface/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (546 Bytes). View file
 
modeling/interface/__pycache__/build.cpython-39.pyc ADDED
Binary file (617 Bytes). View file
 
modeling/interface/__pycache__/modules.cpython-39.pyc ADDED
Binary file (6.76 kB). View file
 
modeling/interface/__pycache__/seem_demo.cpython-39.pyc ADDED
Binary file (11.5 kB). View file
 
modeling/interface/__pycache__/seem_v0.cpython-39.pyc ADDED
Binary file (11.1 kB). View file
 
modeling/interface/__pycache__/seem_v1.cpython-39.pyc ADDED
Binary file (12.6 kB). View file
 
modeling/interface/__pycache__/xdecoder.cpython-39.pyc ADDED
Binary file (12.4 kB). View file
 
modeling/interface/prototype/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (174 Bytes). View file
 
modeling/interface/prototype/__pycache__/attention_data_struct_seemdemo.cpython-39.pyc ADDED
Binary file (9.69 kB). View file
 
modeling/interface/prototype/__pycache__/attention_data_struct_seemv0.cpython-39.pyc ADDED
Binary file (9.51 kB). View file
 
modeling/interface/prototype/__pycache__/attention_data_struct_seemv1.cpython-39.pyc ADDED
Binary file (11.5 kB). View file
 
modeling/language/LangEncoder/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (1.23 kB). View file
 
modeling/language/LangEncoder/__pycache__/build.cpython-39.pyc ADDED
Binary file (632 Bytes). View file
 
modeling/language/LangEncoder/__pycache__/transformer.cpython-39.pyc ADDED
Binary file (7.78 kB). View file
 
modeling/language/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (482 Bytes). View file
 
modeling/language/__pycache__/build.cpython-39.pyc ADDED
Binary file (614 Bytes). View file
 
modeling/language/__pycache__/loss.cpython-39.pyc ADDED
Binary file (6.78 kB). View file
 
modeling/language/__pycache__/vlpencoder.cpython-39.pyc ADDED
Binary file (6.42 kB). View file
 
modeling/modules/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (309 Bytes). View file
 
modeling/modules/__pycache__/attention.cpython-39.pyc ADDED
Binary file (15.8 kB). View file
 
modeling/modules/__pycache__/criterion.cpython-39.pyc ADDED
Binary file (24.5 kB). View file
 
modeling/modules/__pycache__/matcher.cpython-39.pyc ADDED
Binary file (16 kB). View file
 
modeling/modules/__pycache__/point_features.cpython-39.pyc ADDED
Binary file (9.79 kB). View file
 
modeling/modules/__pycache__/position_encoding.cpython-39.pyc ADDED
Binary file (2.61 kB). View file
 
modeling/modules/__pycache__/postprocessing.cpython-39.pyc ADDED
Binary file (3.98 kB). View file
 
modeling/utils/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (243 Bytes). View file
 
modeling/utils/__pycache__/attention.cpython-39.pyc ADDED
Binary file (15.9 kB). View file
 
modeling/utils/__pycache__/box_ops.cpython-39.pyc ADDED
Binary file (2.92 kB). View file
 
modeling/utils/__pycache__/config.cpython-39.pyc ADDED
Binary file (4.32 kB). View file
 
modeling/utils/__pycache__/interactive.cpython-39.pyc ADDED
Binary file (1.5 kB). View file
 
modeling/utils/__pycache__/misc.cpython-39.pyc ADDED
Binary file (11.2 kB). View file
 
modeling/vision/backbone/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (552 Bytes). View file
 
modeling/vision/backbone/__pycache__/backbone.cpython-39.pyc ADDED
Binary file (2.1 kB). View file
 
modeling/vision/backbone/__pycache__/build.cpython-39.pyc ADDED
Binary file (624 Bytes). View file