kernel-luso-comfort committed
Commit 7320170 · 1 Parent(s): 200a1a4

Add images for ontology hierarchy and modality types; update demo description

assets/gpt4_ontology_hierarchy.png ADDED

Git LFS Details

  • SHA256: b6be0580826da27faf4048f4f7d84b031b07176788e73255fd011036e52aae6d
  • Pointer size: 131 Bytes
  • Size of remote file: 521 kB
assets/modality_bar_plot.png ADDED

Git LFS Details

  • SHA256: 897b02c5c5b44422a9d8a4eb6f8d27a194b68123e96db790441eefe0a115a139
  • Pointer size: 130 Bytes
  • Size of remote file: 80.7 kB
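
Both PNGs are tracked with Git LFS, so the repository itself only stores a small text pointer (the "Pointer size" above) while the full image lives on the LFS server. For reference, an LFS pointer file follows the standard spec format sketched below; the `oid` is the SHA256 from the first file above, and the `size` value is an approximation since the listing only reports 521 kB, not the exact byte count:

```
version https://git-lfs.github.com/spec/v1
oid sha256:b6be0580826da27faf4048f4f7d84b031b07176788e73255fd011036e52aae6d
size 521000
```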
main.py CHANGED
@@ -25,6 +25,9 @@ from utilities.constants import BIOMED_CLASSES
 from inference_utils.inference import interactive_infer_image
 
 
+gr.set_static_paths(["assets"])
+
+
 def overlay_masks(image, masks, colors):
     overlay = image.copy()
     overlay = np.array(overlay, dtype=np.uint8)
@@ -76,7 +79,6 @@ def predict(image, prompts):
 
     # Get predictions
     pred_mask = interactive_infer_image(model, image, prompts)
-    print(f"Predicted masks: {pred_mask}")
 
     # Generate visualization
     colors = generate_colors(len(prompts))
@@ -87,6 +89,18 @@ def predict(image, prompts):
     return pred_overlay
 
 
+description = """Upload a biomedical image and enter prompts (separated by commas) to detect specific features.
+
+The model understands these prompts:
+![gpt4_ontology_hierarchy.png](file/assets/gpt4_ontology_hierarchy.png)
+
+The model understands these types of biomedical images:
+![modality_bar_plot.png](file/assets/modality_bar_plot.png)
+
+Above figures are from the [BiomedParse paper](https://arxiv.org/abs/2405.12971).
+"""
+
+
 def run():
     global model
     model = init_model()
@@ -102,8 +116,8 @@ def run():
         ],
         outputs=gr.Image(type="pil", label="Prediction"),
         title="BiomedParse Demo",
-        description="Upload a biomedical image and enter prompts (separated by commas) to detect specific features.",
-        allow_flagging=False,
+        description=description,
+        allow_flagging="never",
         examples=[
             ["examples/144DME_as_F.jpeg", "edema"],
             ["examples/C3_EndoCV2021_00462.jpg", "polyp"],