johnlockejrr committed on
Commit
574eb16
·
verified ·
1 Parent(s): a466386

Upload 3 files

Files changed (3)
  1. README.md +9 -14
  2. app.py +257 -0
  3. requirements.txt +9 -0
README.md CHANGED
@@ -1,14 +1,9 @@
- ---
- title: Medieval-yolo11-seg Streamlit
- emoji: 👁
- colorFrom: yellow
- colorTo: indigo
- sdk: streamlit
- sdk_version: 1.44.1
- app_file: app.py
- pinned: false
- license: mit
- short_description: Medieval Manuscript YOLO11 Segmentation
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: Medieval Manuscript YOLO11 Segmentation
+ emoji: 📜
+ colorFrom: indigo
+ colorTo: purple
+ sdk: streamlit
+ app_file: app.py
+ pinned: false
+ ---
app.py ADDED
@@ -0,0 +1,257 @@
+ from typing import Tuple, Dict, List, Optional
+ import streamlit as st
+ import supervision as sv
+ import numpy as np
+ import cv2
+ from huggingface_hub import hf_hub_download
+ from ultralytics import YOLO
+ from PIL import Image
+ import torch
+
+ torch.cuda.is_available = lambda: False  # Force CPU-only mode in HF Space
+
+ # Page config
+ st.set_page_config(
+     page_title="Medieval Manuscript Segmentation",
+     page_icon="📜",
+     layout="wide"
+ )
+
+ # Define models
+ MODEL_OPTIONS = {
+     "YOLOv11-Nano": "medieval-yolo11n-seg.pt",
+     "YOLOv11-Small": "medieval-yolo11s-seg.pt",
+     "YOLOv11-Medium": "medieval-yolo11m-seg.pt",
+     "YOLOv11-Large": "medieval-yolo11l-seg.pt",
+     "YOLOv11-XLarge": "medieval-yolo11x-seg.pt",
+     "YOLOv11-Medium Zones": "medieval_zones-yolo11m-seg.pt",
+     "YOLOv11-Medium Lines": "medieval_lines-yolo11m-seg.pt",
+     "ms_yolo11m-seg4-YTG": "ms_yolo11m-seg4-YTG.pt",
+     "ms_yolo11m-seg5-swin_t": "ms_yolo11m-seg5-swin_t.pt",
+     "ms_yolo11x-seg2-swin_t": "ms_yolo11x-seg2-swin_t.pt",
+     "ms_yolo11m-seg6-convnext_tiny": "ms_yolo11m-seg6-convnext_tiny.pt",
+     "yolo11m-seg-gpt": "yolo11m-seg-gpt.pt",
+     "ms_yolo11x-seg3-swin_t-fpn": "ms_yolo11x-seg3-swin_t-fpn.pt",
+     "yolo11x-seg-gpt7": "yolo11x-seg-gpt7.pt"
+ }
+
+ @st.cache_resource
+ def load_models():
+     """Load all models and cache them."""
+     models: Dict[str, YOLO] = {}
+     for name, model_file in MODEL_OPTIONS.items():
+         try:
+             model_path = hf_hub_download(
+                 repo_id="johnlockejrr/medieval-manuscript-yolov11-seg",
+                 filename=model_file
+             )
+             models[name] = YOLO(model_path)
+         except Exception as e:
+             st.warning(f"Error loading model {name}: {str(e)}")
+     return models
+
+ def simplify_polygons(polygons: List[np.ndarray], approx_level: float = 0.01) -> List[Optional[np.ndarray]]:
+     """Simplify polygon contours using the Douglas-Peucker algorithm.
+
+     Args:
+         polygons: List of polygon contours
+         approx_level: Approximation level (0-1); higher values mean more simplification
+
+     Returns:
+         List of simplified polygons (or None for invalid polygons)
+     """
+     result = []
+     for polygon in polygons:
+         if len(polygon) < 4:
+             result.append(None)
+             continue
+
+         perimeter = cv2.arcLength(polygon, True)
+         approx = cv2.approxPolyDP(polygon, approx_level * perimeter, True)
+         if len(approx) < 4:
+             result.append(None)
+             continue
+
+         result.append(approx.squeeze())
+     return result
+
+ # Custom MaskAnnotator for outline-only masks with simplified polygons
+ class OutlineMaskAnnotator:
+     def __init__(self, color: tuple = (255, 0, 0), thickness: int = 2, simplify: bool = False):
+         self.color = color
+         self.thickness = thickness
+         self.simplify = simplify
+
+     def annotate(self, scene: np.ndarray, detections: sv.Detections) -> np.ndarray:
+         if detections.mask is None:
+             return scene
+
+         scene = scene.copy()
+         for mask in detections.mask:
+             contours, _ = cv2.findContours(
+                 mask.astype(np.uint8),
+                 cv2.RETR_EXTERNAL,
+                 cv2.CHAIN_APPROX_SIMPLE
+             )
+             if self.simplify:
+                 contours = simplify_polygons(contours)
+                 contours = [c for c in contours if c is not None]
+
+             cv2.drawContours(
+                 scene,
+                 contours,
+                 -1,
+                 self.color,
+                 self.thickness
+             )
+         return scene
+
+ # Create annotators with new settings
+ LABEL_ANNOTATOR = sv.LabelAnnotator(
+     text_color=sv.Color.BLACK,
+     text_scale=0.35,
+     text_thickness=1,
+     text_padding=2
+ )
+
+ def detect_and_annotate(
+     image: np.ndarray,
+     model_name: str,
+     conf_threshold: float,
+     iou_threshold: float,
+     simplify_polygons_option: bool
+ ) -> np.ndarray:
+     # Get the selected model
+     model = models[model_name]
+
+     # Perform inference
+     results = model.predict(
+         image,
+         conf=conf_threshold,
+         iou=iou_threshold
+     )[0]
+
+     # Convert results to supervision Detections
+     boxes = results.boxes.xyxy.cpu().numpy()
+     confidence = results.boxes.conf.cpu().numpy()
+     class_ids = results.boxes.cls.cpu().numpy().astype(int)
+
+     # Handle masks if they exist
+     masks = None
+     if results.masks is not None:
+         masks = results.masks.data.cpu().numpy()
+         # Convert from (N,H,W) to (H,W,N) for processing
+         masks = np.transpose(masks, (1, 2, 0))
+         h, w = image.shape[:2]
+         resized_masks = []
+         for i in range(masks.shape[-1]):
+             resized_mask = cv2.resize(masks[..., i], (w, h), interpolation=cv2.INTER_LINEAR)
+             resized_masks.append(resized_mask > 0.5)
+         masks = np.stack(resized_masks) if resized_masks else None
+
+     # Create Detections object
+     detections = sv.Detections(
+         xyxy=boxes,
+         confidence=confidence,
+         class_id=class_ids,
+         mask=masks
+     )
+
+     # Create labels with confidence scores
+     labels = [
+         f"{results.names[class_id]} ({conf:.2f})"
+         for class_id, conf
+         in zip(class_ids, confidence)
+     ]
+
+     # Create mask annotator based on the simplify option
+     mask_annotator = OutlineMaskAnnotator(
+         color=(255, 0, 0),
+         thickness=2,
+         simplify=simplify_polygons_option
+     )
+
+     # Annotate image
+     annotated_image = image.copy()
+     if masks is not None:
+         annotated_image = mask_annotator.annotate(scene=annotated_image, detections=detections)
+     annotated_image = LABEL_ANNOTATOR.annotate(scene=annotated_image, detections=detections, labels=labels)
+
+     return annotated_image
+
+ # Load models
+ models = load_models()
+
+ # App title
+ st.title("Medieval Manuscript Segmentation with YOLO")
+
+ # Sidebar for controls
+ with st.sidebar:
+     st.header("Detection Settings")
+
+     model_name = st.selectbox(
+         "Model",
+         options=list(MODEL_OPTIONS.keys()),
+         index=0,
+         help="Select YOLO model variant"
+     )
+
+     conf_threshold = st.slider(
+         "Confidence Threshold",
+         min_value=0.0,
+         max_value=1.0,
+         value=0.25,
+         step=0.05,
+         help="Minimum confidence score for detections"
+     )
+
+     iou_threshold = st.slider(
+         "IoU Threshold",
+         min_value=0.0,
+         max_value=1.0,
+         value=0.45,
+         step=0.05,
+         help="Decrease for stricter detection, increase for more overlapping masks"
+     )
+
+     simplify_polygons_option = st.checkbox(
+         "Simplify Polygons",
+         value=False,
+         help="Simplify polygon contours for cleaner outlines"
+     )
+
+ # Main content area
+ col1, col2 = st.columns(2)
+
+ with col1:
+     st.subheader("Input Image")
+     uploaded_file = st.file_uploader(
+         "Upload an image",
+         type=["jpg", "jpeg", "png"],
+         key="file_uploader"
+     )
+
+     if uploaded_file is not None:
+         image = np.array(Image.open(uploaded_file))
+         st.image(image, caption="Uploaded Image", use_column_width=True)
+     else:
+         image = None
+         st.info("Please upload an image file")
+
+ with col2:
+     st.subheader("Detection Result")
+
+     if st.button("Detect", type="primary") and image is not None:
+         with st.spinner("Processing image..."):
+             annotated_image = detect_and_annotate(
+                 image,
+                 model_name,
+                 conf_threshold,
+                 iou_threshold,
+                 simplify_polygons_option
+             )
+             st.image(annotated_image, caption="Detection Result", use_column_width=True)
+     elif image is None:
+         st.warning("Please upload an image first")
+     else:
+         st.info("Click the Detect button to process the image")
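
For quick checks outside the Streamlit UI, the same checkpoints can be exercised from a plain Python script. The following is a minimal sketch, not part of the commit: it reuses the repo_id, one of the MODEL_OPTIONS filenames, and the app's default thresholds (conf=0.25, iou=0.45) from app.py above; the input and output paths are hypothetical placeholders.

# Standalone inference sketch (no Streamlit), based on the calls used in app.py.
# "page.jpg" and "page_annotated.jpg" are hypothetical example paths.
import cv2
from huggingface_hub import hf_hub_download
from ultralytics import YOLO

model_path = hf_hub_download(
    repo_id="johnlockejrr/medieval-manuscript-yolov11-seg",
    filename="medieval-yolo11m-seg.pt",  # any filename listed in MODEL_OPTIONS
)
model = YOLO(model_path)

# Same defaults as the app's sliders: conf=0.25, iou=0.45
results = model.predict("page.jpg", conf=0.25, iou=0.45)[0]
print(f"{len(results.boxes)} regions detected")

# Ultralytics' built-in rendering of boxes/masks (BGR array), saved with OpenCV
annotated = results.plot()
cv2.imwrite("page_annotated.jpg", annotated)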
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ streamlit
+ supervision
+ huggingface-hub
+ ultralytics
+ opencv-python
+ numpy
+ Pillow
+ torch
+ torchvision
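
With these dependencies installed (for example via `pip install -r requirements.txt`), the Space should also run locally with `streamlit run app.py`; since app.py forces CPU-only mode, no GPU-enabled torch build is required.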