alanbarret committed on
Commit
87a0be7
·
1 Parent(s): b415fbe

Implement initial project structure and setup

Browse files
Files changed (4) hide show
  1. .gradio/certificate.pem +31 -0
  2. app.py +194 -0
  3. models/rugai_m_v2.pt +3 -0
  4. requirements.txt +6 -0
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ -----BEGIN CERTIFICATE-----
2
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
3
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
4
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
5
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
6
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
7
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
8
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
9
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
10
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
11
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
12
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
13
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
14
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
15
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
16
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
17
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
18
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
19
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
20
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
21
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
22
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
23
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
24
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
25
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
26
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
27
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
28
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
29
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
30
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
31
+ -----END CERTIFICATE-----
app.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from ultralytics import YOLO
import cv2
import numpy as np
from PIL import Image
from sklearn.cluster import DBSCAN

# Load the custom rug-knot detection weights (models/rugai_m_v2.pt is a
# Git-LFS tracked YOLO checkpoint, ~40 MB).  Loaded once at import time
# so every Gradio request reuses the same model instance.
model = YOLO('models/rugai_m_v2.pt')
def remove_overlapping_boxes(boxes, iou_threshold=0.3):
    """Greedily prune overlapping detections, keeping larger boxes.

    Boxes are visited largest-first; a remaining candidate is discarded
    when the area of its intersection with the box just kept, divided by
    the candidate's OWN area, reaches ``iou_threshold``.  NOTE(review):
    despite the parameter name this is an intersection-over-area ratio,
    not a true IoU — a small box fully inside a large one scores 1.0 and
    is always removed.

    Parameters
    ----------
    boxes : list of (x1, y1, x2, y2)
        Corner coordinates of the detection rectangles.
    iou_threshold : float, optional
        Overlap ratio at or above which a box is dropped.

    Returns
    -------
    list
        Indices (into ``boxes``) of the detections to keep.
    """
    if not boxes:
        return []

    coords = np.array(boxes)
    areas = (coords[:, 2] - coords[:, 0]) * (coords[:, 3] - coords[:, 1])
    # Process boxes from largest area to smallest.
    order = np.argsort(areas)[::-1]

    kept = []
    while order.size > 0:
        current, rest = order[0], order[1:]
        kept.append(current)

        # Intersection rectangle of the current box with every remaining box.
        ix1 = np.maximum(coords[current, 0], coords[rest, 0])
        iy1 = np.maximum(coords[current, 1], coords[rest, 1])
        ix2 = np.minimum(coords[current, 2], coords[rest, 2])
        iy2 = np.minimum(coords[current, 3], coords[rest, 3])
        inter = np.maximum(0, ix2 - ix1) * np.maximum(0, iy2 - iy1)

        # Ratio of the intersection to each remaining box's own area;
        # survivors are those below the threshold.
        ratio = inter / areas[rest]
        order = rest[ratio < iou_threshold]

    return kept
def _cluster_axis(values, eps):
    """Cluster 1-D coordinates with DBSCAN and return sorted cluster means.

    Collapses individual knot centers into shared row/column positions.
    Points DBSCAN labels as noise (-1) are ignored.
    """
    if not values:
        return []
    points = np.array(values).reshape(-1, 1)
    labels = DBSCAN(eps=eps, min_samples=2, metric='euclidean').fit(points).labels_
    centers = [int(np.mean(points[labels == label]))
               for label in np.unique(labels) if label != -1]
    centers.sort()
    return centers


def process_image(image, show_boxes=True):
    """Detect rug knots, infer the knot grid, and render an annotated image.

    Parameters
    ----------
    image : PIL.Image.Image or np.ndarray
        Input rug photograph.
    show_boxes : bool, optional
        When True, draw a green rectangle around each detected knot.

    Returns
    -------
    tuple[np.ndarray, str]
        The white-padded, annotated image and a textual summary
        (rows, columns, density).
    """
    # Gradio supplies a PIL image; normalise to a numpy array.
    if isinstance(image, Image.Image):
        image = np.array(image)

    # Low confidence + high NMS IoU deliberately over-detects; duplicates
    # are pruned explicitly below with remove_overlapping_boxes.
    results = model.predict(image, imgsz=320, conf=0.25, iou=0.9)[0]

    centers_x = []
    centers_y = []
    boxes = []  # each entry: {'coords': (x1, y1, x2, y2), 'center': (cx, cy)}
    height, width = image.shape[:2]

    for box in results.boxes:
        x1, y1, x2, y2 = map(int, box.xyxy[0].cpu().numpy())
        center_x = (x1 + x2) // 2
        center_y = (y1 + y2) // 2
        boxes.append({
            'coords': (x1, y1, x2, y2),
            'center': (center_x, center_y)
        })
        centers_x.append(center_x)
        centers_y.append(center_y)

    # Drop duplicate/overlapping detections, keeping the larger boxes.
    if boxes:
        box_coords = [b['coords'] for b in boxes]
        keep_indices = remove_overlapping_boxes(box_coords, iou_threshold=0.3)
        boxes = [boxes[i] for i in keep_indices]
        centers_x = [centers_x[i] for i in keep_indices]
        centers_y = [centers_y[i] for i in keep_indices]

    centers_y.sort()
    centers_x.sort()

    # Clustering tolerance scales with average knot size so grid detection
    # adapts to image resolution.  Clamped to >= 1 because DBSCAN requires
    # a strictly positive eps (int(avg * 0.22) can round to 0 for tiny boxes).
    if boxes:
        avg_width = sum(b['coords'][2] - b['coords'][0] for b in boxes) / len(boxes)
        avg_height = sum(b['coords'][3] - b['coords'][1] for b in boxes) / len(boxes)
        x_tolerance = max(1, int(avg_width * 0.22))
        y_tolerance = max(1, int(avg_height * 0.22))
    else:
        x_tolerance = y_tolerance = 5

    # Collapse knot centers into representative row/column positions.
    rows = _cluster_axis(centers_y, y_tolerance)
    cols = _cluster_axis(centers_x, x_tolerance)

    # Total count assumes the knots form a regular rows x columns grid.
    total_knots = len(rows) * len(cols)

    # Add white padding around the image so measurement annotations fit.
    padding = 100
    padded_img = np.full((height + 2 * padding, width + 2 * padding, 3), 255, dtype=np.uint8)
    padded_img[padding:padding + height, padding:padding + width] = image

    # Draw per-knot boxes if requested.
    if show_boxes:
        for b in boxes:
            x1, y1, x2, y2 = b['coords']
            cv2.rectangle(padded_img,
                          (x1 + padding, y1 + padding),
                          (x2 + padding, y2 + padding),
                          (0, 255, 0), 2)

    # Horizontal measurement line (column count) above the image.
    cv2.line(padded_img, (padding, padding // 2), (width + padding, padding // 2), (0, 0, 0), 2)
    cv2.putText(padded_img, f"{len(cols)} knots",
                (padding + width // 2 - 100, padding // 2 - 10),
                cv2.FONT_HERSHEY_DUPLEX, 0.7, (0, 0, 0), 2)

    # Vertical measurement line (row count) to the right of the image.
    cv2.line(padded_img, (width + padding + padding // 2, padding),
             (width + padding + padding // 2, height + padding), (0, 0, 0), 2)
    cv2.putText(padded_img, f"{len(rows)} knots",
                (width + padding + padding // 2 + 10, padding + height // 2),
                cv2.FONT_HERSHEY_DUPLEX, 0.7, (0, 0, 0), 2)

    # Total knot count below the image.
    cv2.putText(padded_img, f"{int(total_knots)} Total Knots",
                (padding + width // 2 - 100, height + padding + padding // 2),
                cv2.FONT_HERSHEY_DUPLEX, 0.7, (0, 0, 0), 2)

    # Knot density per cm² (assumes 1 pixel ~= 0.0264 cm, i.e. ~96 DPI —
    # TODO confirm against the actual capture setup).
    area_cm2 = width * height * 0.0264 * 0.0264
    density = total_knots / area_cm2 if area_cm2 > 0 else 0

    # BUG FIX: the original printed int(total_knots) here instead of the
    # computed density, leaving `density` unused.
    cv2.putText(padded_img, f"{density:.2f} knots/sqcm",
                (padding + width // 2 - 100, height + padding + padding // 2 + 30),
                cv2.FONT_HERSHEY_DUPLEX, 0.7, (0, 0, 0), 2)

    # BUG FIX: detection_info was never initialised before '+=' in the
    # original, raising UnboundLocalError on every call; it also reported
    # the total knot count as the density.
    detection_info = f"Rows: {len(rows)}\n"
    detection_info += f"Columns: {len(cols)}\n"
    detection_info += f"Density: {density:.2f} knots/cm²"

    return padded_img, detection_info
# Build the Gradio UI.  NOTE: the variable name `demo` is the conventional
# entry point Hugging Face Spaces looks for — do not rename it.
with gr.Blocks(title="Rug Knot Detector") as demo:
    gr.Markdown("# 🧶 Rug Knot Detector")
    gr.Markdown("Upload an image of a rug to detect and analyze knots using our custom YOLO model.")

    with gr.Row():
        # Left column: image upload, display toggle, and trigger button.
        with gr.Column():
            input_image = gr.Image(type="pil", label="Upload Rug Image")
            show_boxes = gr.Checkbox(label="Show Detection Boxes", value=True)
            detect_btn = gr.Button("Detect Knots")

        # Right column: annotated result image and textual summary.
        with gr.Column():
            output_image = gr.Image(label="Detection Results")
            output_text = gr.Textbox(label="Detection Information", lines=5)

    # Wire the button to the detection pipeline defined above.
    detect_btn.click(
        fn=process_image,
        inputs=[input_image, show_boxes],
        outputs=[output_image, output_text]
    )

if __name__ == "__main__":
    # share=True opens a temporary public Gradio tunnel URL.
    demo.launch(share=True)
models/rugai_m_v2.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:47f692017990db9883779d548895ab5a002cc4a33fbf477edbe28e864e370612
3
+ size 40519845
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ gradio
2
+ ultralytics==8.3.72
3
+ ultralytics-thop==2.0.14
4
+ Pillow
5
+ numpy
6
+ scikit-learn
7
+ opencv-python