Update app.py
app.py
CHANGED
@@ -4,12 +4,12 @@
 import gradio as gr
 import cv2
 import numpy as np
-from PIL import Image
-import mediapipe as mp
 import os
 import json
 import pandas as pd
 from datetime import datetime

 # Setup folders
 os.makedirs("pose_images", exist_ok=True)
@@ -28,30 +28,11 @@ pose_model = mp_pose.Pose(static_image_mode=True, model_complexity=2)
 mp_drawing = mp.solutions.drawing_utils
 mp_styles = mp.solutions.drawing_styles

-# Define
-POSE_CONNECTIONS = [
-    # Face
-    (0, 1), (1, 2), (2, 3), (3, 7), (0, 4), (4, 5), (5, 6), (6, 8),
-    # Torso
-    (9, 10), (11, 12), (11, 13), (13, 15), (15, 17), (15, 19), (15, 21),
-    (12, 14), (14, 16), (16, 18), (16, 20), (16, 22), (11, 23), (12, 24),
-    (23, 24),
-    # Left arm
-    (11, 13), (13, 15), (15, 17), (17, 19), (19, 21),
-    # Right arm
-    (12, 14), (14, 16), (16, 18), (18, 20), (20, 22),
-    # Left leg
-    (23, 25), (25, 27), (27, 29), (29, 31), (27, 31),
-    # Right leg
-    (24, 26), (26, 28), (28, 30), (30, 32), (28, 32)
-]
-
 def create_pose_graph_data(pose_landmarks):
-    """Create nodes and edges data structure from pose landmarks"""
     nodes = {}
     edges = []
-
-    # Create nodes
     for idx, lm in enumerate(pose_landmarks.landmark):
         name = mp_pose.PoseLandmark(idx).name
         nodes[idx] = {
@@ -62,11 +43,9 @@ def create_pose_graph_data(pose_landmarks):
             "z": round(lm.z, 4),
             "visibility": round(lm.visibility, 3)
         }
-
-    # Create edges based on MediaPipe connections
     for connection in mp_pose.POSE_CONNECTIONS:
-        start_idx = connection
-        end_idx = connection[1]
         if start_idx < len(pose_landmarks.landmark) and end_idx < len(pose_landmarks.landmark):
             edges.append({
                 "from": start_idx,
@@ -74,27 +53,23 @@
                 "from_name": mp_pose.PoseLandmark(start_idx).name,
                 "to_name": mp_pose.PoseLandmark(end_idx).name
             })
-
     return nodes, edges

 def process_pose(image, pose_description=""):
-    """Process pose image and return overlay with pose data"""
     if image is None:
         return None, "❌ Please upload an image.", "", None
-
-    # Timestamp-based ID
     ts = datetime.now().strftime("%Y%m%d_%H%M%S")
     pose_id = f"pose_{ts}"
-
-    # Convert to OpenCV
     img_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
-
-    # Detect pose
     results = pose_model.process(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB))
     if not results.pose_landmarks:
         return None, "❌ No pose detected.", "", None
-
-    # Draw overlay
     overlay = img_bgr.copy()
     mp_drawing.draw_landmarks(
         overlay,
@@ -102,18 +77,12 @@ def process_pose(image, pose_description=""):
         mp_pose.POSE_CONNECTIONS,
         landmark_drawing_spec=mp_styles.get_default_pose_landmarks_style()
     )
-
-    # Convert back to RGB for display
     overlay_rgb = cv2.cvtColor(overlay, cv2.COLOR_BGR2RGB)
-
-    # Save overlay image
     overlay_path = f"pose_images/{pose_id}.png"
     cv2.imwrite(overlay_path, overlay)
-
-    # Create graph data structure
     nodes, edges = create_pose_graph_data(results.pose_landmarks)
-
-    # Create pose data summary
     pose_data = {
         "pose_id": pose_id,
         "total_nodes": len(nodes),
@@ -122,8 +91,7 @@
         "edges": edges,
         "description": pose_description if pose_description else "No description provided"
     }
-
-    # Save to JSON dataset
     pose_dataset[pose_id] = {
         "pose_name": pose_id,
         "image_path": overlay_path,
@@ -131,242 +99,141 @@
         "pose_data": pose_data,
         "timestamp": ts
     }
-
     with open(json_path, "w") as f:
         json.dump(pose_dataset, f, indent=2)
-
-
-    data_display = f"""
-🎯 **Pose Analysis Results**

 π **Graph Structure:**
-- Total Nodes
-- Total Edges

-π **Pose Description:** {pose_description

 π **Key Nodes (First 10):**
 """
-
     for i, (idx, node) in enumerate(list(nodes.items())[:10]):
-        data_display += f"• {node['name']}: ({node['x']
-
     if len(nodes) > 10:
         data_display += f"... and {len(nodes) - 10} more nodes\n"
-
-    data_display +=
-
-    """
-
-    for i, edge in enumerate(edges[:5]):
         data_display += f"• {edge['from_name']} → {edge['to_name']}\n"
     if len(edges) > 5:
         data_display += f"... and {len(edges) - 5} more connections\n"
-
-    data_display += f""
-
-- Image: {overlay_path}
-- JSON: {json_path}
-- Pose ID: {pose_id}
-    """
-
     return overlay_rgb, data_display, f"✅ Pose '{pose_id}' saved successfully!", None

 def save_with_description(image, description):
-    """Save pose with description"""
     if image is None:
         return None, "❌ Please upload an image first.", "❌ No image to process", None
-
     overlay_img, data_display, status, _ = process_pose(image, description)
     csv_file = create_csv_download()
     return overlay_img, data_display, status, csv_file

 def create_csv_download():
-    """Create CSV file from pose dataset for download"""
     if not pose_dataset:
         return None
-
-    # Prepare data for CSV
     csv_data = []
-
     for pose_id, pose_info in pose_dataset.items():
-        pose_data = pose_info.get(
-                'data_type': 'node',
-                'element_id': node_id,
-                'element_name': node_info.get('name', ''),
-                'x_coordinate': node_info.get('x', ''),
-                'y_coordinate': node_info.get('y', ''),
-                'z_coordinate': node_info.get('z', ''),
-                'visibility': node_info.get('visibility', ''),
-                'connection_from': '',
-                'connection_to': '',
-                'connection_from_name': '',
-                'connection_to_name': ''
-            })
-
-        # Add edges data
-        for edge in edges:
-            csv_data.append({
-                'pose_id': pose_id,
-                'pose_description': pose_info.get('pose_description', ''),
-                'timestamp': pose_info.get('timestamp', ''),
-                'data_type': 'edge',
-                'element_id': f"{edge['from']}-{edge['to']}",
-                'element_name': f"{edge['from_name']}_to_{edge['to_name']}",
-                'x_coordinate': '',
-                'y_coordinate': '',
-                'z_coordinate': '',
-                'visibility': '',
-                'connection_from': edge['from'],
-                'connection_to': edge['to'],
-                'connection_from_name': edge['from_name'],
-                'connection_to_name': edge['to_name']
-            })
-
-    # Create DataFrame and save as CSV
     df = pd.DataFrame(csv_data)
-    csv_filename = f"
     df.to_csv(csv_filename, index=False)
-
     return csv_filename

 def export_current_pose_csv(image, description=""):
-    """Export current pose analysis as CSV"""
     if image is None:
         return None, "❌ Please upload an image first."
-
-    # Create CSV for current pose only
     if pose_dataset:
         latest_pose_id = max(pose_dataset.keys())
-
-        # Add nodes data
-        for node_id, node_info in nodes.items():
-            csv_data.append({
-                'pose_id': latest_pose_id,
-                'pose_description': pose_info.get('pose_description', ''),
-                'timestamp': pose_info.get('timestamp', ''),
-                'data_type': 'node',
-                'element_id': node_id,
-                'element_name': node_info.get('name', ''),
-                'x_coordinate': node_info.get('x', ''),
-                'y_coordinate': node_info.get('y', ''),
-                'z_coordinate': node_info.get('z', ''),
-                'visibility': node_info.get('visibility', ''),
-                'connection_from': '',
-                'connection_to': '',
-                'connection_from_name': '',
-                'connection_to_name': ''
-            })
-
-        # Add edges data
-        for edge in edges:
-            csv_data.append({
-                'pose_id': latest_pose_id,
-                'pose_description': pose_info.get('pose_description', ''),
-                'timestamp': pose_info.get('timestamp', ''),
-                'data_type': 'edge',
-                'element_id': f"{edge['from']}-{edge['to']}",
-                'element_name': f"{edge['from_name']}_to_{edge['to_name']}",
-                'x_coordinate': '',
-                'y_coordinate': '',
-                'z_coordinate': '',
-                'visibility': '',
-                'connection_from': edge['from'],
-                'connection_to': edge['to'],
-                'connection_from_name': edge['from_name'],
-                'connection_to_name': edge['to_name']
-            })
-
-        # Create DataFrame and save as CSV
         df = pd.DataFrame(csv_data)
         csv_filename = f"current_pose_{latest_pose_id}.csv"
         df.to_csv(csv_filename, index=False)
-
         return csv_filename, f"✅ Current pose exported as {csv_filename}"
-
     return None, "❌ No pose data to export"

-#
-with gr.Blocks(title="π§
-    gr.Markdown("# π§
-    gr.Markdown("Upload a
-
     with gr.Row():
         with gr.Column(scale=1):
-
-            gr.Markdown("## 📤 Input")
             input_image = gr.Image(type="numpy", label="Upload Pose Image")
             pose_description = gr.Textbox(
                 label="Pose Description",
-                placeholder="
                 lines=3
             )
-
             with gr.Row():
                 analyze_btn = gr.Button("π Analyze Pose", variant="primary")
                 save_btn = gr.Button("💾 Save with Description", variant="secondary")
-
-            # CSV Download buttons
             gr.Markdown("### 📥 Download Options")
             with gr.Row():
                 download_current_btn = gr.Button("π Download Current Pose CSV", variant="secondary")
                 download_all_btn = gr.Button("π Download All Poses CSV", variant="secondary")
-
-            # Download file outputs
             current_csv_download = gr.File(label="Current Pose CSV", visible=False)
             all_csv_download = gr.File(label="All Poses CSV", visible=False)
             download_status = gr.Textbox(label="Download Status", visible=False)
-
         with gr.Column(scale=1):
-            # Output section
             gr.Markdown("## π Results")
-            output_image = gr.Image(label="Pose with
             status_text = gr.Textbox(label="Status", lines=1)
-
-
-            gr.
-                lines=15,
-                max_lines=20,
-                show_copy_button=True
-            )
-
-    # Button actions
     analyze_btn.click(
         fn=lambda img: process_pose(img, ""),
         inputs=[input_image],
         outputs=[output_image, pose_data_display, status_text, current_csv_download]
     )
-
     save_btn.click(
         fn=save_with_description,
         inputs=[input_image, pose_description],
         outputs=[output_image, pose_data_display, status_text, current_csv_download]
     )
-
-    # CSV Download actions
     download_current_btn.click(
         fn=export_current_pose_csv,
         inputs=[input_image, pose_description],
@@ -378,7 +245,7 @@ with gr.Blocks(title="π§ Advanced Pose Analysis Tool") as demo:
         fn=lambda: gr.update(visible=True),
         outputs=[download_status]
     )
-
     download_all_btn.click(
         fn=create_csv_download,
         outputs=[all_csv_download]
@@ -386,39 +253,25 @@
         fn=lambda: gr.update(visible=True),
         outputs=[all_csv_download]
     )
-
-    # Auto-analyze on image upload
     input_image.change(
         fn=lambda img: process_pose(img, ""),
         inputs=[input_image],
         outputs=[output_image, pose_data_display, status_text, current_csv_download]
     )
-
     gr.Markdown("""
-    ## π CSV Format:
-    The CSV file contains columns:
-    - `pose_id`: Unique identifier for each pose
-    - `pose_description`: Your description of the pose
-    - `data_type`: 'node' for keypoints, 'edge' for connections
-    - `element_name`: Name of the body part or connection
-    - `x_coordinate`, `y_coordinate`, `z_coordinate`: 3D position
-    - `visibility`: Detection confidence (for nodes)
-    - `connection_from/to`: Start/end points (for edges)
-    """)
-
-# Launch the interface
-demo.launch(share=True)

@@ -4,12 +4,12 @@
 import gradio as gr
 import cv2
 import numpy as np
 import os
 import json
 import pandas as pd
 from datetime import datetime
+from PIL import Image
+import mediapipe as mp

 # Setup folders
 os.makedirs("pose_images", exist_ok=True)

@@ -28,30 +28,11 @@ pose_model = mp_pose.Pose(static_image_mode=True, model_complexity=2)
 mp_drawing = mp.solutions.drawing_utils
 mp_styles = mp.solutions.drawing_styles

+# Define function to extract nodes and edges
 def create_pose_graph_data(pose_landmarks):
     nodes = {}
     edges = []
+
     for idx, lm in enumerate(pose_landmarks.landmark):
         name = mp_pose.PoseLandmark(idx).name
         nodes[idx] = {

@@ -62,11 +43,9 @@ def create_pose_graph_data(pose_landmarks):
             "z": round(lm.z, 4),
             "visibility": round(lm.visibility, 3)
         }
+
     for connection in mp_pose.POSE_CONNECTIONS:
+        start_idx, end_idx = connection
         if start_idx < len(pose_landmarks.landmark) and end_idx < len(pose_landmarks.landmark):
             edges.append({
                 "from": start_idx,

@@ -74,27 +53,23 @@
                 "from_name": mp_pose.PoseLandmark(start_idx).name,
                 "to_name": mp_pose.PoseLandmark(end_idx).name
             })
+
     return nodes, edges
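For orientation, `create_pose_graph_data` only builds plain Python containers; a small illustrative sketch of the shape it returns (landmark indices and coordinate values below are invented, only the keys come from the code in this diff):

```python
# Hypothetical example of the graph returned by create_pose_graph_data (values invented).
nodes = {
    0:  {"name": "NOSE",           "x": 0.5123, "y": 0.2210, "z": -0.3301, "visibility": 0.998},
    11: {"name": "LEFT_SHOULDER",  "x": 0.4410, "y": 0.4102, "z": -0.1200, "visibility": 0.991},
    12: {"name": "RIGHT_SHOULDER", "x": 0.6105, "y": 0.4088, "z": -0.1154, "visibility": 0.989},
}
edges = [
    {"from": 11, "to": 12, "from_name": "LEFT_SHOULDER", "to_name": "RIGHT_SHOULDER"},
]
```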

+# Main pose processing function
 def process_pose(image, pose_description=""):
     if image is None:
         return None, "❌ Please upload an image.", "", None
+
     ts = datetime.now().strftime("%Y%m%d_%H%M%S")
     pose_id = f"pose_{ts}"
+
     img_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
     results = pose_model.process(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB))
+
     if not results.pose_landmarks:
         return None, "❌ No pose detected.", "", None
+
     overlay = img_bgr.copy()
     mp_drawing.draw_landmarks(
         overlay,

@@ -102,18 +77,12 @@ def process_pose(image, pose_description=""):
         mp_pose.POSE_CONNECTIONS,
         landmark_drawing_spec=mp_styles.get_default_pose_landmarks_style()
     )
     overlay_rgb = cv2.cvtColor(overlay, cv2.COLOR_BGR2RGB)
     overlay_path = f"pose_images/{pose_id}.png"
     cv2.imwrite(overlay_path, overlay)
+
     nodes, edges = create_pose_graph_data(results.pose_landmarks)
+
     pose_data = {
         "pose_id": pose_id,
         "total_nodes": len(nodes),

@@ -122,8 +91,7 @@
         "edges": edges,
         "description": pose_description if pose_description else "No description provided"
     }
+
     pose_dataset[pose_id] = {
         "pose_name": pose_id,
         "image_path": overlay_path,

@@ -131,242 +99,141 @@
         "pose_data": pose_data,
         "timestamp": ts
     }
+
     with open(json_path, "w") as f:
         json.dump(pose_dataset, f, indent=2)
+
+    data_display = f"""🎯 **Pose Analysis Results**

 π **Graph Structure:**
+- Total Nodes: {len(nodes)}
+- Total Edges: {len(edges)}

+π **Pose Description:** {pose_description or "No description provided"}

 π **Key Nodes (First 10):**
 """
     for i, (idx, node) in enumerate(list(nodes.items())[:10]):
+        data_display += f"• {node['name']}: ({node['x']}, {node['y']}, {node['z']}) [vis: {node['visibility']}]\n"
     if len(nodes) > 10:
         data_display += f"... and {len(nodes) - 10} more nodes\n"
+
+    data_display += "\nπ **Sample Edges:**\n"
+    for edge in edges[:5]:
         data_display += f"• {edge['from_name']} → {edge['to_name']}\n"
     if len(edges) > 5:
         data_display += f"... and {len(edges) - 5} more connections\n"
+
+    data_display += f"\n💾 **Saved as:** {overlay_path}"
+
     return overlay_rgb, data_display, f"✅ Pose '{pose_id}' saved successfully!", None
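Each call to `process_pose` also appends a record to the on-disk JSON dataset; a hypothetical entry, using only the keys visible in this diff (the real record may contain additional fields from lines not shown):

```python
# Hypothetical pose_dataset entry as serialized to json_path (ids, paths and values invented).
example_entry = {
    "pose_20250101_120000": {
        "pose_name": "pose_20250101_120000",
        "image_path": "pose_images/pose_20250101_120000.png",
        "pose_data": {
            "pose_id": "pose_20250101_120000",
            "total_nodes": 33,  # MediaPipe Pose reports 33 landmarks
            "edges": [{"from": 11, "to": 12,
                       "from_name": "LEFT_SHOULDER", "to_name": "RIGHT_SHOULDER"}],
            "description": "No description provided",
        },
        "timestamp": "20250101_120000",
    }
}
```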

+# Save with description and return CSV
 def save_with_description(image, description):
     if image is None:
         return None, "❌ Please upload an image first.", "❌ No image to process", None
+
     overlay_img, data_display, status, _ = process_pose(image, description)
     csv_file = create_csv_download()
     return overlay_img, data_display, status, csv_file

+# ✅ NEW Simplified CSV Creator (Only 3 fields)
 def create_csv_download():
     if not pose_dataset:
         return None
+
     csv_data = []
     for pose_id, pose_info in pose_dataset.items():
+        pose_data = pose_info.get("pose_data", {})
+        pose_description = pose_info.get("pose_description", "")
+        image_path = pose_info.get("image_path", "")
+
+        csv_data.append({
+            "image_name": os.path.basename(image_path),
+            "pose_data": json.dumps(pose_data),
+            "pose_description": pose_description
+        })
+
     df = pd.DataFrame(csv_data)
+    csv_filename = f"simplified_pose_dataset_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
     df.to_csv(csv_filename, index=False)
+
     return csv_filename
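Because the simplified CSV stores the whole graph as one JSON string per row, a downstream consumer has to parse that column back out; a minimal sketch, assuming pandas and a file produced by `create_csv_download` (the filename below is hypothetical):

```python
import json
import pandas as pd

# Read a CSV written by create_csv_download and rebuild the graph dicts from the JSON column.
df = pd.read_csv("simplified_pose_dataset_20250101_120000.csv")  # hypothetical filename
for _, row in df.iterrows():
    graph = json.loads(row["pose_data"])  # back to a dict with "pose_id", "edges", ...
    print(row["image_name"], row["pose_description"], len(graph.get("edges", [])))
```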

+# Export only current pose
 def export_current_pose_csv(image, description=""):
     if image is None:
         return None, "❌ Please upload an image first."
+
+    overlay_img, data_display, status, _ = process_pose(image, description)
+
     if pose_dataset:
         latest_pose_id = max(pose_dataset.keys())
+        pose_info = pose_dataset[latest_pose_id]
+
+        csv_data = [{
+            "image_name": os.path.basename(pose_info.get("image_path", "")),
+            "pose_data": json.dumps(pose_info.get("pose_data", {})),
+            "pose_description": pose_info.get("pose_description", "")
+        }]
+
         df = pd.DataFrame(csv_data)
         csv_filename = f"current_pose_{latest_pose_id}.csv"
         df.to_csv(csv_filename, index=False)
+
         return csv_filename, f"✅ Current pose exported as {csv_filename}"
+
     return None, "❌ No pose data to export"

+# Gradio Interface
+with gr.Blocks(title="π§ Simplified Pose Analysis Tool") as demo:
+    gr.Markdown("# π§ Pose Analysis with MediaPipe + CSV Export")
+    gr.Markdown("Upload a pose image and extract keypoints, description, and download results.")
+
     with gr.Row():
         with gr.Column(scale=1):
+            gr.Markdown("## 📤 Upload")
             input_image = gr.Image(type="numpy", label="Upload Pose Image")
             pose_description = gr.Textbox(
                 label="Pose Description",
+                placeholder="e.g. 'Triangle Pose with right arm up'",
                 lines=3
             )
+
             with gr.Row():
                 analyze_btn = gr.Button("π Analyze Pose", variant="primary")
                 save_btn = gr.Button("💾 Save with Description", variant="secondary")
+
             gr.Markdown("### 📥 Download Options")
             with gr.Row():
                 download_current_btn = gr.Button("π Download Current Pose CSV", variant="secondary")
                 download_all_btn = gr.Button("π Download All Poses CSV", variant="secondary")
+
             current_csv_download = gr.File(label="Current Pose CSV", visible=False)
             all_csv_download = gr.File(label="All Poses CSV", visible=False)
             download_status = gr.Textbox(label="Download Status", visible=False)
+
         with gr.Column(scale=1):
             gr.Markdown("## π Results")
+            output_image = gr.Image(label="Pose with Overlay")
             status_text = gr.Textbox(label="Status", lines=1)
+
+            gr.Markdown("## π§ Pose Data")
+            pose_data_display = gr.Textbox(label="Pose Details", lines=15, show_copy_button=True)
+
+    # Event handlers
     analyze_btn.click(
         fn=lambda img: process_pose(img, ""),
         inputs=[input_image],
         outputs=[output_image, pose_data_display, status_text, current_csv_download]
     )
+
     save_btn.click(
         fn=save_with_description,
         inputs=[input_image, pose_description],
         outputs=[output_image, pose_data_display, status_text, current_csv_download]
     )
+
     download_current_btn.click(
         fn=export_current_pose_csv,
         inputs=[input_image, pose_description],

@@ -378,7 +245,7 @@ with gr.Blocks(title="π§ Advanced Pose Analysis Tool") as demo:
         fn=lambda: gr.update(visible=True),
         outputs=[download_status]
     )
+
     download_all_btn.click(
         fn=create_csv_download,
         outputs=[all_csv_download]

@@ -386,39 +253,25 @@
         fn=lambda: gr.update(visible=True),
         outputs=[all_csv_download]
     )
+
     input_image.change(
         fn=lambda img: process_pose(img, ""),
         inputs=[input_image],
         outputs=[output_image, pose_data_display, status_text, current_csv_download]
     )
+
     gr.Markdown("""
+    ## π How It Works
+    1. Upload a pose image
+    2. Automatically analyze and overlay pose keypoints
+    3. Add an optional description
+    4. Save and export to CSV
+
+    ### CSV Format (Simplified):
+    - `image_name`: Name of the saved pose image
+    - `pose_data`: JSON string containing nodes and edges
+    - `pose_description`: Your text description
+    """)
+
+# Launch app
+demo.launch(share=True)
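Outside the Gradio UI, `process_pose` can also be exercised directly for a quick check, provided it receives an RGB array (that is what `gr.Image(type="numpy")` supplies, and the function converts to BGR itself). A rough sketch with a hypothetical image path, meant to be appended to `app.py` or run in the same module:

```python
import cv2

# Hypothetical offline call to process_pose (assumes the functions above are in scope).
img_bgr = cv2.imread("examples/warrior_pose.jpg")   # hypothetical path
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)  # process_pose expects an RGB array
overlay_rgb, report, status, _ = process_pose(img_rgb, "Warrior II, arms extended")
print(status)  # "✅ Pose '...' saved successfully!" or "❌ No pose detected."
```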