Luigi committed on
Commit 5d346e0 · 1 Parent(s): baa9eed

Add a demo on how to build a UI that lets the user draw on a video preview

Files changed (1)
  1. ui_demo.py +134 -0
ui_demo.py ADDED
@@ -0,0 +1,134 @@
+import gradio as gr
+import numpy as np
+import cv2
+import tempfile
+
+def extract_first_frame(video_file):
+    # Open the video using OpenCV. The video_file is assumed to be a file-like object.
+    cap = cv2.VideoCapture(video_file.name if hasattr(video_file, "name") else video_file)
+    frame = None
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            break
+        if frame is not None and frame.size != 0:
+            break
+    cap.release()
+    if frame is None or frame.size == 0:
+        return None
+    # Convert frame from BGR to RGB format.
+    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+    return frame_rgb
+
+def process_video(editor_image, epsilon_ratio, video_file):
+    # This function is analogous to your original "process_image" but now,
+    # it uses the drawing from the editor (on the first frame) and overlays
+    # the derived polygon on every frame of the uploaded video.
+    if editor_image is None:
+        return "❌ No image provided.", None
+
+    composite = editor_image.get("composite")
+    original = editor_image.get("background")
+
+    if composite is None or original is None:
+        return "⚠️ Please load the first frame and add a drawing layer.", None
+
+    composite_np = np.array(composite)
+    original_np = np.array(original)
+
+    # Extract red channel information to detect drawn strokes
+    r_channel = composite_np[:, :, 0]
+    g_channel = composite_np[:, :, 1]
+    b_channel = composite_np[:, :, 2]
+    # Use a threshold to detect red strokes (assuming user draws with a vivid red)
+    red_mask = (r_channel > 150) & (g_channel < 100) & (b_channel < 100)
+    binary_mask = red_mask.astype(np.uint8) * 255
+
+    # Find contours from the binary mask
+    contours, _ = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+    if not contours:
+        return "⚠️ No visible drawing found. Please use the brush on a new layer.", None
+    largest_contour = max(contours, key=cv2.contourArea)
+
+    # Approximate contour to polygon using provided epsilon_ratio
+    epsilon = epsilon_ratio * cv2.arcLength(largest_contour, True)
+    polygon = cv2.approxPolyDP(largest_contour, epsilon, True)
+    if polygon is None or len(polygon) < 3:
+        return "⚠️ Polygon extraction failed. Try drawing a clearer shape.", None
+
+    polygon = polygon.astype(np.int32).reshape((-1, 1, 2))
+    polygon_coords = polygon.reshape(-1, 2).tolist()
+
+    # Open the input video for overlaying the polygon on every frame.
+    cap = cv2.VideoCapture(video_file.name if hasattr(video_file, "name") else video_file)
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    # Create a temporary file for saving the output video
+    temp_output = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
+    out = cv2.VideoWriter(temp_output, fourcc, fps, (width, height))
+
+    # Process each frame and draw the polygon overlay
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            break
+        overlay = frame.copy()
+        cv2.polylines(overlay, [polygon], isClosed=True, color=(0, 255, 0), thickness=10)
+        for idx, (x, y) in enumerate(polygon_coords):
+            cv2.circle(overlay, (x, y), 5, (0, 0, 255), -1)
+            cv2.putText(overlay, str(idx + 1), (x + 5, y - 5),
+                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
+        out.write(overlay)
+    cap.release()
+    out.release()
+
+    msg = f"✅ Polygon with {len(polygon_coords)} points (ε={epsilon_ratio}):\n{polygon_coords}"
+    return msg, temp_output
+
+# Build the Gradio interface using Blocks
+with gr.Blocks() as demo:
+    gr.HTML("<style>body { margin: 0; padding: 0; }</style>")
+    gr.Markdown("## 🖌️ Accurate Polygon Extraction & Video Overlay")
+    gr.Markdown(
+        """
+        **Instructions:**
+        1. Upload a video.
+        2. Click **Load First Frame to Editor** to extract a frame for annotation.
+        3. ➕ Add a drawing layer and draw with the brush (use red strokes).
+        4. Adjust polygon approximation if needed.
+        5. Click **Process Drawing and Overlay on Video** — the generated video will show the green polygon overlaid on every frame.
+        """
+    )
+
+    with gr.Tab("Load Video"):
+        video_input = gr.Video(label="Upload Video", format="mp4")
+        load_frame_btn = gr.Button("Load First Frame to Editor")
+        # The ImageEditor will be preloaded with the extracted frame.
+        frame_editor = gr.ImageEditor(label="Draw on this frame (Add a layer first!)", type="numpy", width=1920, height=1080)
+
+    epsilon_slider = gr.Slider(
+        label="Polygon Approximation (ε)", minimum=0.001, maximum=0.05, value=0.01, step=0.001
+    )
+
+    with gr.Row():
+        output_text = gr.Textbox(label="Polygon Coordinates", lines=6)
+        video_preview = gr.Video(label="Video with Polygon Overlay", format="mp4")
+
+    # Function to load the first non-empty frame from the uploaded video.
+    def load_frame(video_file):
+        frame = extract_first_frame(video_file)
+        if frame is None:
+            return gr.update(value=None), "❌ Failed to extract frame from video."
+        # Return the frame for the editor and a confirmation message.
+        return frame, "Frame loaded successfully."
+
+    load_frame_btn.click(fn=load_frame, inputs=video_input, outputs=[frame_editor, output_text])
+
+    # Process the drawing and overlay the polygon on the video.
+    process_btn = gr.Button("Process Drawing and Overlay on Video")
+    process_btn.click(fn=process_video, inputs=[frame_editor, epsilon_slider, video_input],
+                      outputs=[output_text, video_preview])
+
+demo.launch()
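
For reference, the red-stroke-to-polygon step that process_video() applies to the editor composite can be exercised offline with a minimal sketch like the one below. This is not part of the commit: the synthetic canvas size, the rectangle standing in for the user's brush strokes, and the epsilon value are arbitrary illustration choices, and it assumes OpenCV 4.x, where cv2.findContours returns (contours, hierarchy).

import cv2
import numpy as np

# Synthetic stand-in for the ImageEditor composite: a dark canvas with a vivid
# red rectangle playing the role of the user's brush strokes (RGB channel order,
# matching what the editor returns with type="numpy").
composite_np = np.zeros((720, 1280, 3), dtype=np.uint8)
cv2.rectangle(composite_np, (200, 150), (900, 550), color=(255, 30, 30), thickness=12)

# Same red-stroke threshold used in process_video().
r_channel = composite_np[:, :, 0]
g_channel = composite_np[:, :, 1]
b_channel = composite_np[:, :, 2]
red_mask = (r_channel > 150) & (g_channel < 100) & (b_channel < 100)
binary_mask = red_mask.astype(np.uint8) * 255

# Largest contour, simplified with the slider's default epsilon ratio of 0.01.
contours, _ = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
largest_contour = max(contours, key=cv2.contourArea)
epsilon = 0.01 * cv2.arcLength(largest_contour, True)
polygon = cv2.approxPolyDP(largest_contour, epsilon, True)
print(polygon.reshape(-1, 2).tolist())  # roughly the four outer corners of the rectangle

Seeing roughly four corner points printed is a quick check that the thresholding and cv2.approxPolyDP parameters behave as intended before wiring them into the Gradio callbacks.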