Spaces:
Runtime error
Runtime error
ssss
Browse files- .gitignore +14 -0
- .python-version +1 -0
- README.md +50 -12
- app.py +65 -50
- backend/gradio_screenrecorder/__init__.py +4 -0
- backend/gradio_screenrecorder/screenrecorder.py +272 -0
- demo/__init__.py +0 -0
- demo/app.py +56 -0
- demo/css.css +157 -0
- demo/requirements.txt +1 -0
- demo/space.py +199 -0
- frontend/Example.svelte +80 -0
- frontend/Index.svelte +727 -0
- frontend/gradio.config.js +9 -0
- frontend/package-lock.json +0 -0
- frontend/package.json +56 -0
- frontend/tsconfig.json +22 -0
- frontend/types.d.ts +23 -0
- frontend/vite.config.js +6 -0
- manifest.json +7 -0
- pyproject.toml +51 -0
- requirements.txt +6 -8
- src/.gitignore +14 -0
- src/.python-version +1 -0
- src/README.md +227 -0
- src/backend/gradio_screenrecorder/__init__.py +4 -0
- src/backend/gradio_screenrecorder/screenrecorder.py +272 -0
- src/demo/__init__.py +0 -0
- src/demo/app.py +56 -0
- src/demo/css.css +157 -0
- src/demo/requirements.txt +1 -0
- src/demo/space.py +199 -0
- src/frontend/Example.svelte +80 -0
- src/frontend/Index.svelte +727 -0
- src/frontend/gradio.config.js +9 -0
- src/frontend/package-lock.json +0 -0
- src/frontend/package.json +56 -0
- src/frontend/tsconfig.json +22 -0
- src/frontend/types.d.ts +23 -0
- src/frontend/vite.config.js +6 -0
- src/manifest.json +7 -0
- src/pyproject.toml +51 -0
- src/requirements.txt +2 -0
.gitignore
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.eggs/
|
2 |
+
dist/
|
3 |
+
*.pyc
|
4 |
+
__pycache__/
|
5 |
+
*.py[cod]
|
6 |
+
*$py.class
|
7 |
+
__tmp/*
|
8 |
+
*.pyi
|
9 |
+
.mypycache
|
10 |
+
.ruff_cache
|
11 |
+
node_modules
|
12 |
+
backend/**/templates/
|
13 |
+
|
14 |
+
.venv
|
.python-version
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
3.13
|
README.md
CHANGED
@@ -1,12 +1,50 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Real-Time Screen Assistant - Premium Edition
|
2 |
+
|
3 |
+
This is a premium real-time screen assistant that integrates Google's Gemini 2.0 Live API with advanced screen recording capabilities.
|
4 |
+
|
5 |
+
## Features
|
6 |
+
|
7 |
+
- 🎙️ **Real-time Audio Streaming** - Voice activity detection with noise filtering
|
8 |
+
- 🖥️ **Professional Screen Recording** - Native ScreenRecorder component with webcam overlay
|
9 |
+
- 🤖 **AI Voice Responses** - Bidirectional audio communication with Gemini 2.0
|
10 |
+
- 📝 **Text Response Display** - Real-time text responses with conversation history
|
11 |
+
- 🔄 **Background Task Management** - Proper async handling and cleanup
|
12 |
+
- 📊 **Performance Monitoring** - Real-time stats and adaptive quality
|
13 |
+
|
14 |
+
## Setup
|
15 |
+
|
16 |
+
1. Set your Google AI API key:
|
17 |
+
```bash
|
18 |
+
export GEMINI_API_KEY="your-api-key-here"
|
19 |
+
```
|
20 |
+
|
21 |
+
2. Install dependencies (automatic on HuggingFace Spaces):
|
22 |
+
```bash
|
23 |
+
pip install -r requirements.txt
|
24 |
+
```
|
25 |
+
|
26 |
+
3. Run the application:
|
27 |
+
```bash
|
28 |
+
python app.py
|
29 |
+
```
|
30 |
+
|
31 |
+
## Components
|
32 |
+
|
33 |
+
- **app.py** - Main application with premium real-time integration
|
34 |
+
- **gradio_screenrecorder/** - Custom Gradio component for screen recording
|
35 |
+
- **requirements.txt** - All necessary dependencies including custom components
|
36 |
+
|
37 |
+
## Environment Variables
|
38 |
+
|
39 |
+
- `GEMINI_API_KEY` - Required: Your Google AI API key for Gemini 2.0 Live API
|
40 |
+
|
41 |
+
## Real-time Integration
|
42 |
+
|
43 |
+
This application implements complete real-time frontend integration:
|
44 |
+
|
45 |
+
1. **Continuous Audio Flow** (User → Model) - Voice activity detection
|
46 |
+
2. **Model Audio Output** (Model → User) - AI voice responses
|
47 |
+
3. **Screen Recording Integration** - Professional screen capture
|
48 |
+
4. **Text Response Delivery** (System → User) - Real-time text display
|
49 |
+
|
50 |
+
All features are optimized for 300-second real-time sessions with adaptive quality and intelligent throttling.
|
app.py
CHANGED
@@ -16,10 +16,11 @@ Features:
|
|
16 |
- Enhanced error handling and recovery
|
17 |
- 300s timeout for real-time behavior
|
18 |
"""
|
19 |
-
|
20 |
import asyncio
|
21 |
import os
|
22 |
import time
|
|
|
23 |
from collections import deque
|
24 |
|
25 |
import cv2
|
@@ -30,6 +31,9 @@ from fastrtc import AsyncAudioVideoStreamHandler, ReplyOnPause, Stream, get_clou
|
|
30 |
from google import genai
|
31 |
from google.genai import types
|
32 |
|
|
|
|
|
|
|
33 |
# Environment variable for API key
|
34 |
API_KEY = os.getenv("GEMINI_API_KEY", "")
|
35 |
|
@@ -404,7 +408,7 @@ def handle_connect():
|
|
404 |
app_state["last_status"] = "Initiating connection..."
|
405 |
|
406 |
# Start async connection
|
407 |
-
asyncio.
|
408 |
return "🔄 Initiating connection to GenAI Live API..."
|
409 |
|
410 |
async def handle_disconnect_async():
|
@@ -436,39 +440,6 @@ def get_connection_status():
|
|
436 |
else:
|
437 |
return f"🔴 Disconnected | Status: {app_state['last_status']}"
|
438 |
|
439 |
-
|
440 |
-
|
441 |
-
|
442 |
-
|
443 |
-
# --- Async helpers for streaming handlers ---
|
444 |
-
|
445 |
-
async def audio_stream_handler(audio):
|
446 |
-
if app_state.get("handler") and app_state.get("connected"):
|
447 |
-
return await app_state["handler"].receive(audio)
|
448 |
-
return None
|
449 |
-
|
450 |
-
async def video_stream_handler(frame):
|
451 |
-
if app_state.get("handler") and app_state.get("connected"):
|
452 |
-
return await app_state["handler"].video_receive(frame)
|
453 |
-
return None
|
454 |
-
|
455 |
-
async def ai_audio_output_handler():
|
456 |
-
if app_state.get("handler") and app_state.get("connected"):
|
457 |
-
return await app_state["handler"].emit()
|
458 |
-
return None
|
459 |
-
|
460 |
-
# --- Backend logic for mic test and screen sharing ---
|
461 |
-
|
462 |
-
def backend_mic_test():
|
463 |
-
if app_state.get("handler") and app_state.get("connected"):
|
464 |
-
return "🎙️ Microphone is active and streaming to backend."
|
465 |
-
return "⚠️ Please connect first to test microphone."
|
466 |
-
|
467 |
-
def backend_screen_share():
|
468 |
-
if app_state.get("handler") and app_state.get("connected"):
|
469 |
-
return "🖥️ Screen sharing is active and streaming to backend."
|
470 |
-
return "⚠️ Please connect first to share your screen."
|
471 |
-
|
472 |
def create_interface():
|
473 |
"""PREMIUM: Enhanced interface with complete real-time integration"""
|
474 |
# Initialize premium stream
|
@@ -520,6 +491,20 @@ def create_interface():
|
|
520 |
mic_test_btn = gr.Button("🎙️ Test Microphone", variant="secondary")
|
521 |
screen_share_btn = gr.Button("🖥️ Share Screen", variant="secondary")
|
522 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
523 |
# PREMIUM: Real-time streaming interface
|
524 |
gr.Markdown("### 📡 Premium Real-Time Stream")
|
525 |
|
@@ -533,29 +518,26 @@ def create_interface():
|
|
533 |
interactive=True
|
534 |
)
|
535 |
|
536 |
-
|
537 |
-
|
538 |
-
|
|
|
|
|
|
|
|
|
|
|
539 |
interactive=True
|
540 |
)
|
541 |
|
542 |
-
# PREMIUM: Connect streaming handlers
|
543 |
audio_stream.stream(
|
544 |
-
fn=
|
545 |
inputs=[audio_stream],
|
546 |
outputs=[],
|
547 |
time_limit=300, # Real-time optimized
|
548 |
concurrency_limit=5
|
549 |
)
|
550 |
|
551 |
-
video_stream.stream(
|
552 |
-
fn=video_stream_handler,
|
553 |
-
inputs=[video_stream],
|
554 |
-
outputs=[],
|
555 |
-
time_limit=300, # Real-time optimized
|
556 |
-
concurrency_limit=3
|
557 |
-
)
|
558 |
-
|
559 |
# PREMIUM: AI response display
|
560 |
ai_response_display = gr.Textbox(
|
561 |
label="🤖 AI Response Stream",
|
@@ -571,14 +553,47 @@ def create_interface():
|
|
571 |
streaming=True
|
572 |
)
|
573 |
|
574 |
-
# Connect AI response handlers
|
575 |
ai_audio_output.stream(
|
576 |
-
fn=
|
577 |
inputs=[],
|
578 |
outputs=[ai_audio_output],
|
579 |
time_limit=300
|
580 |
)
|
581 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
582 |
else:
|
583 |
gr.HTML("<div>⚠️ Premium stream initialization failed - Check console for errors</div>")
|
584 |
|
|
|
16 |
- Enhanced error handling and recovery
|
17 |
- 300s timeout for real-time behavior
|
18 |
"""
|
19 |
+
|
20 |
import asyncio
|
21 |
import os
|
22 |
import time
|
23 |
+
import sys
|
24 |
from collections import deque
|
25 |
|
26 |
import cv2
|
|
|
31 |
from google import genai
|
32 |
from google.genai import types
|
33 |
|
34 |
+
# Import the ScreenRecorder component (installed via requirements.txt)
|
35 |
+
from gradio_screenrecorder import ScreenRecorder
|
36 |
+
|
37 |
# Environment variable for API key
|
38 |
API_KEY = os.getenv("GEMINI_API_KEY", "")
|
39 |
|
|
|
408 |
app_state["last_status"] = "Initiating connection..."
|
409 |
|
410 |
# Start async connection
|
411 |
+
asyncio.create_task(handle_connect_async())
|
412 |
return "🔄 Initiating connection to GenAI Live API..."
|
413 |
|
414 |
async def handle_disconnect_async():
|
|
|
440 |
else:
|
441 |
return f"🔴 Disconnected | Status: {app_state['last_status']}"
|
442 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
443 |
def create_interface():
|
444 |
"""PREMIUM: Enhanced interface with complete real-time integration"""
|
445 |
# Initialize premium stream
|
|
|
491 |
mic_test_btn = gr.Button("🎙️ Test Microphone", variant="secondary")
|
492 |
screen_share_btn = gr.Button("🖥️ Share Screen", variant="secondary")
|
493 |
|
494 |
+
# --- Backend logic for mic test and screen sharing ---
|
495 |
+
def backend_mic_test():
|
496 |
+
# Simulate a backend mic test (could be extended to record/playback)
|
497 |
+
if app_state.get("handler") and app_state.get("connected"):
|
498 |
+
return "🎙️ Microphone is active and streaming to backend."
|
499 |
+
return "⚠️ Please connect first to test microphone."
|
500 |
+
|
501 |
+
def backend_screen_share():
|
502 |
+
# Simulate backend screen sharing trigger
|
503 |
+
if app_state.get("handler") and app_state.get("connected"):
|
504 |
+
# In a real implementation, you might set a flag or trigger a backend event
|
505 |
+
return "🖥️ Screen sharing is active and streaming to backend."
|
506 |
+
return "⚠️ Please connect first to share your screen."
|
507 |
+
|
508 |
# PREMIUM: Real-time streaming interface
|
509 |
gr.Markdown("### 📡 Premium Real-Time Stream")
|
510 |
|
|
|
518 |
interactive=True
|
519 |
)
|
520 |
|
521 |
+
# PREMIUM: Integrated ScreenRecorder component
|
522 |
+
screen_recorder = ScreenRecorder(
|
523 |
+
audio_enabled=True,
|
524 |
+
webcam_overlay=True,
|
525 |
+
webcam_position="bottom-right",
|
526 |
+
recording_format="webm",
|
527 |
+
max_duration=300, # 5 minutes - real-time optimized
|
528 |
+
label="🖥️ Screen Recorder (Premium)",
|
529 |
interactive=True
|
530 |
)
|
531 |
|
532 |
+
# PREMIUM: Connect streaming handlers
|
533 |
audio_stream.stream(
|
534 |
+
fn=lambda audio: app_state["handler"].receive(audio) if app_state["handler"] and app_state["connected"] else None,
|
535 |
inputs=[audio_stream],
|
536 |
outputs=[],
|
537 |
time_limit=300, # Real-time optimized
|
538 |
concurrency_limit=5
|
539 |
)
|
540 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
541 |
# PREMIUM: AI response display
|
542 |
ai_response_display = gr.Textbox(
|
543 |
label="🤖 AI Response Stream",
|
|
|
553 |
streaming=True
|
554 |
)
|
555 |
|
556 |
+
# Connect AI response handlers
|
557 |
ai_audio_output.stream(
|
558 |
+
fn=lambda: app_state["handler"].emit() if app_state["handler"] and app_state["connected"] else None,
|
559 |
inputs=[],
|
560 |
outputs=[ai_audio_output],
|
561 |
time_limit=300
|
562 |
)
|
563 |
|
564 |
+
# Connect screen recorder to video handler
|
565 |
+
def handle_screen_recording(recording_data):
|
566 |
+
"""Handle screen recording data and send to AI"""
|
567 |
+
if not recording_data or not app_state["handler"] or not app_state["connected"]:
|
568 |
+
return "⚠️ Not connected to AI or no recording data"
|
569 |
+
|
570 |
+
try:
|
571 |
+
# If we have video data, process it for the AI
|
572 |
+
if recording_data and recording_data.get('video'):
|
573 |
+
# For real-time processing, we could extract frames
|
574 |
+
# For now, just acknowledge the recording
|
575 |
+
duration = recording_data.get('duration', 0)
|
576 |
+
size = recording_data.get('size', 0)
|
577 |
+
print(f"📹 Screen recording received: {duration}s, {size} bytes")
|
578 |
+
|
579 |
+
# Update stats
|
580 |
+
app_state["stats"]["frames_sent"] += 1
|
581 |
+
|
582 |
+
return f"✅ Screen recording processed: {duration:.1f}s"
|
583 |
+
else:
|
584 |
+
return "⚠️ No video data in recording"
|
585 |
+
|
586 |
+
except Exception as e:
|
587 |
+
print(f"❌ Error processing screen recording: {e}")
|
588 |
+
return f"❌ Error: {e}"
|
589 |
+
|
590 |
+
screen_recorder.change(
|
591 |
+
fn=handle_screen_recording,
|
592 |
+
inputs=[screen_recorder],
|
593 |
+
outputs=[ai_response_display],
|
594 |
+
show_progress=False
|
595 |
+
)
|
596 |
+
|
597 |
else:
|
598 |
gr.HTML("<div>⚠️ Premium stream initialization failed - Check console for errors</div>")
|
599 |
|
backend/gradio_screenrecorder/__init__.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
from .screenrecorder import ScreenRecorder
|
3 |
+
|
4 |
+
__all__ = ['ScreenRecorder']
|
backend/gradio_screenrecorder/screenrecorder.py
ADDED
@@ -0,0 +1,272 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from gradio.components.base import Component
|
3 |
+
from gradio.data_classes import FileData, GradioModel
|
4 |
+
from typing import Optional, Literal, Any
|
5 |
+
import tempfile
|
6 |
+
import os
|
7 |
+
import json
|
8 |
+
|
9 |
+
class ScreenRecorderData(GradioModel):
|
10 |
+
video: Optional[FileData] = None
|
11 |
+
duration: Optional[float] = None
|
12 |
+
audio_enabled: bool = True
|
13 |
+
status: Literal["recording", "stopped", "error"] = "stopped"
|
14 |
+
|
15 |
+
class Config:
|
16 |
+
json_encoders = {
|
17 |
+
FileData: lambda v: v.model_dump() if v else None
|
18 |
+
}
|
19 |
+
|
20 |
+
|
21 |
+
class ScreenRecorder(Component):
|
22 |
+
"""
|
23 |
+
Custom Gradio component for comprehensive screen recording functionality.
|
24 |
+
"""
|
25 |
+
|
26 |
+
data_model = ScreenRecorderData
|
27 |
+
|
28 |
+
EVENTS = [
|
29 |
+
"record_start",
|
30 |
+
"record_stop",
|
31 |
+
"stream_update",
|
32 |
+
"change"
|
33 |
+
]
|
34 |
+
|
35 |
+
def __init__(
|
36 |
+
self,
|
37 |
+
value=None,
|
38 |
+
audio_enabled: bool = True,
|
39 |
+
webcam_overlay: bool = False,
|
40 |
+
webcam_position: Literal["top-left", "top-right", "bottom-left", "bottom-right"] = "bottom-right",
|
41 |
+
recording_format: str = "webm",
|
42 |
+
max_duration: Optional[int] = None,
|
43 |
+
interactive: bool = True,
|
44 |
+
**kwargs
|
45 |
+
):
|
46 |
+
self.audio_enabled = audio_enabled
|
47 |
+
self.webcam_overlay = webcam_overlay
|
48 |
+
self.webcam_position = webcam_position
|
49 |
+
self.recording_format = recording_format
|
50 |
+
self.max_duration = max_duration
|
51 |
+
self._status = "stopped"
|
52 |
+
|
53 |
+
super().__init__(
|
54 |
+
value=value,
|
55 |
+
interactive=interactive,
|
56 |
+
**kwargs
|
57 |
+
)
|
58 |
+
|
59 |
+
def example_payload(self) -> dict:
|
60 |
+
"""
|
61 |
+
The example inputs for this component for API usage. Must be JSON-serializable.
|
62 |
+
"""
|
63 |
+
return {
|
64 |
+
"video": {
|
65 |
+
"path": "https://sample-videos.com/zip/10/mp4/SampleVideo_360x240_1mb.mp4",
|
66 |
+
"orig_name": "example_recording.webm",
|
67 |
+
"size": 1024000
|
68 |
+
},
|
69 |
+
"duration": 30.5,
|
70 |
+
"audio_enabled": True,
|
71 |
+
"status": "stopped"
|
72 |
+
}
|
73 |
+
|
74 |
+
def example_value(self) -> ScreenRecorderData:
|
75 |
+
"""
|
76 |
+
An example value for this component for the default app.
|
77 |
+
"""
|
78 |
+
return ScreenRecorderData(
|
79 |
+
video=FileData(
|
80 |
+
path="https://sample-videos.com/zip/10/mp4/SampleVideo_360x240_1mb.mp4",
|
81 |
+
orig_name="example_recording.webm",
|
82 |
+
size=1024000
|
83 |
+
),
|
84 |
+
duration=30.5,
|
85 |
+
audio_enabled=True,
|
86 |
+
status="stopped"
|
87 |
+
)
|
88 |
+
|
89 |
+
def flag(self, x, flag_dir: str = "") -> str:
|
90 |
+
"""
|
91 |
+
Write the component's value to a format for flagging (CSV storage).
|
92 |
+
"""
|
93 |
+
if x is None:
|
94 |
+
return ""
|
95 |
+
|
96 |
+
if isinstance(x, ScreenRecorderData) and x.video:
|
97 |
+
return f"Recording: {x.video.orig_name} ({x.duration}s) - Status: {x.status}"
|
98 |
+
|
99 |
+
if isinstance(x, dict) and "video" in x:
|
100 |
+
duration = x.get("duration", "unknown")
|
101 |
+
status = x.get("status", "unknown")
|
102 |
+
video_name = x["video"].get("orig_name", "unknown") if x["video"] else "none"
|
103 |
+
return f"Recording: {video_name} ({duration}s) - Status: {status}"
|
104 |
+
|
105 |
+
return str(x)
|
106 |
+
|
107 |
+
def preprocess(self, payload) -> Optional[ScreenRecorderData]:
|
108 |
+
"""Process incoming recording data from frontend."""
|
109 |
+
if payload is None:
|
110 |
+
return None
|
111 |
+
|
112 |
+
if isinstance(payload, dict):
|
113 |
+
if payload.get("status") == "error": # Early exit for errors from frontend
|
114 |
+
raise gr.Error(f"Recording failed on frontend: {payload.get('error', 'Unknown error')}")
|
115 |
+
|
116 |
+
# If 'video' field is a string, assume it's JSON and parse it.
|
117 |
+
if "video" in payload and isinstance(payload["video"], str):
|
118 |
+
try:
|
119 |
+
video_json_string = payload["video"]
|
120 |
+
if video_json_string.strip().startswith("{") and video_json_string.strip().endswith("}"):
|
121 |
+
payload["video"] = json.loads(video_json_string)
|
122 |
+
# If it's a string but not our expected JSON (e.g. 'null', or empty string, or simple path)
|
123 |
+
# json.loads would fail or Pydantic validation later will catch it if structure is wrong.
|
124 |
+
# For 'null' string, json.loads results in None for payload["video"].
|
125 |
+
elif video_json_string.lower() == 'null':
|
126 |
+
payload["video"] = None
|
127 |
+
else:
|
128 |
+
# This case implies a string that isn't a JSON object or 'null',
|
129 |
+
# e.g. a direct file path string, which FileData might not directly accept
|
130 |
+
# if it expects a dict. Pydantic will raise error later if type is incompatible.
|
131 |
+
gr.Warning(f"Video data is a string but not a recognized JSON object or 'null': {video_json_string[:100]}")
|
132 |
+
# To be safe, if it's not a JSON object string, we might want to error or handle specifically
|
133 |
+
# For now, let Pydantic try to handle it or fail.
|
134 |
+
|
135 |
+
except json.JSONDecodeError:
|
136 |
+
raise gr.Error(f"Invalid JSON for video data: {payload['video'][:100]}")
|
137 |
+
|
138 |
+
# --- Validations from here ---
|
139 |
+
video_data = payload.get("video") # Use .get() for safety, as 'video' might be absent or None
|
140 |
+
|
141 |
+
if video_data is not None: # Only validate video_data if it exists
|
142 |
+
if not isinstance(video_data, dict):
|
143 |
+
# This can happen if payload["video"] was a string like "some_path.webm" and not parsed to dict
|
144 |
+
# Or if it was parsed to something unexpected.
|
145 |
+
raise gr.Error(f"Video data is not a dictionary after processing: {type(video_data)}. Value: {str(video_data)[:100]}")
|
146 |
+
|
147 |
+
if video_data.get("size", 0) == 0:
|
148 |
+
gr.Warning("Received recording with zero size. This might be an empty recording or an issue with data capture.")
|
149 |
+
# Depending on requirements, could raise gr.Error here.
|
150 |
+
|
151 |
+
max_size = 500 * 1024 * 1024 # 500MB
|
152 |
+
if video_data.get("size", 0) > max_size:
|
153 |
+
raise gr.Error(f"Recording file too large ({video_data.get('size', 0)} bytes). Maximum allowed: {max_size} bytes.")
|
154 |
+
# If video_data is None (e.g. 'video': null was sent, or 'video' key missing),
|
155 |
+
# ScreenRecorderData will have video=None, which is allowed by Optional[FileData].
|
156 |
+
|
157 |
+
duration = payload.get("duration", 0)
|
158 |
+
if duration <= 0 and video_data is not None: # Only warn about duration if there's video data
|
159 |
+
gr.Warning("Recording duration is 0 or invalid. The recording might be corrupted.")
|
160 |
+
|
161 |
+
try:
|
162 |
+
return ScreenRecorderData(**payload)
|
163 |
+
except Exception as e: # Catch Pydantic validation errors or other issues during model instantiation
|
164 |
+
# Log the payload for easier debugging if there's a Pydantic error
|
165 |
+
# Be careful with logging sensitive data in production.
|
166 |
+
# print(f"Error creating ScreenRecorderData. Payload: {payload}")
|
167 |
+
raise gr.Error(f"Error creating ScreenRecorderData from payload: {e}")
|
168 |
+
|
169 |
+
elif isinstance(payload, ScreenRecorderData): # If it's already the correct type
|
170 |
+
return payload
|
171 |
+
|
172 |
+
gr.Warning(f"Unexpected payload format: {type(payload)}. Payload: {str(payload)[:200]}")
|
173 |
+
return None
|
174 |
+
|
175 |
+
# def postprocess(self, value) -> Optional[dict]:
|
176 |
+
# """Process outgoing data to frontend."""
|
177 |
+
# if value is None:
|
178 |
+
# return {"status": "stopped"} # Ensure valid empty state
|
179 |
+
|
180 |
+
# try:
|
181 |
+
# if isinstance(value, ScreenRecorderData):
|
182 |
+
# return value.model_dump()
|
183 |
+
# elif isinstance(value, dict):
|
184 |
+
# return value
|
185 |
+
# return None
|
186 |
+
# except Exception as e:
|
187 |
+
# return {"status": "error", "error": str(e)}
|
188 |
+
|
189 |
+
|
190 |
+
def postprocess(self, value) -> Optional[dict]:
|
191 |
+
"""Process outgoing data to frontend."""
|
192 |
+
print(f'value in postprocess: {value}')
|
193 |
+
if value is None:
|
194 |
+
return None
|
195 |
+
|
196 |
+
try:
|
197 |
+
# If it's already a dict, return as is
|
198 |
+
if isinstance(value, dict):
|
199 |
+
return value
|
200 |
+
|
201 |
+
# If it's a ScreenRecorderData object, convert to dict
|
202 |
+
if hasattr(value, 'model_dump'):
|
203 |
+
return value.model_dump()
|
204 |
+
|
205 |
+
# Handle string values
|
206 |
+
if isinstance(value, str):
|
207 |
+
return {"video": {"path": value}}
|
208 |
+
|
209 |
+
return None
|
210 |
+
|
211 |
+
except Exception as e:
|
212 |
+
print(f'Error in postprocess: {e}')
|
213 |
+
return None
|
214 |
+
|
215 |
+
|
216 |
+
# try:
|
217 |
+
# if isinstance(value, ScreenRecorderData):
|
218 |
+
# # Ensure video data exists before sending
|
219 |
+
# if not value.video:
|
220 |
+
# return {"status": "error", "error": "No video recorded"}
|
221 |
+
|
222 |
+
# return {
|
223 |
+
# "video": value.video,
|
224 |
+
# "duration": value.duration,
|
225 |
+
# "audio_enabled": value.audio_enabled,
|
226 |
+
# "status": value.status
|
227 |
+
# }
|
228 |
+
|
229 |
+
# # Handle raw dict format from frontend
|
230 |
+
# if isinstance(value, dict):
|
231 |
+
# return {
|
232 |
+
# "video": FileData(**value.get("video", {})),
|
233 |
+
# "duration": value.get("duration"),
|
234 |
+
# "audio_enabled": value.get("audio_enabled", True),
|
235 |
+
# "status": value.get("status", "stopped")
|
236 |
+
# }
|
237 |
+
|
238 |
+
# except Exception as e:
|
239 |
+
# return {"status": "error", "error": str(e)}
|
240 |
+
|
241 |
+
# return {"status": "stopped"}
|
242 |
+
|
243 |
+
def as_example(self, input_data):
|
244 |
+
"""Handle example data display."""
|
245 |
+
if input_data is None:
|
246 |
+
return None
|
247 |
+
|
248 |
+
if isinstance(input_data, (ScreenRecorderData, dict)):
|
249 |
+
return input_data
|
250 |
+
|
251 |
+
# Convert simple video path to proper format
|
252 |
+
if isinstance(input_data, str):
|
253 |
+
return {
|
254 |
+
"video": {
|
255 |
+
"path": input_data,
|
256 |
+
"orig_name": os.path.basename(input_data),
|
257 |
+
"size": 0
|
258 |
+
},
|
259 |
+
"duration": None,
|
260 |
+
"audio_enabled": self.audio_enabled,
|
261 |
+
"status": "stopped"
|
262 |
+
}
|
263 |
+
|
264 |
+
return input_data
|
265 |
+
|
266 |
+
def update_status(self, status: Literal["recording", "stopped", "error"]):
|
267 |
+
"""Update the internal status of the recorder."""
|
268 |
+
self._status = status
|
269 |
+
|
270 |
+
def get_status(self) -> str:
|
271 |
+
"""Get the current status of the recorder."""
|
272 |
+
return self._status
|
demo/__init__.py
ADDED
File without changes
|
demo/app.py
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from gradio_screenrecorder import ScreenRecorder
|
3 |
+
|
4 |
+
def handle_recording(recording_data):
|
5 |
+
"""Handle recorded video data"""
|
6 |
+
print(f'Received recording data: {recording_data}')
|
7 |
+
|
8 |
+
if not recording_data or not recording_data.get('video'):
|
9 |
+
return None
|
10 |
+
|
11 |
+
try:
|
12 |
+
video_info = recording_data['video']
|
13 |
+
# Return the video path that can be used by the Video component
|
14 |
+
return video_info.get('path')
|
15 |
+
except Exception as e:
|
16 |
+
print(f'Error processing recording: {e}')
|
17 |
+
return None
|
18 |
+
|
19 |
+
|
20 |
+
css = """
|
21 |
+
.screen-recorder-demo {
|
22 |
+
max-width: 800px;
|
23 |
+
margin: 0 auto;
|
24 |
+
}
|
25 |
+
"""
|
26 |
+
|
27 |
+
with gr.Blocks(css=css, title="Screen Recorder Demo") as demo:
|
28 |
+
gr.HTML("""
|
29 |
+
<h1 style='text-align: center'>
|
30 |
+
Gradio Screen Recorder Component Demo
|
31 |
+
</h1>
|
32 |
+
""")
|
33 |
+
|
34 |
+
with gr.Row():
|
35 |
+
with gr.Column():
|
36 |
+
recorder = ScreenRecorder(
|
37 |
+
audio_enabled=True,
|
38 |
+
webcam_overlay=True, # Disabled for now
|
39 |
+
webcam_position="top-left",
|
40 |
+
recording_format="webm",
|
41 |
+
max_duration=60,
|
42 |
+
label="Screen Recorder"
|
43 |
+
)
|
44 |
+
|
45 |
+
with gr.Column():
|
46 |
+
output_video = gr.Video(label="Recorded Video")
|
47 |
+
|
48 |
+
# Event handler
|
49 |
+
recorder.change(
|
50 |
+
fn=handle_recording,
|
51 |
+
inputs=recorder,
|
52 |
+
outputs=output_video
|
53 |
+
)
|
54 |
+
|
55 |
+
if __name__ == "__main__":
|
56 |
+
demo.launch()
|
demo/css.css
ADDED
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
html {
|
2 |
+
font-family: Inter;
|
3 |
+
font-size: 16px;
|
4 |
+
font-weight: 400;
|
5 |
+
line-height: 1.5;
|
6 |
+
-webkit-text-size-adjust: 100%;
|
7 |
+
background: #fff;
|
8 |
+
color: #323232;
|
9 |
+
-webkit-font-smoothing: antialiased;
|
10 |
+
-moz-osx-font-smoothing: grayscale;
|
11 |
+
text-rendering: optimizeLegibility;
|
12 |
+
}
|
13 |
+
|
14 |
+
:root {
|
15 |
+
--space: 1;
|
16 |
+
--vspace: calc(var(--space) * 1rem);
|
17 |
+
--vspace-0: calc(3 * var(--space) * 1rem);
|
18 |
+
--vspace-1: calc(2 * var(--space) * 1rem);
|
19 |
+
--vspace-2: calc(1.5 * var(--space) * 1rem);
|
20 |
+
--vspace-3: calc(0.5 * var(--space) * 1rem);
|
21 |
+
}
|
22 |
+
|
23 |
+
.app {
|
24 |
+
max-width: 748px !important;
|
25 |
+
}
|
26 |
+
|
27 |
+
.prose p {
|
28 |
+
margin: var(--vspace) 0;
|
29 |
+
line-height: var(--vspace * 2);
|
30 |
+
font-size: 1rem;
|
31 |
+
}
|
32 |
+
|
33 |
+
code {
|
34 |
+
font-family: "Inconsolata", sans-serif;
|
35 |
+
font-size: 16px;
|
36 |
+
}
|
37 |
+
|
38 |
+
h1,
|
39 |
+
h1 code {
|
40 |
+
font-weight: 400;
|
41 |
+
line-height: calc(2.5 / var(--space) * var(--vspace));
|
42 |
+
}
|
43 |
+
|
44 |
+
h1 code {
|
45 |
+
background: none;
|
46 |
+
border: none;
|
47 |
+
letter-spacing: 0.05em;
|
48 |
+
padding-bottom: 5px;
|
49 |
+
position: relative;
|
50 |
+
padding: 0;
|
51 |
+
}
|
52 |
+
|
53 |
+
h2 {
|
54 |
+
margin: var(--vspace-1) 0 var(--vspace-2) 0;
|
55 |
+
line-height: 1em;
|
56 |
+
}
|
57 |
+
|
58 |
+
h3,
|
59 |
+
h3 code {
|
60 |
+
margin: var(--vspace-1) 0 var(--vspace-2) 0;
|
61 |
+
line-height: 1em;
|
62 |
+
}
|
63 |
+
|
64 |
+
h4,
|
65 |
+
h5,
|
66 |
+
h6 {
|
67 |
+
margin: var(--vspace-3) 0 var(--vspace-3) 0;
|
68 |
+
line-height: var(--vspace);
|
69 |
+
}
|
70 |
+
|
71 |
+
.bigtitle,
|
72 |
+
h1,
|
73 |
+
h1 code {
|
74 |
+
font-size: calc(8px * 4.5);
|
75 |
+
word-break: break-word;
|
76 |
+
}
|
77 |
+
|
78 |
+
.title,
|
79 |
+
h2,
|
80 |
+
h2 code {
|
81 |
+
font-size: calc(8px * 3.375);
|
82 |
+
font-weight: lighter;
|
83 |
+
word-break: break-word;
|
84 |
+
border: none;
|
85 |
+
background: none;
|
86 |
+
}
|
87 |
+
|
88 |
+
.subheading1,
|
89 |
+
h3,
|
90 |
+
h3 code {
|
91 |
+
font-size: calc(8px * 1.8);
|
92 |
+
font-weight: 600;
|
93 |
+
border: none;
|
94 |
+
background: none;
|
95 |
+
letter-spacing: 0.1em;
|
96 |
+
text-transform: uppercase;
|
97 |
+
}
|
98 |
+
|
99 |
+
h2 code {
|
100 |
+
padding: 0;
|
101 |
+
position: relative;
|
102 |
+
letter-spacing: 0.05em;
|
103 |
+
}
|
104 |
+
|
105 |
+
blockquote {
|
106 |
+
font-size: calc(8px * 1.1667);
|
107 |
+
font-style: italic;
|
108 |
+
line-height: calc(1.1667 * var(--vspace));
|
109 |
+
margin: var(--vspace-2) var(--vspace-2);
|
110 |
+
}
|
111 |
+
|
112 |
+
.subheading2,
|
113 |
+
h4 {
|
114 |
+
font-size: calc(8px * 1.4292);
|
115 |
+
text-transform: uppercase;
|
116 |
+
font-weight: 600;
|
117 |
+
}
|
118 |
+
|
119 |
+
.subheading3,
|
120 |
+
h5 {
|
121 |
+
font-size: calc(8px * 1.2917);
|
122 |
+
line-height: calc(1.2917 * var(--vspace));
|
123 |
+
|
124 |
+
font-weight: lighter;
|
125 |
+
text-transform: uppercase;
|
126 |
+
letter-spacing: 0.15em;
|
127 |
+
}
|
128 |
+
|
129 |
+
h6 {
|
130 |
+
font-size: calc(8px * 1.1667);
|
131 |
+
font-size: 1.1667em;
|
132 |
+
font-weight: normal;
|
133 |
+
font-style: italic;
|
134 |
+
font-family: "le-monde-livre-classic-byol", serif !important;
|
135 |
+
letter-spacing: 0px !important;
|
136 |
+
}
|
137 |
+
|
138 |
+
#start .md > *:first-child {
|
139 |
+
margin-top: 0;
|
140 |
+
}
|
141 |
+
|
142 |
+
h2 + h3 {
|
143 |
+
margin-top: 0;
|
144 |
+
}
|
145 |
+
|
146 |
+
.md hr {
|
147 |
+
border: none;
|
148 |
+
border-top: 1px solid var(--block-border-color);
|
149 |
+
margin: var(--vspace-2) 0 var(--vspace-2) 0;
|
150 |
+
}
|
151 |
+
.prose ul {
|
152 |
+
margin: var(--vspace-2) 0 var(--vspace-1) 0;
|
153 |
+
}
|
154 |
+
|
155 |
+
.gap {
|
156 |
+
gap: 0;
|
157 |
+
}
|
demo/requirements.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
gradio_screenrecorder
|
demo/space.py
ADDED
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
import gradio as gr
|
3 |
+
from app import demo as app
|
4 |
+
import os
|
5 |
+
|
6 |
+
_docs = {'ScreenRecorder': {'description': 'Custom Gradio component for comprehensive screen recording functionality.', 'members': {'__init__': {'audio_enabled': {'type': 'bool', 'default': 'True', 'description': None}, 'webcam_overlay': {'type': 'bool', 'default': 'False', 'description': None}, 'webcam_position': {'type': '"top-left" | "top-right" | "bottom-left" | "bottom-right"', 'default': '"bottom-right"', 'description': None}, 'recording_format': {'type': 'str', 'default': '"webm"', 'description': None}, 'max_duration': {'type': 'typing.Optional[int][int, None]', 'default': 'None', 'description': None}, 'interactive': {'type': 'bool', 'default': 'True', 'description': None}}, 'postprocess': {}, 'preprocess': {'return': {'type': 'typing.Optional[\n gradio_screenrecorder.screenrecorder.ScreenRecorderData\n][ScreenRecorderData, None]', 'description': None}, 'value': None}}, 'events': {'record_start': {'type': None, 'default': None, 'description': ''}, 'record_stop': {'type': None, 'default': None, 'description': ''}, 'stream_update': {'type': None, 'default': None, 'description': ''}, 'change': {'type': None, 'default': None, 'description': ''}}}, '__meta__': {'additional_interfaces': {'ScreenRecorderData': {'source': 'class ScreenRecorderData(GradioModel):\n video: Optional[FileData] = None\n duration: Optional[float] = None\n audio_enabled: bool = True\n status: Literal["recording", "stopped", "error"] = (\n "stopped"\n )\n\n class Config:\n json_encoders = {\n FileData: lambda v: v.model_dump()\n if v\n else None\n }'}}, 'user_fn_refs': {'ScreenRecorder': ['ScreenRecorderData']}}}
|
7 |
+
|
8 |
+
abs_path = os.path.join(os.path.dirname(__file__), "css.css")
|
9 |
+
|
10 |
+
with gr.Blocks(
|
11 |
+
css=abs_path,
|
12 |
+
theme=gr.themes.Default(
|
13 |
+
font_mono=[
|
14 |
+
gr.themes.GoogleFont("Inconsolata"),
|
15 |
+
"monospace",
|
16 |
+
],
|
17 |
+
),
|
18 |
+
) as demo:
|
19 |
+
gr.Markdown(
|
20 |
+
"""
|
21 |
+
# `gradio_screenrecorder`
|
22 |
+
|
23 |
+
<div style="display: flex; gap: 7px;">
|
24 |
+
<img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.1%20-%20orange">
|
25 |
+
</div>
|
26 |
+
|
27 |
+
Screen Recorder Gradio Custom Component
|
28 |
+
""", elem_classes=["md-custom"], header_links=True)
|
29 |
+
app.render()
|
30 |
+
gr.Markdown(
|
31 |
+
"""
|
32 |
+
## Installation
|
33 |
+
|
34 |
+
```bash
|
35 |
+
pip install gradio_screenrecorder
|
36 |
+
```
|
37 |
+
|
38 |
+
## Usage
|
39 |
+
|
40 |
+
```python
|
41 |
+
import gradio as gr
|
42 |
+
from gradio_screenrecorder import ScreenRecorder
|
43 |
+
|
44 |
+
def handle_recording(recording_data):
|
45 |
+
\"\"\"Handle recorded video data\"\"\"
|
46 |
+
print(f'Received recording data: {recording_data}')
|
47 |
+
|
48 |
+
if not recording_data or not recording_data.get('video'):
|
49 |
+
return None
|
50 |
+
|
51 |
+
try:
|
52 |
+
video_info = recording_data['video']
|
53 |
+
# Return the video path that can be used by the Video component
|
54 |
+
return video_info.get('path')
|
55 |
+
except Exception as e:
|
56 |
+
print(f'Error processing recording: {e}')
|
57 |
+
return None
|
58 |
+
|
59 |
+
|
60 |
+
css = \"\"\"
|
61 |
+
.screen-recorder-demo {
|
62 |
+
max-width: 800px;
|
63 |
+
margin: 0 auto;
|
64 |
+
}
|
65 |
+
\"\"\"
|
66 |
+
|
67 |
+
with gr.Blocks(css=css, title="Screen Recorder Demo") as demo:
|
68 |
+
gr.HTML(\"\"\"
|
69 |
+
<h1 style='text-align: center'>
|
70 |
+
Gradio Screen Recorder Component Demo
|
71 |
+
</h1>
|
72 |
+
\"\"\")
|
73 |
+
|
74 |
+
with gr.Row():
|
75 |
+
with gr.Column():
|
76 |
+
recorder = ScreenRecorder(
|
77 |
+
audio_enabled=True,
|
78 |
+
webcam_overlay=True, # Disabled for now
|
79 |
+
webcam_position="top-left",
|
80 |
+
recording_format="webm",
|
81 |
+
max_duration=60,
|
82 |
+
label="Screen Recorder"
|
83 |
+
)
|
84 |
+
|
85 |
+
with gr.Column():
|
86 |
+
output_video = gr.Video(label="Recorded Video")
|
87 |
+
|
88 |
+
# Event handler
|
89 |
+
recorder.change(
|
90 |
+
fn=handle_recording,
|
91 |
+
inputs=recorder,
|
92 |
+
outputs=output_video
|
93 |
+
)
|
94 |
+
|
95 |
+
if __name__ == "__main__":
|
96 |
+
demo.launch()
|
97 |
+
|
98 |
+
```
|
99 |
+
""", elem_classes=["md-custom"], header_links=True)
|
100 |
+
|
101 |
+
|
102 |
+
gr.Markdown("""
|
103 |
+
## `ScreenRecorder`
|
104 |
+
|
105 |
+
### Initialization
|
106 |
+
""", elem_classes=["md-custom"], header_links=True)
|
107 |
+
|
108 |
+
gr.ParamViewer(value=_docs["ScreenRecorder"]["members"]["__init__"], linkify=['ScreenRecorderData'])
|
109 |
+
|
110 |
+
|
111 |
+
gr.Markdown("### Events")
|
112 |
+
gr.ParamViewer(value=_docs["ScreenRecorder"]["events"], linkify=['Event'])
|
113 |
+
|
114 |
+
|
115 |
+
|
116 |
+
|
117 |
+
gr.Markdown("""
|
118 |
+
|
119 |
+
### User function
|
120 |
+
|
121 |
+
The impact on the users predict function varies depending on whether the component is used as an input or output for an event (or both).
|
122 |
+
|
123 |
+
- When used as an Input, the component only impacts the input signature of the user function.
|
124 |
+
- When used as an output, the component only impacts the return signature of the user function.
|
125 |
+
|
126 |
+
The code snippet below is accurate in cases where the component is used as both an input and an output.
|
127 |
+
|
128 |
+
|
129 |
+
|
130 |
+
```python
|
131 |
+
def predict(
|
132 |
+
value: typing.Optional[
|
133 |
+
gradio_screenrecorder.screenrecorder.ScreenRecorderData
|
134 |
+
][ScreenRecorderData, None]
|
135 |
+
) -> Unknown:
|
136 |
+
return value
|
137 |
+
```
|
138 |
+
""", elem_classes=["md-custom", "ScreenRecorder-user-fn"], header_links=True)
|
139 |
+
|
140 |
+
|
141 |
+
|
142 |
+
|
143 |
+
code_ScreenRecorderData = gr.Markdown("""
|
144 |
+
## `ScreenRecorderData`
|
145 |
+
```python
|
146 |
+
class ScreenRecorderData(GradioModel):
|
147 |
+
video: Optional[FileData] = None
|
148 |
+
duration: Optional[float] = None
|
149 |
+
audio_enabled: bool = True
|
150 |
+
status: Literal["recording", "stopped", "error"] = (
|
151 |
+
"stopped"
|
152 |
+
)
|
153 |
+
|
154 |
+
class Config:
|
155 |
+
json_encoders = {
|
156 |
+
FileData: lambda v: v.model_dump()
|
157 |
+
if v
|
158 |
+
else None
|
159 |
+
}
|
160 |
+
```""", elem_classes=["md-custom", "ScreenRecorderData"], header_links=True)
|
161 |
+
|
162 |
+
demo.load(None, js=r"""function() {
|
163 |
+
const refs = {
|
164 |
+
ScreenRecorderData: [], };
|
165 |
+
const user_fn_refs = {
|
166 |
+
ScreenRecorder: ['ScreenRecorderData'], };
|
167 |
+
requestAnimationFrame(() => {
|
168 |
+
|
169 |
+
Object.entries(user_fn_refs).forEach(([key, refs]) => {
|
170 |
+
if (refs.length > 0) {
|
171 |
+
const el = document.querySelector(`.${key}-user-fn`);
|
172 |
+
if (!el) return;
|
173 |
+
refs.forEach(ref => {
|
174 |
+
el.innerHTML = el.innerHTML.replace(
|
175 |
+
new RegExp("\\b"+ref+"\\b", "g"),
|
176 |
+
`<a href="#h-${ref.toLowerCase()}">${ref}</a>`
|
177 |
+
);
|
178 |
+
})
|
179 |
+
}
|
180 |
+
})
|
181 |
+
|
182 |
+
Object.entries(refs).forEach(([key, refs]) => {
|
183 |
+
if (refs.length > 0) {
|
184 |
+
const el = document.querySelector(`.${key}`);
|
185 |
+
if (!el) return;
|
186 |
+
refs.forEach(ref => {
|
187 |
+
el.innerHTML = el.innerHTML.replace(
|
188 |
+
new RegExp("\\b"+ref+"\\b", "g"),
|
189 |
+
`<a href="#h-${ref.toLowerCase()}">${ref}</a>`
|
190 |
+
);
|
191 |
+
})
|
192 |
+
}
|
193 |
+
})
|
194 |
+
})
|
195 |
+
}
|
196 |
+
|
197 |
+
""")
|
198 |
+
|
199 |
+
demo.launch()
|
frontend/Example.svelte
ADDED
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<script lang="ts">
|
2 |
+
export let value: any;
|
3 |
+
|
4 |
+
function formatDuration(duration: number): string {
|
5 |
+
const minutes = Math.floor(duration / 60);
|
6 |
+
const seconds = Math.floor(duration % 60);
|
7 |
+
return `${minutes}:${seconds.toString().padStart(2, '0')}`;
|
8 |
+
}
|
9 |
+
</script>
|
10 |
+
|
11 |
+
<div class="example-container">
|
12 |
+
{#if value && value.video}
|
13 |
+
<div class="video-thumbnail">
|
14 |
+
<video
|
15 |
+
src={value.video.path}
|
16 |
+
controls={false}
|
17 |
+
muted
|
18 |
+
style="width: 100%; height: 60px; object-fit: cover;"
|
19 |
+
>
|
20 |
+
</video>
|
21 |
+
<div class="overlay">
|
22 |
+
<span class="duration">
|
23 |
+
{value.duration ? formatDuration(value.duration) : 'Recording'}
|
24 |
+
</span>
|
25 |
+
<span class="format">
|
26 |
+
{value.video.orig_name?.split('.').pop()?.toUpperCase() || 'VIDEO'}
|
27 |
+
</span>
|
28 |
+
</div>
|
29 |
+
</div>
|
30 |
+
{:else}
|
31 |
+
<div class="placeholder">
|
32 |
+
📹 Screen Recording
|
33 |
+
</div>
|
34 |
+
{/if}
|
35 |
+
</div>
|
36 |
+
|
37 |
+
<style>
|
38 |
+
.example-container {
|
39 |
+
width: 100%;
|
40 |
+
height: 80px;
|
41 |
+
border-radius: 4px;
|
42 |
+
overflow: hidden;
|
43 |
+
position: relative;
|
44 |
+
}
|
45 |
+
|
46 |
+
.video-thumbnail {
|
47 |
+
position: relative;
|
48 |
+
width: 100%;
|
49 |
+
height: 100%;
|
50 |
+
}
|
51 |
+
|
52 |
+
.overlay {
|
53 |
+
position: absolute;
|
54 |
+
bottom: 0;
|
55 |
+
left: 0;
|
56 |
+
right: 0;
|
57 |
+
background: linear-gradient(transparent, rgba(0,0,0,0.7));
|
58 |
+
padding: 4px 8px;
|
59 |
+
display: flex;
|
60 |
+
justify-content: space-between;
|
61 |
+
align-items: flex-end;
|
62 |
+
}
|
63 |
+
|
64 |
+
.duration, .format {
|
65 |
+
color: white;
|
66 |
+
font-size: 10px;
|
67 |
+
font-weight: bold;
|
68 |
+
}
|
69 |
+
|
70 |
+
.placeholder {
|
71 |
+
display: flex;
|
72 |
+
align-items: center;
|
73 |
+
justify-content: center;
|
74 |
+
width: 100%;
|
75 |
+
height: 100%;
|
76 |
+
background: #f0f0f0;
|
77 |
+
color: #666;
|
78 |
+
font-size: 12px;
|
79 |
+
}
|
80 |
+
</style>
|
frontend/Index.svelte
ADDED
@@ -0,0 +1,727 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<script lang="ts">
|
2 |
+
import { onMount, onDestroy, createEventDispatcher } from 'svelte';
|
3 |
+
import { Block } from '@gradio/atoms';
|
4 |
+
import { StatusTracker } from '@gradio/statustracker';
|
5 |
+
import type { LoadingStatus } from "@gradio/statustracker";
|
6 |
+
import type { Gradio } from "@gradio/utils";
|
7 |
+
import fixWebmDuration from 'fix-webm-duration';
|
8 |
+
|
9 |
+
// Type definitions
|
10 |
+
interface MediaRecorderOptions {
|
11 |
+
mimeType?: string;
|
12 |
+
audioBitsPerSecond?: number;
|
13 |
+
videoBitsPerSecond?: number;
|
14 |
+
bitsPerSecond?: number;
|
15 |
+
}
|
16 |
+
|
17 |
+
interface MediaTrackConstraints {
|
18 |
+
displaySurface?: 'browser' | 'monitor' | 'window';
|
19 |
+
cursor?: 'always' | 'motion' | 'never';
|
20 |
+
}
|
21 |
+
|
22 |
+
// Type definitions
|
23 |
+
interface RecordingData {
|
24 |
+
video: string;
|
25 |
+
duration: number;
|
26 |
+
audio_enabled?: boolean;
|
27 |
+
status?: string;
|
28 |
+
orig_name?: string;
|
29 |
+
size?: number | null;
|
30 |
+
data?: string; // Base64 encoded data for Gradio
|
31 |
+
name?: string; // Alias for orig_name for Gradio compatibility
|
32 |
+
is_file?: boolean;
|
33 |
+
type?: string; // MIME type of the recording
|
34 |
+
}
|
35 |
+
|
36 |
+
interface Position {
|
37 |
+
x: number;
|
38 |
+
y: number;
|
39 |
+
}
|
40 |
+
|
41 |
+
// Event types for the component
|
42 |
+
type EventMap = {
|
43 |
+
'error': { message: string; error: string };
|
44 |
+
'recording-started': void;
|
45 |
+
'recording-stopped': RecordingData;
|
46 |
+
'record_stop': RecordingData;
|
47 |
+
'change': RecordingData;
|
48 |
+
'webcam-error': { message: string; error: string };
|
49 |
+
};
|
50 |
+
|
51 |
+
// Component props with proper types and defaults
|
52 |
+
export let gradio: Gradio<any>;
|
53 |
+
export let value: Partial<RecordingData> | null = null;
|
54 |
+
export const elem_id = ''; // Marked as const since it's not modified
|
55 |
+
export let elem_classes: string[] = [];
|
56 |
+
export let scale: number | null = null;
|
57 |
+
export let min_width: number | null = null;
|
58 |
+
export let visible = true;
|
59 |
+
export let interactive = true;
|
60 |
+
export let loading_status: LoadingStatus | null = null;
|
61 |
+
export let audio_enabled = false;
|
62 |
+
export let webcam_overlay = false;
|
63 |
+
export let webcam_position: 'top-left' | 'top-right' | 'bottom-left' | 'bottom-right' = 'bottom-right';
|
64 |
+
export let recording_format: 'webm' | 'mp4' | 'gif' = 'webm';
|
65 |
+
export let max_duration: number | null = null;
|
66 |
+
|
67 |
+
// Computed styles for the container
|
68 |
+
let containerStyle = '';
|
69 |
+
|
70 |
+
// Component methods interface
|
71 |
+
interface ComponentMethods {
|
72 |
+
startRecording: () => Promise<void>;
|
73 |
+
stopRecording: () => void;
|
74 |
+
togglePause: () => void;
|
75 |
+
cleanup: () => void;
|
76 |
+
}
|
77 |
+
|
78 |
+
// Component state with explicit types and initial values
|
79 |
+
let isPaused = false;
|
80 |
+
let isRecording = false;
|
81 |
+
let recordingTime = 0;
|
82 |
+
let recordingTimer: number | null = null;
|
83 |
+
let recordedChunks: Blob[] = [];
|
84 |
+
|
85 |
+
// Media streams and elements
|
86 |
+
let screenStream: MediaStream | null = null;
|
87 |
+
let webcamStream: MediaStream | null = null;
|
88 |
+
let combinedStream: MediaStream | null = null;
|
89 |
+
let canvas: HTMLCanvasElement | null = null;
|
90 |
+
let ctx: CanvasRenderingContext2D | null = null;
|
91 |
+
let animationFrameId: number | null = null;
|
92 |
+
let previewVideo: HTMLVideoElement | null = null;
|
93 |
+
let webcamVideo: HTMLVideoElement | null = null;
|
94 |
+
let recordingStartTime = 0;
|
95 |
+
let mediaRecorder: MediaRecorder | null = null;
|
96 |
+
|
97 |
+
// Internal video elements
|
98 |
+
let webcamVideoInternal: HTMLVideoElement | null = null;
|
99 |
+
let screenVideoInternal: HTMLVideoElement | null = null;
|
100 |
+
|
101 |
+
// Bind canvas element
|
102 |
+
function bindCanvas(node: HTMLCanvasElement) {
|
103 |
+
canvas = node;
|
104 |
+
if (canvas) {
|
105 |
+
const context = canvas.getContext('2d', { willReadFrequently: true });
|
106 |
+
if (context) {
|
107 |
+
ctx = context;
|
108 |
+
// Set canvas dimensions with null checks
|
109 |
+
const width = canvas.offsetWidth;
|
110 |
+
const height = canvas.offsetHeight;
|
111 |
+
if (width && height) {
|
112 |
+
canvas.width = width;
|
113 |
+
canvas.height = height;
|
114 |
+
}
|
115 |
+
}
|
116 |
+
}
|
117 |
+
return {
|
118 |
+
destroy() {
|
119 |
+
canvas = null;
|
120 |
+
ctx = null;
|
121 |
+
}
|
122 |
+
};
|
123 |
+
}
|
124 |
+
|
125 |
+
// Canvas binding is now handled by the bindCanvas function
|
126 |
+
|
127 |
+
// Configuration
|
128 |
+
const webcam_size = 200;
|
129 |
+
const webcam_border = 10;
|
130 |
+
const webcam_radius = '50%';
|
131 |
+
|
132 |
+
// Ensure max_duration has a default value if null
|
133 |
+
$: effectiveMaxDuration = max_duration ?? 0;
|
134 |
+
|
135 |
+
// Computed styles for the container
|
136 |
+
$: containerStyle = [
|
137 |
+
scale !== null ? `--scale: ${scale};` : '',
|
138 |
+
min_width !== null ? `min-width: ${min_width}px;` : ''
|
139 |
+
].filter(Boolean).join(' ');
|
140 |
+
|
141 |
+
onDestroy(() => {
|
142 |
+
if (isRecording) {
|
143 |
+
componentMethods.stopRecording();
|
144 |
+
}
|
145 |
+
componentMethods.cleanup();
|
146 |
+
if (animationFrameId) {
|
147 |
+
cancelAnimationFrame(animationFrameId);
|
148 |
+
animationFrameId = null;
|
149 |
+
}
|
150 |
+
});
|
151 |
+
|
152 |
+
// Component state and props are already declared above
|
153 |
+
|
154 |
+
// Event dispatcher with proper typing
|
155 |
+
const dispatch = createEventDispatcher<EventMap>();
|
156 |
+
|
157 |
+
// Type guard for error handling
|
158 |
+
function isErrorWithMessage(error: unknown): error is Error {
|
159 |
+
return error instanceof Error;
|
160 |
+
}
|
161 |
+
|
162 |
+
// Component methods implementation
|
163 |
+
const componentMethods: ComponentMethods = {
|
164 |
+
startRecording: async (): Promise<void> => {
|
165 |
+
if (isRecording) return;
|
166 |
+
isRecording = true;
|
167 |
+
recordedChunks = [];
|
168 |
+
recordingTime = 0;
|
169 |
+
|
170 |
+
try {
|
171 |
+
// Composite screen and optional webcam overlay via hidden canvas
|
172 |
+
const screenStreamCapture = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: false });
|
173 |
+
screenStream = screenStreamCapture;
|
174 |
+
// Assign to hidden video for composition
|
175 |
+
if (screenVideoInternal) {
|
176 |
+
screenVideoInternal.srcObject = screenStreamCapture;
|
177 |
+
await screenVideoInternal.play().catch(() => {});
|
178 |
+
}
|
179 |
+
let captureStream: MediaStream;
|
180 |
+
if (webcam_overlay && webcamVideoInternal && canvas && ctx) {
|
181 |
+
try {
|
182 |
+
webcamStream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
|
183 |
+
webcamVideoInternal.srcObject = webcamStream;
|
184 |
+
await webcamVideoInternal.play().catch(() => {});
|
185 |
+
// Resize canvas to match screen video
|
186 |
+
canvas.width = screenVideoInternal!.videoWidth;
|
187 |
+
canvas.height = screenVideoInternal!.videoHeight;
|
188 |
+
const overlaySize = Math.min(canvas.width, canvas.height) / 4;
|
189 |
+
const posMap: Record<string, [number, number]> = {
|
190 |
+
'top-left': [10, 10],
|
191 |
+
'top-right': [canvas.width - overlaySize - 10, 10],
|
192 |
+
'bottom-left': [10, canvas.height - overlaySize - 10],
|
193 |
+
'bottom-right': [canvas.width - overlaySize - 10, canvas.height - overlaySize - 10]
|
194 |
+
};
|
195 |
+
const [ox, oy] = posMap[webcam_position];
|
196 |
+
function draw() {
|
197 |
+
ctx!.drawImage(screenVideoInternal!, 0, 0, canvas.width, canvas.height);
|
198 |
+
ctx!.drawImage(webcamVideoInternal!, ox, oy, overlaySize, overlaySize);
|
199 |
+
animationFrameId = requestAnimationFrame(draw);
|
200 |
+
}
|
201 |
+
draw();
|
202 |
+
const canvasStream = canvas.captureStream(30);
|
203 |
+
const audioTracks = audio_enabled
|
204 |
+
? (await navigator.mediaDevices.getUserMedia({ audio: true })).getAudioTracks()
|
205 |
+
: screenStreamCapture.getAudioTracks();
|
206 |
+
combinedStream = new MediaStream([...canvasStream.getVideoTracks(), ...audioTracks]);
|
207 |
+
captureStream = combinedStream;
|
208 |
+
} catch (err) {
|
209 |
+
console.warn('Webcam overlay failed, falling back to screen only', err);
|
210 |
+
captureStream = screenStreamCapture;
|
211 |
+
}
|
212 |
+
} else {
|
213 |
+
// No overlay: combine audio if enabled with screen
|
214 |
+
const audioTracks = audio_enabled
|
215 |
+
? (await navigator.mediaDevices.getUserMedia({ audio: true })).getAudioTracks()
|
216 |
+
: screenStreamCapture.getAudioTracks();
|
217 |
+
combinedStream = new MediaStream([...screenStreamCapture.getVideoTracks(), ...audioTracks]);
|
218 |
+
captureStream = combinedStream;
|
219 |
+
}
|
220 |
+
|
221 |
+
// Handle track ended event
|
222 |
+
screenStreamCapture.getVideoTracks()[0].onended = () => {
|
223 |
+
if (isRecording) {
|
224 |
+
componentMethods.stopRecording();
|
225 |
+
}
|
226 |
+
};
|
227 |
+
|
228 |
+
// Start recording
|
229 |
+
const options: MediaRecorderOptions = {
|
230 |
+
mimeType: recording_format === 'webm' ? 'video/webm;codecs=vp9' : 'video/mp4'
|
231 |
+
};
|
232 |
+
|
233 |
+
mediaRecorder = new MediaRecorder(captureStream, options);
|
234 |
+
mediaRecorder.ondataavailable = handleDataAvailable;
|
235 |
+
mediaRecorder.onstop = handleRecordingStop;
|
236 |
+
mediaRecorder.start();
|
237 |
+
|
238 |
+
recordingStartTime = Date.now();
|
239 |
+
updateRecordingTime();
|
240 |
+
|
241 |
+
dispatch('recording-started');
|
242 |
+
} catch (error) {
|
243 |
+
isRecording = false;
|
244 |
+
if (isErrorWithMessage(error)) {
|
245 |
+
dispatch('error', {
|
246 |
+
message: 'Failed to start recording',
|
247 |
+
error: error.message
|
248 |
+
});
|
249 |
+
}
|
250 |
+
}
|
251 |
+
},
|
252 |
+
|
253 |
+
stopRecording: (): void => {
|
254 |
+
if (!isRecording || !mediaRecorder) return;
|
255 |
+
|
256 |
+
try {
|
257 |
+
mediaRecorder.stop();
|
258 |
+
isRecording = false;
|
259 |
+
|
260 |
+
// Stop all tracks
|
261 |
+
[screenStream, webcamStream, combinedStream].forEach(stream => {
|
262 |
+
if (stream) {
|
263 |
+
stream.getTracks().forEach(track => track.stop());
|
264 |
+
}
|
265 |
+
});
|
266 |
+
|
267 |
+
if (recordingTimer) {
|
268 |
+
clearTimeout(recordingTimer);
|
269 |
+
recordingTimer = null;
|
270 |
+
}
|
271 |
+
|
272 |
+
const recordingData: RecordingData = {
|
273 |
+
video: '',
|
274 |
+
duration: recordingTime / 1000,
|
275 |
+
audio_enabled: audio_enabled,
|
276 |
+
status: 'completed'
|
277 |
+
};
|
278 |
+
|
279 |
+
dispatch('recording-stopped', recordingData);
|
280 |
+
dispatch('record_stop', recordingData);
|
281 |
+
dispatch('change', recordingData);
|
282 |
+
} catch (error) {
|
283 |
+
isRecording = false;
|
284 |
+
if (isErrorWithMessage(error)) {
|
285 |
+
dispatch('error', {
|
286 |
+
message: 'Error stopping recording',
|
287 |
+
error: error.message
|
288 |
+
});
|
289 |
+
}
|
290 |
+
}
|
291 |
+
},
|
292 |
+
|
293 |
+
togglePause: (): void => {
|
294 |
+
if (!mediaRecorder) return;
|
295 |
+
|
296 |
+
isPaused = !isPaused;
|
297 |
+
|
298 |
+
if (isPaused) {
|
299 |
+
mediaRecorder.pause();
|
300 |
+
if (recordingTimer) {
|
301 |
+
clearTimeout(recordingTimer);
|
302 |
+
recordingTimer = null;
|
303 |
+
}
|
304 |
+
} else {
|
305 |
+
mediaRecorder.resume();
|
306 |
+
updateRecordingTime();
|
307 |
+
}
|
308 |
+
if (isPaused) {
|
309 |
+
// Pause logic
|
310 |
+
} else {
|
311 |
+
// Resume logic
|
312 |
+
}
|
313 |
+
},
|
314 |
+
|
315 |
+
cleanup: (): void => {
|
316 |
+
// Stop all media streams
|
317 |
+
[screenStream, webcamStream, combinedStream].forEach(stream => {
|
318 |
+
if (stream) {
|
319 |
+
stream.getTracks().forEach(track => track.stop());
|
320 |
+
}
|
321 |
+
});
|
322 |
+
|
323 |
+
// Clear media recorder
|
324 |
+
if (mediaRecorder) {
|
325 |
+
if (mediaRecorder.state !== 'inactive') {
|
326 |
+
mediaRecorder.stop();
|
327 |
+
}
|
328 |
+
mediaRecorder = null;
|
329 |
+
}
|
330 |
+
|
331 |
+
// Clear canvas
|
332 |
+
if (ctx) {
|
333 |
+
ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);
|
334 |
+
}
|
335 |
+
|
336 |
+
// Reset state
|
337 |
+
isRecording = false;
|
338 |
+
isPaused = false;
|
339 |
+
recordingTime = 0;
|
340 |
+
recordedChunks = [];
|
341 |
+
|
342 |
+
// Clear timers
|
343 |
+
if (recordingTimer) {
|
344 |
+
clearInterval(recordingTimer);
|
345 |
+
recordingTimer = null;
|
346 |
+
}
|
347 |
+
|
348 |
+
if (animationFrameId) {
|
349 |
+
cancelAnimationFrame(animationFrameId);
|
350 |
+
animationFrameId = null;
|
351 |
+
}
|
352 |
+
}
|
353 |
+
};
|
354 |
+
|
355 |
+
// Handle data available event
|
356 |
+
function handleDataAvailable(event: BlobEvent): void {
|
357 |
+
if (event.data && event.data.size > 0) {
|
358 |
+
recordedChunks.push(event.data);
|
359 |
+
}
|
360 |
+
}
|
361 |
+
|
362 |
+
// Handle recording stop
|
363 |
+
function handleRecordingStop(): void {
|
364 |
+
if (recordedChunks.length === 0) {
|
365 |
+
console.warn('No recording data available');
|
366 |
+
return;
|
367 |
+
}
|
368 |
+
|
369 |
+
const mimeType = recording_format === 'webm' ? 'video/webm' : 'video/mp4';
|
370 |
+
const blob = new Blob(recordedChunks, { type: mimeType });
|
371 |
+
const url = URL.createObjectURL(blob);
|
372 |
+
|
373 |
+
console.log('Recording stopped. Blob size:', blob.size, 'bytes');
|
374 |
+
|
375 |
+
// Create a file reader to read the blob as base64
|
376 |
+
const reader = new FileReader();
|
377 |
+
reader.onload = (e) => {
|
378 |
+
const base64data = e.target?.result as string;
|
379 |
+
// Extract the base64 data (remove the data URL prefix)
|
380 |
+
const base64Content = base64data.split(',')[1];
|
381 |
+
const fileName = `recording_${Date.now()}.${recording_format}`;
|
382 |
+
|
383 |
+
// Dispatch event with recording data
|
384 |
+
const recordingData: RecordingData = {
|
385 |
+
video: url,
|
386 |
+
duration: recordingTime,
|
387 |
+
audio_enabled: audio_enabled,
|
388 |
+
status: 'completed',
|
389 |
+
size: blob.size > 0 ? blob.size : undefined,
|
390 |
+
orig_name: fileName,
|
391 |
+
name: fileName, // Alias for Gradio compatibility
|
392 |
+
is_file: true,
|
393 |
+
type: mimeType,
|
394 |
+
data: base64Content
|
395 |
+
};
|
396 |
+
|
397 |
+
console.log('Dispatching recording-stopped event');
|
398 |
+
dispatch('recording-stopped', recordingData);
|
399 |
+
dispatch('record_stop', recordingData);
|
400 |
+
dispatch('change', recordingData);
|
401 |
+
|
402 |
+
// Update the value prop to trigger re-render
|
403 |
+
value = { ...value, ...recordingData };
|
404 |
+
};
|
405 |
+
|
406 |
+
reader.onerror = (error) => {
|
407 |
+
console.error('Error reading blob:', error);
|
408 |
+
dispatch('error', {
|
409 |
+
message: 'Failed to process recording',
|
410 |
+
error: 'Could not read recording data'
|
411 |
+
});
|
412 |
+
};
|
413 |
+
|
414 |
+
// Read the blob as data URL
|
415 |
+
reader.readAsDataURL(blob);
|
416 |
+
}
|
417 |
+
|
418 |
+
// Update recording time
|
419 |
+
function updateRecordingTime(): void {
|
420 |
+
if (!isRecording) return;
|
421 |
+
|
422 |
+
recordingTime = Math.floor((Date.now() - recordingStartTime) / 1000);
|
423 |
+
|
424 |
+
// Check if max duration has been reached
|
425 |
+
if (max_duration !== null && max_duration > 0 && recordingTime >= max_duration) {
|
426 |
+
console.log('Max duration reached, stopping');
|
427 |
+
componentMethods.stopRecording();
|
428 |
+
return;
|
429 |
+
}
|
430 |
+
|
431 |
+
// Schedule the next update
|
432 |
+
recordingTimer = window.setTimeout(updateRecordingTime, 1000);
|
433 |
+
}
|
434 |
+
|
435 |
+
function stopTimer(): void {
|
436 |
+
if (recordingTimer) {
|
437 |
+
clearTimeout(recordingTimer);
|
438 |
+
recordingTimer = null;
|
439 |
+
}
|
440 |
+
}
|
441 |
+
|
442 |
+
// Format time as MM:SS
|
443 |
+
function formatTime(seconds: number): string {
|
444 |
+
const mins = Math.floor(seconds / 60);
|
445 |
+
const secs = Math.floor(seconds % 60);
|
446 |
+
return `${mins.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`;
|
447 |
+
}
|
448 |
+
|
449 |
+
// Format file size in human-readable format
|
450 |
+
function formatFileSize(bytes: number | string | null | undefined): string {
|
451 |
+
if (bytes === null || bytes === undefined) return '0 B';
|
452 |
+
const numBytes = Number(bytes);
|
453 |
+
if (isNaN(numBytes) || numBytes === 0) return '0 B';
|
454 |
+
const k = 1024;
|
455 |
+
const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
|
456 |
+
const i = Math.floor(Math.log(numBytes) / Math.log(k));
|
457 |
+
return parseFloat((numBytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
|
458 |
+
}
|
459 |
+
</script>
|
460 |
+
|
461 |
+
<div class="screen-recorder-container {!visible ? 'invisible' : ''} {elem_classes.join(' ')}" style="{containerStyle}">
|
462 |
+
{#if loading_status}
|
463 |
+
<StatusTracker
|
464 |
+
autoscroll={gradio.autoscroll}
|
465 |
+
i18n={gradio.i18n}
|
466 |
+
{...loading_status}
|
467 |
+
/>
|
468 |
+
{/if}
|
469 |
+
|
470 |
+
<div class="screen-recorder">
|
471 |
+
<div class="controls">
|
472 |
+
{#if !isRecording}
|
473 |
+
<button
|
474 |
+
class="record-btn start"
|
475 |
+
on:click={componentMethods.startRecording}
|
476 |
+
disabled={!interactive}
|
477 |
+
>
|
478 |
+
<span class="recording-icon">●</span> Start Recording
|
479 |
+
</button>
|
480 |
+
{:else}
|
481 |
+
<button
|
482 |
+
class="record-btn stop"
|
483 |
+
on:click={componentMethods.stopRecording}
|
484 |
+
>
|
485 |
+
<span class="stop-icon">■</span> Stop Recording
|
486 |
+
</button>
|
487 |
+
<span class="recording-time">
|
488 |
+
{formatTime(recordingTime)}
|
489 |
+
</span>
|
490 |
+
{#if max_duration}
|
491 |
+
<span class="max-duration">/ {formatTime(max_duration)}</span>
|
492 |
+
{/if}
|
493 |
+
{/if}
|
494 |
+
</div>
|
495 |
+
|
496 |
+
<!-- Live Preview - Always show when recording -->
|
497 |
+
{#if isRecording}
|
498 |
+
<div class="preview-container">
|
499 |
+
<video
|
500 |
+
bind:this={previewVideo}
|
501 |
+
class="preview-video"
|
502 |
+
autoplay
|
503 |
+
muted
|
504 |
+
playsinline
|
505 |
+
aria-label="Live preview"
|
506 |
+
on:loadedmetadata={() => {
|
507 |
+
if (previewVideo) {
|
508 |
+
previewVideo.play().catch(console.warn);
|
509 |
+
}
|
510 |
+
}}
|
511 |
+
>
|
512 |
+
<track kind="captions" />
|
513 |
+
</video>
|
514 |
+
{#if webcam_overlay}
|
515 |
+
<video
|
516 |
+
bind:this={webcamVideo}
|
517 |
+
class="webcam-overlay {webcam_position}"
|
518 |
+
style="width: 200px; height: 200px;"
|
519 |
+
autoplay
|
520 |
+
muted
|
521 |
+
playsinline
|
522 |
+
aria-label="Webcam overlay"
|
523 |
+
>
|
524 |
+
<track kind="captions" />
|
525 |
+
</video>
|
526 |
+
{/if}
|
527 |
+
<div class="recording-indicator">
|
528 |
+
<span class="pulse">●</span> RECORDING
|
529 |
+
</div>
|
530 |
+
</div>
|
531 |
+
{/if}
|
532 |
+
|
533 |
+
{#if value?.video}
|
534 |
+
<div class="recording-preview" style="position: relative;">
|
535 |
+
{#if audio_enabled}
|
536 |
+
<div class="speaker-overlay">🔊</div>
|
537 |
+
{/if}
|
538 |
+
<video
|
539 |
+
src={value.video}
|
540 |
+
controls
|
541 |
+
class="preview-video"
|
542 |
+
aria-label="Recording preview"
|
543 |
+
on:loadedmetadata
|
544 |
+
on:loadeddata
|
545 |
+
on:error={(e) => console.error('Video error:', e)}
|
546 |
+
>
|
547 |
+
<track kind="captions" />
|
548 |
+
</video>
|
549 |
+
<div class="recording-info">
|
550 |
+
<div>Duration: {value.duration ? value.duration.toFixed(1) : '0.0'}s</div>
|
551 |
+
{#if value.size}
|
552 |
+
<div>Size: {formatFileSize(value.size)}</div>
|
553 |
+
{/if}
|
554 |
+
</div>
|
555 |
+
</div>
|
556 |
+
{/if}
|
557 |
+
|
558 |
+
<!-- Configuration Display -->
|
559 |
+
<div class="config-info">
|
560 |
+
<span>Audio: {audio_enabled ? '🔊' : '🔇'}</span>
|
561 |
+
<span>Format: {recording_format.toUpperCase()}</span>
|
562 |
+
{#if max_duration}
|
563 |
+
<span>Max: {formatTime(max_duration)}</span>
|
564 |
+
{/if}
|
565 |
+
</div>
|
566 |
+
|
567 |
+
<!-- Debug info -->
|
568 |
+
{#if value}
|
569 |
+
<div class="debug-info">
|
570 |
+
<small>Last recording: {value.orig_name} ({Math.round(value.size / 1024)}KB)</small>
|
571 |
+
</div>
|
572 |
+
{/if}
|
573 |
+
</div>
|
574 |
+
<video bind:this={screenVideoInternal} hidden muted playsinline style="display:none"></video>
|
575 |
+
{#if webcam_overlay}
|
576 |
+
<video bind:this={webcamVideoInternal} hidden muted playsinline style="display:none"></video>
|
577 |
+
{/if}
|
578 |
+
<canvas bind:this={canvas} use:bindCanvas hidden style="display:none"></canvas>
|
579 |
+
</div>
|
580 |
+
|
581 |
+
<style>
|
582 |
+
.screen-recorder-container {
|
583 |
+
display: block;
|
584 |
+
width: 100%;
|
585 |
+
box-sizing: border-box;
|
586 |
+
}
|
587 |
+
|
588 |
+
.screen-recorder-container.invisible {
|
589 |
+
display: none;
|
590 |
+
}
|
591 |
+
|
592 |
+
.screen-recorder {
|
593 |
+
border: 2px solid #e0e0e0;
|
594 |
+
border-radius: 8px;
|
595 |
+
padding: 16px;
|
596 |
+
background: #f9f9f9;
|
597 |
+
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
|
598 |
+
}
|
599 |
+
|
600 |
+
.controls {
|
601 |
+
display: flex;
|
602 |
+
align-items: center;
|
603 |
+
gap: 12px;
|
604 |
+
margin-bottom: 12px;
|
605 |
+
flex-wrap: wrap;
|
606 |
+
}
|
607 |
+
|
608 |
+
.record-btn {
|
609 |
+
padding: 10px 20px;
|
610 |
+
border: none;
|
611 |
+
border-radius: 6px;
|
612 |
+
font-size: 14px;
|
613 |
+
font-weight: 500;
|
614 |
+
cursor: pointer;
|
615 |
+
transition: all 0.2s;
|
616 |
+
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
|
617 |
+
}
|
618 |
+
|
619 |
+
.record-btn.start {
|
620 |
+
background: #4CAF50;
|
621 |
+
color: white;
|
622 |
+
}
|
623 |
+
|
624 |
+
.record-btn.start:hover {
|
625 |
+
background: #45a049;
|
626 |
+
}
|
627 |
+
|
628 |
+
.record-btn.stop {
|
629 |
+
background: #f44336;
|
630 |
+
color: white;
|
631 |
+
}
|
632 |
+
|
633 |
+
.record-btn.stop:hover {
|
634 |
+
background: #da190b;
|
635 |
+
}
|
636 |
+
|
637 |
+
.record-btn:disabled {
|
638 |
+
opacity: 0.5;
|
639 |
+
cursor: not-allowed;
|
640 |
+
}
|
641 |
+
|
642 |
+
.recording-time {
|
643 |
+
font-family: 'Courier New', monospace;
|
644 |
+
font-size: 18px;
|
645 |
+
font-weight: bold;
|
646 |
+
color: #f44336;
|
647 |
+
}
|
648 |
+
|
649 |
+
.max-duration {
|
650 |
+
font-family: 'Courier New', monospace;
|
651 |
+
font-size: 14px;
|
652 |
+
color: #666;
|
653 |
+
}
|
654 |
+
|
655 |
+
|
656 |
+
.preview-container {
|
657 |
+
position: relative;
|
658 |
+
margin: 12px 0;
|
659 |
+
border-radius: 6px;
|
660 |
+
overflow: hidden;
|
661 |
+
background: black;
|
662 |
+
min-height: 200px;
|
663 |
+
}
|
664 |
+
|
665 |
+
.preview-video {
|
666 |
+
width: 100%;
|
667 |
+
max-height: 400px;
|
668 |
+
display: block;
|
669 |
+
object-fit: contain;
|
670 |
+
}
|
671 |
+
|
672 |
+
|
673 |
+
.recording-indicator {
|
674 |
+
position: absolute;
|
675 |
+
top: 10px;
|
676 |
+
left: 10px;
|
677 |
+
background: rgba(244, 67, 54, 0.9);
|
678 |
+
color: white;
|
679 |
+
padding: 6px 12px;
|
680 |
+
border-radius: 4px;
|
681 |
+
font-size: 12px;
|
682 |
+
font-weight: bold;
|
683 |
+
animation: pulse 1s infinite;
|
684 |
+
box-shadow: 0 2px 4px rgba(0,0,0,0.3);
|
685 |
+
}
|
686 |
+
|
687 |
+
@keyframes pulse {
|
688 |
+
0%, 100% { opacity: 1; }
|
689 |
+
50% { opacity: 0.7; }
|
690 |
+
}
|
691 |
+
|
692 |
+
.config-info {
|
693 |
+
display: flex;
|
694 |
+
gap: 8px;
|
695 |
+
font-size: 12px;
|
696 |
+
color: #666;
|
697 |
+
margin-top: 8px;
|
698 |
+
flex-wrap: wrap;
|
699 |
+
}
|
700 |
+
|
701 |
+
.config-info span {
|
702 |
+
padding: 4px 8px;
|
703 |
+
background: #e8e8e8;
|
704 |
+
border-radius: 4px;
|
705 |
+
border: 1px solid #ddd;
|
706 |
+
}
|
707 |
+
|
708 |
+
.debug-info {
|
709 |
+
margin-top: 8px;
|
710 |
+
padding: 8px;
|
711 |
+
background: #e8f5e8;
|
712 |
+
border-radius: 4px;
|
713 |
+
border: 1px solid #c8e6c8;
|
714 |
+
}
|
715 |
+
|
716 |
+
.speaker-overlay {
|
717 |
+
position: absolute;
|
718 |
+
top: 8px;
|
719 |
+
right: 8px;
|
720 |
+
background: rgba(0,0,0,0.5);
|
721 |
+
color: white;
|
722 |
+
padding: 4px;
|
723 |
+
border-radius: 4px;
|
724 |
+
font-size: 14px;
|
725 |
+
pointer-events: none;
|
726 |
+
}
|
727 |
+
</style>
|
frontend/gradio.config.js
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
export default {
|
2 |
+
plugins: [],
|
3 |
+
svelte: {
|
4 |
+
preprocess: [],
|
5 |
+
},
|
6 |
+
build: {
|
7 |
+
target: "modules",
|
8 |
+
},
|
9 |
+
};
|
frontend/package-lock.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
frontend/package.json
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"name": "gradio_screenrecorder",
|
3 |
+
"version": "0.3.21",
|
4 |
+
"description": "Gradio Screen Recorder Component",
|
5 |
+
"type": "module",
|
6 |
+
"author": "",
|
7 |
+
"license": "ISC",
|
8 |
+
"private": false,
|
9 |
+
"main_changeset": true,
|
10 |
+
"scripts": {
|
11 |
+
"build": "vite build",
|
12 |
+
"dev": "vite",
|
13 |
+
"preview": "vite preview"
|
14 |
+
},
|
15 |
+
"exports": {
|
16 |
+
".": {
|
17 |
+
"gradio": "./Index.svelte",
|
18 |
+
"svelte": "./dist/Index.svelte",
|
19 |
+
"types": "./dist/Index.svelte.d.ts"
|
20 |
+
},
|
21 |
+
"./example": {
|
22 |
+
"gradio": "./Example.svelte",
|
23 |
+
"svelte": "./dist/Example.svelte",
|
24 |
+
"types": "./dist/Example.svelte.d.ts"
|
25 |
+
},
|
26 |
+
"./package.json": "./package.json"
|
27 |
+
},
|
28 |
+
"dependencies": {
|
29 |
+
"@gradio/atoms": "0.16.1",
|
30 |
+
"@gradio/icons": "0.12.0",
|
31 |
+
"@gradio/statustracker": "0.10.11",
|
32 |
+
"@gradio/utils": "0.10.2",
|
33 |
+
"fix-webm-duration": "^1.0.6",
|
34 |
+
"svelte": "^4.2.7"
|
35 |
+
},
|
36 |
+
"devDependencies": {
|
37 |
+
"@gradio/preview": "0.13.0",
|
38 |
+
"@sveltejs/vite-plugin-svelte": "^3.0.0",
|
39 |
+
"@tsconfig/svelte": "^5.0.4",
|
40 |
+
"svelte-preprocess": "^6.0.3",
|
41 |
+
"typescript": "^5.8.3",
|
42 |
+
"vite": "^5.0.0",
|
43 |
+
"vite-plugin-svelte": "^3.0.0"
|
44 |
+
},
|
45 |
+
"peerDependencies": {
|
46 |
+
"svelte": "^4.0.0"
|
47 |
+
},
|
48 |
+
"repository": {
|
49 |
+
"type": "git",
|
50 |
+
"url": "git+https://github.com/your-username/your-repo.git",
|
51 |
+
"directory": "screenrecorder"
|
52 |
+
},
|
53 |
+
"files": [
|
54 |
+
"dist"
|
55 |
+
]
|
56 |
+
}
|
frontend/tsconfig.json
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"compilerOptions": {
|
3 |
+
"target": "ESNext",
|
4 |
+
"module": "ESNext",
|
5 |
+
"moduleResolution": "node",
|
6 |
+
"esModuleInterop": true,
|
7 |
+
"resolveJsonModule": true,
|
8 |
+
"strict": true,
|
9 |
+
"skipLibCheck": true,
|
10 |
+
"forceConsistentCasingInFileNames": true,
|
11 |
+
"isolatedModules": true,
|
12 |
+
"verbatimModuleSyntax": true,
|
13 |
+
"jsx": "preserve",
|
14 |
+
"lib": ["DOM", "DOM.Iterable", "ESNext"],
|
15 |
+
"types": ["svelte"],
|
16 |
+
"paths": {
|
17 |
+
"@/*": ["./*"]
|
18 |
+
}
|
19 |
+
},
|
20 |
+
"include": ["**/*.d.ts", "**/*.ts", "**/*.js", "**/*.svelte"],
|
21 |
+
"exclude": ["node_modules", "**/node_modules/*"]
|
22 |
+
}
|
frontend/types.d.ts
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Extend the Window interface
|
2 |
+
declare global {
|
3 |
+
interface Window {
|
4 |
+
requestAnimationFrame(callback: FrameRequestCallback): number;
|
5 |
+
cancelAnimationFrame(handle: number): void;
|
6 |
+
MediaRecorder: typeof MediaRecorder;
|
7 |
+
}
|
8 |
+
|
9 |
+
interface MediaRecorderOptions {
|
10 |
+
mimeType?: string;
|
11 |
+
audioBitsPerSecond?: number;
|
12 |
+
videoBitsPerSecond?: number;
|
13 |
+
bitsPerSecond?: number;
|
14 |
+
}
|
15 |
+
|
16 |
+
interface MediaTrackConstraints {
|
17 |
+
displaySurface?: 'browser' | 'monitor' | 'window';
|
18 |
+
cursor?: 'always' | 'motion' | 'never';
|
19 |
+
}
|
20 |
+
}
|
21 |
+
|
22 |
+
// Export the types
|
23 |
+
export {};
|
frontend/vite.config.js
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import { defineConfig } from 'vite';
|
2 |
+
import { svelte } from '@sveltejs/vite-plugin-svelte';
|
3 |
+
|
4 |
+
export default defineConfig({
|
5 |
+
plugins: [svelte()],
|
6 |
+
});
|
manifest.json
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"name": "Screen Recorder Component",
|
3 |
+
"short_name": "ScreenRecorder",
|
4 |
+
"description": "Gradio Screen Recording Component",
|
5 |
+
"start_url": "/",
|
6 |
+
"display": "standalone"
|
7 |
+
}
|
pyproject.toml
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[build-system]
|
2 |
+
requires = [
|
3 |
+
"hatchling",
|
4 |
+
"hatch-requirements-txt",
|
5 |
+
"hatch-fancy-pypi-readme>=22.5.0",
|
6 |
+
]
|
7 |
+
build-backend = "hatchling.build"
|
8 |
+
|
9 |
+
[project]
|
10 |
+
name = "gradio_screenrecorder"
|
11 |
+
version = "0.0.1"
|
12 |
+
description = "Screen Recorder Gradio Custom Component"
|
13 |
+
readme = "README.md"
|
14 |
+
license = "apache-2.0"
|
15 |
+
requires-python = ">=3.10"
|
16 |
+
authors = [{ name = "YOUR NAME", email = "[email protected]" }]
|
17 |
+
keywords = ["gradio-custom-component", "custom-component-track", "gradio", "screen-recorder"]
|
18 |
+
# Add dependencies here
|
19 |
+
dependencies = ["gradio>=4.0,<6.0"]
|
20 |
+
classifiers = [
|
21 |
+
'Development Status :: 3 - Alpha',
|
22 |
+
'Operating System :: OS Independent',
|
23 |
+
'Programming Language :: Python :: 3',
|
24 |
+
'Programming Language :: Python :: 3 :: Only',
|
25 |
+
'Programming Language :: Python :: 3.8',
|
26 |
+
'Programming Language :: Python :: 3.9',
|
27 |
+
'Programming Language :: Python :: 3.10',
|
28 |
+
'Programming Language :: Python :: 3.11',
|
29 |
+
'Topic :: Scientific/Engineering',
|
30 |
+
'Topic :: Scientific/Engineering :: Artificial Intelligence',
|
31 |
+
'Topic :: Scientific/Engineering :: Visualization',
|
32 |
+
]
|
33 |
+
|
34 |
+
# The repository and space URLs are optional, but recommended.
|
35 |
+
# Adding a repository URL will create a badge in the auto-generated README that links to the repository.
|
36 |
+
# Adding a space URL will create a badge in the auto-generated README that links to the space.
|
37 |
+
# This will make it easy for people to find your deployed demo or source code when they
|
38 |
+
# encounter your project in the wild.
|
39 |
+
|
40 |
+
# [project.urls]
|
41 |
+
# repository = "your github repository"
|
42 |
+
# space = "your space url"
|
43 |
+
|
44 |
+
[project.optional-dependencies]
|
45 |
+
dev = ["build", "twine"]
|
46 |
+
|
47 |
+
[tool.hatch.build]
|
48 |
+
artifacts = ["/backend/gradio_screenrecorder/templates", "*.pyi"]
|
49 |
+
|
50 |
+
[tool.hatch.build.targets.wheel]
|
51 |
+
packages = ["/backend/gradio_screenrecorder"]
|
requirements.txt
CHANGED
@@ -1,8 +1,6 @@
|
|
1 |
-
gradio>=
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
websockets>=13.0
|
8 |
-
uvloop
|
|
|
1 |
+
gradio>=4,<6
|
2 |
+
google-generativeai>=0.3.0
|
3 |
+
fastrtc>=0.1.5
|
4 |
+
opencv-python-headless>=4.10.0
|
5 |
+
numpy>=1.24
|
6 |
+
-e ./gradio_screenrecorder/src
|
|
|
|
src/.gitignore
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.eggs/
|
2 |
+
dist/
|
3 |
+
*.pyc
|
4 |
+
__pycache__/
|
5 |
+
*.py[cod]
|
6 |
+
*$py.class
|
7 |
+
__tmp/*
|
8 |
+
*.pyi
|
9 |
+
.mypycache
|
10 |
+
.ruff_cache
|
11 |
+
node_modules
|
12 |
+
backend/**/templates/
|
13 |
+
|
14 |
+
.venv
|
src/.python-version
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
3.13
|
src/README.md
ADDED
@@ -0,0 +1,227 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
tags: [custom-component-track, gradio-custom-component, screen-recorder, PIP, picture-in-picture]
|
3 |
+
title: gradio_screenrecorder
|
4 |
+
short_description: Screen Recorder + Picture in Picture Gradio Custom Component
|
5 |
+
colorFrom: blue
|
6 |
+
colorTo: yellow
|
7 |
+
sdk: gradio
|
8 |
+
pinned: false
|
9 |
+
app_file: space.py
|
10 |
+
---
|
11 |
+
|
12 |
+
# `gradio_screenrecorder`
|
13 |
+
<img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.1%20-%20orange">
|
14 |
+
|
15 |
+
Screen Recorder Gradio Custom Component
|
16 |
+
|
17 |
+
## Installation
|
18 |
+
|
19 |
+
```bash
|
20 |
+
pip install gradio_screenrecorder
|
21 |
+
```
|
22 |
+
|
23 |
+
## Usage
|
24 |
+
|
25 |
+
```python
|
26 |
+
import gradio as gr
|
27 |
+
from gradio_screenrecorder import ScreenRecorder
|
28 |
+
|
29 |
+
def handle_recording(recording_data):
|
30 |
+
"""Handle recorded video data"""
|
31 |
+
print(f'Received recording data: {recording_data}')
|
32 |
+
|
33 |
+
if not recording_data or not recording_data.get('video'):
|
34 |
+
return None
|
35 |
+
|
36 |
+
try:
|
37 |
+
video_info = recording_data['video']
|
38 |
+
# Return the video path that can be used by the Video component
|
39 |
+
return video_info.get('path')
|
40 |
+
except Exception as e:
|
41 |
+
print(f'Error processing recording: {e}')
|
42 |
+
return None
|
43 |
+
|
44 |
+
|
45 |
+
css = """
|
46 |
+
.screen-recorder-demo {
|
47 |
+
max-width: 800px;
|
48 |
+
margin: 0 auto;
|
49 |
+
}
|
50 |
+
"""
|
51 |
+
|
52 |
+
with gr.Blocks(css=css, title="Screen Recorder Demo") as demo:
|
53 |
+
gr.HTML("""
|
54 |
+
<h1 style='text-align: center'>
|
55 |
+
Gradio Screen Recorder Component Demo
|
56 |
+
</h1>
|
57 |
+
""")
|
58 |
+
|
59 |
+
with gr.Row():
|
60 |
+
with gr.Column():
|
61 |
+
recorder = ScreenRecorder(
|
62 |
+
audio_enabled=True,
|
63 |
+
webcam_overlay=True, # Disabled for now
|
64 |
+
webcam_position="top-left",
|
65 |
+
recording_format="webm",
|
66 |
+
max_duration=60,
|
67 |
+
label="Screen Recorder"
|
68 |
+
)
|
69 |
+
|
70 |
+
with gr.Column():
|
71 |
+
output_video = gr.Video(label="Recorded Video")
|
72 |
+
|
73 |
+
# Event handler
|
74 |
+
recorder.change(
|
75 |
+
fn=handle_recording,
|
76 |
+
inputs=recorder,
|
77 |
+
outputs=output_video
|
78 |
+
)
|
79 |
+
|
80 |
+
if __name__ == "__main__":
|
81 |
+
demo.launch()
|
82 |
+
|
83 |
+
```
|
84 |
+
|
85 |
+
## `ScreenRecorder`
|
86 |
+
|
87 |
+
### Initialization
|
88 |
+
|
89 |
+
<table>
|
90 |
+
<thead>
|
91 |
+
<tr>
|
92 |
+
<th align="left">name</th>
|
93 |
+
<th align="left" style="width: 25%;">type</th>
|
94 |
+
<th align="left">default</th>
|
95 |
+
<th align="left">description</th>
|
96 |
+
</tr>
|
97 |
+
</thead>
|
98 |
+
<tbody>
|
99 |
+
<tr>
|
100 |
+
<td align="left"><code>audio_enabled</code></td>
|
101 |
+
<td align="left" style="width: 25%;">
|
102 |
+
|
103 |
+
```python
|
104 |
+
bool
|
105 |
+
```
|
106 |
+
|
107 |
+
</td>
|
108 |
+
<td align="left"><code>True</code></td>
|
109 |
+
<td align="left">None</td>
|
110 |
+
</tr>
|
111 |
+
|
112 |
+
<tr>
|
113 |
+
<td align="left"><code>webcam_overlay</code></td>
|
114 |
+
<td align="left" style="width: 25%;">
|
115 |
+
|
116 |
+
```python
|
117 |
+
bool
|
118 |
+
```
|
119 |
+
|
120 |
+
</td>
|
121 |
+
<td align="left"><code>False</code></td>
|
122 |
+
<td align="left">None</td>
|
123 |
+
</tr>
|
124 |
+
|
125 |
+
<tr>
|
126 |
+
<td align="left"><code>webcam_position</code></td>
|
127 |
+
<td align="left" style="width: 25%;">
|
128 |
+
|
129 |
+
```python
|
130 |
+
"top-left" | "top-right" | "bottom-left" | "bottom-right"
|
131 |
+
```
|
132 |
+
|
133 |
+
</td>
|
134 |
+
<td align="left"><code>"bottom-right"</code></td>
|
135 |
+
<td align="left">None</td>
|
136 |
+
</tr>
|
137 |
+
|
138 |
+
<tr>
|
139 |
+
<td align="left"><code>recording_format</code></td>
|
140 |
+
<td align="left" style="width: 25%;">
|
141 |
+
|
142 |
+
```python
|
143 |
+
str
|
144 |
+
```
|
145 |
+
|
146 |
+
</td>
|
147 |
+
<td align="left"><code>"webm"</code></td>
|
148 |
+
<td align="left">None</td>
|
149 |
+
</tr>
|
150 |
+
|
151 |
+
<tr>
|
152 |
+
<td align="left"><code>max_duration</code></td>
|
153 |
+
<td align="left" style="width: 25%;">
|
154 |
+
|
155 |
+
```python
|
156 |
+
typing.Optional[int][int, None]
|
157 |
+
```
|
158 |
+
|
159 |
+
</td>
|
160 |
+
<td align="left"><code>None</code></td>
|
161 |
+
<td align="left">None</td>
|
162 |
+
</tr>
|
163 |
+
|
164 |
+
<tr>
|
165 |
+
<td align="left"><code>interactive</code></td>
|
166 |
+
<td align="left" style="width: 25%;">
|
167 |
+
|
168 |
+
```python
|
169 |
+
bool
|
170 |
+
```
|
171 |
+
|
172 |
+
</td>
|
173 |
+
<td align="left"><code>True</code></td>
|
174 |
+
<td align="left">None</td>
|
175 |
+
</tr>
|
176 |
+
</tbody></table>
|
177 |
+
|
178 |
+
|
179 |
+
### Events
|
180 |
+
|
181 |
+
| name | description |
|
182 |
+
|:-----|:------------|
|
183 |
+
| `record_start` | |
|
184 |
+
| `record_stop` | |
|
185 |
+
| `stream_update` | |
|
186 |
+
| `change` | |
|
187 |
+
|
188 |
+
|
189 |
+
|
190 |
+
### User function
|
191 |
+
|
192 |
+
The impact on the users predict function varies depending on whether the component is used as an input or output for an event (or both).
|
193 |
+
|
194 |
+
- When used as an Input, the component only impacts the input signature of the user function.
|
195 |
+
- When used as an output, the component only impacts the return signature of the user function.
|
196 |
+
|
197 |
+
The code snippet below is accurate in cases where the component is used as both an input and an output.
|
198 |
+
|
199 |
+
|
200 |
+
|
201 |
+
```python
|
202 |
+
def predict(
|
203 |
+
value: typing.Optional[
|
204 |
+
gradio_screenrecorder.screenrecorder.ScreenRecorderData
|
205 |
+
][ScreenRecorderData, None]
|
206 |
+
) -> Unknown:
|
207 |
+
return value
|
208 |
+
```
|
209 |
+
|
210 |
+
|
211 |
+
## `ScreenRecorderData`
|
212 |
+
```python
|
213 |
+
class ScreenRecorderData(GradioModel):
|
214 |
+
video: Optional[FileData] = None
|
215 |
+
duration: Optional[float] = None
|
216 |
+
audio_enabled: bool = True
|
217 |
+
status: Literal["recording", "stopped", "error"] = (
|
218 |
+
"stopped"
|
219 |
+
)
|
220 |
+
|
221 |
+
class Config:
|
222 |
+
json_encoders = {
|
223 |
+
FileData: lambda v: v.model_dump()
|
224 |
+
if v
|
225 |
+
else None
|
226 |
+
}
|
227 |
+
```
|
src/backend/gradio_screenrecorder/__init__.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
from .screenrecorder import ScreenRecorder
|
3 |
+
|
4 |
+
__all__ = ['ScreenRecorder']
|
src/backend/gradio_screenrecorder/screenrecorder.py
ADDED
@@ -0,0 +1,272 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from gradio.components.base import Component
|
3 |
+
from gradio.data_classes import FileData, GradioModel
|
4 |
+
from typing import Optional, Literal, Any
|
5 |
+
import tempfile
|
6 |
+
import os
|
7 |
+
import json
|
8 |
+
|
9 |
+
class ScreenRecorderData(GradioModel):
|
10 |
+
video: Optional[FileData] = None
|
11 |
+
duration: Optional[float] = None
|
12 |
+
audio_enabled: bool = True
|
13 |
+
status: Literal["recording", "stopped", "error"] = "stopped"
|
14 |
+
|
15 |
+
class Config:
|
16 |
+
json_encoders = {
|
17 |
+
FileData: lambda v: v.model_dump() if v else None
|
18 |
+
}
|
19 |
+
|
20 |
+
|
21 |
+
class ScreenRecorder(Component):
|
22 |
+
"""
|
23 |
+
Custom Gradio component for comprehensive screen recording functionality.
|
24 |
+
"""
|
25 |
+
|
26 |
+
data_model = ScreenRecorderData
|
27 |
+
|
28 |
+
EVENTS = [
|
29 |
+
"record_start",
|
30 |
+
"record_stop",
|
31 |
+
"stream_update",
|
32 |
+
"change"
|
33 |
+
]
|
34 |
+
|
35 |
+
def __init__(
|
36 |
+
self,
|
37 |
+
value=None,
|
38 |
+
audio_enabled: bool = True,
|
39 |
+
webcam_overlay: bool = False,
|
40 |
+
webcam_position: Literal["top-left", "top-right", "bottom-left", "bottom-right"] = "bottom-right",
|
41 |
+
recording_format: str = "webm",
|
42 |
+
max_duration: Optional[int] = None,
|
43 |
+
interactive: bool = True,
|
44 |
+
**kwargs
|
45 |
+
):
|
46 |
+
self.audio_enabled = audio_enabled
|
47 |
+
self.webcam_overlay = webcam_overlay
|
48 |
+
self.webcam_position = webcam_position
|
49 |
+
self.recording_format = recording_format
|
50 |
+
self.max_duration = max_duration
|
51 |
+
self._status = "stopped"
|
52 |
+
|
53 |
+
super().__init__(
|
54 |
+
value=value,
|
55 |
+
interactive=interactive,
|
56 |
+
**kwargs
|
57 |
+
)
|
58 |
+
|
59 |
+
def example_payload(self) -> dict:
|
60 |
+
"""
|
61 |
+
The example inputs for this component for API usage. Must be JSON-serializable.
|
62 |
+
"""
|
63 |
+
return {
|
64 |
+
"video": {
|
65 |
+
"path": "https://sample-videos.com/zip/10/mp4/SampleVideo_360x240_1mb.mp4",
|
66 |
+
"orig_name": "example_recording.webm",
|
67 |
+
"size": 1024000
|
68 |
+
},
|
69 |
+
"duration": 30.5,
|
70 |
+
"audio_enabled": True,
|
71 |
+
"status": "stopped"
|
72 |
+
}
|
73 |
+
|
74 |
+
def example_value(self) -> ScreenRecorderData:
|
75 |
+
"""
|
76 |
+
An example value for this component for the default app.
|
77 |
+
"""
|
78 |
+
return ScreenRecorderData(
|
79 |
+
video=FileData(
|
80 |
+
path="https://sample-videos.com/zip/10/mp4/SampleVideo_360x240_1mb.mp4",
|
81 |
+
orig_name="example_recording.webm",
|
82 |
+
size=1024000
|
83 |
+
),
|
84 |
+
duration=30.5,
|
85 |
+
audio_enabled=True,
|
86 |
+
status="stopped"
|
87 |
+
)
|
88 |
+
|
89 |
+
def flag(self, x, flag_dir: str = "") -> str:
|
90 |
+
"""
|
91 |
+
Write the component's value to a format for flagging (CSV storage).
|
92 |
+
"""
|
93 |
+
if x is None:
|
94 |
+
return ""
|
95 |
+
|
96 |
+
if isinstance(x, ScreenRecorderData) and x.video:
|
97 |
+
return f"Recording: {x.video.orig_name} ({x.duration}s) - Status: {x.status}"
|
98 |
+
|
99 |
+
if isinstance(x, dict) and "video" in x:
|
100 |
+
duration = x.get("duration", "unknown")
|
101 |
+
status = x.get("status", "unknown")
|
102 |
+
video_name = x["video"].get("orig_name", "unknown") if x["video"] else "none"
|
103 |
+
return f"Recording: {video_name} ({duration}s) - Status: {status}"
|
104 |
+
|
105 |
+
return str(x)
|
106 |
+
|
107 |
+
def preprocess(self, payload) -> Optional[ScreenRecorderData]:
|
108 |
+
"""Process incoming recording data from frontend."""
|
109 |
+
if payload is None:
|
110 |
+
return None
|
111 |
+
|
112 |
+
if isinstance(payload, dict):
|
113 |
+
if payload.get("status") == "error": # Early exit for errors from frontend
|
114 |
+
raise gr.Error(f"Recording failed on frontend: {payload.get('error', 'Unknown error')}")
|
115 |
+
|
116 |
+
# If 'video' field is a string, assume it's JSON and parse it.
|
117 |
+
if "video" in payload and isinstance(payload["video"], str):
|
118 |
+
try:
|
119 |
+
video_json_string = payload["video"]
|
120 |
+
if video_json_string.strip().startswith("{") and video_json_string.strip().endswith("}"):
|
121 |
+
payload["video"] = json.loads(video_json_string)
|
122 |
+
# If it's a string but not our expected JSON (e.g. 'null', or empty string, or simple path)
|
123 |
+
# json.loads would fail or Pydantic validation later will catch it if structure is wrong.
|
124 |
+
# For 'null' string, json.loads results in None for payload["video"].
|
125 |
+
elif video_json_string.lower() == 'null':
|
126 |
+
payload["video"] = None
|
127 |
+
else:
|
128 |
+
# This case implies a string that isn't a JSON object or 'null',
|
129 |
+
# e.g. a direct file path string, which FileData might not directly accept
|
130 |
+
# if it expects a dict. Pydantic will raise error later if type is incompatible.
|
131 |
+
gr.Warning(f"Video data is a string but not a recognized JSON object or 'null': {video_json_string[:100]}")
|
132 |
+
# To be safe, if it's not a JSON object string, we might want to error or handle specifically
|
133 |
+
# For now, let Pydantic try to handle it or fail.
|
134 |
+
|
135 |
+
except json.JSONDecodeError:
|
136 |
+
raise gr.Error(f"Invalid JSON for video data: {payload['video'][:100]}")
|
137 |
+
|
138 |
+
# --- Validations from here ---
|
139 |
+
video_data = payload.get("video") # Use .get() for safety, as 'video' might be absent or None
|
140 |
+
|
141 |
+
if video_data is not None: # Only validate video_data if it exists
|
142 |
+
if not isinstance(video_data, dict):
|
143 |
+
# This can happen if payload["video"] was a string like "some_path.webm" and not parsed to dict
|
144 |
+
# Or if it was parsed to something unexpected.
|
145 |
+
raise gr.Error(f"Video data is not a dictionary after processing: {type(video_data)}. Value: {str(video_data)[:100]}")
|
146 |
+
|
147 |
+
if video_data.get("size", 0) == 0:
|
148 |
+
gr.Warning("Received recording with zero size. This might be an empty recording or an issue with data capture.")
|
149 |
+
# Depending on requirements, could raise gr.Error here.
|
150 |
+
|
151 |
+
max_size = 500 * 1024 * 1024 # 500MB
|
152 |
+
if video_data.get("size", 0) > max_size:
|
153 |
+
raise gr.Error(f"Recording file too large ({video_data.get('size', 0)} bytes). Maximum allowed: {max_size} bytes.")
|
154 |
+
# If video_data is None (e.g. 'video': null was sent, or 'video' key missing),
|
155 |
+
# ScreenRecorderData will have video=None, which is allowed by Optional[FileData].
|
156 |
+
|
157 |
+
duration = payload.get("duration", 0)
|
158 |
+
if duration <= 0 and video_data is not None: # Only warn about duration if there's video data
|
159 |
+
gr.Warning("Recording duration is 0 or invalid. The recording might be corrupted.")
|
160 |
+
|
161 |
+
try:
|
162 |
+
return ScreenRecorderData(**payload)
|
163 |
+
except Exception as e: # Catch Pydantic validation errors or other issues during model instantiation
|
164 |
+
# Log the payload for easier debugging if there's a Pydantic error
|
165 |
+
# Be careful with logging sensitive data in production.
|
166 |
+
# print(f"Error creating ScreenRecorderData. Payload: {payload}")
|
167 |
+
raise gr.Error(f"Error creating ScreenRecorderData from payload: {e}")
|
168 |
+
|
169 |
+
elif isinstance(payload, ScreenRecorderData): # If it's already the correct type
|
170 |
+
return payload
|
171 |
+
|
172 |
+
gr.Warning(f"Unexpected payload format: {type(payload)}. Payload: {str(payload)[:200]}")
|
173 |
+
return None
|
174 |
+
|
175 |
+
# def postprocess(self, value) -> Optional[dict]:
|
176 |
+
# """Process outgoing data to frontend."""
|
177 |
+
# if value is None:
|
178 |
+
# return {"status": "stopped"} # Ensure valid empty state
|
179 |
+
|
180 |
+
# try:
|
181 |
+
# if isinstance(value, ScreenRecorderData):
|
182 |
+
# return value.model_dump()
|
183 |
+
# elif isinstance(value, dict):
|
184 |
+
# return value
|
185 |
+
# return None
|
186 |
+
# except Exception as e:
|
187 |
+
# return {"status": "error", "error": str(e)}
|
188 |
+
|
189 |
+
|
190 |
+
def postprocess(self, value) -> Optional[dict]:
|
191 |
+
"""Process outgoing data to frontend."""
|
192 |
+
print(f'value in postprocess: {value}')
|
193 |
+
if value is None:
|
194 |
+
return None
|
195 |
+
|
196 |
+
try:
|
197 |
+
# If it's already a dict, return as is
|
198 |
+
if isinstance(value, dict):
|
199 |
+
return value
|
200 |
+
|
201 |
+
# If it's a ScreenRecorderData object, convert to dict
|
202 |
+
if hasattr(value, 'model_dump'):
|
203 |
+
return value.model_dump()
|
204 |
+
|
205 |
+
# Handle string values
|
206 |
+
if isinstance(value, str):
|
207 |
+
return {"video": {"path": value}}
|
208 |
+
|
209 |
+
return None
|
210 |
+
|
211 |
+
except Exception as e:
|
212 |
+
print(f'Error in postprocess: {e}')
|
213 |
+
return None
|
214 |
+
|
215 |
+
|
216 |
+
# try:
|
217 |
+
# if isinstance(value, ScreenRecorderData):
|
218 |
+
# # Ensure video data exists before sending
|
219 |
+
# if not value.video:
|
220 |
+
# return {"status": "error", "error": "No video recorded"}
|
221 |
+
|
222 |
+
# return {
|
223 |
+
# "video": value.video,
|
224 |
+
# "duration": value.duration,
|
225 |
+
# "audio_enabled": value.audio_enabled,
|
226 |
+
# "status": value.status
|
227 |
+
# }
|
228 |
+
|
229 |
+
# # Handle raw dict format from frontend
|
230 |
+
# if isinstance(value, dict):
|
231 |
+
# return {
|
232 |
+
# "video": FileData(**value.get("video", {})),
|
233 |
+
# "duration": value.get("duration"),
|
234 |
+
# "audio_enabled": value.get("audio_enabled", True),
|
235 |
+
# "status": value.get("status", "stopped")
|
236 |
+
# }
|
237 |
+
|
238 |
+
# except Exception as e:
|
239 |
+
# return {"status": "error", "error": str(e)}
|
240 |
+
|
241 |
+
# return {"status": "stopped"}
|
242 |
+
|
243 |
+
def as_example(self, input_data):
|
244 |
+
"""Handle example data display."""
|
245 |
+
if input_data is None:
|
246 |
+
return None
|
247 |
+
|
248 |
+
if isinstance(input_data, (ScreenRecorderData, dict)):
|
249 |
+
return input_data
|
250 |
+
|
251 |
+
# Convert simple video path to proper format
|
252 |
+
if isinstance(input_data, str):
|
253 |
+
return {
|
254 |
+
"video": {
|
255 |
+
"path": input_data,
|
256 |
+
"orig_name": os.path.basename(input_data),
|
257 |
+
"size": 0
|
258 |
+
},
|
259 |
+
"duration": None,
|
260 |
+
"audio_enabled": self.audio_enabled,
|
261 |
+
"status": "stopped"
|
262 |
+
}
|
263 |
+
|
264 |
+
return input_data
|
265 |
+
|
266 |
+
def update_status(self, status: Literal["recording", "stopped", "error"]):
|
267 |
+
"""Update the internal status of the recorder."""
|
268 |
+
self._status = status
|
269 |
+
|
270 |
+
def get_status(self) -> str:
|
271 |
+
"""Get the current status of the recorder."""
|
272 |
+
return self._status
|
src/demo/__init__.py
ADDED
File without changes
|
src/demo/app.py
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from gradio_screenrecorder import ScreenRecorder
|
3 |
+
|
4 |
+
def handle_recording(recording_data):
|
5 |
+
"""Handle recorded video data"""
|
6 |
+
print(f'Received recording data: {recording_data}')
|
7 |
+
|
8 |
+
if not recording_data or not recording_data.get('video'):
|
9 |
+
return None
|
10 |
+
|
11 |
+
try:
|
12 |
+
video_info = recording_data['video']
|
13 |
+
# Return the video path that can be used by the Video component
|
14 |
+
return video_info.get('path')
|
15 |
+
except Exception as e:
|
16 |
+
print(f'Error processing recording: {e}')
|
17 |
+
return None
|
18 |
+
|
19 |
+
|
20 |
+
css = """
|
21 |
+
.screen-recorder-demo {
|
22 |
+
max-width: 800px;
|
23 |
+
margin: 0 auto;
|
24 |
+
}
|
25 |
+
"""
|
26 |
+
|
27 |
+
with gr.Blocks(css=css, title="Screen Recorder Demo") as demo:
|
28 |
+
gr.HTML("""
|
29 |
+
<h1 style='text-align: center'>
|
30 |
+
Gradio Screen Recorder Component Demo
|
31 |
+
</h1>
|
32 |
+
""")
|
33 |
+
|
34 |
+
with gr.Row():
|
35 |
+
with gr.Column():
|
36 |
+
recorder = ScreenRecorder(
|
37 |
+
audio_enabled=True,
|
38 |
+
webcam_overlay=True, # Disabled for now
|
39 |
+
webcam_position="top-left",
|
40 |
+
recording_format="webm",
|
41 |
+
max_duration=60,
|
42 |
+
label="Screen Recorder"
|
43 |
+
)
|
44 |
+
|
45 |
+
with gr.Column():
|
46 |
+
output_video = gr.Video(label="Recorded Video")
|
47 |
+
|
48 |
+
# Event handler
|
49 |
+
recorder.change(
|
50 |
+
fn=handle_recording,
|
51 |
+
inputs=recorder,
|
52 |
+
outputs=output_video
|
53 |
+
)
|
54 |
+
|
55 |
+
if __name__ == "__main__":
|
56 |
+
demo.launch()
|
src/demo/css.css
ADDED
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
html {
|
2 |
+
font-family: Inter;
|
3 |
+
font-size: 16px;
|
4 |
+
font-weight: 400;
|
5 |
+
line-height: 1.5;
|
6 |
+
-webkit-text-size-adjust: 100%;
|
7 |
+
background: #fff;
|
8 |
+
color: #323232;
|
9 |
+
-webkit-font-smoothing: antialiased;
|
10 |
+
-moz-osx-font-smoothing: grayscale;
|
11 |
+
text-rendering: optimizeLegibility;
|
12 |
+
}
|
13 |
+
|
14 |
+
:root {
|
15 |
+
--space: 1;
|
16 |
+
--vspace: calc(var(--space) * 1rem);
|
17 |
+
--vspace-0: calc(3 * var(--space) * 1rem);
|
18 |
+
--vspace-1: calc(2 * var(--space) * 1rem);
|
19 |
+
--vspace-2: calc(1.5 * var(--space) * 1rem);
|
20 |
+
--vspace-3: calc(0.5 * var(--space) * 1rem);
|
21 |
+
}
|
22 |
+
|
23 |
+
.app {
|
24 |
+
max-width: 748px !important;
|
25 |
+
}
|
26 |
+
|
27 |
+
.prose p {
|
28 |
+
margin: var(--vspace) 0;
|
29 |
+
line-height: var(--vspace * 2);
|
30 |
+
font-size: 1rem;
|
31 |
+
}
|
32 |
+
|
33 |
+
code {
|
34 |
+
font-family: "Inconsolata", sans-serif;
|
35 |
+
font-size: 16px;
|
36 |
+
}
|
37 |
+
|
38 |
+
h1,
|
39 |
+
h1 code {
|
40 |
+
font-weight: 400;
|
41 |
+
line-height: calc(2.5 / var(--space) * var(--vspace));
|
42 |
+
}
|
43 |
+
|
44 |
+
h1 code {
|
45 |
+
background: none;
|
46 |
+
border: none;
|
47 |
+
letter-spacing: 0.05em;
|
48 |
+
padding-bottom: 5px;
|
49 |
+
position: relative;
|
50 |
+
padding: 0;
|
51 |
+
}
|
52 |
+
|
53 |
+
h2 {
|
54 |
+
margin: var(--vspace-1) 0 var(--vspace-2) 0;
|
55 |
+
line-height: 1em;
|
56 |
+
}
|
57 |
+
|
58 |
+
h3,
|
59 |
+
h3 code {
|
60 |
+
margin: var(--vspace-1) 0 var(--vspace-2) 0;
|
61 |
+
line-height: 1em;
|
62 |
+
}
|
63 |
+
|
64 |
+
h4,
|
65 |
+
h5,
|
66 |
+
h6 {
|
67 |
+
margin: var(--vspace-3) 0 var(--vspace-3) 0;
|
68 |
+
line-height: var(--vspace);
|
69 |
+
}
|
70 |
+
|
71 |
+
.bigtitle,
|
72 |
+
h1,
|
73 |
+
h1 code {
|
74 |
+
font-size: calc(8px * 4.5);
|
75 |
+
word-break: break-word;
|
76 |
+
}
|
77 |
+
|
78 |
+
.title,
|
79 |
+
h2,
|
80 |
+
h2 code {
|
81 |
+
font-size: calc(8px * 3.375);
|
82 |
+
font-weight: lighter;
|
83 |
+
word-break: break-word;
|
84 |
+
border: none;
|
85 |
+
background: none;
|
86 |
+
}
|
87 |
+
|
88 |
+
.subheading1,
|
89 |
+
h3,
|
90 |
+
h3 code {
|
91 |
+
font-size: calc(8px * 1.8);
|
92 |
+
font-weight: 600;
|
93 |
+
border: none;
|
94 |
+
background: none;
|
95 |
+
letter-spacing: 0.1em;
|
96 |
+
text-transform: uppercase;
|
97 |
+
}
|
98 |
+
|
99 |
+
h2 code {
|
100 |
+
padding: 0;
|
101 |
+
position: relative;
|
102 |
+
letter-spacing: 0.05em;
|
103 |
+
}
|
104 |
+
|
105 |
+
blockquote {
|
106 |
+
font-size: calc(8px * 1.1667);
|
107 |
+
font-style: italic;
|
108 |
+
line-height: calc(1.1667 * var(--vspace));
|
109 |
+
margin: var(--vspace-2) var(--vspace-2);
|
110 |
+
}
|
111 |
+
|
112 |
+
.subheading2,
|
113 |
+
h4 {
|
114 |
+
font-size: calc(8px * 1.4292);
|
115 |
+
text-transform: uppercase;
|
116 |
+
font-weight: 600;
|
117 |
+
}
|
118 |
+
|
119 |
+
.subheading3,
|
120 |
+
h5 {
|
121 |
+
font-size: calc(8px * 1.2917);
|
122 |
+
line-height: calc(1.2917 * var(--vspace));
|
123 |
+
|
124 |
+
font-weight: lighter;
|
125 |
+
text-transform: uppercase;
|
126 |
+
letter-spacing: 0.15em;
|
127 |
+
}
|
128 |
+
|
129 |
+
h6 {
|
130 |
+
font-size: calc(8px * 1.1667);
|
131 |
+
font-size: 1.1667em;
|
132 |
+
font-weight: normal;
|
133 |
+
font-style: italic;
|
134 |
+
font-family: "le-monde-livre-classic-byol", serif !important;
|
135 |
+
letter-spacing: 0px !important;
|
136 |
+
}
|
137 |
+
|
138 |
+
#start .md > *:first-child {
|
139 |
+
margin-top: 0;
|
140 |
+
}
|
141 |
+
|
142 |
+
h2 + h3 {
|
143 |
+
margin-top: 0;
|
144 |
+
}
|
145 |
+
|
146 |
+
.md hr {
|
147 |
+
border: none;
|
148 |
+
border-top: 1px solid var(--block-border-color);
|
149 |
+
margin: var(--vspace-2) 0 var(--vspace-2) 0;
|
150 |
+
}
|
151 |
+
.prose ul {
|
152 |
+
margin: var(--vspace-2) 0 var(--vspace-1) 0;
|
153 |
+
}
|
154 |
+
|
155 |
+
.gap {
|
156 |
+
gap: 0;
|
157 |
+
}
|
src/demo/requirements.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
gradio_screenrecorder
|
src/demo/space.py
ADDED
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
import gradio as gr
|
3 |
+
from app import demo as app
|
4 |
+
import os
|
5 |
+
|
6 |
+
_docs = {'ScreenRecorder': {'description': 'Custom Gradio component for comprehensive screen recording functionality.', 'members': {'__init__': {'audio_enabled': {'type': 'bool', 'default': 'True', 'description': None}, 'webcam_overlay': {'type': 'bool', 'default': 'False', 'description': None}, 'webcam_position': {'type': '"top-left" | "top-right" | "bottom-left" | "bottom-right"', 'default': '"bottom-right"', 'description': None}, 'recording_format': {'type': 'str', 'default': '"webm"', 'description': None}, 'max_duration': {'type': 'typing.Optional[int][int, None]', 'default': 'None', 'description': None}, 'interactive': {'type': 'bool', 'default': 'True', 'description': None}}, 'postprocess': {}, 'preprocess': {'return': {'type': 'typing.Optional[\n gradio_screenrecorder.screenrecorder.ScreenRecorderData\n][ScreenRecorderData, None]', 'description': None}, 'value': None}}, 'events': {'record_start': {'type': None, 'default': None, 'description': ''}, 'record_stop': {'type': None, 'default': None, 'description': ''}, 'stream_update': {'type': None, 'default': None, 'description': ''}, 'change': {'type': None, 'default': None, 'description': ''}}}, '__meta__': {'additional_interfaces': {'ScreenRecorderData': {'source': 'class ScreenRecorderData(GradioModel):\n video: Optional[FileData] = None\n duration: Optional[float] = None\n audio_enabled: bool = True\n status: Literal["recording", "stopped", "error"] = (\n "stopped"\n )\n\n class Config:\n json_encoders = {\n FileData: lambda v: v.model_dump()\n if v\n else None\n }'}}, 'user_fn_refs': {'ScreenRecorder': ['ScreenRecorderData']}}}
|
7 |
+
|
8 |
+
abs_path = os.path.join(os.path.dirname(__file__), "css.css")
|
9 |
+
|
10 |
+
with gr.Blocks(
|
11 |
+
css=abs_path,
|
12 |
+
theme=gr.themes.Default(
|
13 |
+
font_mono=[
|
14 |
+
gr.themes.GoogleFont("Inconsolata"),
|
15 |
+
"monospace",
|
16 |
+
],
|
17 |
+
),
|
18 |
+
) as demo:
|
19 |
+
gr.Markdown(
|
20 |
+
"""
|
21 |
+
# `gradio_screenrecorder`
|
22 |
+
|
23 |
+
<div style="display: flex; gap: 7px;">
|
24 |
+
<img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.1%20-%20orange">
|
25 |
+
</div>
|
26 |
+
|
27 |
+
Screen Recorder Gradio Custom Component
|
28 |
+
""", elem_classes=["md-custom"], header_links=True)
|
29 |
+
app.render()
|
30 |
+
gr.Markdown(
|
31 |
+
"""
|
32 |
+
## Installation
|
33 |
+
|
34 |
+
```bash
|
35 |
+
pip install gradio_screenrecorder
|
36 |
+
```
|
37 |
+
|
38 |
+
## Usage
|
39 |
+
|
40 |
+
```python
|
41 |
+
import gradio as gr
|
42 |
+
from gradio_screenrecorder import ScreenRecorder
|
43 |
+
|
44 |
+
def handle_recording(recording_data):
|
45 |
+
\"\"\"Handle recorded video data\"\"\"
|
46 |
+
print(f'Received recording data: {recording_data}')
|
47 |
+
|
48 |
+
if not recording_data or not recording_data.get('video'):
|
49 |
+
return None
|
50 |
+
|
51 |
+
try:
|
52 |
+
video_info = recording_data['video']
|
53 |
+
# Return the video path that can be used by the Video component
|
54 |
+
return video_info.get('path')
|
55 |
+
except Exception as e:
|
56 |
+
print(f'Error processing recording: {e}')
|
57 |
+
return None
|
58 |
+
|
59 |
+
|
60 |
+
css = \"\"\"
|
61 |
+
.screen-recorder-demo {
|
62 |
+
max-width: 800px;
|
63 |
+
margin: 0 auto;
|
64 |
+
}
|
65 |
+
\"\"\"
|
66 |
+
|
67 |
+
with gr.Blocks(css=css, title="Screen Recorder Demo") as demo:
|
68 |
+
gr.HTML(\"\"\"
|
69 |
+
<h1 style='text-align: center'>
|
70 |
+
Gradio Screen Recorder Component Demo
|
71 |
+
</h1>
|
72 |
+
\"\"\")
|
73 |
+
|
74 |
+
with gr.Row():
|
75 |
+
with gr.Column():
|
76 |
+
recorder = ScreenRecorder(
|
77 |
+
audio_enabled=True,
|
78 |
+
webcam_overlay=True, # Disabled for now
|
79 |
+
webcam_position="top-left",
|
80 |
+
recording_format="webm",
|
81 |
+
max_duration=60,
|
82 |
+
label="Screen Recorder"
|
83 |
+
)
|
84 |
+
|
85 |
+
with gr.Column():
|
86 |
+
output_video = gr.Video(label="Recorded Video")
|
87 |
+
|
88 |
+
# Event handler
|
89 |
+
recorder.change(
|
90 |
+
fn=handle_recording,
|
91 |
+
inputs=recorder,
|
92 |
+
outputs=output_video
|
93 |
+
)
|
94 |
+
|
95 |
+
if __name__ == "__main__":
|
96 |
+
demo.launch()
|
97 |
+
|
98 |
+
```
|
99 |
+
""", elem_classes=["md-custom"], header_links=True)
|
100 |
+
|
101 |
+
|
102 |
+
gr.Markdown("""
|
103 |
+
## `ScreenRecorder`
|
104 |
+
|
105 |
+
### Initialization
|
106 |
+
""", elem_classes=["md-custom"], header_links=True)
|
107 |
+
|
108 |
+
gr.ParamViewer(value=_docs["ScreenRecorder"]["members"]["__init__"], linkify=['ScreenRecorderData'])
|
109 |
+
|
110 |
+
|
111 |
+
gr.Markdown("### Events")
|
112 |
+
gr.ParamViewer(value=_docs["ScreenRecorder"]["events"], linkify=['Event'])
|
113 |
+
|
114 |
+
|
115 |
+
|
116 |
+
|
117 |
+
gr.Markdown("""
|
118 |
+
|
119 |
+
### User function
|
120 |
+
|
121 |
+
The impact on the users predict function varies depending on whether the component is used as an input or output for an event (or both).
|
122 |
+
|
123 |
+
- When used as an Input, the component only impacts the input signature of the user function.
|
124 |
+
- When used as an output, the component only impacts the return signature of the user function.
|
125 |
+
|
126 |
+
The code snippet below is accurate in cases where the component is used as both an input and an output.
|
127 |
+
|
128 |
+
|
129 |
+
|
130 |
+
```python
|
131 |
+
def predict(
|
132 |
+
value: typing.Optional[
|
133 |
+
gradio_screenrecorder.screenrecorder.ScreenRecorderData
|
134 |
+
][ScreenRecorderData, None]
|
135 |
+
) -> Unknown:
|
136 |
+
return value
|
137 |
+
```
|
138 |
+
""", elem_classes=["md-custom", "ScreenRecorder-user-fn"], header_links=True)
|
139 |
+
|
140 |
+
|
141 |
+
|
142 |
+
|
143 |
+
code_ScreenRecorderData = gr.Markdown("""
|
144 |
+
## `ScreenRecorderData`
|
145 |
+
```python
|
146 |
+
class ScreenRecorderData(GradioModel):
|
147 |
+
video: Optional[FileData] = None
|
148 |
+
duration: Optional[float] = None
|
149 |
+
audio_enabled: bool = True
|
150 |
+
status: Literal["recording", "stopped", "error"] = (
|
151 |
+
"stopped"
|
152 |
+
)
|
153 |
+
|
154 |
+
class Config:
|
155 |
+
json_encoders = {
|
156 |
+
FileData: lambda v: v.model_dump()
|
157 |
+
if v
|
158 |
+
else None
|
159 |
+
}
|
160 |
+
```""", elem_classes=["md-custom", "ScreenRecorderData"], header_links=True)
|
161 |
+
|
162 |
+
demo.load(None, js=r"""function() {
|
163 |
+
const refs = {
|
164 |
+
ScreenRecorderData: [], };
|
165 |
+
const user_fn_refs = {
|
166 |
+
ScreenRecorder: ['ScreenRecorderData'], };
|
167 |
+
requestAnimationFrame(() => {
|
168 |
+
|
169 |
+
Object.entries(user_fn_refs).forEach(([key, refs]) => {
|
170 |
+
if (refs.length > 0) {
|
171 |
+
const el = document.querySelector(`.${key}-user-fn`);
|
172 |
+
if (!el) return;
|
173 |
+
refs.forEach(ref => {
|
174 |
+
el.innerHTML = el.innerHTML.replace(
|
175 |
+
new RegExp("\\b"+ref+"\\b", "g"),
|
176 |
+
`<a href="#h-${ref.toLowerCase()}">${ref}</a>`
|
177 |
+
);
|
178 |
+
})
|
179 |
+
}
|
180 |
+
})
|
181 |
+
|
182 |
+
Object.entries(refs).forEach(([key, refs]) => {
|
183 |
+
if (refs.length > 0) {
|
184 |
+
const el = document.querySelector(`.${key}`);
|
185 |
+
if (!el) return;
|
186 |
+
refs.forEach(ref => {
|
187 |
+
el.innerHTML = el.innerHTML.replace(
|
188 |
+
new RegExp("\\b"+ref+"\\b", "g"),
|
189 |
+
`<a href="#h-${ref.toLowerCase()}">${ref}</a>`
|
190 |
+
);
|
191 |
+
})
|
192 |
+
}
|
193 |
+
})
|
194 |
+
})
|
195 |
+
}
|
196 |
+
|
197 |
+
""")
|
198 |
+
|
199 |
+
demo.launch()
|
src/frontend/Example.svelte
ADDED
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<script lang="ts">
|
2 |
+
export let value: any;
|
3 |
+
|
4 |
+
function formatDuration(duration: number): string {
|
5 |
+
const minutes = Math.floor(duration / 60);
|
6 |
+
const seconds = Math.floor(duration % 60);
|
7 |
+
return `${minutes}:${seconds.toString().padStart(2, '0')}`;
|
8 |
+
}
|
9 |
+
</script>
|
10 |
+
|
11 |
+
<div class="example-container">
|
12 |
+
{#if value && value.video}
|
13 |
+
<div class="video-thumbnail">
|
14 |
+
<video
|
15 |
+
src={value.video.path}
|
16 |
+
controls={false}
|
17 |
+
muted
|
18 |
+
style="width: 100%; height: 60px; object-fit: cover;"
|
19 |
+
>
|
20 |
+
</video>
|
21 |
+
<div class="overlay">
|
22 |
+
<span class="duration">
|
23 |
+
{value.duration ? formatDuration(value.duration) : 'Recording'}
|
24 |
+
</span>
|
25 |
+
<span class="format">
|
26 |
+
{value.video.orig_name?.split('.').pop()?.toUpperCase() || 'VIDEO'}
|
27 |
+
</span>
|
28 |
+
</div>
|
29 |
+
</div>
|
30 |
+
{:else}
|
31 |
+
<div class="placeholder">
|
32 |
+
📹 Screen Recording
|
33 |
+
</div>
|
34 |
+
{/if}
|
35 |
+
</div>
|
36 |
+
|
37 |
+
<style>
|
38 |
+
.example-container {
|
39 |
+
width: 100%;
|
40 |
+
height: 80px;
|
41 |
+
border-radius: 4px;
|
42 |
+
overflow: hidden;
|
43 |
+
position: relative;
|
44 |
+
}
|
45 |
+
|
46 |
+
.video-thumbnail {
|
47 |
+
position: relative;
|
48 |
+
width: 100%;
|
49 |
+
height: 100%;
|
50 |
+
}
|
51 |
+
|
52 |
+
.overlay {
|
53 |
+
position: absolute;
|
54 |
+
bottom: 0;
|
55 |
+
left: 0;
|
56 |
+
right: 0;
|
57 |
+
background: linear-gradient(transparent, rgba(0,0,0,0.7));
|
58 |
+
padding: 4px 8px;
|
59 |
+
display: flex;
|
60 |
+
justify-content: space-between;
|
61 |
+
align-items: flex-end;
|
62 |
+
}
|
63 |
+
|
64 |
+
.duration, .format {
|
65 |
+
color: white;
|
66 |
+
font-size: 10px;
|
67 |
+
font-weight: bold;
|
68 |
+
}
|
69 |
+
|
70 |
+
.placeholder {
|
71 |
+
display: flex;
|
72 |
+
align-items: center;
|
73 |
+
justify-content: center;
|
74 |
+
width: 100%;
|
75 |
+
height: 100%;
|
76 |
+
background: #f0f0f0;
|
77 |
+
color: #666;
|
78 |
+
font-size: 12px;
|
79 |
+
}
|
80 |
+
</style>
|
src/frontend/Index.svelte
ADDED
@@ -0,0 +1,727 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<script lang="ts">
|
2 |
+
import { onMount, onDestroy, createEventDispatcher } from 'svelte';
|
3 |
+
import { Block } from '@gradio/atoms';
|
4 |
+
import { StatusTracker } from '@gradio/statustracker';
|
5 |
+
import type { LoadingStatus } from "@gradio/statustracker";
|
6 |
+
import type { Gradio } from "@gradio/utils";
|
7 |
+
import fixWebmDuration from 'fix-webm-duration';
|
8 |
+
|
9 |
+
// Type definitions
|
10 |
+
interface MediaRecorderOptions {
|
11 |
+
mimeType?: string;
|
12 |
+
audioBitsPerSecond?: number;
|
13 |
+
videoBitsPerSecond?: number;
|
14 |
+
bitsPerSecond?: number;
|
15 |
+
}
|
16 |
+
|
17 |
+
interface MediaTrackConstraints {
|
18 |
+
displaySurface?: 'browser' | 'monitor' | 'window';
|
19 |
+
cursor?: 'always' | 'motion' | 'never';
|
20 |
+
}
|
21 |
+
|
22 |
+
// Type definitions
|
23 |
+
interface RecordingData {
|
24 |
+
video: string;
|
25 |
+
duration: number;
|
26 |
+
audio_enabled?: boolean;
|
27 |
+
status?: string;
|
28 |
+
orig_name?: string;
|
29 |
+
size?: number | null;
|
30 |
+
data?: string; // Base64 encoded data for Gradio
|
31 |
+
name?: string; // Alias for orig_name for Gradio compatibility
|
32 |
+
is_file?: boolean;
|
33 |
+
type?: string; // MIME type of the recording
|
34 |
+
}
|
35 |
+
|
36 |
+
interface Position {
|
37 |
+
x: number;
|
38 |
+
y: number;
|
39 |
+
}
|
40 |
+
|
41 |
+
// Event types for the component
|
42 |
+
type EventMap = {
|
43 |
+
'error': { message: string; error: string };
|
44 |
+
'recording-started': void;
|
45 |
+
'recording-stopped': RecordingData;
|
46 |
+
'record_stop': RecordingData;
|
47 |
+
'change': RecordingData;
|
48 |
+
'webcam-error': { message: string; error: string };
|
49 |
+
};
|
50 |
+
|
51 |
+
// Component props with proper types and defaults
|
52 |
+
export let gradio: Gradio<any>;
|
53 |
+
export let value: Partial<RecordingData> | null = null;
|
54 |
+
export const elem_id = ''; // Marked as const since it's not modified
|
55 |
+
export let elem_classes: string[] = [];
|
56 |
+
export let scale: number | null = null;
|
57 |
+
export let min_width: number | null = null;
|
58 |
+
export let visible = true;
|
59 |
+
export let interactive = true;
|
60 |
+
export let loading_status: LoadingStatus | null = null;
|
61 |
+
export let audio_enabled = false;
|
62 |
+
export let webcam_overlay = false;
|
63 |
+
export let webcam_position: 'top-left' | 'top-right' | 'bottom-left' | 'bottom-right' = 'bottom-right';
|
64 |
+
export let recording_format: 'webm' | 'mp4' | 'gif' = 'webm';
|
65 |
+
export let max_duration: number | null = null;
|
66 |
+
|
67 |
+
// Computed styles for the container
|
68 |
+
let containerStyle = '';
|
69 |
+
|
70 |
+
// Component methods interface
|
71 |
+
interface ComponentMethods {
|
72 |
+
startRecording: () => Promise<void>;
|
73 |
+
stopRecording: () => void;
|
74 |
+
togglePause: () => void;
|
75 |
+
cleanup: () => void;
|
76 |
+
}
|
77 |
+
|
78 |
+
// Component state with explicit types and initial values
|
79 |
+
let isPaused = false;
|
80 |
+
let isRecording = false;
|
81 |
+
let recordingTime = 0;
|
82 |
+
let recordingTimer: number | null = null;
|
83 |
+
let recordedChunks: Blob[] = [];
|
84 |
+
|
85 |
+
// Media streams and elements
|
86 |
+
let screenStream: MediaStream | null = null;
|
87 |
+
let webcamStream: MediaStream | null = null;
|
88 |
+
let combinedStream: MediaStream | null = null;
|
89 |
+
let canvas: HTMLCanvasElement | null = null;
|
90 |
+
let ctx: CanvasRenderingContext2D | null = null;
|
91 |
+
let animationFrameId: number | null = null;
|
92 |
+
let previewVideo: HTMLVideoElement | null = null;
|
93 |
+
let webcamVideo: HTMLVideoElement | null = null;
|
94 |
+
let recordingStartTime = 0;
|
95 |
+
let mediaRecorder: MediaRecorder | null = null;
|
96 |
+
|
97 |
+
// Internal video elements
|
98 |
+
let webcamVideoInternal: HTMLVideoElement | null = null;
|
99 |
+
let screenVideoInternal: HTMLVideoElement | null = null;
|
100 |
+
|
101 |
+
// Bind canvas element
|
102 |
+
function bindCanvas(node: HTMLCanvasElement) {
|
103 |
+
canvas = node;
|
104 |
+
if (canvas) {
|
105 |
+
const context = canvas.getContext('2d', { willReadFrequently: true });
|
106 |
+
if (context) {
|
107 |
+
ctx = context;
|
108 |
+
// Set canvas dimensions with null checks
|
109 |
+
const width = canvas.offsetWidth;
|
110 |
+
const height = canvas.offsetHeight;
|
111 |
+
if (width && height) {
|
112 |
+
canvas.width = width;
|
113 |
+
canvas.height = height;
|
114 |
+
}
|
115 |
+
}
|
116 |
+
}
|
117 |
+
return {
|
118 |
+
destroy() {
|
119 |
+
canvas = null;
|
120 |
+
ctx = null;
|
121 |
+
}
|
122 |
+
};
|
123 |
+
}
|
124 |
+
|
125 |
+
// Canvas binding is now handled by the bindCanvas function
|
126 |
+
|
127 |
+
// Configuration
|
128 |
+
const webcam_size = 200;
|
129 |
+
const webcam_border = 10;
|
130 |
+
const webcam_radius = '50%';
|
131 |
+
|
132 |
+
// Ensure max_duration has a default value if null
|
133 |
+
$: effectiveMaxDuration = max_duration ?? 0;
|
134 |
+
|
135 |
+
// Computed styles for the container
|
136 |
+
$: containerStyle = [
|
137 |
+
scale !== null ? `--scale: ${scale};` : '',
|
138 |
+
min_width !== null ? `min-width: ${min_width}px;` : ''
|
139 |
+
].filter(Boolean).join(' ');
|
140 |
+
|
141 |
+
onDestroy(() => {
|
142 |
+
if (isRecording) {
|
143 |
+
componentMethods.stopRecording();
|
144 |
+
}
|
145 |
+
componentMethods.cleanup();
|
146 |
+
if (animationFrameId) {
|
147 |
+
cancelAnimationFrame(animationFrameId);
|
148 |
+
animationFrameId = null;
|
149 |
+
}
|
150 |
+
});
|
151 |
+
|
152 |
+
// Component state and props are already declared above
|
153 |
+
|
154 |
+
// Event dispatcher with proper typing
|
155 |
+
const dispatch = createEventDispatcher<EventMap>();
|
156 |
+
|
157 |
+
// Type guard for error handling
|
158 |
+
function isErrorWithMessage(error: unknown): error is Error {
|
159 |
+
return error instanceof Error;
|
160 |
+
}
|
161 |
+
|
162 |
+
// Component methods implementation
|
163 |
+
const componentMethods: ComponentMethods = {
|
164 |
+
startRecording: async (): Promise<void> => {
|
165 |
+
if (isRecording) return;
|
166 |
+
isRecording = true;
|
167 |
+
recordedChunks = [];
|
168 |
+
recordingTime = 0;
|
169 |
+
|
170 |
+
try {
|
171 |
+
// Composite screen and optional webcam overlay via hidden canvas
|
172 |
+
const screenStreamCapture = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: false });
|
173 |
+
screenStream = screenStreamCapture;
|
174 |
+
// Assign to hidden video for composition
|
175 |
+
if (screenVideoInternal) {
|
176 |
+
screenVideoInternal.srcObject = screenStreamCapture;
|
177 |
+
await screenVideoInternal.play().catch(() => {});
|
178 |
+
}
|
179 |
+
let captureStream: MediaStream;
|
180 |
+
if (webcam_overlay && webcamVideoInternal && canvas && ctx) {
|
181 |
+
try {
|
182 |
+
webcamStream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
|
183 |
+
webcamVideoInternal.srcObject = webcamStream;
|
184 |
+
await webcamVideoInternal.play().catch(() => {});
|
185 |
+
// Resize canvas to match screen video
|
186 |
+
canvas.width = screenVideoInternal!.videoWidth;
|
187 |
+
canvas.height = screenVideoInternal!.videoHeight;
|
188 |
+
const overlaySize = Math.min(canvas.width, canvas.height) / 4;
|
189 |
+
const posMap: Record<string, [number, number]> = {
|
190 |
+
'top-left': [10, 10],
|
191 |
+
'top-right': [canvas.width - overlaySize - 10, 10],
|
192 |
+
'bottom-left': [10, canvas.height - overlaySize - 10],
|
193 |
+
'bottom-right': [canvas.width - overlaySize - 10, canvas.height - overlaySize - 10]
|
194 |
+
};
|
195 |
+
const [ox, oy] = posMap[webcam_position];
|
196 |
+
function draw() {
|
197 |
+
ctx!.drawImage(screenVideoInternal!, 0, 0, canvas.width, canvas.height);
|
198 |
+
ctx!.drawImage(webcamVideoInternal!, ox, oy, overlaySize, overlaySize);
|
199 |
+
animationFrameId = requestAnimationFrame(draw);
|
200 |
+
}
|
201 |
+
draw();
|
202 |
+
const canvasStream = canvas.captureStream(30);
|
203 |
+
const audioTracks = audio_enabled
|
204 |
+
? (await navigator.mediaDevices.getUserMedia({ audio: true })).getAudioTracks()
|
205 |
+
: screenStreamCapture.getAudioTracks();
|
206 |
+
combinedStream = new MediaStream([...canvasStream.getVideoTracks(), ...audioTracks]);
|
207 |
+
captureStream = combinedStream;
|
208 |
+
} catch (err) {
|
209 |
+
console.warn('Webcam overlay failed, falling back to screen only', err);
|
210 |
+
captureStream = screenStreamCapture;
|
211 |
+
}
|
212 |
+
} else {
|
213 |
+
// No overlay: combine audio if enabled with screen
|
214 |
+
const audioTracks = audio_enabled
|
215 |
+
? (await navigator.mediaDevices.getUserMedia({ audio: true })).getAudioTracks()
|
216 |
+
: screenStreamCapture.getAudioTracks();
|
217 |
+
combinedStream = new MediaStream([...screenStreamCapture.getVideoTracks(), ...audioTracks]);
|
218 |
+
captureStream = combinedStream;
|
219 |
+
}
|
220 |
+
|
221 |
+
// Handle track ended event
|
222 |
+
screenStreamCapture.getVideoTracks()[0].onended = () => {
|
223 |
+
if (isRecording) {
|
224 |
+
componentMethods.stopRecording();
|
225 |
+
}
|
226 |
+
};
|
227 |
+
|
228 |
+
// Start recording
|
229 |
+
const options: MediaRecorderOptions = {
|
230 |
+
mimeType: recording_format === 'webm' ? 'video/webm;codecs=vp9' : 'video/mp4'
|
231 |
+
};
|
232 |
+
|
233 |
+
mediaRecorder = new MediaRecorder(captureStream, options);
|
234 |
+
mediaRecorder.ondataavailable = handleDataAvailable;
|
235 |
+
mediaRecorder.onstop = handleRecordingStop;
|
236 |
+
mediaRecorder.start();
|
237 |
+
|
238 |
+
recordingStartTime = Date.now();
|
239 |
+
updateRecordingTime();
|
240 |
+
|
241 |
+
dispatch('recording-started');
|
242 |
+
} catch (error) {
|
243 |
+
isRecording = false;
|
244 |
+
if (isErrorWithMessage(error)) {
|
245 |
+
dispatch('error', {
|
246 |
+
message: 'Failed to start recording',
|
247 |
+
error: error.message
|
248 |
+
});
|
249 |
+
}
|
250 |
+
}
|
251 |
+
},
|
252 |
+
|
253 |
+
stopRecording: (): void => {
|
254 |
+
if (!isRecording || !mediaRecorder) return;
|
255 |
+
|
256 |
+
try {
|
257 |
+
mediaRecorder.stop();
|
258 |
+
isRecording = false;
|
259 |
+
|
260 |
+
// Stop all tracks
|
261 |
+
[screenStream, webcamStream, combinedStream].forEach(stream => {
|
262 |
+
if (stream) {
|
263 |
+
stream.getTracks().forEach(track => track.stop());
|
264 |
+
}
|
265 |
+
});
|
266 |
+
|
267 |
+
if (recordingTimer) {
|
268 |
+
clearTimeout(recordingTimer);
|
269 |
+
recordingTimer = null;
|
270 |
+
}
|
271 |
+
|
272 |
+
const recordingData: RecordingData = {
|
273 |
+
video: '',
|
274 |
+
duration: recordingTime / 1000,
|
275 |
+
audio_enabled: audio_enabled,
|
276 |
+
status: 'completed'
|
277 |
+
};
|
278 |
+
|
279 |
+
dispatch('recording-stopped', recordingData);
|
280 |
+
dispatch('record_stop', recordingData);
|
281 |
+
dispatch('change', recordingData);
|
282 |
+
} catch (error) {
|
283 |
+
isRecording = false;
|
284 |
+
if (isErrorWithMessage(error)) {
|
285 |
+
dispatch('error', {
|
286 |
+
message: 'Error stopping recording',
|
287 |
+
error: error.message
|
288 |
+
});
|
289 |
+
}
|
290 |
+
}
|
291 |
+
},
|
292 |
+
|
293 |
+
togglePause: (): void => {
|
294 |
+
if (!mediaRecorder) return;
|
295 |
+
|
296 |
+
isPaused = !isPaused;
|
297 |
+
|
298 |
+
if (isPaused) {
|
299 |
+
mediaRecorder.pause();
|
300 |
+
if (recordingTimer) {
|
301 |
+
clearTimeout(recordingTimer);
|
302 |
+
recordingTimer = null;
|
303 |
+
}
|
304 |
+
} else {
|
305 |
+
mediaRecorder.resume();
|
306 |
+
updateRecordingTime();
|
307 |
+
}
|
308 |
+
if (isPaused) {
|
309 |
+
// Pause logic
|
310 |
+
} else {
|
311 |
+
// Resume logic
|
312 |
+
}
|
313 |
+
},
|
314 |
+
|
315 |
+
cleanup: (): void => {
|
316 |
+
// Stop all media streams
|
317 |
+
[screenStream, webcamStream, combinedStream].forEach(stream => {
|
318 |
+
if (stream) {
|
319 |
+
stream.getTracks().forEach(track => track.stop());
|
320 |
+
}
|
321 |
+
});
|
322 |
+
|
323 |
+
// Clear media recorder
|
324 |
+
if (mediaRecorder) {
|
325 |
+
if (mediaRecorder.state !== 'inactive') {
|
326 |
+
mediaRecorder.stop();
|
327 |
+
}
|
328 |
+
mediaRecorder = null;
|
329 |
+
}
|
330 |
+
|
331 |
+
// Clear canvas
|
332 |
+
if (ctx) {
|
333 |
+
ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);
|
334 |
+
}
|
335 |
+
|
336 |
+
// Reset state
|
337 |
+
isRecording = false;
|
338 |
+
isPaused = false;
|
339 |
+
recordingTime = 0;
|
340 |
+
recordedChunks = [];
|
341 |
+
|
342 |
+
// Clear timers
|
343 |
+
if (recordingTimer) {
|
344 |
+
clearInterval(recordingTimer);
|
345 |
+
recordingTimer = null;
|
346 |
+
}
|
347 |
+
|
348 |
+
if (animationFrameId) {
|
349 |
+
cancelAnimationFrame(animationFrameId);
|
350 |
+
animationFrameId = null;
|
351 |
+
}
|
352 |
+
}
|
353 |
+
};
|
354 |
+
|
355 |
+
// Handle data available event
|
356 |
+
function handleDataAvailable(event: BlobEvent): void {
|
357 |
+
if (event.data && event.data.size > 0) {
|
358 |
+
recordedChunks.push(event.data);
|
359 |
+
}
|
360 |
+
}
|
361 |
+
|
362 |
+
// Handle recording stop
|
363 |
+
function handleRecordingStop(): void {
|
364 |
+
if (recordedChunks.length === 0) {
|
365 |
+
console.warn('No recording data available');
|
366 |
+
return;
|
367 |
+
}
|
368 |
+
|
369 |
+
const mimeType = recording_format === 'webm' ? 'video/webm' : 'video/mp4';
|
370 |
+
const blob = new Blob(recordedChunks, { type: mimeType });
|
371 |
+
const url = URL.createObjectURL(blob);
|
372 |
+
|
373 |
+
console.log('Recording stopped. Blob size:', blob.size, 'bytes');
|
374 |
+
|
375 |
+
// Create a file reader to read the blob as base64
|
376 |
+
const reader = new FileReader();
|
377 |
+
reader.onload = (e) => {
|
378 |
+
const base64data = e.target?.result as string;
|
379 |
+
// Extract the base64 data (remove the data URL prefix)
|
380 |
+
const base64Content = base64data.split(',')[1];
|
381 |
+
const fileName = `recording_${Date.now()}.${recording_format}`;
|
382 |
+
|
383 |
+
// Dispatch event with recording data
|
384 |
+
const recordingData: RecordingData = {
|
385 |
+
video: url,
|
386 |
+
duration: recordingTime,
|
387 |
+
audio_enabled: audio_enabled,
|
388 |
+
status: 'completed',
|
389 |
+
size: blob.size > 0 ? blob.size : undefined,
|
390 |
+
orig_name: fileName,
|
391 |
+
name: fileName, // Alias for Gradio compatibility
|
392 |
+
is_file: true,
|
393 |
+
type: mimeType,
|
394 |
+
data: base64Content
|
395 |
+
};
|
396 |
+
|
397 |
+
console.log('Dispatching recording-stopped event');
|
398 |
+
dispatch('recording-stopped', recordingData);
|
399 |
+
dispatch('record_stop', recordingData);
|
400 |
+
dispatch('change', recordingData);
|
401 |
+
|
402 |
+
// Update the value prop to trigger re-render
|
403 |
+
value = { ...value, ...recordingData };
|
404 |
+
};
|
405 |
+
|
406 |
+
reader.onerror = (error) => {
|
407 |
+
console.error('Error reading blob:', error);
|
408 |
+
dispatch('error', {
|
409 |
+
message: 'Failed to process recording',
|
410 |
+
error: 'Could not read recording data'
|
411 |
+
});
|
412 |
+
};
|
413 |
+
|
414 |
+
// Read the blob as data URL
|
415 |
+
reader.readAsDataURL(blob);
|
416 |
+
}
|
417 |
+
|
418 |
+
// Update recording time
|
419 |
+
function updateRecordingTime(): void {
|
420 |
+
if (!isRecording) return;
|
421 |
+
|
422 |
+
recordingTime = Math.floor((Date.now() - recordingStartTime) / 1000);
|
423 |
+
|
424 |
+
// Check if max duration has been reached
|
425 |
+
if (max_duration !== null && max_duration > 0 && recordingTime >= max_duration) {
|
426 |
+
console.log('Max duration reached, stopping');
|
427 |
+
componentMethods.stopRecording();
|
428 |
+
return;
|
429 |
+
}
|
430 |
+
|
431 |
+
// Schedule the next update
|
432 |
+
recordingTimer = window.setTimeout(updateRecordingTime, 1000);
|
433 |
+
}
|
434 |
+
|
435 |
+
function stopTimer(): void {
|
436 |
+
if (recordingTimer) {
|
437 |
+
clearTimeout(recordingTimer);
|
438 |
+
recordingTimer = null;
|
439 |
+
}
|
440 |
+
}
|
441 |
+
|
442 |
+
// Format time as MM:SS
|
443 |
+
function formatTime(seconds: number): string {
|
444 |
+
const mins = Math.floor(seconds / 60);
|
445 |
+
const secs = Math.floor(seconds % 60);
|
446 |
+
return `${mins.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`;
|
447 |
+
}
|
448 |
+
|
449 |
+
// Format file size in human-readable format
|
450 |
+
function formatFileSize(bytes: number | string | null | undefined): string {
|
451 |
+
if (bytes === null || bytes === undefined) return '0 B';
|
452 |
+
const numBytes = Number(bytes);
|
453 |
+
if (isNaN(numBytes) || numBytes === 0) return '0 B';
|
454 |
+
const k = 1024;
|
455 |
+
const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
|
456 |
+
const i = Math.floor(Math.log(numBytes) / Math.log(k));
|
457 |
+
return parseFloat((numBytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
|
458 |
+
}
|
459 |
+
</script>
|
460 |
+
|
461 |
+
<div class="screen-recorder-container {!visible ? 'invisible' : ''} {elem_classes.join(' ')}" style="{containerStyle}">
|
462 |
+
{#if loading_status}
|
463 |
+
<StatusTracker
|
464 |
+
autoscroll={gradio.autoscroll}
|
465 |
+
i18n={gradio.i18n}
|
466 |
+
{...loading_status}
|
467 |
+
/>
|
468 |
+
{/if}
|
469 |
+
|
470 |
+
<div class="screen-recorder">
|
471 |
+
<div class="controls">
|
472 |
+
{#if !isRecording}
|
473 |
+
<button
|
474 |
+
class="record-btn start"
|
475 |
+
on:click={componentMethods.startRecording}
|
476 |
+
disabled={!interactive}
|
477 |
+
>
|
478 |
+
<span class="recording-icon">●</span> Start Recording
|
479 |
+
</button>
|
480 |
+
{:else}
|
481 |
+
<button
|
482 |
+
class="record-btn stop"
|
483 |
+
on:click={componentMethods.stopRecording}
|
484 |
+
>
|
485 |
+
<span class="stop-icon">■</span> Stop Recording
|
486 |
+
</button>
|
487 |
+
<span class="recording-time">
|
488 |
+
{formatTime(recordingTime)}
|
489 |
+
</span>
|
490 |
+
{#if max_duration}
|
491 |
+
<span class="max-duration">/ {formatTime(max_duration)}</span>
|
492 |
+
{/if}
|
493 |
+
{/if}
|
494 |
+
</div>
|
495 |
+
|
496 |
+
<!-- Live Preview - Always show when recording -->
|
497 |
+
{#if isRecording}
|
498 |
+
<div class="preview-container">
|
499 |
+
<video
|
500 |
+
bind:this={previewVideo}
|
501 |
+
class="preview-video"
|
502 |
+
autoplay
|
503 |
+
muted
|
504 |
+
playsinline
|
505 |
+
aria-label="Live preview"
|
506 |
+
on:loadedmetadata={() => {
|
507 |
+
if (previewVideo) {
|
508 |
+
previewVideo.play().catch(console.warn);
|
509 |
+
}
|
510 |
+
}}
|
511 |
+
>
|
512 |
+
<track kind="captions" />
|
513 |
+
</video>
|
514 |
+
{#if webcam_overlay}
|
515 |
+
<video
|
516 |
+
bind:this={webcamVideo}
|
517 |
+
class="webcam-overlay {webcam_position}"
|
518 |
+
style="width: 200px; height: 200px;"
|
519 |
+
autoplay
|
520 |
+
muted
|
521 |
+
playsinline
|
522 |
+
aria-label="Webcam overlay"
|
523 |
+
>
|
524 |
+
<track kind="captions" />
|
525 |
+
</video>
|
526 |
+
{/if}
|
527 |
+
<div class="recording-indicator">
|
528 |
+
<span class="pulse">●</span> RECORDING
|
529 |
+
</div>
|
530 |
+
</div>
|
531 |
+
{/if}
|
532 |
+
|
533 |
+
{#if value?.video}
|
534 |
+
<div class="recording-preview" style="position: relative;">
|
535 |
+
{#if audio_enabled}
|
536 |
+
<div class="speaker-overlay">🔊</div>
|
537 |
+
{/if}
|
538 |
+
<video
|
539 |
+
src={value.video}
|
540 |
+
controls
|
541 |
+
class="preview-video"
|
542 |
+
aria-label="Recording preview"
|
543 |
+
on:loadedmetadata
|
544 |
+
on:loadeddata
|
545 |
+
on:error={(e) => console.error('Video error:', e)}
|
546 |
+
>
|
547 |
+
<track kind="captions" />
|
548 |
+
</video>
|
549 |
+
<div class="recording-info">
|
550 |
+
<div>Duration: {value.duration ? value.duration.toFixed(1) : '0.0'}s</div>
|
551 |
+
{#if value.size}
|
552 |
+
<div>Size: {formatFileSize(value.size)}</div>
|
553 |
+
{/if}
|
554 |
+
</div>
|
555 |
+
</div>
|
556 |
+
{/if}
|
557 |
+
|
558 |
+
<!-- Configuration Display -->
|
559 |
+
<div class="config-info">
|
560 |
+
<span>Audio: {audio_enabled ? '🔊' : '🔇'}</span>
|
561 |
+
<span>Format: {recording_format.toUpperCase()}</span>
|
562 |
+
{#if max_duration}
|
563 |
+
<span>Max: {formatTime(max_duration)}</span>
|
564 |
+
{/if}
|
565 |
+
</div>
|
566 |
+
|
567 |
+
<!-- Debug info -->
|
568 |
+
{#if value}
|
569 |
+
<div class="debug-info">
|
570 |
+
<small>Last recording: {value.orig_name} ({Math.round(value.size / 1024)}KB)</small>
|
571 |
+
</div>
|
572 |
+
{/if}
|
573 |
+
</div>
|
574 |
+
<video bind:this={screenVideoInternal} hidden muted playsinline style="display:none"></video>
|
575 |
+
{#if webcam_overlay}
|
576 |
+
<video bind:this={webcamVideoInternal} hidden muted playsinline style="display:none"></video>
|
577 |
+
{/if}
|
578 |
+
<canvas bind:this={canvas} use:bindCanvas hidden style="display:none"></canvas>
|
579 |
+
</div>
|
580 |
+
|
581 |
+
<style>
|
582 |
+
.screen-recorder-container {
|
583 |
+
display: block;
|
584 |
+
width: 100%;
|
585 |
+
box-sizing: border-box;
|
586 |
+
}
|
587 |
+
|
588 |
+
.screen-recorder-container.invisible {
|
589 |
+
display: none;
|
590 |
+
}
|
591 |
+
|
592 |
+
.screen-recorder {
|
593 |
+
border: 2px solid #e0e0e0;
|
594 |
+
border-radius: 8px;
|
595 |
+
padding: 16px;
|
596 |
+
background: #f9f9f9;
|
597 |
+
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
|
598 |
+
}
|
599 |
+
|
600 |
+
.controls {
|
601 |
+
display: flex;
|
602 |
+
align-items: center;
|
603 |
+
gap: 12px;
|
604 |
+
margin-bottom: 12px;
|
605 |
+
flex-wrap: wrap;
|
606 |
+
}
|
607 |
+
|
608 |
+
.record-btn {
|
609 |
+
padding: 10px 20px;
|
610 |
+
border: none;
|
611 |
+
border-radius: 6px;
|
612 |
+
font-size: 14px;
|
613 |
+
font-weight: 500;
|
614 |
+
cursor: pointer;
|
615 |
+
transition: all 0.2s;
|
616 |
+
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
|
617 |
+
}
|
618 |
+
|
619 |
+
.record-btn.start {
|
620 |
+
background: #4CAF50;
|
621 |
+
color: white;
|
622 |
+
}
|
623 |
+
|
624 |
+
.record-btn.start:hover {
|
625 |
+
background: #45a049;
|
626 |
+
}
|
627 |
+
|
628 |
+
.record-btn.stop {
|
629 |
+
background: #f44336;
|
630 |
+
color: white;
|
631 |
+
}
|
632 |
+
|
633 |
+
.record-btn.stop:hover {
|
634 |
+
background: #da190b;
|
635 |
+
}
|
636 |
+
|
637 |
+
.record-btn:disabled {
|
638 |
+
opacity: 0.5;
|
639 |
+
cursor: not-allowed;
|
640 |
+
}
|
641 |
+
|
642 |
+
.recording-time {
|
643 |
+
font-family: 'Courier New', monospace;
|
644 |
+
font-size: 18px;
|
645 |
+
font-weight: bold;
|
646 |
+
color: #f44336;
|
647 |
+
}
|
648 |
+
|
649 |
+
.max-duration {
|
650 |
+
font-family: 'Courier New', monospace;
|
651 |
+
font-size: 14px;
|
652 |
+
color: #666;
|
653 |
+
}
|
654 |
+
|
655 |
+
|
656 |
+
.preview-container {
|
657 |
+
position: relative;
|
658 |
+
margin: 12px 0;
|
659 |
+
border-radius: 6px;
|
660 |
+
overflow: hidden;
|
661 |
+
background: black;
|
662 |
+
min-height: 200px;
|
663 |
+
}
|
664 |
+
|
665 |
+
.preview-video {
|
666 |
+
width: 100%;
|
667 |
+
max-height: 400px;
|
668 |
+
display: block;
|
669 |
+
object-fit: contain;
|
670 |
+
}
|
671 |
+
|
672 |
+
|
673 |
+
.recording-indicator {
|
674 |
+
position: absolute;
|
675 |
+
top: 10px;
|
676 |
+
left: 10px;
|
677 |
+
background: rgba(244, 67, 54, 0.9);
|
678 |
+
color: white;
|
679 |
+
padding: 6px 12px;
|
680 |
+
border-radius: 4px;
|
681 |
+
font-size: 12px;
|
682 |
+
font-weight: bold;
|
683 |
+
animation: pulse 1s infinite;
|
684 |
+
box-shadow: 0 2px 4px rgba(0,0,0,0.3);
|
685 |
+
}
|
686 |
+
|
687 |
+
@keyframes pulse {
|
688 |
+
0%, 100% { opacity: 1; }
|
689 |
+
50% { opacity: 0.7; }
|
690 |
+
}
|
691 |
+
|
692 |
+
.config-info {
|
693 |
+
display: flex;
|
694 |
+
gap: 8px;
|
695 |
+
font-size: 12px;
|
696 |
+
color: #666;
|
697 |
+
margin-top: 8px;
|
698 |
+
flex-wrap: wrap;
|
699 |
+
}
|
700 |
+
|
701 |
+
.config-info span {
|
702 |
+
padding: 4px 8px;
|
703 |
+
background: #e8e8e8;
|
704 |
+
border-radius: 4px;
|
705 |
+
border: 1px solid #ddd;
|
706 |
+
}
|
707 |
+
|
708 |
+
.debug-info {
|
709 |
+
margin-top: 8px;
|
710 |
+
padding: 8px;
|
711 |
+
background: #e8f5e8;
|
712 |
+
border-radius: 4px;
|
713 |
+
border: 1px solid #c8e6c8;
|
714 |
+
}
|
715 |
+
|
716 |
+
.speaker-overlay {
|
717 |
+
position: absolute;
|
718 |
+
top: 8px;
|
719 |
+
right: 8px;
|
720 |
+
background: rgba(0,0,0,0.5);
|
721 |
+
color: white;
|
722 |
+
padding: 4px;
|
723 |
+
border-radius: 4px;
|
724 |
+
font-size: 14px;
|
725 |
+
pointer-events: none;
|
726 |
+
}
|
727 |
+
</style>
|
src/frontend/gradio.config.js
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
export default {
|
2 |
+
plugins: [],
|
3 |
+
svelte: {
|
4 |
+
preprocess: [],
|
5 |
+
},
|
6 |
+
build: {
|
7 |
+
target: "modules",
|
8 |
+
},
|
9 |
+
};
|
src/frontend/package-lock.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
src/frontend/package.json
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"name": "gradio_screenrecorder",
|
3 |
+
"version": "0.3.21",
|
4 |
+
"description": "Gradio Screen Recorder Component",
|
5 |
+
"type": "module",
|
6 |
+
"author": "",
|
7 |
+
"license": "ISC",
|
8 |
+
"private": false,
|
9 |
+
"main_changeset": true,
|
10 |
+
"scripts": {
|
11 |
+
"build": "vite build",
|
12 |
+
"dev": "vite",
|
13 |
+
"preview": "vite preview"
|
14 |
+
},
|
15 |
+
"exports": {
|
16 |
+
".": {
|
17 |
+
"gradio": "./Index.svelte",
|
18 |
+
"svelte": "./dist/Index.svelte",
|
19 |
+
"types": "./dist/Index.svelte.d.ts"
|
20 |
+
},
|
21 |
+
"./example": {
|
22 |
+
"gradio": "./Example.svelte",
|
23 |
+
"svelte": "./dist/Example.svelte",
|
24 |
+
"types": "./dist/Example.svelte.d.ts"
|
25 |
+
},
|
26 |
+
"./package.json": "./package.json"
|
27 |
+
},
|
28 |
+
"dependencies": {
|
29 |
+
"@gradio/atoms": "0.16.1",
|
30 |
+
"@gradio/icons": "0.12.0",
|
31 |
+
"@gradio/statustracker": "0.10.11",
|
32 |
+
"@gradio/utils": "0.10.2",
|
33 |
+
"fix-webm-duration": "^1.0.6",
|
34 |
+
"svelte": "^4.2.7"
|
35 |
+
},
|
36 |
+
"devDependencies": {
|
37 |
+
"@gradio/preview": "0.13.0",
|
38 |
+
"@sveltejs/vite-plugin-svelte": "^3.0.0",
|
39 |
+
"@tsconfig/svelte": "^5.0.4",
|
40 |
+
"svelte-preprocess": "^6.0.3",
|
41 |
+
"typescript": "^5.8.3",
|
42 |
+
"vite": "^5.0.0",
|
43 |
+
"vite-plugin-svelte": "^3.0.0"
|
44 |
+
},
|
45 |
+
"peerDependencies": {
|
46 |
+
"svelte": "^4.0.0"
|
47 |
+
},
|
48 |
+
"repository": {
|
49 |
+
"type": "git",
|
50 |
+
"url": "git+https://github.com/your-username/your-repo.git",
|
51 |
+
"directory": "screenrecorder"
|
52 |
+
},
|
53 |
+
"files": [
|
54 |
+
"dist"
|
55 |
+
]
|
56 |
+
}
|
src/frontend/tsconfig.json
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"compilerOptions": {
|
3 |
+
"target": "ESNext",
|
4 |
+
"module": "ESNext",
|
5 |
+
"moduleResolution": "node",
|
6 |
+
"esModuleInterop": true,
|
7 |
+
"resolveJsonModule": true,
|
8 |
+
"strict": true,
|
9 |
+
"skipLibCheck": true,
|
10 |
+
"forceConsistentCasingInFileNames": true,
|
11 |
+
"isolatedModules": true,
|
12 |
+
"verbatimModuleSyntax": true,
|
13 |
+
"jsx": "preserve",
|
14 |
+
"lib": ["DOM", "DOM.Iterable", "ESNext"],
|
15 |
+
"types": ["svelte"],
|
16 |
+
"paths": {
|
17 |
+
"@/*": ["./*"]
|
18 |
+
}
|
19 |
+
},
|
20 |
+
"include": ["**/*.d.ts", "**/*.ts", "**/*.js", "**/*.svelte"],
|
21 |
+
"exclude": ["node_modules", "**/node_modules/*"]
|
22 |
+
}
|
src/frontend/types.d.ts
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Extend the Window interface
|
2 |
+
declare global {
|
3 |
+
interface Window {
|
4 |
+
requestAnimationFrame(callback: FrameRequestCallback): number;
|
5 |
+
cancelAnimationFrame(handle: number): void;
|
6 |
+
MediaRecorder: typeof MediaRecorder;
|
7 |
+
}
|
8 |
+
|
9 |
+
interface MediaRecorderOptions {
|
10 |
+
mimeType?: string;
|
11 |
+
audioBitsPerSecond?: number;
|
12 |
+
videoBitsPerSecond?: number;
|
13 |
+
bitsPerSecond?: number;
|
14 |
+
}
|
15 |
+
|
16 |
+
interface MediaTrackConstraints {
|
17 |
+
displaySurface?: 'browser' | 'monitor' | 'window';
|
18 |
+
cursor?: 'always' | 'motion' | 'never';
|
19 |
+
}
|
20 |
+
}
|
21 |
+
|
22 |
+
// Export the types
|
23 |
+
export {};
|
src/frontend/vite.config.js
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import { defineConfig } from 'vite';
|
2 |
+
import { svelte } from '@sveltejs/vite-plugin-svelte';
|
3 |
+
|
4 |
+
export default defineConfig({
|
5 |
+
plugins: [svelte()],
|
6 |
+
});
|
src/manifest.json
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"name": "Screen Recorder Component",
|
3 |
+
"short_name": "ScreenRecorder",
|
4 |
+
"description": "Gradio Screen Recording Component",
|
5 |
+
"start_url": "/",
|
6 |
+
"display": "standalone"
|
7 |
+
}
|
src/pyproject.toml
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[build-system]
|
2 |
+
requires = [
|
3 |
+
"hatchling",
|
4 |
+
"hatch-requirements-txt",
|
5 |
+
"hatch-fancy-pypi-readme>=22.5.0",
|
6 |
+
]
|
7 |
+
build-backend = "hatchling.build"
|
8 |
+
|
9 |
+
[project]
|
10 |
+
name = "gradio_screenrecorder"
|
11 |
+
version = "0.0.1"
|
12 |
+
description = "Screen Recorder Gradio Custom Component"
|
13 |
+
readme = "README.md"
|
14 |
+
license = "apache-2.0"
|
15 |
+
requires-python = ">=3.10"
|
16 |
+
authors = [{ name = "YOUR NAME", email = "[email protected]" }]
|
17 |
+
keywords = ["gradio-custom-component", "custom-component-track", "gradio", "screen-recorder"]
|
18 |
+
# Add dependencies here
|
19 |
+
dependencies = ["gradio>=4.0,<6.0"]
|
20 |
+
classifiers = [
|
21 |
+
'Development Status :: 3 - Alpha',
|
22 |
+
'Operating System :: OS Independent',
|
23 |
+
'Programming Language :: Python :: 3',
|
24 |
+
'Programming Language :: Python :: 3 :: Only',
|
25 |
+
'Programming Language :: Python :: 3.8',
|
26 |
+
'Programming Language :: Python :: 3.9',
|
27 |
+
'Programming Language :: Python :: 3.10',
|
28 |
+
'Programming Language :: Python :: 3.11',
|
29 |
+
'Topic :: Scientific/Engineering',
|
30 |
+
'Topic :: Scientific/Engineering :: Artificial Intelligence',
|
31 |
+
'Topic :: Scientific/Engineering :: Visualization',
|
32 |
+
]
|
33 |
+
|
34 |
+
# The repository and space URLs are optional, but recommended.
|
35 |
+
# Adding a repository URL will create a badge in the auto-generated README that links to the repository.
|
36 |
+
# Adding a space URL will create a badge in the auto-generated README that links to the space.
|
37 |
+
# This will make it easy for people to find your deployed demo or source code when they
|
38 |
+
# encounter your project in the wild.
|
39 |
+
|
40 |
+
# [project.urls]
|
41 |
+
# repository = "your github repository"
|
42 |
+
# space = "your space url"
|
43 |
+
|
44 |
+
[project.optional-dependencies]
|
45 |
+
dev = ["build", "twine"]
|
46 |
+
|
47 |
+
[tool.hatch.build]
|
48 |
+
artifacts = ["/backend/gradio_screenrecorder/templates", "*.pyi"]
|
49 |
+
|
50 |
+
[tool.hatch.build.targets.wheel]
|
51 |
+
packages = ["/backend/gradio_screenrecorder"]
|
src/requirements.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
|
2 |
+
gradio
|