import streamlit as st
import streamlit.components.v1 as components
import cv2
import numpy as np
from PIL import Image
import base64
import io
# Function to process the image
def process_frame(frame):
    # Convert the frame to a grayscale image (as an example)
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return gray_frame
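
# Illustrative alternative (an assumption, not part of the original app): any
# OpenCV operation can be swapped into process_frame. The sketch below uses
# Canny edge detection instead of a plain grayscale conversion; the threshold
# values are arbitrary defaults chosen for the example.
def detect_edges(frame, low_threshold=100, high_threshold=200):
    # Convert to grayscale first, then run the Canny edge detector
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return cv2.Canny(gray, low_threshold, high_threshold)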
# HTML and JavaScript to access the webcam
html_code = """
<html>
<body>
    <video id="webcam" width="640" height="480" autoplay></video>
    <canvas id="canvas" width="640" height="480" style="display: none;"></canvas>
    <button onclick="captureFrame()">Capture Frame</button>
    <script>
        const video = document.getElementById("webcam");
        const canvas = document.getElementById("canvas");
        const ctx = canvas.getContext("2d");

        // Access the user's webcam
        navigator.mediaDevices.getUserMedia({ video: true })
            .then(function(stream) {
                video.srcObject = stream;
            })
            .catch(function(error) {
                console.error("Error accessing the webcam", error);
            });

        function captureFrame() {
            // Draw the current video frame onto the canvas
            ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
            // Convert the canvas image to a base64-encoded PNG image
            const imageData = canvas.toDataURL("image/png");
            // Send the image to Streamlit for processing
            window.parent.postMessage(imageData, "*");
        }
    </script>
</body>
</html>
"""
# Create a Streamlit component for the webcam
components.html(html_code, height=480)
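
# Note: components.html() embeds static HTML in a sandboxed iframe and does not
# return values to Python, so the postMessage call in the markup above is not
# received by this script on its own. A bidirectional custom component (built
# with streamlit.components.v1.declare_component) is one way to pass the
# captured frame back to Python.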
# This will hold the base64-encoded image
if 'image_data' not in st.session_state:
    st.session_state['image_data'] = None
# Capture the image data from JavaScript to Streamlit
def receive_image(image_data):
    if image_data is not None:
        # Decode the base64 image (strip the "data:image/png;base64," prefix)
        img_str = image_data.split(",")[1]
        img_bytes = base64.b64decode(img_str)
        img_array = np.frombuffer(img_bytes, np.uint8)
        frame = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
        # Process the frame
        processed_frame = process_frame(frame)
        # Convert the processed frame to a PIL image to display
        processed_pil_image = Image.fromarray(processed_frame)
        st.image(processed_pil_image, caption="Processed Frame", use_column_width=True)
# Listen for the image data from JavaScript. This only receives a value if
# something writes the base64 string into the page's query parameters, which
# the postMessage call above does not do by itself.
st.session_state['image_data'] = st.experimental_get_query_params().get("image_data", [None])[0]
receive_image(st.session_state['image_data'])
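
# Simpler alternative sketch (assumes a Streamlit version that provides the
# built-in st.camera_input widget): the widget captures a snapshot from the
# browser webcam and hands the bytes straight to Python, so no custom
# JavaScript bridge is needed before processing with OpenCV.
snapshot = st.camera_input("Take a picture")
if snapshot is not None:
    # Decode the captured image bytes into an OpenCV BGR array
    snap_array = np.frombuffer(snapshot.getvalue(), np.uint8)
    snap_frame = cv2.imdecode(snap_array, cv2.IMREAD_COLOR)
    # Reuse the same processing function and display the result
    st.image(process_frame(snap_frame), caption="Processed snapshot", use_column_width=True)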