import gradio as gr
import cv2
from deepface import DeepFace
import numpy as np

def predict_emotion(image):
    # Convert Gradio image (PIL format) to an OpenCV image (BGR)
    img = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    # Analyze the emotion using DeepFace
    result = DeepFace.analyze(img, actions=['emotion'])
    # Get the dominant emotion
    dominant_emotion = result[0]['dominant_emotion']
    return dominant_emotion

# Define the Gradio interface using the new API
iface = gr.Interface(fn=predict_emotion,
                     inputs=gr.Image(type="pil"),   # Updated gr.Image input
                     outputs="text",                # Text output for dominant emotion
                     title="Facial Emotion Recognizer",
                     description="Upload an image and get the predicted emotion")

# Launch the Gradio app
iface.launch(share=True)