"""Streamlit app: real-time facial emotion detection on a webcam feed.

Loads a Keras emotion classifier (architecture from JSON, weights from H5),
detects faces with OpenCV's bundled Haar cascade, classifies each 48x48
grayscale face crop, and streams the annotated frames into the Streamlit page.
"""

import cv2
import numpy as np
import streamlit as st
from keras.models import model_from_json

# Load the emotion detection model (architecture + trained weights).
with open("facialemotionmodel.json", "r") as json_file:
    model = model_from_json(json_file.read())
model.load_weights("facialemotionmodel.h5")

# Frontal-face Haar cascade shipped with the OpenCV installation.
haar_file = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(haar_file)

# Class index -> emotion name, matching the model's output ordering.
labels = {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy',
          4: 'neutral', 5: 'sad', 6: 'surprise'}


def extract_features(image):
    """Reshape a 48x48 grayscale face crop to the model's (1, 48, 48, 1)
    input tensor and scale pixel values to [0, 1]."""
    feature = np.array(image)
    feature = feature.reshape(1, 48, 48, 1)
    return feature / 255.0


st.title("Facial Emotion Detection App")

webcam = cv2.VideoCapture(0)

# A single placeholder that is updated in place each frame. Calling
# st.image() directly inside the loop would append a NEW image element on
# every iteration (and the original did so once per detected face), growing
# the page without bound.
frame_placeholder = st.empty()

try:
    while True:
        # Use the success flag rather than discarding it; a dropped frame is
        # simply skipped.
        ret, im = webcam.read()
        if not ret or im is None:
            continue

        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        # Haar cascades operate on grayscale; the original detected on the
        # color frame while cropping from `gray`.
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)

        for (p, q, r, s) in faces:
            face = gray[q:q + s, p:p + r]
            cv2.rectangle(im, (p, q), (p + r, q + s), (255, 0, 0), 2)
            try:
                face = cv2.resize(face, (48, 48))
            except cv2.error:
                # Degenerate/empty detection rectangle — skip just this face
                # instead of aborting the whole frame as the original did.
                continue
            pred = model.predict(extract_features(face))
            prediction_label = labels[pred.argmax()]
            cv2.putText(im, prediction_label, (p - 10, q - 10),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (0, 0, 255))

        # One display update per captured frame, not one per face.
        frame_placeholder.image(im, channels="BGR", use_column_width=True)
finally:
    # `st.StopException` does not exist on the `streamlit` module, so the
    # original `except st.StopException:` would itself raise AttributeError
    # and skip cleanup. A `finally` releases the camera no matter how the
    # script terminates (including Streamlit's internal stop exception).
    webcam.release()
    cv2.destroyAllWindows()