import gradio as gr
from transformers import AutoImageProcessor
from transformers import SiglipForImageClassification
from PIL import Image
import torch

# Load model and processor
model_name = "prithivMLmods/Facial-Emotion-Detection-SigLIP2"
model = SiglipForImageClassification.from_pretrained(model_name)
processor = AutoImageProcessor.from_pretrained(model_name)

def emotion_classification(image):
    """Predicts facial emotion classification for an image."""
    image = Image.fromarray(image).convert("RGB")
    inputs = processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        probs = torch.nn.functional.softmax(logits, dim=1).squeeze().tolist()

    labels = {
        "0": "Ahegao", "1": "Angry", "2": "Happy",
        "3": "Neutral", "4": "Sad", "5": "Surprise"
    }
    predictions = {labels[str(i)]: round(probs[i], 3) for i in range(len(probs))}

    return predictions

if __name__ == "__main__":
    iface = gr.Interface(
        fn=emotion_classification,
        inputs=gr.Image(type="numpy"),
        outputs=gr.Label(label="Prediction Scores"),
        title="Facial Emotion Detection",
        description="Upload an image to classify the facial emotion."
    )
    iface.launch()
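
# Note: a minimal usage sketch (not part of the original script) for calling the
# classifier directly, without launching the Gradio UI. "face.jpg" is a
# placeholder path; any RGB or grayscale image file works.
#
#   import numpy as np
#   scores = emotion_classification(np.array(Image.open("face.jpg")))
#   print(scores)  # dict mapping each emotion label to its rounded probability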