import gradio as gr
import torch
import numpy as np
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    CLIPModel,
    CLIPProcessor,
)
from lime.lime_text import LimeTextExplainer
from groq import Groq
import tempfile
import json
import os

# ==== Load BERT Fake News Model ====
model_name = "mrm8488/bert-tiny-finetuned-fake-news-detection"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()

# ==== Load CLIP for Image-Text Similarity ====
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
clip_model.eval()

# ==== LIME Explainer ====
explainer = LimeTextExplainer(class_names=["Real", "Fake"])

# ==== Groq Client ====
# Requires the GROQ_API_KEY secret to be set for the Space; the client
# raises at startup if no key is available.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# ==== News Classification ====
def classify_news(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
    return probs[0].tolist()
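
# The index order of these probabilities follows the checkpoint's label
# mapping; rather than hard-coding "Real"/"Fake" downstream, the class names
# can be derived from the config (a sketch; note that many checkpoints only
# ship the generic LABEL_0/LABEL_1 names):
class_names_from_config = [model.config.id2label[i] for i in range(model.config.num_labels)]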

# ==== LIME Explanation ====
def explain_prediction(text):
    def predictor(texts):
        # LIME expects an (n_samples, n_classes) array of probabilities.
        return np.array([classify_news(t) for t in texts])

    explanation = explainer.explain_instance(text, predictor, num_features=6)
    return explanation.as_list()
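
# LIME evaluates the predictor on thousands of perturbed samples (num_samples
# defaults to 5000), so calling the model one text at a time is slow. A batched
# predictor is a straightforward speedup; a sketch (hypothetical helper, not
# wired into the app):
def predictor_batched(texts, batch_size=64):
    all_probs = []
    for i in range(0, len(texts), batch_size):
        batch = tokenizer(
            texts[i : i + batch_size],
            return_tensors="pt",
            truncation=True,
            padding=True,
        )
        with torch.no_grad():
            logits = model(**batch).logits
        all_probs.append(torch.nn.functional.softmax(logits, dim=-1))
    return torch.cat(all_probs).numpy()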

# ==== LLM Second Opinion ====
def get_llm_opinion(text):
    try:
        response = client.chat.completions.create(
            model="mixtral-8x7b-32768",
            messages=[
                {"role": "system", "content": "You are a helpful assistant that detects fake news based on user input."},
                {"role": "user", "content": f"Please evaluate the following news and tell if it's fake or real, and why:\n\n{text}"},
            ],
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        return f"LLM Error: {str(e)}"

# ==== CLIP Image-Text Similarity ====
def verify_image_with_text(image, text):
    if image is None:
        return "No image uploaded."
    inputs = clip_processor(
        text=[text], images=image, return_tensors="pt", padding=True, truncation=True
    )
    with torch.no_grad():
        outputs = clip_model(**inputs)
    # logits_per_image is cosine similarity scaled by the learned temperature;
    # a softmax over a single caption would always return 1.0, so report the
    # raw cosine similarity instead.
    similarity = (outputs.logits_per_image / clip_model.logit_scale.exp())[0][0].item()
    return f"Image-Headline Similarity Score: {similarity:.2f}"

# ==== Main Pipeline ====
def analyze_news(text, image=None):
    prediction = classify_news(text)
    # Index 0 is treated as "Real" here; verify the order against
    # model.config.id2label, since it depends on the checkpoint.
    model_label = "Real" if prediction[0] > prediction[1] else "Fake"
    lime_output = explain_prediction(text)
    lime_expl = [(word, round(weight, 3)) for word, weight in lime_output]
    llm_verdict = get_llm_opinion(text)
    img_verification = verify_image_with_text(image, text)
    report = {
        "model_prediction": model_label,
        "bert_probs": {"Real": round(prediction[0], 3), "Fake": round(prediction[1], 3)},
        "lime_explanation": lime_expl,
        "llm_opinion": llm_verdict,
        "image_text_similarity": img_verification,
    }
    with tempfile.NamedTemporaryFile(delete=False, suffix=".json", mode="w") as tmp:
        json.dump(report, tmp, indent=2)
        json_path = tmp.name
    return model_label, lime_expl, llm_verdict, img_verification, json_path

# ==== Gradio UI ====
demo = gr.Interface(
    fn=analyze_news,
    inputs=[
        gr.Textbox(label="News Text", lines=5, placeholder="Paste news text here..."),
        gr.Image(label="Optional Image", type="pil"),
    ],
    outputs=[
        gr.Text(label="BERT Model Verdict"),
        gr.HighlightedText(label="LIME Explanation"),
        gr.Text(label="Groq LLM Opinion"),
        gr.Text(label="Image-Text Match Score"),
        gr.File(label="Download JSON Report"),
    ],
    title="📰 AI Fake News Detector with BERT + LIME + Groq LLM",
    description="Upload news text (and an optional image). Compare the BERT model's prediction with the Groq LLM's second opinion, inspect LIME explanations and a CLIP-based image-text similarity score, and export the full report as JSON.",
)

if __name__ == "__main__":
    demo.launch()
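
# Headless usage sketch (hypothetical, for quick local testing without the UI):
#
#   verdict, lime_expl, llm, img, report_path = analyze_news("Example headline")
#   print(verdict, report_path)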