Update app.py
Browse files
app.py
CHANGED
@@ -1,3 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
# ---------- CSS ----------
|
2 |
CSS = """
|
3 |
/* Set same background color for everything */
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from transformers import pipeline
|
3 |
+
|
# Load both classifiers once at module import so every request reuses them.
# Each pipeline returns a list like [{'label': str, 'score': float}] per input.
# NOTE(review): `return_all_scores` is deprecated in recent transformers
# releases in favour of `top_k` — confirm the installed version before changing.
hate = pipeline("text-classification", model="hossam87/bert-base-arabic-hate-speech", tokenizer="hossam87/bert-base-arabic-hate-speech", return_all_scores=False)
dialect = pipeline("text-classification", model="IbrahimAmin/marbertv2-arabic-written-dialect-classifier", tokenizer="IbrahimAmin/marbertv2-arabic-written-dialect-classifier", return_all_scores=False)
6 |
+
|
def analyze(text):
    """Classify Arabic text for hate speech and written dialect.

    Args:
        text: Arabic input string from the UI.

    Returns:
        A 6-tuple for the Gradio outputs:
        (hate label, hate confidence, dialect label, dialect confidence,
        threat score, recommended action) — the confidences and the score
        are formatted as 2-decimal strings for display.
    """
    # Hate speech detection — pipeline yields [{'label': ..., 'score': ...}].
    hate_res = hate(text)[0]
    hate_label = hate_res['label']
    hate_conf = hate_res['score']

    # Dialect detection
    dial_res = dialect(text)[0]
    dial_label = dial_res['label']
    dial_conf = dial_res['score']

    # Threat score: model confidence weighted by label severity (0..1).
    weight = {"Neutral": 0, "Offensive": 0.5, "Sexism": 1, "Racism": 1, "Religious Discrimination": 1}
    score = hate_conf * weight.get(hate_label, 0)

    # Recommended action.
    # BUG FIX: the original tested `hate_label != "Neutral"` first, which made
    # the `score >= 0.49` branch unreachable (Neutral always has score 0, and
    # every non-Neutral label already took the first branch). Escalate only the
    # severe categories immediately; flag other non-neutral content for review.
    # Also repaired mojibake emoji/dash characters in the displayed strings.
    if hate_label in ("Sexism", "Racism", "Religious Discrimination"):
        action = "🚨 Immediate Review Required — This content contains severe hate speech or threats and should be escalated to moderators immediately."
    elif hate_label != "Neutral" or score >= 0.49:
        action = "⚠️ Potentially Harmful — The content may contain offensive or harmful language. Please review before taking further action."
    else:
        action = "✅ Safe Content — No harmful language detected. No moderation needed."

    return hate_label, f"{hate_conf:.2f}", dial_label, f"{dial_conf:.2f}", f"{score:.2f}", action
32 |
# ---------- CSS ----------
|
33 |
CSS = """
|
34 |
/* Set same background color for everything */
|