Mubarak2507 committed on
Commit
bd35ac7
·
verified ·
1 Parent(s): 09373c9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -28
app.py CHANGED
@@ -1,33 +1,45 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- hate = pipeline("text-classification", model="hossam87/bert-base-arabic-hate-speech", tokenizer="hossam87/bert-base-arabic-hate-speech", return_all_scores=False)
5
- dialect = pipeline("text-classification", model="IbrahimAmin/marbertv2-arabic-written-dialect-classifier", tokenizer="IbrahimAmin/marbertv2-arabic-written-dialect-classifier", return_all_scores=False)
6
-
7
- def analyze(text):
8
- # Hate speech detection
9
- hate_res = hate(text)[0]
10
- hate_label = hate_res['label']
11
- hate_conf = hate_res['score']
12
-
13
- # Dialect detection
14
- dial_res = dialect(text)[0]
15
- dial_label = dial_res['label']
16
- dial_conf = dial_res['score']
17
-
18
- # Threat score
19
- weight = {"Neutral":0, "Offensive":0.5, "Sexism":1, "Racism":1, "Religious Discrimination":1}
20
- score = hate_conf * weight.get(hate_label, 0)
21
-
22
- # Recommended action (modified logic)
23
- if hate_label != "Neutral":
24
- action = "🚨 Immediate Review Required β€” This content contains severe hate speech or threats and should be escalated to moderators immediately."
 
 
 
 
 
 
 
 
 
 
 
 
25
  elif score >= 0.49:
26
- action = "⚠️ Potentially Harmful β€” The content may contain offensive or harmful language. Please review before taking further action."
27
  else:
28
- action = "βœ… Safe Content β€” No harmful language detected. No moderation needed."
29
 
30
- return hate_label, f"{hate_conf:.2f}", dial_label, f"{dial_conf:.2f}", f"{score:.2f}", action
31
 
32
  # ---------- CSS ----------
33
  CSS = """
@@ -73,10 +85,10 @@ with gr.Blocks(css=CSS, theme="default") as demo:
73
  - **Threat Severity Score** (0–1 based on label + confidence)
74
  - **Recommended Action** (rule-based suggestion)
75
 
76
- **How to Use:**
77
- 1. Enter Arabic text in the box.
78
- 2. Click **Analyze**.
79
- 3. Review the results and suggested action.
80
  """)
81
 
82
  with gr.Column(scale=3):
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
+ # ---------- Pipelines ----------
5
+ hate = pipeline(
6
+ "text-classification",
7
+ model="hossam87/bert-base-arabic-hate-speech",
8
+ tokenizer="hossam87/bert-base-arabic-hate-speech",
9
+ return_all_scores=False
10
+ )
11
+
12
+ dialect = pipeline(
13
+ "text-classification",
14
+ model="IbrahimAmin/marbertv2-arabic-written-dialect-classifier",
15
+ tokenizer="IbrahimAmin/marbertv2-arabic-written-dialect-classifier",
16
+ return_all_scores=False
17
+ )
18
+
19
+ # ---------- Inference ----------
20
+ def analyze(text: str):
21
+ if not text or not text.strip():
22
+ return ("", "", "", "", "", "Please enter some Arabic text.")
23
+
24
+ h = hate(text)[0]
25
+ d = dialect(text)[0]
26
+
27
+ hate_label, hate_conf = h.get("label",""), float(h.get("score",0))
28
+ dial_label, dial_conf = d.get("label",""), float(d.get("score",0))
29
+
30
+ weights = {"Neutral":0.0, "Offensive":0.5, "Sexism":1.0, "Racism":1.0, "Religious Discrimination":1.0}
31
+ score = hate_conf * weights.get(hate_label, 0.0)
32
+
33
+ if hate_label != "Neutral" and weights.get(hate_label,0.0) >= 1.0:
34
+ action = "🚨 Immediate Review β€” Severe content detected. Escalate to moderators."
35
+ elif hate_label != "Neutral":
36
+ action = "⚠️ Potentially Harmful β€” Contains offensive content. Please review."
37
  elif score >= 0.49:
38
+ action = "⚠️ Borderline β€” Review recommended."
39
  else:
40
+ action = "βœ… Safe β€” No action needed."
41
 
42
+ return (hate_label, f"{hate_conf:.2f}", dial_label, f"{dial_conf:.2f}", f"{score:.2f}", action)
43
 
44
  # ---------- CSS ----------
45
  CSS = """
 
85
  - **Threat Severity Score** (0–1 based on label + confidence)
86
  - **Recommended Action** (rule-based suggestion)
87
 
88
+ **How to Use**
89
+ 1) Enter Arabic text in the box.
90
+ 2) Click **Analyze**.
91
+ 3) Review the results and suggested action.
92
  """)
93
 
94
  with gr.Column(scale=3):