Mubarak2507 committed · Commit cd5d98f · verified · 1 parent: 9b60cd8

Update app.py

Files changed (1)
  app.py (+32 -67)
app.py CHANGED
@@ -1,81 +1,47 @@
-import gradio as gr
-from transformers import pipeline
-
-hate = pipeline("text-classification", model="hossam87/bert-base-arabic-hate-speech", tokenizer="hossam87/bert-base-arabic-hate-speech", return_all_scores=False)
-dialect = pipeline("text-classification", model="IbrahimAmin/marbertv2-arabic-written-dialect-classifier", tokenizer="IbrahimAmin/marbertv2-arabic-written-dialect-classifier", return_all_scores=False)
-
-def analyze(text):
-    # Hate speech detection
-    hate_res = hate(text)[0]
-    hate_label = hate_res['label']
-    hate_conf = hate_res['score']
-
-    # Dialect detection
-    dial_res = dialect(text)[0]
-    dial_label = dial_res['label']
-    dial_conf = dial_res['score']
-
-    # Threat score
-    weight = {"Neutral":0, "Offensive":0.5, "Sexism":1, "Racism":1, "Religious Discrimination":1}
-    score = hate_conf * weight.get(hate_label, 0)
-
-    # Recommended action (modified logic)
-    if hate_label != "Neutral":
-        action = "🚨 Immediate Review Required — This content contains severe hate speech or threats and should be escalated to moderators immediately."
-    elif score >= 0.49:
-        action = "⚠️ Potentially Harmful — The content may contain offensive or harmful language. Please review before taking further action."
-    else:
-        action = "✅ Safe Content — No harmful language detected. No moderation needed."
-
-    return hate_label, f"{hate_conf:.2f}", dial_label, f"{dial_conf:.2f}", f"{score:.2f}", action
-
+# ---------- CSS ----------
+CSS = """
+/* Set same background color for everything */
+body, .gradio-container, .gr-blocks, .gr-box, .gr-panel {
+    background-color: #2b2b2b !important;
+}

-# ---------- UI (Blocks with Sidebar) ----------
-SIDEBAR_CSS = """
-#sidebar-panel {
-    background: #2b2b2b; /* keep sidebar's grey */
-    border: 1px solid rgba(255,255,255,0.08);
-    border-radius: 10px;
-    padding: 18px;
+/* Keep text visible */
+* {
+    color: #fff !important;
 }

-/* Make textboxes transparent and match the page background */
+/* Style input fields */
 textarea, input[type="text"] {
-    background: transparent !important;
-    border: 1px solid rgba(255,255,255,0.2) !important;
-    color: white !important;
+    background-color: #1f1f1f !important;
+    border: 1px solid rgba(255,255,255,0.2) !important;
+    color: #fff !important;
 }
 """

-with gr.Blocks(css=SIDEBAR_CSS, theme="default") as demo:
+# ---------- UI ----------
+with gr.Blocks(css=CSS, theme="default") as demo:
     with gr.Row(equal_height=True):
-        # Sidebar (left)
         with gr.Column(scale=1):
-            with gr.Group(elem_id="sidebar-panel"):
-                gr.Markdown(
-                    """
-                    ## 🛡️ Arabic Content Safety Analyzer
+            gr.Markdown("""
+            ## 🛡️ Arabic Content Safety Analyzer

-                    **Purpose**
-                    Analyze Arabic text for harmful or threatening language.
+            **Purpose**
+            Analyze Arabic text for harmful or threatening language.

-                    **Features**
-                    - **Hate Speech Classification** (Offensive, Racism, Sexism, Religious Discrimination)
-                    - **Dialect Detection** (Gulf, Levant, Egyptian, MSA)
-                    - **Threat Severity Score** (0–1 based on label + confidence)
-                    - **Recommended Action** (rule-based suggestion)
+            **Features**
+            - **Hate Speech Classification** (Offensive, Racism, Sexism, Religious Discrimination)
+            - **Dialect Detection** (Gulf, Levant, Egyptian, MSA)
+            - **Threat Severity Score** (0–1 based on label + confidence)
+            - **Recommended Action** (rule-based suggestion)

-                    **How to Use**
-                    1. Enter Arabic text in the box.
-                    2. Click **Analyze**.
-                    3. Review the results and suggested action.
-                    """
-                )
+            **How to Use**
+            1) أدخل النص العربي في الحقل.
+            2) اضغط **Analyze**.
+            3) راجع النتائج والإجراء المقترح.
+            """)

-        # Main app (right)
         with gr.Column(scale=3):
             gr.Markdown("### Enter Arabic Text for Analysis")
-
             input_text = gr.Textbox(lines=4, placeholder="اكتب هنا...", label="Arabic Text")

             out_hate = gr.Textbox(label="Hate Speech Label", interactive=False)
@@ -87,11 +53,10 @@ with gr.Blocks(css=SIDEBAR_CSS, theme="default") as demo:

             analyze_btn = gr.Button("Analyze", variant="primary")
             analyze_btn.click(
-                fn=analyze,
+                analyze,
                 inputs=input_text,
-                outputs=[out_hate, out_hate_conf, out_dialect, out_dialect_conf, out_score, out_action],
+                outputs=[out_hate, out_hate_conf, out_dialect, out_dialect_conf, out_score, out_action]
             )

-# ---------- Launch ----------
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
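
For reference, the Arabic "How to Use" steps added in the sidebar read in English: 1) Enter the Arabic text in the field. 2) Press "Analyze". 3) Review the results and the suggested action.

Note that this hunk now starts app.py at the CSS block, while the retained UI code still references gr, analyze, and the six output textboxes; the imports, the two pipelines, and the analyze definition are all among the removed lines. Unless those definitions survive elsewhere in the file, a prelude along the following lines, reconstructed from the removed lines above, would still need to sit at the top of app.py. This is a sketch under that assumption, not the committed contents; the model names, weights, and threshold are taken verbatim from the previous revision.

import gradio as gr
from transformers import pipeline

# Arabic hate-speech and written-dialect classifiers (same checkpoints as the previous revision)
hate = pipeline("text-classification", model="hossam87/bert-base-arabic-hate-speech",
                tokenizer="hossam87/bert-base-arabic-hate-speech", return_all_scores=False)
dialect = pipeline("text-classification", model="IbrahimAmin/marbertv2-arabic-written-dialect-classifier",
                   tokenizer="IbrahimAmin/marbertv2-arabic-written-dialect-classifier", return_all_scores=False)

def analyze(text):
    # Each pipeline returns [{'label': ..., 'score': ...}] for the top prediction
    hate_res = hate(text)[0]
    dial_res = dialect(text)[0]

    # Threat severity: classifier confidence weighted by label severity
    weight = {"Neutral": 0, "Offensive": 0.5, "Sexism": 1, "Racism": 1, "Religious Discrimination": 1}
    score = hate_res["score"] * weight.get(hate_res["label"], 0)

    # Rule-based recommendation, as in the previous revision
    if hate_res["label"] != "Neutral":
        action = "🚨 Immediate Review Required"
    elif score >= 0.49:
        action = "⚠️ Potentially Harmful"
    else:
        action = "✅ Safe Content"

    # Six values, matching the six output components wired to analyze_btn.click
    return (hate_res["label"], f"{hate_res['score']:.2f}",
            dial_res["label"], f"{dial_res['score']:.2f}",
            f"{score:.2f}", action)

Without such a prelude, launching the Space would raise a NameError on gr before the interface is built.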