ashjo317 committed
Commit e4f27c1 · verified
1 Parent(s): 2888de5

Update app.py

Files changed (1)
  1. app.py +41 -20
app.py CHANGED
@@ -79,11 +79,28 @@ mksdown = """# 😃 Welcome To The Friendly Text Moderation for Twitter (X) Post
 ---
 # 🌟 "AI Solution Architect" Course by ELVTR
 """
-
 # Function to get toxicity scores from OpenAI
-def get_toxicity_openai(tweet):
+def get_toxicity_openai(tweet, tolerance_dropdown):
+    if tolerance_dropdown == "low":
+        th = 20
+    elif tolerance_dropdown == "medium":
+        th = 50
+    elif tolerance_dropdown == "high":
+        th = 95
+
+    toxicity = json.loads(get_toxicity_hf(tweet, tolerance_dropdown))["toxicity"]
+    print(toxicity)
+    if toxicity == "safe":
+        toxicity_flag = False
+    else:
+        toxicity_flag = True
+
+
+
     open_api_answer = _test_tweet_openapi(tweet)
+    print (type(open_api_answer['flagged']))
     open_api_answer['category_scores']['IS THIS TWEET TOXIC'] = open_api_answer['flagged']
+    open_api_answer['category_scores']['[ADJUSTED FOR TOLERANCE] IS THIS TWEET TOXIC'] = toxicity_flag
     # Convert scores to percentages
     categories = list(open_api_answer['category_scores'].keys())
     scores = [score * 100 for score in open_api_answer['category_scores'].values()]  # Multiply by 100 to get percentage
@@ -94,7 +111,7 @@ def get_toxicity_openai(tweet):
         y=scores,
         text=[f"{score:.2f}%" for score in scores],  # Format the text as percentage
         textposition='auto',  # Position the text inside the bars
-        marker_color=['red' if score > 90 else 'green' for score in scores],  # Color red if > 50%
+        marker_color=['red' if score > th else 'green' for score in scores],  # Color red if > 50%
     ))

     # Update layout for better appearance
@@ -102,27 +119,27 @@ def get_toxicity_openai(tweet):
         title="Toxicity Categories",
         xaxis_title="Category",
         yaxis_title="Percentage (%)",
-        showlegend=False
+        showlegend=True
     )

     # Return the figure object to be displayed in Gradio
     return fig


-# Function to get toxicity scores from Hugging Face
-def get_toxicity_hf(tweet):
+def get_toxicity_hf(tweet, tolerance_dropdown):
     hugging_face_answer = _test_tweet_huggingface(tweet)
-    return hugging_face_answer[0]
-
-
-def get_toxicity_hf(tweet):
-    hugging_face_answer = _test_tweet_huggingface(tweet)
-    print(hugging_face_answer)
     score = hugging_face_answer[0]['score']*100
-    if score <= 60:
-        return json.dumps({"toxicity": "safe", "%Toxic": score, "%safe": (100-score)}, indent=4)
+    if tolerance_dropdown == "low":
+        th = 20
+    elif tolerance_dropdown == "medium":
+        th = 50
+    elif tolerance_dropdown == "high":
+        th = 95
+
+    if score <= th:
+        return json.dumps({"toxicity_tolerance": tolerance_dropdown, "toxicity": "safe", "%Toxic": score, "%safe": (100-score)}, indent=4)
     else:
-        return json.dumps({"toxicity": "unsafe", "%Toxic": score, "%safe": (100-score)}, indent=4)
+        return json.dumps({"toxicity_tolerance": tolerance_dropdown, "toxicity": "unsafe", "%Toxic": score, "%safe": (100-score)}, indent=4)

 # Random Tweet Generator
 def get_random_tweet():
@@ -132,17 +149,21 @@ def get_random_tweet():
 with gr.Blocks() as demo:
     gr.Markdown(mksdown)
     user_input = gr.Textbox(label="Paste your paragraph (2-10 lines)", lines=5)
-
+    tolerance_dropdown = gr.Dropdown(
+        choices=["low", "medium", "high"],
+        label="Toxicity Tolerance",
+        value="low"
+    )
     with gr.Row():
         analyze_btn = gr.Button("Measure Toxicity OpenAPI")
         analyze_btn_hf = gr.Button("Measure Toxicity HF")
         random_tweet_btn = gr.Button("Populate Random Tweet")

-    toxicity_output_json = gr.Code(label="Formatted Toxicity JSON", language="json")
+    toxicity_output_json = gr.Code(label="HuggingFace Toxicity JSON", language="json")
     toxicity_output = gr.Plot()
-
-    analyze_btn_hf.click(get_toxicity_hf, inputs=[user_input], outputs=[toxicity_output_json])
-    analyze_btn.click(get_toxicity_openai, inputs=[user_input], outputs=[toxicity_output])
+
+    analyze_btn_hf.click(get_toxicity_hf, inputs=[user_input, tolerance_dropdown], outputs=[toxicity_output_json])
+    analyze_btn.click(get_toxicity_openai, inputs=[user_input, tolerance_dropdown], outputs=[toxicity_output])
     random_tweet_btn.click(get_random_tweet, outputs=user_input)

 if __name__ == "__main__":
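
For readers following the handlers above: get_toxicity_openai indexes into the OpenAI moderation response by key, and get_toxicity_hf reads the first element of the Hugging Face pipeline output. The stubs below show roughly the shapes the code assumes; they are illustrative only, the real _test_tweet_openapi and _test_tweet_huggingface are defined earlier in app.py and call the actual services, and every value here is invented.

# Illustrative stubs only -- not the real helpers; values are invented.
def _test_tweet_openapi(tweet):
    # get_toxicity_openai reads 'flagged' (bool) and plots each 'category_scores' value * 100.
    return {
        "flagged": False,
        "category_scores": {"hate": 0.01, "harassment": 0.02, "violence": 0.005},
    }

def _test_tweet_huggingface(tweet):
    # get_toxicity_hf reads hugging_face_answer[0]['score'] and multiplies by 100.
    return [{"label": "toxic", "score": 0.35}]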
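
One thing worth noting in the diff: the low/medium/high mapping to thresholds of 20/50/95 now appears twice, once per handler, and neither if/elif chain has a final else, so th is left unbound if the dropdown ever supplies a value outside those three. A shared lookup along the lines of the sketch below would keep the two handlers in sync; the helper name and the fallback of 50 are assumptions, not part of this commit.

# Hypothetical shared tolerance-to-threshold lookup (sketch, not in this commit).
TOLERANCE_THRESHOLDS = {"low": 20, "medium": 50, "high": 95}

def get_toxicity_threshold(tolerance_dropdown):
    # Assumed fallback: treat unknown values as "medium" instead of leaving th unbound.
    return TOLERANCE_THRESHOLDS.get(tolerance_dropdown, 50)

Both get_toxicity_openai and get_toxicity_hf could then call get_toxicity_threshold(tolerance_dropdown) in place of the repeated if/elif ladder.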
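
As a concrete example of what the "Measure Toxicity HF" button now renders in the gr.Code box, here is the payload for an invented score of 35.0% under the default "low" tolerance (threshold 20, so 35.0 > 20 is reported as unsafe). Only the key names and the comparison come from the code in the diff; the numbers are made up.

import json

score = 35.0  # invented example; the real value is hugging_face_answer[0]['score'] * 100
print(json.dumps(
    {"toxicity_tolerance": "low", "toxicity": "unsafe", "%Toxic": score, "%safe": 100 - score},
    indent=4,
))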