import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import random
import logging

# Set up logging to both a file and the console
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('boundary_assistant.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

class SimplifiedBoundaryAssistant:
    def __init__(self):
        # Initialize only the essential models
        self.models = {}
        self.tokenizers = {}

        # Model paths - the 4 essential models
        self.model_paths = {
            'boundary': 'SamanthaStorm/healthy-boundary-predictor',  # Your trained model
            'toxicity': 'unitary/toxic-bert',  # Toxicity detection
            'emotion': 'j-hartmann/emotion-english-distilroberta-base',  # Emotion detection
            'fallacy': 'SamanthaStorm/fallacyfinder'  # Fallacy detection
        }

        # Load all models
        self.load_models()

        # Emotion labels for the emotion model
        self.emotion_labels = ['anger', 'disgust', 'fear', 'joy', 'neutral', 'sadness', 'surprise']

        # Emotion to need mapping
        self.emotion_to_needs = {
            'anger': ['respect', 'fairness', 'to be heard'],
            'sadness': ['comfort', 'understanding', 'connection'],
            'fear': ['safety', 'security', 'reassurance'],
            'disgust': ['boundaries', 'respect', 'different approach'],
            'neutral': ['clarity', 'understanding', 'communication'],
            'joy': ['connection', 'sharing', 'celebration'],
            'surprise': ['information', 'clarity', 'time to process']
        }

        # Fallacy labels (from your fallacy finder)
        self.fallacy_labels = {
            'ad_hominem': 'Ad Hominem (Personal Attack)',
            'strawman': 'Strawman (Misrepresenting Argument)',
            'whataboutism': 'Whataboutism (Deflecting)',
            'gaslighting': 'Gaslighting (Questioning Reality)',
            'false_dichotomy': 'False Dichotomy (Only Two Options)',
            'appeal_to_emotion': 'Appeal to Emotion',
            'darvo': 'DARVO (Deny, Attack, Reverse)',
            'moving_goalposts': 'Moving Goalposts',
            'cherry_picking': 'Cherry Picking',
            'appeal_to_authority': 'Appeal to Authority',
            'slippery_slope': 'Slippery Slope',
            'motte_and_bailey': 'Motte and Bailey',
            'gish_gallop': 'Gish Gallop',
            'kafkatrapping': 'Kafkatrapping',
            'sealioning': 'Sealioning',
            'no_fallacy': 'No Fallacy'
        }
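
        # Note: analyze_simplified() converts the fallacy model's predicted
        # class id to a key by position, i.e. list(self.fallacy_labels.keys())[id].
        # This assumes the insertion order above matches the model's label ids
        # (id 0 = 'ad_hominem', ..., id 15 = 'no_fallacy').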

    def load_models(self):
        """Load the essential models"""
        logger.info("Starting model loading process...")
        for name, path in self.model_paths.items():
            try:
                logger.info(f"Loading {name} model: {path}")
                if name == 'fallacy':
                    # Special handling for the fallacy model with 16 labels
                    self.tokenizers[name] = AutoTokenizer.from_pretrained(path)
                    self.models[name] = AutoModelForSequenceClassification.from_pretrained(path, num_labels=16)
                else:
                    self.tokenizers[name] = AutoTokenizer.from_pretrained(path)
                    self.models[name] = AutoModelForSequenceClassification.from_pretrained(path)
                logger.info(f"✅ {name} model loaded successfully!")
            except Exception as e:
                logger.error(f"❌ Error loading {name} model: {e}")
                self.models[name] = None
                self.tokenizers[name] = None

    def predict_with_model(self, text, model_name):
        """Make a prediction with the specified model"""
        if self.models[model_name] is None or self.tokenizers[model_name] is None:
            logger.warning(f"{model_name} model not available")
            return None, 0.0
        try:
            inputs = self.tokenizers[model_name](
                text,
                return_tensors="pt",
                truncation=True,
                padding=True,
                max_length=512
            )
            with torch.no_grad():
                outputs = self.models[model_name](**inputs)
                predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
                predicted_class_id = predictions.argmax().item()
                confidence = predictions.max().item()
            logger.debug(f"{model_name} prediction: class {predicted_class_id}, confidence {confidence:.3f}")
            return predicted_class_id, confidence
        except Exception as e:
            logger.error(f"Error with {model_name} prediction: {e}")
            return None, 0.0
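
    # Illustrative sketch (not executed): a call returns the argmax class id and
    # its softmax probability, or (None, 0.0) when the model failed to load.
    # 'assistant' refers to the module-level instance created further below.
    #
    #   pred_id, conf = assistant.predict_with_model("I feel ignored", 'emotion')
    #   if pred_id is not None:
    #       label = assistant.emotion_labels[pred_id]  # e.g. 'sadness'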

    def analyze_simplified(self, text):
        """Analysis using the 4 essential models"""
        if not text.strip():
            return None

        logger.info(f"ANALYZING MESSAGE: '{text[:100]}{'...' if len(text) > 100 else ''}'")

        analysis = {
            'text': text,
            'issues': [],
            'underlying_needs': {
                'emotions': [],
                'core_needs': [],
                'likely_feelings': []
            },
            'severity_score': 0,
            'is_toxic': False,
            'is_healthy_boundary': True,
            'fallacies_detected': []
        }

        # 1. Emotion detection (for understanding underlying needs)
        logger.info("Running emotion detection...")
        emotion_pred, emotion_conf = self.predict_with_model(text, 'emotion')
        if emotion_pred is not None and emotion_conf > 0.3:
            detected_emotion = self.emotion_labels[emotion_pred] if emotion_pred < len(self.emotion_labels) else 'neutral'
            logger.info(f"EMOTION DETECTED: {detected_emotion} (confidence: {emotion_conf:.3f})")
            analysis['underlying_needs']['emotions'].append({
                'emotion': detected_emotion,
                'confidence': emotion_conf
            })
            analysis['underlying_needs']['core_needs'].extend(self.emotion_to_needs.get(detected_emotion, ['understanding']))
            analysis['underlying_needs']['likely_feelings'].append(detected_emotion)
        else:
            logger.info(f"No strong emotion detected (pred: {emotion_pred}, conf: {emotion_conf})")

        # 2. Toxicity detection
        logger.info("Running toxicity detection...")
        toxicity_pred, toxicity_conf = self.predict_with_model(text, 'toxicity')
        if toxicity_pred is not None and toxicity_pred == 1 and toxicity_conf > 0.7:  # 1 = toxic
            logger.info(f"TOXICITY DETECTED: confidence {toxicity_conf:.3f}")
            analysis['is_toxic'] = True
            analysis['issues'].append({
                'type': 'toxic_language',
                'name': 'Toxic Language Detected',
                'confidence': toxicity_conf,
                'severity': 3 if toxicity_conf > 0.9 else 2
            })
            analysis['severity_score'] += 3 if toxicity_conf > 0.9 else 2
        else:
            logger.info(f"No toxicity detected (pred: {toxicity_pred}, conf: {toxicity_conf})")

        # 3. Boundary health check (1 = healthy, 0 = unhealthy)
        logger.info("Running boundary health check...")
        boundary_pred, boundary_conf = self.predict_with_model(text, 'boundary')
        if boundary_pred is not None:
            if boundary_pred == 0 and boundary_conf > 0.7:  # 0 = unhealthy
                logger.info(f"UNHEALTHY BOUNDARY DETECTED: confidence {boundary_conf:.3f}")
                analysis['is_healthy_boundary'] = False
                analysis['issues'].append({
                    'type': 'unhealthy_boundary',
                    'name': 'Unhealthy Boundary Pattern',
                    'confidence': boundary_conf,
                    'severity': 2 if boundary_conf > 0.8 else 1
                })
                analysis['severity_score'] += 2 if boundary_conf > 0.8 else 1
            elif boundary_pred == 1:  # 1 = healthy
                logger.info(f"HEALTHY BOUNDARY DETECTED: confidence {boundary_conf:.3f}")
                analysis['is_healthy_boundary'] = True
        else:
            logger.info("Boundary health check failed")

        # 4. Fallacy detection
        logger.info("Running fallacy detection...")
        fallacy_pred, fallacy_conf = self.predict_with_model(text, 'fallacy')
        if fallacy_pred is not None and fallacy_conf > 0.6:  # Higher threshold for fallacies
            fallacy_keys = list(self.fallacy_labels.keys())
            if fallacy_pred < len(fallacy_keys):
                fallacy_type = fallacy_keys[fallacy_pred]
                logger.info(f"FALLACY DETECTED: {fallacy_type} (confidence: {fallacy_conf:.3f})")
                if fallacy_type != 'no_fallacy':
                    analysis['fallacies_detected'].append(fallacy_type)
                    analysis['issues'].append({
                        'type': 'fallacy',
                        'subtype': fallacy_type,
                        'name': self.fallacy_labels[fallacy_type],
                        'confidence': fallacy_conf,
                        'severity': 3 if fallacy_type == 'gaslighting' else 2
                    })
                    analysis['severity_score'] += 3 if fallacy_type == 'gaslighting' else 2
                else:
                    logger.info("Fallacy model detected 'no_fallacy' - healthy communication")
        else:
            logger.info(f"No fallacy detected (pred: {fallacy_pred}, conf: {fallacy_conf})")

        logger.info(f"ANALYSIS COMPLETE - Severity score: {analysis['severity_score']}, Issues found: {len(analysis['issues'])}")
        logger.info("=" * 80)
        return analysis
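
    # Illustrative sketch (not executed): shape of the dict returned above for a
    # message flagged only by the toxicity model (confidence between 0.7 and 0.9):
    #
    #   {
    #       'text': '...',
    #       'issues': [{'type': 'toxic_language', 'name': 'Toxic Language Detected',
    #                   'confidence': 0.85, 'severity': 2}],
    #       'underlying_needs': {'emotions': [...], 'core_needs': [...],
    #                            'likely_feelings': [...]},
    #       'severity_score': 2, 'is_toxic': True,
    #       'is_healthy_boundary': True, 'fallacies_detected': []
    #   }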

    def generate_smart_boundary(self, analysis):
        """Generate a boundary based on the simplified analysis, with emotion awareness"""
        if not analysis:
            return "I need to express my feelings about this situation. I'd like us to find a way to communicate that works for both of us."

        original_text = analysis['text'].lower()

        # Extract context and emotions
        relationship_context = self.infer_relationship_context(original_text)
        situation_context = self.extract_situation_context(original_text)

        # Use detected emotions for more empathetic boundaries
        detected_emotions = analysis['underlying_needs']['emotions']
        if detected_emotions:
            primary_emotion = detected_emotions[0]['emotion']  # Highest-confidence emotion
            emotion_context = primary_emotion
        else:
            emotion_context = self.extract_emotional_context(original_text)

        # Handle different scenarios based on the analysis
        if analysis['is_toxic']:
            return self.generate_non_toxic_boundary(emotion_context, situation_context, analysis['underlying_needs']['core_needs'])
        elif analysis['fallacies_detected']:
            # Handle fallacy-based responses
            primary_fallacy = analysis['fallacies_detected'][0]
            return self.generate_fallacy_response(primary_fallacy, situation_context, emotion_context)
        elif not analysis['is_healthy_boundary']:
            return self.generate_healthier_boundary(original_text, situation_context, emotion_context)
        elif analysis['issues']:
            # Has some issues, but not major ones
            return self.generate_improved_boundary(situation_context, emotion_context)
        else:
            # No major issues detected, just polish it up
            return self.generate_polished_boundary(original_text, situation_context, emotion_context)

    def generate_non_toxic_boundary(self, emotion, situation, core_needs):
        """Generate a boundary that removes toxic language"""
        needs_text = core_needs[0] if core_needs else 'understanding'
        if emotion == 'anger':
            return f"I'm feeling really frustrated about {situation}. I need {needs_text} and a chance to discuss this calmly when we can both listen to each other."
        elif emotion == 'sadness':
            return f"I'm feeling hurt about {situation}. I need {needs_text} and for us to find a way to talk about this that doesn't leave either of us feeling attacked."
        elif emotion == 'fear':
            return f"I'm feeling defensive about {situation}. I need {needs_text} and reassurance that we can work through this together respectfully."
        else:
            return f"I'm feeling overwhelmed about {situation}. I need {needs_text} and for us to approach this conversation with more care for each other's feelings."

    def generate_healthier_boundary(self, original_text, situation, emotion):
        """Generate a healthier version of the boundary"""
        # Look for specific unhealthy patterns in the original text
        if any(word in original_text for word in ['always', 'never']):
            return f"I feel {emotion} when {situation} happens. I need us to find a specific solution for this particular issue."
        elif 'you need to' in original_text or 'you should' in original_text:
            return f"I feel {emotion} about {situation}. I'd appreciate it if we could work together on this."
        else:
            return f"I want to express how I feel about {situation}. I feel {emotion} and I need us to find a way to handle this that works for both of us."

    def generate_fallacy_response(self, fallacy_type, situation, emotion):
        """Generate a response based on the specific fallacy detected"""
        if 'ad_hominem' in fallacy_type:
            return f"I feel {emotion} when {situation} becomes personal. I need us to focus on the actual issues rather than attacking each other's character."
        elif 'gaslighting' in fallacy_type:
            return f"I feel {emotion} when my experiences about {situation} are questioned. I need my perspective to be acknowledged, even if we disagree."
        elif 'strawman' in fallacy_type:
            return f"I feel {emotion} when {situation} gets misrepresented. I need us to address what I'm actually saying."
        elif 'whataboutism' in fallacy_type:
            return f"I feel {emotion} when {situation} gets deflected to other issues. I need us to address this specific concern first."
        else:
            return f"I notice I feel {emotion} about {situation}. I need us to discuss this more constructively."

    def generate_improved_boundary(self, situation, emotion):
        """Generate an improved boundary for minor issues"""
        return f"I'm feeling {emotion} about {situation}. I'd like us to step back and approach this differently so we can understand each other better."

    def generate_polished_boundary(self, original_text, situation, emotion):
        """Polish up already decent boundaries"""
        return f"I want to communicate about {situation}. I'm feeling {emotion} and would appreciate if we could work together to find a solution that feels good for both of us."

    def infer_relationship_context(self, text):
        """Infer the relationship type from the original text"""
        if any(word in text for word in ['love', 'babe', 'honey', 'relationship', 'partner']):
            return 'romantic'
        elif any(word in text for word in ['work', 'meeting', 'boss', 'office', 'colleague']):
            return 'professional'
        elif any(word in text for word in ['mom', 'dad', 'family', 'parent', 'sibling']):
            return 'family'
        else:
            return 'general'

    def extract_situation_context(self, text):
        """Extract the specific situation being discussed"""
        if 'interrupt' in text:
            return 'interrupting during conversations'
        elif any(word in text for word in ['late', 'time', 'punctual']):
            return 'time and punctuality issues'
        elif 'listen' in text or 'hear' in text:
            return 'feeling unheard in our communication'
        elif any(word in text for word in ['talk', 'conversation', 'discuss']):
            return 'our communication patterns'
        elif 'respect' in text:
            return 'mutual respect in our interactions'
        else:
            return 'this situation'

    def extract_emotional_context(self, text):
        """Extract emotional undertones from the original text"""
        if any(word in text for word in ['hurt', 'pain', 'upset']):
            return 'hurt'
        elif any(word in text for word in ['angry', 'mad', 'furious', 'annoying']):
            return 'frustrated'
        elif any(word in text for word in ['ignore', 'dismissed', 'unheard']):
            return 'unheard'
        elif any(word in text for word in ['disrespect', 'rude']):
            return 'disrespected'
        elif any(word in text for word in ['anxious', 'worried', 'stressed']):
            return 'anxious'
        else:
            return 'uncomfortable'
"""Infer relationship type from original text""" | |
if any(word in text for word in ['love', 'babe', 'honey', 'relationship', 'partner']): | |
return 'romantic' | |
elif any(word in text for word in ['work', 'meeting', 'boss', 'office', 'colleague']): | |
return 'professional' | |
elif any(word in text for word in ['mom', 'dad', 'family', 'parent', 'sibling']): | |
return 'family' | |
else: | |
return 'general' | |
def extract_situation_context(self, text): | |
"""Extract specific situation being discussed""" | |
if 'interrupt' in text: | |
return 'interrupting during conversations' | |
elif any(word in text for word in ['late', 'time', 'punctual']): | |
return 'time and punctuality issues' | |
elif 'listen' in text or 'hear' in text: | |
return 'feeling unheard in our communication' | |
elif any(word in text for word in ['talk', 'conversation', 'discuss']): | |
return 'our communication patterns' | |
elif 'respect' in text: | |
return 'mutual respect in our interactions' | |
else: | |
return 'this situation' | |
def extract_emotional_context(self, text): | |
"""Extract emotional undertones from original text""" | |
if any(word in text for word in ['hurt', 'pain', 'upset']): | |
return 'hurt' | |
elif any(word in text for word in ['angry', 'mad', 'furious', 'annoying']): | |
return 'frustrated' | |
elif any(word in text for word in ['ignore', 'dismissed', 'unheard']): | |
return 'unheard' | |
elif any(word in text for word in ['disrespect', 'rude']): | |
return 'disrespected' | |
elif any(word in text for word in ['anxious', 'worried', 'stressed']): | |
return 'anxious' | |
else: | |
return 'uncomfortable' | |

    def calculate_overall_score(self, analysis):
        """Calculate an overall boundary health score"""
        if not analysis:
            return 85  # Good baseline if no analysis

        base_score = 100

        # Major penalties for severe issues
        if analysis['is_toxic']:
            base_score -= 40  # Major penalty for toxic language
        if not analysis['is_healthy_boundary']:
            base_score -= 25  # Significant penalty for unhealthy boundaries

        # Additional penalties based on severity
        severity_penalty = analysis['severity_score'] * 8
        final_score = max(0, base_score - severity_penalty)
        return final_score
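
    # Worked example: a message flagged as toxic with confidence above 0.9 gets
    # severity_score 3, so the final score is 100 - 40 (toxic) - 3 * 8 = 36,
    # which lands in the "Needs Improvement" band used in validate_boundary_smart().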

    def format_analysis_feedback(self, analysis):
        """Format the analysis into user-friendly feedback"""
        if not analysis:
            return "💡 **Unable to analyze:** Please try entering your message again.\n"
        if not analysis['issues']:
            return "✅ **Great communication!** No major issues detected. Your boundary-setting approach looks healthy.\n"

        feedback = ""

        # Address major issues
        if analysis['is_toxic']:
            feedback += "⚠️ **Toxic Language Detected**\n\n"
            feedback += "• This communication contains language that may be harmful or aggressive\n"
            feedback += "• Consider focusing on specific behaviors rather than personal attacks\n"
            feedback += "• Using 'I' statements can help express feelings without blame\n\n"
        if not analysis['is_healthy_boundary']:
            feedback += "🔄 **Boundary Improvement Needed**\n\n"
            feedback += "• This boundary-setting approach may not be as effective\n"
            feedback += "• Consider focusing on your needs rather than what others should do\n"
            feedback += "• Express feelings without making demands or ultimatums\n\n"

        # Show fallacy detection
        if analysis['fallacies_detected']:
            feedback += "🧠 **Logical Issues Detected**\n\n"
            for fallacy in analysis['fallacies_detected']:
                fallacy_name = self.fallacy_labels.get(fallacy, fallacy)
                feedback += f"• **{fallacy_name}** detected\n"
            feedback += "• These patterns can make communication less effective\n"
            feedback += "• Focus on specific behaviors rather than general arguments\n\n"

        # Show emotional understanding
        if analysis['underlying_needs']['emotions']:
            feedback += "💙 **What You Might Be Feeling**\n\n"
            emotions = analysis['underlying_needs']['emotions']
            for emotion_data in emotions:
                emotion = emotion_data['emotion']
                confidence = emotion_data['confidence']
                feedback += f"• **{emotion.title()}** ({confidence*100:.0f}% confidence)\n"
            core_needs = list(set(analysis['underlying_needs']['core_needs']))  # Remove duplicates
            if core_needs:
                feedback += f"\n**You may need:** {', '.join(core_needs[:3])}\n\n"

        return feedback

    def process_boundary_request(self, raw_input):
        """Main entry point using the simplified AI analysis"""
        if not raw_input.strip():
            return "Please enter your raw thoughts to get started.", "", ""

        # Simplified analysis using the 4 models
        analysis = self.analyze_simplified(raw_input)

        # Format analysis feedback
        analysis_text = self.format_analysis_feedback(analysis)

        # Generate improved boundary
        improved = self.generate_smart_boundary(analysis)

        # Calculate score for the explanation
        score = self.calculate_overall_score(analysis)

        # Generate explanation
        explanation = f"""
**Why this works better:**
✅ Uses "I" statements to express feelings
✅ Removes harmful communication patterns
✅ Focuses on needs rather than blame
✅ Creates opportunity for a collaborative solution

**Original Message Health Score: {score}/100**

**Not quite right?** Use the "Custom Boundary Builder" below to create a more personalized boundary.
"""
        return analysis_text, improved, explanation

    def validate_boundary_smart(self, boundary_text):
        """Smart validation using the simplified models"""
        if not boundary_text.strip():
            return "Please enter a boundary to check.", 0, []

        # Run the simplified analysis on the boundary
        analysis = self.analyze_simplified(boundary_text)
        score = self.calculate_overall_score(analysis)

        # Determine rating
        if analysis and analysis['is_toxic']:
            rating = "🔴 HARMFUL - Contains Toxic Language"
        elif score >= 80:
            rating = "🟢 Excellent Boundary!"
        elif score >= 60:
            rating = "🔵 Good Boundary"
        elif score >= 40:
            rating = "🟡 Fair Boundary"
        else:
            rating = "🔴 Needs Improvement"

        # Format feedback
        feedback = f"**{rating}** (Score: {score}/100)\n\n"
        if analysis and analysis['issues']:
            feedback += self.format_analysis_feedback(analysis)
        else:
            feedback += "✅ **Great boundary!** No significant issues detected.\n"
            feedback += "• Uses respectful communication patterns\n"
            feedback += "• Shows a healthy boundary-setting approach\n"
            feedback += "• Free of toxic language patterns\n"
        return feedback, score, analysis.get('issues', []) if analysis else []
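
    # For reference, the rating bands above: score >= 80 Excellent, >= 60 Good,
    # >= 40 Fair, < 40 Needs Improvement, with any toxicity detection overriding
    # the numeric band.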

    # Custom boundary generation
    def generate_custom_boundary(self, behavior, relationship, feeling, need, setting, tone):
        """Generate a custom boundary based on guided questions"""
        # Relationship-specific language
        relationship_language = {
            'romantic_partner': {
                'address': 'babe/honey',
                'approach': 'collaborative',
                'example': 'I love you and want us to work through this together'
            },
            'friend': {
                'address': 'friend',
                'approach': 'direct but caring',
                'example': 'I value our friendship and want to talk about something'
            },
            'family_member': {
                'address': 'family',
                'approach': 'respectful but firm',
                'example': 'I respect you and need to share something important'
            },
            'coworker': {
                'address': 'colleague',
                'approach': 'professional',
                'example': 'I wanted to discuss our working relationship'
            },
            'boss': {
                'address': 'professional',
                'approach': 'formal but assertive',
                'example': "I'd like to discuss some workplace concerns"
            },
            'child': {
                'address': 'child',
                'approach': 'teaching moment',
                'example': "Let's talk about how we treat each other"
            }
        }

        # Setting-specific approaches
        setting_approaches = {
            'private_conversation': "when we're alone",
            'public_space': 'respectfully in public',
            'work_environment': 'in a professional setting',
            'family_gathering': 'during family time',
            'text_message': 'via text',
            'email': 'in writing'
        }

        # Tone variations
        tone_styles = {
            'gentle': 'I gently need to share',
            'firm': 'I need to be clear about',
            'direct': 'I want to directly address',
            'diplomatic': "I'd like to diplomatically discuss"
        }

        rel_info = relationship_language.get(relationship, relationship_language['friend'])
        setting_phrase = setting_approaches.get(setting, 'in our interactions')
        tone_phrase = tone_styles.get(tone, 'I need to share')

        # Generate the boundary with customization
        templates = [
            f"{tone_phrase} that I feel {feeling} when {behavior}. I need {need} for our relationship to work well.",
            f"I want to talk about how I feel {feeling} when {behavior}. Moving forward, I need {need}.",
            f"I've noticed I feel {feeling} when {behavior} happens. Could we work together so I can have {need}?",
            f"I need to share that {behavior} makes me feel {feeling}. I'm hoping we can find a way for me to have {need}."
        ]
        boundary = random.choice(templates)

        # Add relationship-specific context
        context = f"\n\n**Tailored for {relationship.replace('_', ' ')}:**\n"
        context += f"• Consider saying this {setting_phrase}\n"
        context += f"• Remember: {rel_info['example']}\n"
        context += f"• Approach: {rel_info['approach']}"
        return boundary + context
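
    # Illustrative sketch (not executed): a typical call from the UI handler.
    # The opening phrase varies because a template is chosen at random.
    #
    #   assistant.generate_custom_boundary(
    #       behavior="interrupting me during meetings",
    #       relationship="coworker",
    #       feeling="frustrated",
    #       need="to finish my points",
    #       setting="work_environment",
    #       tone="direct",
    #   )
    #   # -> e.g. "I want to directly address that I feel frustrated when
    #   #    interrupting me during meetings. ..." plus the tailored context block.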

# Initialize the simplified assistant
assistant = SimplifiedBoundaryAssistant()

# Gradio interface functions
def analyze_and_improve(raw_input):
    return assistant.process_boundary_request(raw_input)

def check_boundary_smart(boundary_text):
    feedback, score, issues = assistant.validate_boundary_smart(boundary_text)
    return feedback

def build_custom_boundary(behavior, relationship, feeling, need, setting, tone):
    if not all([behavior, relationship, feeling, need]):
        return "Please fill in all required fields to generate your custom boundary."
    custom_boundary = assistant.generate_custom_boundary(behavior, relationship, feeling, need, setting, tone)
    return custom_boundary

def show_custom_builder():
    return gr.update(visible=True)

def hide_custom_builder():
    return gr.update(visible=False)
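
# Note: gr.update(visible=...) returns an update for an existing component's
# properties rather than a new component; wiring these two functions to the
# custom_builder column below toggles its visibility in place.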

# Create the Gradio interface
with gr.Blocks(
    theme=gr.themes.Soft(primary_hue="blue", secondary_hue="purple"),
    title="Smart Boundary Writing Assistant",
    css="""
    .gradio-container {
        max-width: 1200px !important;
    }
    """
) as demo:
    gr.Markdown(
        """
        # 🛡️ Smart Boundary Writing Assistant

        **Powered by 4 essential AI models:** Fallacy Detection, Toxicity Detection, Your Custom Boundary Predictor, and Emotion Recognition.

        Transform your raw thoughts into healthy, effective boundaries with AI-powered insights.
        """
    )

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("## 📝 Step 1: Enter Your Raw Thoughts")
            raw_input = gr.Textbox(
                label="What boundary do you need to set?",
                placeholder="e.g., 'You always interrupt me and never listen to what I'm saying'",
                lines=4,
                info="Don't worry about being perfect - our AI will analyze and improve it!"
            )
            analyze_btn = gr.Button("🔍 Analyze with AI Models", variant="primary", size="lg")
        with gr.Column(scale=1):
            gr.Markdown("## ✅ Step 2: AI-Generated Boundary")
            analysis_output = gr.Textbox(
                label="AI Analysis",
                lines=6,
                interactive=False
            )
            improved_output = gr.Textbox(
                label="Your AI-Improved Boundary",
                lines=3,
                interactive=False
            )
            explanation_output = gr.Textbox(
                label="Why This Works Better",
                lines=4,
                interactive=False
            )

    # "Not quite right?" button
    with gr.Row():
        customize_btn = gr.Button("🎯 Not quite right? Create a custom boundary", variant="secondary", size="sm")

    gr.Markdown("---")

    # Custom Boundary Builder section (initially hidden)
    with gr.Column(visible=False) as custom_builder:
        gr.Markdown("## 🎯 Custom Boundary Builder")
        gr.Markdown("*Let's create a personalized boundary that feels right for your specific situation.*")
        with gr.Row():
            with gr.Column():
                behavior_input = gr.Textbox(
                    label="What specific behavior do you want them to stop/change?",
                    placeholder="e.g., 'interrupting me during meetings', 'showing up late', 'criticizing my appearance'",
                    lines=2
                )
                relationship_input = gr.Dropdown(
                    label="What's your relationship to this person?",
                    choices=[
                        ("Romantic partner", "romantic_partner"),
                        ("Friend", "friend"),
                        ("Family member", "family_member"),
                        ("Coworker", "coworker"),
                        ("Boss/Manager", "boss"),
                        ("Child", "child")
                    ],
                    value="friend"
                )
                feeling_input = gr.Dropdown(
                    label="How does this behavior make you feel?",
                    choices=[
                        "frustrated", "hurt", "disrespected", "unheard",
                        "uncomfortable", "anxious", "angry", "disappointed",
                        "undervalued", "stressed"
                    ],
                    value="frustrated"
                )
            with gr.Column():
                need_input = gr.Textbox(
                    label="What do you need instead?",
                    placeholder="e.g., 'to be heard completely', 'punctuality and respect for my time', 'supportive communication'",
                    lines=2
                )
                setting_input = gr.Dropdown(
                    label="Where/how will you communicate this?",
                    choices=[
                        ("Private conversation", "private_conversation"),
                        ("Text message", "text_message"),
                        ("Email", "email"),
                        ("Work environment", "work_environment"),
                        ("Family gathering", "family_gathering"),
                        ("Public space", "public_space")
                    ],
                    value="private_conversation"
                )
                tone_input = gr.Dropdown(
                    label="What tone feels right for you?",
                    choices=[
                        ("Gentle", "gentle"),
                        ("Firm", "firm"),
                        ("Direct", "direct"),
                        ("Diplomatic", "diplomatic")
                    ],
                    value="firm"
                )
        build_btn = gr.Button("🔧 Build My Custom Boundary", variant="primary", size="lg")
        custom_output = gr.Textbox(
            label="Your Custom Boundary",
            lines=8,
            interactive=False
        )
        with gr.Row():
            back_btn = gr.Button("⬅ Back to main tool", variant="secondary", size="sm")

    gr.Markdown("---")

    # Model status
    gr.Markdown(
        """
        ## 🤖 AI Models Used

        This assistant uses **4 essential AI models** for comprehensive analysis:

        - **🔍 Fallacy Detector** - Identifies logical fallacies and reasoning errors
        - **🛡️ Your Boundary Predictor** - Assesses boundary communication health
        - **⚠️ Toxicity Detector** - Identifies harmful/toxic language patterns
        - **💙 Emotion Recognition** - Recognizes underlying feelings and needs

        **Result:** Focused analysis that identifies problems AND understands your emotional needs!
        """
    )

    # Connect the functions
    analyze_btn.click(
        fn=analyze_and_improve,
        inputs=[raw_input],
        outputs=[analysis_output, improved_output, explanation_output]
    )

    # Custom builder show/hide functionality
    customize_btn.click(
        fn=show_custom_builder,
        outputs=[custom_builder]
    )
    back_btn.click(
        fn=hide_custom_builder,
        outputs=[custom_builder]
    )
    build_btn.click(
        fn=build_custom_boundary,
        inputs=[behavior_input, relationship_input, feeling_input, need_input, setting_input, tone_input],
        outputs=[custom_output]
    )

# Launch the app
if __name__ == "__main__":
    demo.launch(
        share=True,
        server_name="0.0.0.0",
        server_port=7860
    )
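
# Note: share=True only matters when running this file locally, where Gradio
# requests a temporary public link; on Hugging Face Spaces the flag is ignored
# and the Space itself serves the app on the configured port.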