Spaces:
Runtime error
Runtime error
import os
import re
from datetime import datetime

import gradio as gr
import torch
import transformers
from huggingface_hub import login
from transformers import pipeline
# Read the Hugging Face access token from the environment.
# The error message tells users to set HF_TOKEN, but the original code only
# read the lower-case "hf_token" (os.environ is case-sensitive on Linux).
# Accept both spellings so either secret name works.
hf_token = os.getenv("HF_TOKEN") or os.getenv("hf_token")
if hf_token is None:
    raise ValueError("Please set HF_TOKEN environment variable with your Hugging Face token")
# Authenticate against the Hub (required for the gated medical model).
login(token=hf_token)
# Model setup
def load_models():
    """Instantiate the two pipelines used by the app.

    Returns:
        tuple: (medical text-generation pipeline, NLLB translation pipeline).
    """
    # Medical chat model: bfloat16 weights and automatic device placement
    # so the 1B model fits on whatever hardware the Space provides.
    generation_config = {
        "model": "ContactDoctor/Bio-Medical-Llama-3-2-1B-CoT-012025",
        "torch_dtype": torch.bfloat16,
        "device_map": "auto",
    }
    chat_model = transformers.pipeline("text-generation", **generation_config)
    # Multilingual translation model used for FR <-> EN round-tripping.
    nllb = pipeline("translation", model="facebook/nllb-200-distilled-600M")
    return chat_model, nllb


medical_pipe, translator = load_models()
# System prompt steering the assistant's persona, answer format and safety
# behaviour (concise definitions, follow-up offers, referral to professionals).
system_message = {
    "role": "system",
    "content": (
        "You are a helpful, respectful, and knowledgeable medical assistant developed by the AI team at AfriAI Solutions, Senegal. "
        "Provide brief, clear definitions when answering medical questions. After giving a concise response, ask the user if they would like more information about specific aspects such as symptoms, causes, or treatments. "
        "If the user declines further information, respond briefly and ask if there's anything else they need help with."
        "Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. "
        "Ensure your responses are socially unbiased and positive. If a question does not make sense, explain why instead of providing incorrect information. "
        "If you don't know the answer to a question, avoid sharing false information. "
        "Encourage users to consult healthcare professionals for accurate diagnoses and personalized advice."
    ),
}

# Maximum number of conversational turns kept in the model context.
max_history = 10
# French -> English translation
def translate_fr_to_en(text):
    """Translate *text* from French to English using the NLLB pipeline."""
    result = translator(text, src_lang="fra_Latn", tgt_lang="eng_Latn")
    return result[0]["translation_text"]
# English -> French translation
def translate_en_to_fr(text):
    """Translate *text* from English to French using the NLLB pipeline."""
    result = translator(text, src_lang="eng_Latn", tgt_lang="fra_Latn")
    return result[0]["translation_text"]
# Canned replies for greetings, thanks and goodbyes (no model call needed).
def handle_special_phrases(user_input):
    """Return a canned French reply for greetings/thanks/goodbyes, else None.

    Single-word triggers are matched as whole words rather than substrings:
    the original substring test made "hi" match inside words such as
    "chirurgie", hijacking medical questions with a greeting. Multi-word
    phrases ("au revoir", "je vous remercie", ...) are still matched as
    substrings of the lower-cased input.

    Args:
        user_input: raw message typed by the user.

    Returns:
        str | None: the canned reply, or None when no trigger matched.
    """
    text = user_input.lower()
    # Whole words of the message; \w covers accented letters in Python's re.
    words = set(re.findall(r"[\w']+", text))

    def _matches(phrases):
        # Substring match for multi-word phrases, whole-word match otherwise.
        return any((p in text) if " " in p else (p in words) for p in phrases)

    # Greetings: reply according to the time of day.
    greetings = ["bonjour", "salut", "hello", "hi", "coucou", "bonsoir"]
    if _matches(greetings):
        hour = datetime.now().hour
        if 5 <= hour < 12:
            return "Bonjour ! Je suis l'assistant médical AfriAI. Comment puis-je vous aider aujourd'hui ?"
        elif 12 <= hour < 18:
            return "Bon après-midi ! Je suis l'assistant médical AfriAI. En quoi puis-je vous aider ?"
        else:
            return "Bonsoir ! Je suis l'assistant médical AfriAI. Comment puis-je vous aider ce soir ?"

    # Thanks
    thanks = ["merci", "thank you", "thanks", "je vous remercie"]
    if _matches(thanks):
        return "Je vous en prie ! N'hésitez pas si vous avez d'autres questions médicales."

    # Goodbyes
    goodbyes = ["au revoir", "bye", "goodbye", "à plus", "à bientôt"]
    if _matches(goodbyes):
        return "Au revoir ! Prenez soin de vous. N'hésitez pas à revenir si vous avez d'autres questions."

    return None
# Generate a medical answer from the chat history.
def generate_medical_response(messages):
    """Generate an English assistant reply for the given chat history.

    Args:
        messages: list of {"role": ..., "content": ...} chat dicts
            (system prompt first, then alternating user/assistant turns).

    Returns:
        str: the newly generated assistant text, with the prompt stripped.
    """
    # Build the prompt. add_generation_prompt=True appends the assistant
    # header tokens so the model continues as the assistant; with False
    # (the original setting) the model may keep writing the user's turn.
    prompt = medical_pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    # Stop at end-of-sequence or Llama-3's end-of-turn marker.
    stop_tokens = [
        medical_pipe.tokenizer.eos_token_id,
        medical_pipe.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
    ]
    # Mildly creative sampling; temperature kept low for factual answers.
    outputs = medical_pipe(
        prompt,
        max_new_tokens=512,
        eos_token_id=stop_tokens,
        do_sample=True,
        temperature=0.4,
        top_k=150,
        top_p=0.75,
    )
    # The pipeline echoes the prompt; return only the generated suffix.
    return outputs[0]["generated_text"][len(prompt):].strip()
# Main chatbot callback wired to the Gradio textbox / button.
def chat_with_medical_bot(user_input, chat_history):
    """Answer *user_input* in French and append the turn to *chat_history*.

    Args:
        user_input: raw French message from the textbox.
        chat_history: Gradio chatbot history, a list of (user, bot) tuples.

    Returns:
        tuple: ("", updated chat_history) — the empty string clears the box.

    NOTE(review): the model-side history lives on a function attribute, so
    it is shared by every visitor of the Space and is not reset by the
    "clear" button — consider gr.State for per-session history.
    """
    # Canned replies (greetings, thanks, goodbyes) bypass the model.
    special_response = handle_special_phrases(user_input)
    if special_response:
        chat_history.append((user_input, special_response))
        return "", chat_history

    # Translate the user message to English for the medical model;
    # best-effort: fall back to the original text if translation fails.
    try:
        user_input_en = translate_fr_to_en(user_input)
    except Exception:
        user_input_en = user_input

    # Lazily create the model-side message history.
    if not hasattr(chat_with_medical_bot, "messages"):
        chat_with_medical_bot.messages = [system_message]

    chat_with_medical_bot.messages.append({"role": "user", "content": user_input_en})

    # Cap the context: keep the system prompt plus the most recent turns.
    if len(chat_with_medical_bot.messages) > max_history * 2:
        chat_with_medical_bot.messages = [system_message] + chat_with_medical_bot.messages[-max_history * 2:]

    try:
        response_en = generate_medical_response(chat_with_medical_bot.messages)
        # Record the assistant turn only on success. The original appended
        # response_en unconditionally, so a generation failure raised
        # NameError instead of showing the intended error message.
        chat_with_medical_bot.messages.append({"role": "assistant", "content": response_en})
        # Translate the answer back to French; fall back to English on error.
        try:
            response_fr = translate_en_to_fr(response_en)
        except Exception:
            response_fr = response_en
    except Exception as e:
        response_fr = f"Désolé, une erreur s'est produite : {str(e)}"

    chat_history.append((user_input, response_fr))
    return "", chat_history
# Custom CSS implementing the AfriAI Solutions colour palette, the rounded
# chat bubbles, the branded header and the disclaimer footer.
custom_css = """
:root {
    --primary-color: #2C5F2D;
    --secondary-color: #97BC62;
    --accent-color: #E0E0E0;
    --text-color: #333333;
}
.gradio-container {
    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}
#chatbot {
    background-color: white;
    border-radius: 10px;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
    height: 500px;
}
.user, .bot {
    padding: 12px 16px;
    border-radius: 18px;
    margin: 8px 0;
    line-height: 1.4;
    max-width: 80%;
}
.user {
    background-color: var(--secondary-color);
    color: white;
    margin-left: auto;
    border-bottom-right-radius: 4px;
}
.bot {
    background-color: var(--accent-color);
    color: var(--text-color);
    margin-right: auto;
    border-bottom-left-radius: 4px;
}
#header {
    background-color: var(--primary-color);
    color: white;
    padding: 20px;
    border-radius: 10px 10px 0 0;
    text-align: center;
}
#header img {
    max-width: 150px;
    margin-bottom: 10px;
}
#header h1 {
    margin: 0;
    font-size: 24px;
}
#header p {
    margin: 5px 0 0;
    font-size: 16px;
    opacity: 0.9;
}
footer {
    text-align: center;
    padding: 10px;
    font-size: 12px;
    color: var(--text-color);
    opacity: 0.7;
}
#component-0 {
    border-radius: 0 0 10px 10px !important;
}
#component-1 {
    border-radius: 0 0 10px 10px !important;
}
"""
# Gradio user interface
with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as demo:
    # Branded header: logo, title and tagline.
    with gr.Column(elem_id="header"):
        gr.Markdown("""
        <div style="text-align: center;">
            <img src="https://via.placeholder.com/150x50/2C5F2D/FFFFFF?text=AfriAI+Solutions" alt="AfriAI Solutions Logo">
            <h1>Assistant Médical AfriAI</h1>
            <p>Transformez vos questions médicales en solutions claires et précises</p>
        </div>
        """)

    # Conversation area with user/bot avatars.
    chatbot = gr.Chatbot(
        label="Conversation",
        elem_id="chatbot",
        bubble_full_width=False,
        avatar_images=(
            "https://via.placeholder.com/40/97BC62/FFFFFF?text=VOUS",
            "https://via.placeholder.com/40/2C5F2D/FFFFFF?text=AI"
        )
    )

    # Input box for the user's (French) question.
    msg = gr.Textbox(
        label="Posez votre question médicale en français",
        placeholder="Bonjour, quels sont les symptômes du diabète ?",
        container=False
    )

    # Action buttons: clear the conversation / send the message.
    with gr.Row():
        clear = gr.ClearButton([msg, chatbot], value="Effacer la conversation")
        submit = gr.Button("Envoyer", variant="primary")

    # Both Enter in the textbox and the Send button trigger the bot.
    msg.submit(chat_with_medical_bot, [msg, chatbot], [msg, chatbot])
    submit.click(chat_with_medical_bot, [msg, chatbot], [msg, chatbot])

    # Clickable example questions.
    gr.Examples(
        examples=[
            "Bonjour, quels sont les symptômes du diabète ?",
            "Comment traiter une migraine ?",
            "Expliquez-moi ce qu'est l'hypertension artérielle",
            "Quelles sont les causes de la toux persistante ?"
        ],
        inputs=msg,
        label="Exemples de questions"
    )

    # Medical disclaimer footer.
    gr.Markdown("""
    <div style="text-align: center; font-size: 12px; color: #666; margin-top: 20px;">
        <p>Cet assistant médical ne remplace pas un avis médical professionnel. Consultez toujours un médecin pour des problèmes de santé sérieux.</p>
        <p>© 2024 AfriAI Solutions - Sénégal</p>
    </div>
    """, elem_id="footer")
# Script entry point: launch the Gradio app when run directly.
if __name__ == "__main__":
    demo.launch()