import streamlit as st
from langchain_ollama import ChatOllama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import (
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
    AIMessagePromptTemplate,
    ChatPromptTemplate
)

st.title("🧠 BrainAI")
st.caption("🚀 Your own AI Neurologist with SuperPowers!!")

# Initialize the chat engine (points at a local Ollama server)
llm_engine = ChatOllama(
    model="deepseek-r1:1.5b",
    base_url="http://localhost:11434",
    temperature=0.3
)

# System prompt
system_prompt = SystemMessagePromptTemplate.from_template("""
You are BrainAI, an AI-powered neurologist assistant designed to provide non-emergency guidance, education,
and support for neurological health. Your expertise includes brain anatomy, neurological disorders (e.g.,
epilepsy, Alzheimer’s, brain tumors, migraines), symptoms, diagnostics, and general brain health tips.
Always prioritize ethical guidelines, clarify your limitations, and emphasize consulting a licensed professional
for personal care.
""")

# Session management
if "message_log" not in st.session_state:
    st.session_state.message_log = [
        {"role": "assistant", "content": "Hello! How can I assist you with brain health today?"}
    ]

# Chat container
chat_container = st.container()

# Display messages, handling the model's thinking process separately
with chat_container:
    for message in st.session_state.message_log:
        with st.chat_message(message["role"]):
            # Check for thinking tags in the content
            if "<think>" in message["content"]:
                # Split the response into the thinking process and the actual answer
                parts = message["content"].split("</think>")
                think_content = parts[0].replace("<think>", "").strip()
                actual_response = parts[-1].strip()
                # Add an expander for the thinking process at the top
                with st.expander("🔍 View AI's Thinking Process"):
                    st.markdown(f"*Internal Analysis:*\n{think_content}")
                # Display the actual response after the expander
                st.markdown(actual_response)
            else:
                st.markdown(message["content"])

# Chat input
user_query = st.chat_input("Message ...")

def generate_ai_response(prompt_chain):
    # Pipe the prompt chain into the LLM and parse the output as a plain string
    processing_pipeline = prompt_chain | llm_engine | StrOutputParser()
    return processing_pipeline.invoke({})

def build_prompt_chain():
    # Rebuild the whole conversation as a prompt chain, starting from the system prompt
    prompt_sequence = [system_prompt]
    for msg in st.session_state.message_log:
        # Note: from_template treats curly braces in the content as template variables
        if msg["role"] == "user":
            prompt_sequence.append(HumanMessagePromptTemplate.from_template(msg["content"]))
        elif msg["role"] == "assistant":
            # Use the original content, thinking tags included, if present
            prompt_sequence.append(AIMessagePromptTemplate.from_template(msg["content"]))
    return ChatPromptTemplate.from_messages(prompt_sequence)

if user_query:
    st.session_state.message_log.append({"role": "user", "content": user_query})

    with st.spinner("🧠 Thinking ..."):
        prompt_chain = build_prompt_chain()
        raw_response = generate_ai_response(prompt_chain)

    # Store raw response with thinking tags
    st.session_state.message_log.append({
        "role": "assistant",
        "content": raw_response
    })
    st.rerun()
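
Before launching the app, it helps to confirm that the local Ollama server is running and that the deepseek-r1:1.5b model has been pulled (for example with ollama pull deepseek-r1:1.5b). The short sketch below is one way to verify connectivity from Python; it reuses the same model and base_url as the app, and the prompt string is just an illustrative placeholder.

from langchain_ollama import ChatOllama

# Minimal connectivity check (a sketch, assuming Ollama serves deepseek-r1:1.5b on localhost:11434)
engine = ChatOllama(model="deepseek-r1:1.5b", base_url="http://localhost:11434", temperature=0.3)
reply = engine.invoke("Name one lobe of the human brain.")
# deepseek-r1 replies typically include a <think>...</think> block before the answer,
# which is exactly what the expander in the app surfaces to the user.
print(reply.content)

Once that check works, saving the script above as app.py and running streamlit run app.py should open the chat UI in the browser.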