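"""BrainAI: a Streamlit chat UI for a local DeepSeek-R1 model served by Ollama.

The app keeps the conversation in st.session_state, rebuilds the full prompt
(system message plus chat history) on every turn with LangChain prompt templates,
and renders the model's <think>...</think> reasoning inside a collapsible expander.
"""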
import streamlit as st
from langchain_ollama import ChatOllama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import (
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
    AIMessagePromptTemplate,
    ChatPromptTemplate
)

st.title("🧠 BrainAI")
st.caption("🚀 Your own AI Neurologist with SuperPowers!!")

# Initialize the chat engine (ChatOllama pointed at a local Ollama server)
llm_engine = ChatOllama(
    model="deepseek-r1:1.5b",
    base_url="http://localhost:11434",
    temperature=0.3
)

# System prompt
system_prompt = SystemMessagePromptTemplate.from_template("""
You are BrainAI, an AI-powered neurologist assistant designed to provide non-emergency guidance, education,
and support for neurological health. Your expertise includes brain anatomy, neurological disorders (e.g.,
epilepsy, Alzheimer’s, brain tumors, migraines), symptoms, diagnostics, and general brain health tips.
Always prioritize ethical guidelines, clarify your limitations, and emphasize consulting a licensed professional
for personal care.
""")

# Session management
if "message_log" not in st.session_state:
    st.session_state.message_log = [{"role": "assistant", "content": "Hello! How can I assist you with brain health today?"}]

# Chat container
chat_container = st.container()

# Display messages with thinking process handling
with chat_container:
    for message in st.session_state.message_log:
        with st.chat_message(message["role"]):
            # Check for thinking tags in the content
            if "<think>" in message["content"]:
                # Split response into thinking and actual response
                parts = message["content"].split("</think>")
                think_content = parts[0].replace("<think>", "").strip()
                actual_response = parts[-1].strip()
                
                # Add expander for thinking process at the top
                with st.expander("🔍 View AI's Thinking Process"):
                    st.markdown(f"*Internal Analysis:*\n{think_content}")
                
                # Display actual response after expander
                st.markdown(actual_response)
            else:
                st.markdown(message["content"])

# Chat input
user_query = st.chat_input("Message...")

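# Pipe the assembled prompt through the model and parse the reply to plain text.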
def generate_ai_response(prompt_chain):
    processing_pipeline = prompt_chain | llm_engine | StrOutputParser()
    return processing_pipeline.invoke({})

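# Rebuild the conversation as a prompt template each turn: system prompt first,
# then the stored user/assistant messages in order.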
def build_prompt_chain():
    prompt_sequence = [system_prompt]
    for msg in st.session_state.message_log:
        if msg["role"] == "user":
            prompt_sequence.append(HumanMessagePromptTemplate.from_template(msg["content"]))
        elif msg["role"] == "assistant":
            # Use original content with thinking tags if present
            prompt_sequence.append(AIMessagePromptTemplate.from_template(msg["content"]))
    return ChatPromptTemplate.from_messages(prompt_sequence)

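# Handle a new user message: record it, query the model with the full history,
# store the raw reply (including any <think> block), then rerun to refresh the chat display.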
if user_query:
    st.session_state.message_log.append({"role": "user", "content": user_query})

    with st.spinner("🧠 Thinking ..."):
        prompt_chain = build_prompt_chain()
        raw_response = generate_ai_response(prompt_chain)
        
        # Store raw response with thinking tags
        st.session_state.message_log.append({
            "role": "assistant",
            "content": raw_response
        })

    st.rerun()