rudra0410hf committed on
Commit
327a025
·
verified ·
1 Parent(s): 97abd2b

Delete brain_ai.py

Browse files
Files changed (1) hide show
  1. brain_ai.py +0 -113
brain_ai.py DELETED
@@ -1,113 +0,0 @@
1
- import streamlit as st
2
- import time
3
- from langchain_ollama import ChatOllama
4
- from langchain_core.output_parsers import StrOutputParser
5
- from langchain_core.prompts import (
6
- SystemMessagePromptTemplate,
7
- HumanMessagePromptTemplate,
8
- AIMessagePromptTemplate,
9
- ChatPromptTemplate
10
- )
11
-
12
- st.title("🧠 BrainAI")
13
- st.caption("🚀 Your own AI Neurologist with SuperPowers!!")
14
-
15
- # Common user query suggestions
16
- suggestions = [
17
- "What are the early symptoms of a brain tumor?",
18
- "How is a brain tumor diagnosed?",
19
- "What are the treatment options for brain tumors?",
20
- "Can a brain tumor be non-cancerous?",
21
- "What lifestyle changes can help manage brain tumors?"
22
- ]
23
-
24
- # Display suggestions in rows and keep them fixed at the top
25
- # st.write("### 💡 Common Questions")
26
- suggestion_container = st.container()
27
- with suggestion_container:
28
- for query in suggestions:
29
- if st.button(query, key=query):
30
- st.session_state["user_input"] = query
31
- st.rerun()
32
-
33
- # Initiate chat engine
34
- llm_engine = ChatOllama(
35
- model="deepseek-r1:1.5b",
36
- base_url="http://localhost:11434",
37
- temperature=0.3
38
- )
39
-
40
- # System prompt
41
- system_prompt = SystemMessagePromptTemplate.from_template("""
42
- You are BrainAI, an AI-powered neurologist assistant designed to provide non-emergency guidance, education,
43
- and support for neurological health. Your expertise includes brain anatomy, neurological disorders (e.g.,
44
- epilepsy, Alzheimer’s, brain tumors, migraines), symptoms, diagnostics, and general brain health tips.
45
- Always prioritize ethical guidelines, clarify your limitations, and emphasize consulting a licensed professional
46
- for personal care. Answer only in English language.
47
- """)
48
-
49
- # Session management
50
- if "message_log" not in st.session_state:
51
- st.session_state.message_log = [{"role": "assistant", "content": "Hello! How can I assist you with brain health today?"}]
52
-
53
- # Chat container
54
- chat_container = st.container()
55
-
56
- # Display messages with animation
57
- def display_text_with_animation(text):
58
- message_placeholder = st.empty()
59
- displayed_text = ""
60
- for char in text:
61
- displayed_text += char
62
- message_placeholder.markdown(displayed_text)
63
- time.sleep(0.01)
64
-
65
- with chat_container:
66
- for message in st.session_state.message_log:
67
- with st.chat_message(message["role"]):
68
- if "<think>" in message["content"]:
69
- parts = message["content"].split("</think>")
70
- think_content = parts[0].replace("<think>", "").strip()
71
- actual_response = parts[-1].strip()
72
-
73
- with st.expander("🔍 View AI's Thinking Process"):
74
- st.markdown(f"*Internal Analysis:*\n{think_content}")
75
-
76
- display_text_with_animation(actual_response)
77
- else:
78
- display_text_with_animation(message["content"])
79
-
80
- # Chat input
81
- user_query = st.chat_input(" Ask anything about brain health ...")
82
-
83
- # If a suggestion was selected, use it as the input
84
- if "user_input" in st.session_state:
85
- user_query = st.session_state["user_input"]
86
- del st.session_state["user_input"]
87
-
88
- def generate_ai_response(prompt_chain):
89
- processing_pipeline = prompt_chain | llm_engine | StrOutputParser()
90
- return processing_pipeline.invoke({})
91
-
92
def build_prompt_chain():
    """Rebuild the full prompt (system message + chat history) for the LLM.

    Returns:
        ChatPromptTemplate: the system prompt followed by every user and
        assistant message in ``st.session_state.message_log``.

    Note:
        ``from_template`` parses ``{...}`` as template variables, so raw
        chat text containing literal braces (code snippets, JSON, etc.)
        would raise a missing-variable error or act as prompt injection.
        History text is therefore brace-escaped before templating.
    """
    prompt_sequence = [system_prompt]
    for msg in st.session_state.message_log:
        # Escape placeholder syntax so the message is treated literally.
        safe_text = msg["content"].replace("{", "{{").replace("}", "}}")
        if msg["role"] == "user":
            prompt_sequence.append(HumanMessagePromptTemplate.from_template(safe_text))
        elif msg["role"] == "assistant":
            prompt_sequence.append(AIMessagePromptTemplate.from_template(safe_text))
    return ChatPromptTemplate.from_messages(prompt_sequence)
100
-
101
- if user_query:
102
- st.session_state.message_log.append({"role": "user", "content": user_query})
103
-
104
- with st.spinner("🧠 Thinking ..."):
105
- prompt_chain = build_prompt_chain()
106
- raw_response = generate_ai_response(prompt_chain)
107
-
108
- st.session_state.message_log.append({
109
- "role": "assistant",
110
- "content": raw_response
111
- })
112
-
113
- st.rerun()