rudra0410hf committed on
Commit fedd0b6 · verified · 1 Parent(s): 327a025

Delete test.py

Files changed (1):
test.py +0 -88
test.py DELETED
@@ -1,88 +0,0 @@
-import streamlit as st
-from langchain_ollama import ChatOllama
-from langchain_core.output_parsers import StrOutputParser
-from langchain_core.prompts import (
-    SystemMessagePromptTemplate,
-    HumanMessagePromptTemplate,
-    AIMessagePromptTemplate,
-    ChatPromptTemplate
-)
-
-st.title("🧠 BrainAI")
-st.caption("🚀 Your own AI Neurologist with SuperPowers!!")
-
-# Initialize the chat engine
-llm_engine = ChatOllama(
-    model="deepseek-r1:1.5b",
-    base_url="http://localhost:11434",
-    temperature=0.3
-)
-
-# System prompt
-system_prompt = SystemMessagePromptTemplate.from_template("""
-You are BrainAI, an AI-powered neurologist assistant designed to provide non-emergency guidance, education,
-and support for neurological health. Your expertise includes brain anatomy, neurological disorders (e.g.,
-epilepsy, Alzheimer’s, brain tumors, migraines), symptoms, diagnostics, and general brain health tips.
-Always prioritize ethical guidelines, clarify your limitations, and emphasize consulting a licensed professional
-for personal care.
-""")
-
-# Session management
-if "message_log" not in st.session_state:
-    st.session_state.message_log = [{"role": "assistant", "content": "Hello! How can I assist you with brain health today?"}]
-
-# Chat container
-chat_container = st.container()
-
-# Display messages with thinking process handling
-with chat_container:
-    for message in st.session_state.message_log:
-        with st.chat_message(message["role"]):
-            # Check for thinking tags in the content
-            if "<think>" in message["content"]:
-                # Split response into thinking and actual response
-                parts = message["content"].split("</think>")
-                think_content = parts[0].replace("<think>", "").strip()
-                actual_response = parts[-1].strip()
-
-                # Add expander for thinking process at the top
-                with st.expander("🔍 View AI's Thinking Process"):
-                    st.markdown(f"*Internal Analysis:*\n{think_content}")
-
-                # Display actual response after expander
-                st.markdown(actual_response)
-            else:
-                st.markdown(message["content"])
-
-# Chat input
-user_query = st.chat_input(" Message ...")
-
-def generate_ai_response(prompt_chain):
-    processing_pipeline = prompt_chain | llm_engine | StrOutputParser()
-    return processing_pipeline.invoke({})
-
-def build_prompt_chain():
-    prompt_sequence = [system_prompt]
-    for msg in st.session_state.message_log:
-        if msg["role"] == "user":
-            prompt_sequence.append(HumanMessagePromptTemplate.from_template(msg["content"]))
-        elif msg["role"] == "assistant":
-            # Use original content with thinking tags if present
-            prompt_sequence.append(AIMessagePromptTemplate.from_template(msg["content"]))
-    return ChatPromptTemplate.from_messages(prompt_sequence)
-
-if user_query:
-    st.session_state.message_log.append({"role": "user", "content": user_query})
-
-    with st.spinner("🧠 Thinking ..."):
-        prompt_chain = build_prompt_chain()
-        raw_response = generate_ai_response(prompt_chain)
-
-    # Store raw response with thinking tags
-    st.session_state.message_log.append({
-        "role": "assistant",
-        "content": raw_response
-    })
-
-    st.rerun()
-
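Note for anyone restoring this file: build_prompt_chain re-parses stored chat turns with from_template, so a user message containing { or } is read as a template placeholder and can raise a KeyError when the chain is invoked with an empty dict. A minimal sketch of a safer variant follows, reusing the script's own system_prompt and st.session_state names; build_prompt_chain_safe is a hypothetical helper, not part of the original file.

from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate

def build_prompt_chain_safe():
    # Wrap stored turns in message objects instead of re-parsing them
    # as templates; message objects are passed through verbatim, so
    # braces in user text cannot be misread as template variables.
    prompt_sequence = [system_prompt]
    for msg in st.session_state.message_log:
        if msg["role"] == "user":
            prompt_sequence.append(HumanMessage(content=msg["content"]))
        elif msg["role"] == "assistant":
            prompt_sequence.append(AIMessage(content=msg["content"]))
    return ChatPromptTemplate.from_messages(prompt_sequence)

With this variant, generate_ai_response can still invoke the pipeline with an empty dict, since no template variables remain in the history. Running the script as written also assumes a local Ollama server at http://localhost:11434 with the model pulled (ollama pull deepseek-r1:1.5b) and a launch via streamlit run test.py.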