Fix indentation error in processing flow
- Fix IndentationError at line 862 in app.py
- Ensure proper indentation throughout the processing block
- Prevent syntax errors from breaking the app
🤖 Generated with [Claude Code](https://claude.ai/code)
Co-Authored-By: Claude <[email protected]>

app.py CHANGED
@@ -859,54 +859,55 @@ if st.session_state.get("processing"):
 }
 </style>
 """, unsafe_allow_html=True)
-
-
-
-
-
-
-if not isinstance(response, dict):
-response = {
-"role": "assistant",
-"content": "Error: Invalid response format",
-"gen_code": "",
-"ex_code": "",
-"last_prompt": prompt,
-"error": "Invalid response format",
-"timestamp": datetime.now().strftime("%H:%M")
-}
-
-response.setdefault("role", "assistant")
-response.setdefault("content", "No content generated")
-response.setdefault("gen_code", "")
-response.setdefault("ex_code", "")
-response.setdefault("last_prompt", prompt)
-response.setdefault("error", None)
-response.setdefault("timestamp", datetime.now().strftime("%H:%M"))
+
+prompt = st.session_state.get("current_question")
+model_name = st.session_state.get("current_model")
+
+try:
+response = ask_question(model_name=model_name, question=prompt)
 
-
+if not isinstance(response, dict):
 response = {
 "role": "assistant",
-"content":
+"content": "Error: Invalid response format",
 "gen_code": "",
 "ex_code": "",
 "last_prompt": prompt,
-"error":
+"error": "Invalid response format",
 "timestamp": datetime.now().strftime("%H:%M")
 }
-
-st.session_state.responses.append(response)
-st.session_state["last_prompt"] = prompt
-st.session_state["last_model_name"] = model_name
-st.session_state.processing = False
 
-
-
-
-
-
+response.setdefault("role", "assistant")
+response.setdefault("content", "No content generated")
+response.setdefault("gen_code", "")
+response.setdefault("ex_code", "")
+response.setdefault("last_prompt", prompt)
+response.setdefault("error", None)
+response.setdefault("timestamp", datetime.now().strftime("%H:%M"))
 
-
+except Exception as e:
+response = {
+"role": "assistant",
+"content": f"Sorry, I encountered an error: {str(e)}",
+"gen_code": "",
+"ex_code": "",
+"last_prompt": prompt,
+"error": str(e),
+"timestamp": datetime.now().strftime("%H:%M")
+}
+
+st.session_state.responses.append(response)
+st.session_state["last_prompt"] = prompt
+st.session_state["last_model_name"] = model_name
+st.session_state.processing = False
+
+# Clear processing state
+if "current_model" in st.session_state:
+del st.session_state.current_model
+if "current_question" in st.session_state:
+del st.session_state.current_question
+
+st.rerun()
 
 # Close chat container
 st.markdown("</div>", unsafe_allow_html=True)
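
For readability, here is the processing block as it stands after this commit, rendered as plain Python and re-indented conventionally under the `if st.session_state.get("processing"):` guard named in the hunk header (the exact indentation levels in app.py may differ). This is a sketch of the surrounding flow, not a standalone script: `ask_question` and the session-state keys (`current_question`, `current_model`, `responses`) are assumed to be defined elsewhere in the app, as the diff implies.

```python
# Sketch of the fixed processing flow (new lines 862-910). Assumes the guard
# sits at the top level of the Streamlit script and that app.py already
# imports streamlit/datetime and defines ask_question().
import streamlit as st
from datetime import datetime

if st.session_state.get("processing"):
    prompt = st.session_state.get("current_question")
    model_name = st.session_state.get("current_model")

    try:
        response = ask_question(model_name=model_name, question=prompt)

        # Guard against non-dict return values from ask_question().
        if not isinstance(response, dict):
            response = {
                "role": "assistant",
                "content": "Error: Invalid response format",
                "gen_code": "",
                "ex_code": "",
                "last_prompt": prompt,
                "error": "Invalid response format",
                "timestamp": datetime.now().strftime("%H:%M")
            }

        # Backfill any missing keys so the chat renderer never hits a KeyError.
        response.setdefault("role", "assistant")
        response.setdefault("content", "No content generated")
        response.setdefault("gen_code", "")
        response.setdefault("ex_code", "")
        response.setdefault("last_prompt", prompt)
        response.setdefault("error", None)
        response.setdefault("timestamp", datetime.now().strftime("%H:%M"))

    except Exception as e:
        # Any failure becomes an assistant message instead of crashing the app.
        response = {
            "role": "assistant",
            "content": f"Sorry, I encountered an error: {str(e)}",
            "gen_code": "",
            "ex_code": "",
            "last_prompt": prompt,
            "error": str(e),
            "timestamp": datetime.now().strftime("%H:%M")
        }

    # Record the result and leave processing mode.
    st.session_state.responses.append(response)
    st.session_state["last_prompt"] = prompt
    st.session_state["last_model_name"] = model_name
    st.session_state.processing = False

    # Clear processing state
    if "current_model" in st.session_state:
        del st.session_state.current_model
    if "current_question" in st.session_state:
        del st.session_state.current_question

    st.rerun()
```

Upstream of this hunk, the chat input handler presumably stores the question and model and sets `st.session_state.processing = True`; the `st.rerun()` at the end then redraws the page once processing has cleared.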