"""llm-app / app.py — Chainlit chat app with debug logging to troubleshoot a connection error."""
import os
import sys
import traceback
import chainlit as cl
from openai import OpenAI
# Minimal stdout logger so messages show up immediately in container logs.
def log_message(message):
    # flush=True forces the write out right away (equivalent to an
    # explicit sys.stdout.flush() after printing).
    print(f"DEBUG: {message}", flush=True)
# Log startup information
log_message("Application starting up")
log_message(f"Python version: {sys.version}")
# Check for API key (None when the env var is unset)
api_key = os.environ.get("OPENAI_API_KEY")
if api_key:
    log_message("API key found (first few chars): " + api_key[:4] + "...")
else:
    log_message("WARNING: No API key found in environment")
# Initialize the OpenAI client.
# Pre-bind client to None so that, if construction raises, later code sees a
# clear "client is None / AttributeError" path instead of a NameError on an
# unbound global.
client = None
try:
    client = OpenAI(api_key=api_key)
    log_message("OpenAI client initialized successfully")
except Exception as e:
    log_message(f"Error initializing OpenAI client: {str(e)}")
    traceback.print_exc()
@cl.on_chat_start
async def start():
    """Chainlit session hook: log the new session and warn the user in-chat
    when no OPENAI_API_KEY was found at startup."""
    log_message("New chat session started")
    # Guard clause: nothing more to do when a key is present.
    if api_key:
        return
    await cl.Message(content="⚠️ API key not found. Please add OPENAI_API_KEY secret.").send()
@cl.on_message
async def main(message: cl.Message):
    """Chainlit message handler.

    Replies with a canned response for the literal message "test"
    (case-insensitive); otherwise forwards the text to the OpenAI chat
    API and relays the model's answer. Any failure is logged and
    reported back to the user in-chat.
    """
    log_message(f"Received message: {message.content[:50]}...")

    # Direct response path — lets us verify the UI works without the API.
    if message.content.lower() == "test":
        log_message("Sending test response")
        await cl.Message(content="Test successful! This is a direct response without using the OpenAI API.").send()
        return

    try:
        log_message("Attempting to call OpenAI API")
        # Single-turn conversation: fixed system prompt + the user's text.
        chat_history = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": message.content},
        ]
        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=chat_history,
            temperature=0.7,
        )
        log_message("OpenAI API call successful")
        # Relay the first (only) choice back to the user.
        await cl.Message(content=completion.choices[0].message.content).send()
    except Exception as e:
        log_message(f"Error during API call: {str(e)}")
        traceback.print_exc()
        # Surface the error in-chat rather than failing silently.
        await cl.Message(content=f"Sorry, I encountered an error: {str(e)}").send()