fix: improve error handling and logging in OpenAI client and chat message processing

This commit is contained in:
  src/app.py (31 lines changed)
@@ -12,22 +12,29 @@ def display_chat_messages():
|
||||
|
||||
def handle_user_input():
    """Read one message from the chat input, echo it, and stream the assistant reply.

    Appends both the user message and the completed assistant response to
    ``st.session_state.messages`` so the full conversation is resent on the
    next turn. Any failure while calling the API is surfaced in the UI via
    ``st.error`` instead of crashing the app.
    """
    # Walrus: only proceed when the user actually submitted non-empty input.
    if prompt := st.chat_input("Type your message..."):
        print(f"User input received: {prompt}")  # Debug log
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        # NOTE(review): the pasted diff contained both the old (unguarded) and
        # new (try/except) assistant blocks; only the guarded post-image version
        # is kept here — keeping both would stream the response twice.
        try:
            with st.chat_message("assistant"):
                response_placeholder = st.empty()
                full_response = ""

                client = OpenAIClient()
                print("Calling OpenAI API...")  # Debug log
                # Stream chunks, re-rendering the accumulated text with a
                # trailing "▌" cursor to indicate generation in progress.
                for chunk in client.get_chat_response(st.session_state.messages):
                    if chunk.choices[0].delta.content:
                        full_response += chunk.choices[0].delta.content
                        response_placeholder.markdown(full_response + "▌")

                # Final render without the cursor glyph.
                response_placeholder.markdown(full_response)
                st.session_state.messages.append({"role": "assistant", "content": full_response})
                print("API call completed successfully")  # Debug log
        except Exception as e:
            # Broad catch is deliberate: this is the top-level UI boundary,
            # and the error is both shown to the user and logged.
            st.error(f"Error processing message: {str(e)}")
            print(f"Error details: {str(e)}")  # Debug log
|
||||
|
||||
def main():
|
||||
st.title("Streamlit Chat App")
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import os
|
||||
import configparser
|
||||
from openai import OpenAI
|
||||
|
||||
@@ -7,29 +6,34 @@ class OpenAIClient:
|
||||
self.config = configparser.ConfigParser()
|
||||
self.config.read('config/config.ini')
|
||||
|
||||
# Configure OpenAI client with OpenRouter-specific headers
|
||||
# Validate configuration
|
||||
if not self.config.has_section('openai'):
|
||||
raise Exception("Missing [openai] section in config.ini")
|
||||
if not self.config['openai'].get('api_key'):
|
||||
raise Exception("Missing api_key in config.ini")
|
||||
|
||||
# Configure OpenAI client
|
||||
self.client = OpenAI(
|
||||
api_key=self.config['openai']['api_key'],
|
||||
base_url=self.config['openai']['base_url'],
|
||||
default_headers={
|
||||
"HTTP-Referer": "https://streamlit-chat-app.com", # Required by OpenRouter
|
||||
"X-Title": "Streamlit Chat App" # Optional, helps with analytics
|
||||
"HTTP-Referer": "https://streamlit-chat-app.com",
|
||||
"X-Title": "Streamlit Chat App"
|
||||
}
|
||||
)
|
||||
|
||||
def get_chat_response(self, messages):
    """Request a streaming chat completion for *messages*.

    Args:
        messages: Sequence of dicts each carrying at least ``"role"`` and
            ``"content"`` keys (extra keys are stripped before sending).

    Returns:
        The streaming response iterator from
        ``client.chat.completions.create(..., stream=True)``.

    Raises:
        Exception: Wraps any API failure with the provider error code
            (when available) for easier debugging; original exception is
            chained as the cause.
    """
    try:
        # Strip any extra session-state keys so the payload matches the
        # API's expected message schema exactly.
        formatted_messages = [
            {"role": msg["role"], "content": msg["content"]} for msg in messages
        ]
        print(f"Sending request to {self.config['openai']['base_url']}")  # Debug log
        print(f"Using model: {self.config['openai']['model']}")  # Debug log

        # NOTE(review): the pasted diff carried the `messages=` keyword twice
        # (old `messages` and new `formatted_messages`), which is a
        # SyntaxError; only the post-image `formatted_messages` is kept.
        response = self.client.chat.completions.create(
            model=self.config['openai']['model'],
            messages=formatted_messages,
            stream=True
        )
        return response
    except Exception as e:
        # Surface the provider error code when the exception exposes one.
        error_msg = f"API Error (Code: {getattr(e, 'code', 'N/A')}): {str(e)}"
        print(error_msg)  # Debug log
        # Chain the original exception so the full traceback is preserved.
        raise Exception(error_msg) from e
|
||||
|
||||
Reference in New Issue
Block a user