diff --git a/src/app.py b/src/app.py
index 2bcad71..7fa66f9 100644
--- a/src/app.py
+++ b/src/app.py
@@ -37,10 +37,12 @@ def handle_user_input():
     response = st.session_state.client.get_chat_response(st.session_state.messages)

     # Handle both MCP and standard OpenAI responses
-    if hasattr(response, "__iter__"):
+    # Check if it's NOT a dict (assuming a stream is not a dict)
+    if not isinstance(response, dict):
         # Standard OpenAI streaming response
         for chunk in response:
-            if chunk.choices[0].delta.content:
+            # Ensure chunk has choices and delta before accessing
+            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                 full_response += chunk.choices[0].delta.content
                 response_placeholder.markdown(full_response + "▌")
     else:
diff --git a/src/openai_client.py b/src/openai_client.py
index 6fbbbab..34868fb 100644
--- a/src/openai_client.py
+++ b/src/openai_client.py
@@ -63,10 +63,6 @@ class OpenAIClient:
             raise Exception(error_msg)

     def _wrap_mcp_response(self, response: dict):
-        """Convert MCP response to OpenAI-compatible format"""
-
-        # Create a generator to simulate streaming response
-        def response_generator():
-            yield {"choices": [{"delta": {"content": response.get("assistant_text", "")}}]}
-
-        return response_generator()
+        """Return the MCP response dictionary directly (for non-streaming)."""
+        # No conversion needed if app.py handles dicts separately
+        return response
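The `isinstance` check is the key fix here: the old `hasattr(response, "__iter__")` test also matched plain dicts (dicts are iterable), so MCP responses would have been routed into the streaming branch. For context, here is a minimal sketch of the full dispatch as it might look after this change, including the `else:` body that falls outside the hunk above. `render_response` is a hypothetical helper name, and the `assistant_text` key is borrowed from the removed wrapper code; the actual branch body in `app.py` is not shown in this diff.

```python
# Hypothetical sketch of the dispatch inside handle_user_input(), assuming the
# MCP dict carries its text under "assistant_text" (the key the removed
# response_generator wrapper used). Not the verbatim app.py code.
def render_response(response, response_placeholder):
    full_response = ""
    if not isinstance(response, dict):
        # Standard OpenAI streaming response: accumulate delta chunks
        for chunk in response:
            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                full_response += chunk.choices[0].delta.content
                response_placeholder.markdown(full_response + "▌")
    else:
        # MCP response: a plain dict returned unchanged by _wrap_mcp_response,
        # rendered in one shot with no streaming cursor
        full_response = response.get("assistant_text", "")
        response_placeholder.markdown(full_response)
    return full_response
```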