Log all responses
@@ -75,6 +75,7 @@ class LLMClient:
             # If streaming, return the generator directly
             if stream:
+                logger.info(f"Using streaming response from {self.provider_name}")
                 return self.provider.get_streaming_content(response)

             # For non-streaming responses, handle tool calls if present
@@ -96,10 +97,18 @@ class LLMClient:
                     messages=messages, model=model, temperature=temperature, max_tokens=max_tokens, tool_results=tool_results, original_response=response
                 )

-                return {"content": self.provider.get_content(follow_up_response)}
+                content = self.provider.get_content(follow_up_response)
+                logger.info(f"Final content from {self.provider_name} with tool calls COMPLETE RESPONSE START >>>")
+                logger.info(content)
+                logger.info("<<< COMPLETE RESPONSE END")
+                return {"content": content}
             else:
                 logger.info("Response does not contain tool calls")
-                return {"content": self.provider.get_content(response)}
+                content = self.provider.get_content(response)
+                logger.info(f"Final content from {self.provider_name} without tool calls COMPLETE RESPONSE START >>>")
+                logger.info(content)
+                logger.info("<<< COMPLETE RESPONSE END")
+                return {"content": content}

         except Exception as e:
             error_msg = f"Error in {self.provider_name} API call: {str(e)}\n{traceback.format_exc()}"
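The COMPLETE RESPONSE START >>> / <<< COMPLETE RESPONSE END delimiter pattern added here is repeated verbatim in both branches, and again in the view layer further down. A small helper could centralize it; this is only a hypothetical refactor sketch, and log_complete_response does not exist in the codebase:

# Hypothetical helper, not part of this commit: wraps a logged payload
# in the same delimiters the hunk above writes by hand.
def log_complete_response(logger, content: str) -> None:
    logger.info("COMPLETE RESPONSE START >>>")
    logger.info(content)
    logger.info("<<< COMPLETE RESPONSE END")

With such a helper, each branch would reduce to log_complete_response(logger, content) followed by return {"content": content}.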
@@ -265,6 +265,7 @@ class AnthropicProvider(BaseLLMProvider):
         Returns:
             Generator yielding content chunks
         """
+        logger.info("Starting Anthropic streaming response processing")

         def generate():
             for chunk in response:
@@ -213,6 +213,7 @@ class OpenAIProvider(BaseLLMProvider):
         Returns:
             Generator yielding content chunks
         """
+        logger.info("Starting OpenAI streaming response processing")

         def generate():
             for chunk in response:
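Both provider hunks only add the log line ahead of the existing generate() closure; the closure itself is unchanged. For orientation, a minimal sketch of what such a closure typically looks like on the OpenAI side, assuming the openai v1 SDK's streaming chunk shape (chunk.choices[0].delta.content) rather than the project's actual implementation:

def generate():
    # Yield only non-empty text deltas from the streamed chunks.
    for chunk in response:
        delta = chunk.choices[0].delta.content if chunk.choices else None
        if delta:
            yield delta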
@@ -1,5 +1,8 @@
 """Views for Airflow Wingman plugin."""

+import json
+import logging
+
 from flask import Response, request, session
 from flask.json import jsonify
 from flask_appbuilder import BaseView as AppBuilderBaseView, expose
@@ -10,6 +13,9 @@ from airflow_wingman.notes import INTERFACE_MESSAGES
 from airflow_wingman.prompt_engineering import prepare_messages
 from airflow_wingman.tools import list_airflow_tools

+# Create a properly namespaced logger for the Airflow plugin
+logger = logging.getLogger("airflow.plugins.wingman")
+

 class WingmanView(AppBuilderBaseView):
     """View for Airflow Wingman plugin."""
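Because the logger is created under the airflow.plugins namespace, its records propagate up the standard logging hierarchy to whatever handlers the running Airflow instance has configured, so the plugin does not need to attach handlers of its own. A minimal sketch (standard-library logging only, not part of the commit) of raising its verbosity while debugging:

import logging

# Hypothetical debugging snippet: make this plugin's logger more verbose
# without touching Airflow's global logging configuration.
logging.getLogger("airflow.plugins.wingman").setLevel(logging.DEBUG)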
@@ -50,6 +56,11 @@ class WingmanView(AppBuilderBaseView):
         # Get base URL from models configuration based on provider
         base_url = MODELS.get(provider_name, {}).get("endpoint")

+        # Log the request parameters (excluding API key for security)
+        safe_data = {k: v for k, v in data.items() if k != "api_key"}
+        logger.info(f"Chat request: provider={provider_name}, model={data.get('model')}, stream={data.get('stream')}")
+        logger.info(f"Request parameters: {json.dumps(safe_data)[:200]}...")
+
         # Create a new client for this request with the appropriate provider
         client = LLMClient(provider_name=provider_name, api_key=data["api_key"], base_url=base_url)

@@ -96,24 +107,39 @@ class WingmanView(AppBuilderBaseView):
     def _handle_streaming_response(self, client: LLMClient, data: dict) -> Response:
         """Handle streaming response."""
         try:
-            # Get the streaming generator from the client
+            logger.info("Beginning streaming response")
             generator = client.chat_completion(messages=data["messages"], model=data["model"], temperature=data["temperature"], max_tokens=data["max_tokens"], stream=True)

             def stream_response():
+                complete_response = ""
+
                 # Send SSE format for each chunk
                 for chunk in generator:
                     if chunk:
                         yield f"data: {chunk}\n\n"

-                # Signal end of stream
+                # Log the complete assembled response at the end
+                logger.info("COMPLETE RESPONSE START >>>")
+                logger.info(complete_response)
+                logger.info("<<< COMPLETE RESPONSE END")
+
                 yield "data: [DONE]\n\n"

             return Response(stream_response(), mimetype="text/event-stream", headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"})
         except Exception as e:
-            # If streaming fails, return error
+            logger.error(f"Streaming error: {str(e)}")
             return jsonify({"error": str(e)}), 500

     def _handle_regular_response(self, client: LLMClient, data: dict) -> Response:
         """Handle regular response."""
-        response = client.chat_completion(messages=data["messages"], model=data["model"], temperature=data["temperature"], max_tokens=data["max_tokens"], stream=False)
-        return jsonify(response)
+        try:
+            logger.info("Beginning regular (non-streaming) response")
+            response = client.chat_completion(messages=data["messages"], model=data["model"], temperature=data["temperature"], max_tokens=data["max_tokens"], stream=False)
+            logger.info("COMPLETE RESPONSE START >>>")
+            logger.info(f"Response to frontend: {json.dumps(response)}")
+            logger.info("<<< COMPLETE RESPONSE END")
+
+            return jsonify(response)
+        except Exception as e:
+            logger.error(f"Regular response error: {str(e)}")
+            return jsonify({"error": str(e)}), 500
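One caveat in the streaming handler above: complete_response is initialized and logged after the loop, but nothing in this hunk appends the yielded chunks to it, so the COMPLETE RESPONSE block would log an empty string. Assuming the accumulation is not handled elsewhere, the missing step would look roughly like this (a sketch, not something this commit contains):

def stream_response():
    complete_response = ""

    # Send SSE format for each chunk, accumulating it for the final log
    for chunk in generator:
        if chunk:
            complete_response += chunk
            yield f"data: {chunk}\n\n"

    # Log the complete assembled response at the end
    logger.info("COMPLETE RESPONSE START >>>")
    logger.info(complete_response)
    logger.info("<<< COMPLETE RESPONSE END")

    yield "data: [DONE]\n\n"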