From 86f70170469cb961fb085adc443c0f1305a1a3b8 Mon Sep 17 00:00:00 2001
From: abhishekbhakat
Date: Wed, 26 Feb 2025 20:33:43 +0000
Subject: [PATCH] Log all responses

---
 src/airflow_wingman/llm_client.py    | 13 +++++--
 .../providers/anthropic_provider.py  |  1 +
 .../providers/openai_provider.py     |  1 +
 src/airflow_wingman/views.py         | 36 ++++++++++++++++---
 4 files changed, 44 insertions(+), 7 deletions(-)

diff --git a/src/airflow_wingman/llm_client.py b/src/airflow_wingman/llm_client.py
index 40d1260..727991b 100644
--- a/src/airflow_wingman/llm_client.py
+++ b/src/airflow_wingman/llm_client.py
@@ -75,6 +75,7 @@ class LLMClient:
 
             # If streaming, return the generator directly
             if stream:
+                logger.info(f"Using streaming response from {self.provider_name}")
                 return self.provider.get_streaming_content(response)
 
             # For non-streaming responses, handle tool calls if present
@@ -96,10 +97,18 @@ class LLMClient:
                     messages=messages, model=model, temperature=temperature, max_tokens=max_tokens, tool_results=tool_results, original_response=response
                 )
 
-                return {"content": self.provider.get_content(follow_up_response)}
+                content = self.provider.get_content(follow_up_response)
+                logger.info(f"Final content from {self.provider_name} with tool calls COMPLETE RESPONSE START >>>")
+                logger.info(content)
+                logger.info("<<< COMPLETE RESPONSE END")
+                return {"content": content}
             else:
                 logger.info("Response does not contain tool calls")
-                return {"content": self.provider.get_content(response)}
+                content = self.provider.get_content(response)
+                logger.info(f"Final content from {self.provider_name} without tool calls COMPLETE RESPONSE START >>>")
+                logger.info(content)
+                logger.info("<<< COMPLETE RESPONSE END")
+                return {"content": content}
 
         except Exception as e:
             error_msg = f"Error in {self.provider_name} API call: {str(e)}\n{traceback.format_exc()}"
diff --git a/src/airflow_wingman/providers/anthropic_provider.py b/src/airflow_wingman/providers/anthropic_provider.py
index 666c6d5..d18dd1f 100644
--- a/src/airflow_wingman/providers/anthropic_provider.py
+++ b/src/airflow_wingman/providers/anthropic_provider.py
@@ -265,6 +265,7 @@ class AnthropicProvider(BaseLLMProvider):
         Returns:
             Generator yielding content chunks
         """
+        logger.info("Starting Anthropic streaming response processing")
 
         def generate():
             for chunk in response:
diff --git a/src/airflow_wingman/providers/openai_provider.py b/src/airflow_wingman/providers/openai_provider.py
index bfcc7dd..512970d 100644
--- a/src/airflow_wingman/providers/openai_provider.py
+++ b/src/airflow_wingman/providers/openai_provider.py
@@ -213,6 +213,7 @@ class OpenAIProvider(BaseLLMProvider):
         Returns:
             Generator yielding content chunks
         """
+        logger.info("Starting OpenAI streaming response processing")
 
         def generate():
             for chunk in response:
diff --git a/src/airflow_wingman/views.py b/src/airflow_wingman/views.py
index fcf3bff..599c4bd 100644
--- a/src/airflow_wingman/views.py
+++ b/src/airflow_wingman/views.py
@@ -1,5 +1,8 @@
 """Views for Airflow Wingman plugin."""
 
+import json
+import logging
+
 from flask import Response, request, session
 from flask.json import jsonify
 from flask_appbuilder import BaseView as AppBuilderBaseView, expose
@@ -10,6 +13,9 @@
 from airflow_wingman.notes import INTERFACE_MESSAGES
 from airflow_wingman.prompt_engineering import prepare_messages
 from airflow_wingman.tools import list_airflow_tools
+# Create a properly namespaced logger for the Airflow plugin
+logger = logging.getLogger("airflow.plugins.wingman")
+
 
 class WingmanView(AppBuilderBaseView):
     """View for Airflow Wingman plugin."""
@@ -50,6 +56,11 @@ class WingmanView(AppBuilderBaseView):
         # Get base URL from models configuration based on provider
         base_url = MODELS.get(provider_name, {}).get("endpoint")
 
+        # Log the request parameters (excluding API key for security)
+        safe_data = {k: v for k, v in data.items() if k != "api_key"}
+        logger.info(f"Chat request: provider={provider_name}, model={data.get('model')}, stream={data.get('stream')}")
+        logger.info(f"Request parameters: {json.dumps(safe_data)[:200]}...")
+
         # Create a new client for this request with the appropriate provider
         client = LLMClient(provider_name=provider_name, api_key=data["api_key"], base_url=base_url)
 
@@ -96,24 +107,39 @@ class WingmanView(AppBuilderBaseView):
     def _handle_streaming_response(self, client: LLMClient, data: dict) -> Response:
         """Handle streaming response."""
         try:
-            # Get the streaming generator from the client
+            logger.info("Beginning streaming response")
             generator = client.chat_completion(messages=data["messages"], model=data["model"], temperature=data["temperature"], max_tokens=data["max_tokens"], stream=True)
 
             def stream_response():
+                complete_response = ""
+
                 # Send SSE format for each chunk
                 for chunk in generator:
                     if chunk:
                         yield f"data: {chunk}\n\n"
 
-                # Signal end of stream
+                # Log the complete assembled response at the end
+                logger.info("COMPLETE RESPONSE START >>>")
+                logger.info(complete_response)
+                logger.info("<<< COMPLETE RESPONSE END")
+
                 yield "data: [DONE]\n\n"
 
             return Response(stream_response(), mimetype="text/event-stream", headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"})
         except Exception as e:
-            # If streaming fails, return error
+            logger.error(f"Streaming error: {str(e)}")
             return jsonify({"error": str(e)}), 500
 
     def _handle_regular_response(self, client: LLMClient, data: dict) -> Response:
         """Handle regular response."""
-        response = client.chat_completion(messages=data["messages"], model=data["model"], temperature=data["temperature"], max_tokens=data["max_tokens"], stream=False)
-        return jsonify(response)
+        try:
+            logger.info("Beginning regular (non-streaming) response")
+            response = client.chat_completion(messages=data["messages"], model=data["model"], temperature=data["temperature"], max_tokens=data["max_tokens"], stream=False)
+            logger.info("COMPLETE RESPONSE START >>>")
+            logger.info(f"Response to frontend: {json.dumps(response)}")
+            logger.info("<<< COMPLETE RESPONSE END")
+
+            return jsonify(response)
+        except Exception as e:
+            logger.error(f"Regular response error: {str(e)}")
+            return jsonify({"error": str(e)}), 500
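
Note on the streaming handler in the last hunk: stream_response() initializes complete_response but never appends the streamed chunks to it, so the "COMPLETE RESPONSE START/END" log entries for streaming requests will always log an empty string. A minimal sketch of the intended accumulation (a suggested follow-up, not part of this patch; it assumes each chunk yielded by the generator is plain text safe to concatenate) could look like:

    def stream_response():
        complete_response = ""

        # Send SSE format for each chunk, accumulating it for the final log entry
        for chunk in generator:
            if chunk:
                complete_response += chunk
                yield f"data: {chunk}\n\n"

        # Log the complete assembled response at the end
        logger.info("COMPLETE RESPONSE START >>>")
        logger.info(complete_response)
        logger.info("<<< COMPLETE RESPONSE END")

        yield "data: [DONE]\n\n"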