diff --git a/src/crewai/llm.py b/src/crewai/llm.py
index 7415446620..fcde91cdcb 100644
--- a/src/crewai/llm.py
+++ b/src/crewai/llm.py
@@ -443,70 +443,16 @@ def _handle_streaming_response(
                         event=LLMStreamChunkEvent(chunk=chunk_content),
                     )
 
-            # --- 4) Fallback to non-streaming if no content received
-            if not full_response.strip() and chunk_count == 0:
+            # --- 4) If no content received or extraction failed, fall back to non-streaming mode
+            if not full_response.strip():
                 logging.warning(
-                    "No chunks received in streaming response, falling back to non-streaming"
+                    f"Received {chunk_count} chunks but unable to extract text content. Falling back to non-streaming call."
                 )
                 non_streaming_params = params.copy()
                 non_streaming_params["stream"] = False
-                non_streaming_params.pop(
-                    "stream_options", None
-                )  # Remove stream_options for non-streaming call
-                return self._handle_non_streaming_response(
-                    non_streaming_params, callbacks, available_functions
-                )
-
-            # --- 5) Handle empty response with chunks
-            if not full_response.strip() and chunk_count > 0:
-                logging.warning(
-                    f"Received {chunk_count} chunks but no content was extracted"
-                )
-                if last_chunk is not None:
-                    try:
-                        # Try to extract content from the last chunk's message
-                        choices = None
-                        if isinstance(last_chunk, dict) and "choices" in last_chunk:
-                            choices = last_chunk["choices"]
-                        elif hasattr(last_chunk, "choices"):
-                            if not isinstance(getattr(last_chunk, "choices"), type):
-                                choices = getattr(last_chunk, "choices")
-
-                        if choices and len(choices) > 0:
-                            choice = choices[0]
-
-                            # Try to get content from message
-                            message = None
-                            if isinstance(choice, dict) and "message" in choice:
-                                message = choice["message"]
-                            elif hasattr(choice, "message"):
-                                message = getattr(choice, "message")
-
-                            if message:
-                                content = None
-                                if isinstance(message, dict) and "content" in message:
-                                    content = message["content"]
-                                elif hasattr(message, "content"):
-                                    content = getattr(message, "content")
-
-                                if content:
-                                    full_response = content
-                                    logging.info(
-                                        f"Extracted content from last chunk message: {full_response}"
-                                    )
-                    except Exception as e:
-                        logging.debug(f"Error extracting content from last chunk: {e}")
-                        logging.debug(
-                            f"Last chunk format: {type(last_chunk)}, content: {last_chunk}"
-                        )
-
-            # --- 6) If still empty, raise an error instead of using a default response
-            if not full_response.strip():
-                raise Exception(
-                    "No content received from streaming response. Received empty chunks or failed to extract content."
-                )
+                return self._handle_non_streaming_response(non_streaming_params, callbacks, available_functions)
 
-            # --- 7) Check for tool calls in the final response
+            # --- 5) Check for tool calls in the final response
             tool_calls = None
             try:
                 if last_chunk:
@@ -534,7 +480,7 @@ def _handle_streaming_response(
             except Exception as e:
                 logging.debug(f"Error checking for tool calls: {e}")
 
-            # --- 8) If no tool calls or no available functions, return the text response directly
+            # --- 6) If no tool calls or no available functions, return the text response directly
             if not tool_calls or not available_functions:
                 # Log token usage if available in streaming mode
                 self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)
@@ -542,15 +488,15 @@ def _handle_streaming_response(
                 self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL)
                 return full_response
 
-            # --- 9) Handle tool calls if present
+            # --- 7) Handle tool calls if present
             tool_result = self._handle_tool_call(tool_calls, available_functions)
             if tool_result is not None:
                 return tool_result
 
-            # --- 10) Log token usage if available in streaming mode
+            # --- 8) Log token usage if available in streaming mode
             self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)
 
-            # --- 11) Emit completion event and return response
+            # --- 9) Emit completion event and return response
             self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL)
             return full_response
 
diff --git a/src/crewai/utilities/events/llm_events.py b/src/crewai/utilities/events/llm_events.py
index 07a17a48b7..0d271f41b2 100644
--- a/src/crewai/utilities/events/llm_events.py
+++ b/src/crewai/utilities/events/llm_events.py
@@ -46,3 +46,4 @@ class LLMStreamChunkEvent(BaseEvent):
 
     type: str = "llm_stream_chunk"
     chunk: str
+    tool_call: Optional[dict] = None
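The llm.py hunk collapses the old chunk-count check, the last-chunk salvage loop, and the terminal raise into a single path: any empty accumulated response now triggers one non-streaming retry. A minimal standalone sketch of that resulting behavior, using hypothetical `call_streaming` / `call_non_streaming` helpers in place of the real methods:

```python
import logging
from typing import Any


def call_streaming(params: dict[str, Any]) -> tuple[str, int]:
    # Hypothetical stand-in for the real streaming call: returns the
    # accumulated text and the number of chunks received.
    return "", 3  # simulate chunks that carried no extractable text


def call_non_streaming(params: dict[str, Any]) -> str:
    # Hypothetical stand-in for the real non-streaming call.
    return "full completion text"


def get_completion(params: dict[str, Any]) -> str:
    full_response, chunk_count = call_streaming(params)
    if not full_response.strip():
        # New behavior: one non-streaming retry replaces both the old
        # last-chunk content salvage and the terminal raise.
        logging.warning(
            "Received %d chunks but unable to extract text content. "
            "Falling back to non-streaming call.",
            chunk_count,
        )
        return call_non_streaming({**params, "stream": False})
    return full_response


print(get_completion({"stream": True}))  # -> full completion text
```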
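For the new `tool_call` field on `LLMStreamChunkEvent`, a sketch of how a downstream listener might separate tool-call deltas from plain text chunks; the `crewai_event_bus` import and `.on(...)` registration follow crewai's documented event-bus pattern but are assumptions here, not part of this diff:

```python
from crewai.utilities.events import crewai_event_bus  # assumed import path
from crewai.utilities.events.llm_events import LLMStreamChunkEvent


@crewai_event_bus.on(LLMStreamChunkEvent)  # assumed registration API
def on_llm_stream_chunk(source, event: LLMStreamChunkEvent) -> None:
    if event.tool_call is not None:
        # Tool-call deltas arrive through the new optional field.
        print(f"[tool call] {event.tool_call}")
    else:
        # Plain text chunks keep tool_call=None, so existing
        # consumers of `chunk` are unaffected.
        print(event.chunk, end="", flush=True)
```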