Skip to content

Fix #2740: Prevent IndexError in ollama_pt() with empty messages #2741

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 31 additions & 5 deletions src/crewai/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -322,6 +322,23 @@ def _is_anthropic_model(self, model: str) -> bool:
ANTHROPIC_PREFIXES = ("anthropic/", "claude-", "claude/")
return any(prefix in model.lower() for prefix in ANTHROPIC_PREFIXES)

def _validate_messages(
self,
messages: Union[str, List[Dict[str, str]], None]
) -> None:
"""Validate that messages list is not empty or None.

Args:
messages: Input messages for the LLM

Raises:
ValueError: If messages is None or an empty list
"""
if messages is None:
raise ValueError("Messages list cannot be empty. At least one message is required.")
if isinstance(messages, list) and len(messages) == 0:
raise ValueError("Messages list cannot be empty. At least one message is required.")

def _prepare_completion_params(
self,
messages: Union[str, List[Dict[str, str]]],
Expand All @@ -337,8 +354,14 @@ def _prepare_completion_params(

Returns:
Dict[str, Any]: Parameters for the completion call

Raises:
ValueError: If messages is None or an empty list
"""
# --- 1) Format messages according to provider requirements
# --- 1) Ensure messages list is not empty
self._validate_messages(messages)

# --- 2) Format messages according to provider requirements
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]
formatted_messages = self._format_messages_for_provider(messages)
Expand Down Expand Up @@ -842,10 +865,13 @@ def call(

Raises:
TypeError: If messages format is invalid
ValueError: If response format is not supported
ValueError: If messages is None or an empty list, or if response format is not supported
LLMContextLengthExceededException: If input exceeds model's context limit
"""
# --- 1) Emit call started event
# --- 1) Validate messages is not None or empty to prevent IndexError in LiteLLM's ollama_pt()
self._validate_messages(messages)

# --- 2) Emit call started event
assert hasattr(crewai_event_bus, "emit")
crewai_event_bus.emit(
self,
Expand All @@ -857,10 +883,10 @@ def call(
),
)

# --- 2) Validate parameters before proceeding with the call
# --- 3) Validate parameters before proceeding with the call
self._validate_call_params()

# --- 3) Convert string messages to proper format if needed
# --- 4) Convert string messages to proper format if needed
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]

Expand Down
59 changes: 59 additions & 0 deletions tests/test_empty_messages.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
from unittest.mock import patch, MagicMock

import pytest

from crewai.llm import LLM


@patch('crewai.llm.LLM._prepare_completion_params')
def test_empty_messages_validation(mock_prepare):
    """
    LLM.call() must reject an empty messages list and None with ValueError,
    guarding against the IndexError raised by LiteLLM's ollama_pt().
    """
    llm = LLM(model="gpt-3.5-turbo")  # model choice is irrelevant here
    expected_error = "Messages list cannot be empty"

    # Both "nothing" shapes must be rejected identically.
    for bad_input in ([], None):
        with pytest.raises(ValueError, match=expected_error):
            llm.call(messages=bad_input)

    # Validation must short-circuit before completion params are ever built.
    mock_prepare.assert_not_called()


@patch('crewai.llm.LLM._prepare_completion_params')
def test_empty_string_message(mock_prepare):
    """
    LLM.call() must reject an empty prompt string with ValueError.
    """
    expected_error = "Messages list cannot be empty"
    llm = LLM(model="gpt-3.5-turbo")

    with pytest.raises(ValueError, match=expected_error):
        llm.call(messages="")

    # The call must fail validation before any params are prepared.
    mock_prepare.assert_not_called()


@patch('crewai.llm.LLM._prepare_completion_params')
def test_invalid_message_format(mock_prepare):
    """
    LLM.call() must propagate a TypeError when a malformed message dict
    reaches parameter preparation.
    """
    llm = LLM(model="gpt-3.5-turbo")
    # Simulate the preparation step choking on a message with no role/content.
    mock_prepare.side_effect = TypeError("Invalid message format")

    with pytest.raises(TypeError, match="Invalid message format"):
        llm.call(messages=[{}])


@pytest.mark.vcr(filter_headers=["authorization"])
def test_ollama_model_empty_messages():
    """
    An Ollama-backed LLM must also raise ValueError on an empty messages
    list — the exact scenario that triggered the upstream IndexError.
    """
    ollama_llm = LLM(model="ollama/llama3")

    expected_error = "Messages list cannot be empty"
    with pytest.raises(ValueError, match=expected_error):
        ollama_llm.call(messages=[])
Loading