Skip to content
Open
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
73 changes: 68 additions & 5 deletions tests/test_examples.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,13 @@
from dataclasses import dataclass, field
from inspect import FrameInfo
from pathlib import Path
from typing import Any
from typing import Any, NamedTuple

import httpx
import pytest
from _pytest.mark import ParameterSet
from devtools import debug
from pydantic_core import SchemaValidator, core_schema
from pytest_examples import CodeExample, EvalExample, find_examples
from pytest_examples.config import ExamplesConfig as BaseExamplesConfig
from pytest_mock import MockerFixture
Expand Down Expand Up @@ -43,6 +44,7 @@
from pydantic_ai.models.fallback import FallbackModel
from pydantic_ai.models.function import AgentInfo, DeltaToolCall, DeltaToolCalls, FunctionModel
from pydantic_ai.models.test import TestModel
from pydantic_ai.tools import ToolDefinition

from .conftest import ClientWithHandler, TestEnv, try_import

Expand Down Expand Up @@ -115,7 +117,7 @@ def tmp_path_cwd(tmp_path: Path):
'ignore:`BuiltinToolCallEvent` is deprecated', 'ignore:`BuiltinToolResultEvent` is deprecated'
)
@pytest.mark.parametrize('example', find_filter_examples())
def test_docs_examples(
def test_docs_examples( # noqa: C901
example: CodeExample,
eval_example: EvalExample,
mocker: MockerFixture,
Expand Down Expand Up @@ -147,8 +149,13 @@ def print(self, *args: Any, **kwargs: Any) -> None:

mocker.patch('pydantic_evals.dataset.EvaluationReport', side_effect=CustomEvaluationReport)

mocker.patch('pydantic_ai.mcp.MCPServerSSE', return_value=MockMCPServer())
mocker.patch('pydantic_ai.mcp.MCPServerStreamableHTTP', return_value=MockMCPServer())
def create_mock_mcp_server(*args: Any, **kwargs: Any) -> MockMCPServer:
"""Factory to create new mock instances each time."""
return MockMCPServer(*args, **kwargs)

mocker.patch('pydantic_ai.mcp.MCPServerSSE', side_effect=create_mock_mcp_server)
mocker.patch('pydantic_ai.mcp.MCPServerStreamableHTTP', side_effect=create_mock_mcp_server)
mocker.patch('pydantic_ai.mcp.MCPServerStdio', side_effect=create_mock_mcp_server)
mocker.patch('mcp.server.fastmcp.FastMCP')

env.set('OPENAI_API_KEY', 'testing')
Expand Down Expand Up @@ -300,7 +307,27 @@ def rich_prompt_ask(prompt: str, *_args: Any, **_kwargs: Any) -> str:
raise ValueError(f'Unexpected prompt: {prompt}')


def _create_no_op_validator() -> SchemaValidator:
    """Build a validator that passes any value through without validation."""
    any_schema = core_schema.any_schema()
    return SchemaValidator(schema=any_schema)


class MockMCPServer(AbstractToolset[Any]):
    """In-process stand-in for the real MCP server classes used by docs examples."""

    # Names of the tools the docs examples expect a server to expose.
    # NOTE(review): rather than mocking enough to keep
    # MCPServerStdio('python', args=['mcp_server.py']) working, consider pointing
    # the mcp-run-python example at the local tests/mcp_server.py as well — the
    # goal is only to avoid mcp-run-python's extra downloads and the Deno
    # requirement, not to fully mock MCP.
    _TOOLS = {
        'echo_deps',
        'image_generator',
        'return_fruit',
        'get_document',
        'get_company_logo',
    }

def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Accept any arguments that real MCP servers might take."""
# Store for reference but don't need to do anything with them
self._args = args
self._kwargs = kwargs

    @property
    def id(self) -> str | None:
        """Mock servers expose no stable identifier."""
        return None  # pragma: no cover
Expand All @@ -316,13 +343,49 @@ async def __aexit__(self, *args: Any) -> None:
pass

async def get_tools(self, ctx: RunContext[Any]) -> dict[str, ToolsetTool[Any]]:
return {}
# Return common tools for tests that need them
tools: dict[str, ToolsetTool[Any]] = {}
for tool_name in self._TOOLS:
tools[tool_name] = ToolsetTool(
toolset=self,
tool_def=ToolDefinition(
name=tool_name,
parameters_json_schema={'type': 'object', 'properties': {}},
),
max_retries=0,
args_validator=_create_no_op_validator(),
)
return tools

async def call_tool(
self, name: str, tool_args: dict[str, Any], ctx: RunContext[Any], tool: ToolsetTool[Any]
) -> Any:
# Return specific values for known tools that tests use
if name == 'echo_deps':
# Return the expected response for echo_deps tool
return {'echo': 'This is an echo message', 'deps': ctx.deps if hasattr(ctx, 'deps') else None}
return None # pragma: lax no cover

async def list_resources(self) -> list[Any]:
"""Mock list_resources returns sample resources."""

class Resource(NamedTuple):
name: str
uri: str
mime_type: str

return [Resource(name='user_name_resource', uri='resource://user_name.txt', mime_type='text/plain')]

    async def list_resource_templates(self) -> list[Any]:
        """Mock list_resource_templates; the mock server defines no templates."""
        return []

async def read_resource(self, uri: str) -> str:
"""Mock read_resource returns sample content."""
if 'user_name' in uri:
return 'Alice'
return ''


text_responses: dict[str, str | ToolCallPart | Sequence[ToolCallPart]] = {
'Use the web to get the current time.': "In San Francisco, it's 8:21:41 pm PDT on Wednesday, August 6, 2025.",
Expand Down
Loading