8 changes: 7 additions & 1 deletion .pre-commit-config.yaml
@@ -27,9 +27,15 @@ repos:
args: ["--skip", "tests/models/cassettes/*"]
additional_dependencies:
- tomli

- repo: local
hooks:
- id: clai-help
name: clai help output
entry: uv
args: [run, pytest, "clai/update_readme.py"]
language: system
types_or: [python, markdown]
pass_filenames: false
- id: format
name: Format
entry: make
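The new `clai-help` hook runs the README check through `uv` and pytest whenever Python or Markdown files change; `pass_filenames: false` stops pre-commit from passing the staged file list to pytest. A minimal sketch of running the same check by hand, assuming `uv` is on PATH and the command is issued from the repository root:

```python
# Run the same check the `clai-help` pre-commit hook runs, outside of pre-commit.
# Assumes `uv` is available and the working directory is the repository root.
import subprocess

result = subprocess.run(
    ["uv", "run", "pytest", "clai/update_readme.py"],
    capture_output=True,
    text=True,
)
# The test rewrites clai/README.md in place when `clai --help` output has drifted,
# so a non-zero exit code usually means the README was just refreshed and needs re-staging.
print(result.returncode)
print(result.stdout)
```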
30 changes: 29 additions & 1 deletion clai/README.md
@@ -51,7 +51,35 @@ Either way, running `clai` will start an interactive session where you can chat
- `/multiline`: Toggle multiline input mode (use Ctrl+D to submit)
- `/cp`: Copy the last response to clipboard

For full CLI documentation, see the [CLI documentation](https://ai.pydantic.dev/cli/).
## Help

```
usage: clai [-h] [-l] [--version] [-m MODEL] [-a AGENT] [-t CODE_THEME] [--no-stream] [prompt]

Pydantic AI CLI v...

Commands:
web Start a web-based chat interface for an agent

Use "clai web --help" for more information on the web command.

positional arguments:
prompt AI prompt for one-shot mode. If omitted, starts interactive mode.

options:
-h, --help show this help message and exit
-l, --list-models List all available models and exit
--version Show version and exit
-m MODEL, --model MODEL
Model to use, in format "<provider>:<model>" e.g. "openai:gpt-5" or "anthropic:claude-sonnet-4-5". Defaults to "openai:gpt-5".
-a AGENT, --agent AGENT
Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"
-t CODE_THEME, --code-theme CODE_THEME
Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.
--no-stream Disable streaming from the model
```

For more information on how to use it, see the [CLI documentation](https://ai.pydantic.dev/cli/).

## Web Chat UI

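The `--agent` flag documented in the help text above expects a `module:variable` path to an importable agent. A minimal sketch of what such a module might contain; the module path, model string, and system prompt are illustrative, not taken from this PR:

```python
# mymodule/submodule.py — illustrative target for `clai --agent mymodule.submodule:my_agent`.
# The agent is a plain pydantic_ai Agent; the model and system prompt are placeholders.
from pydantic_ai import Agent

my_agent = Agent(
    'openai:gpt-5',
    system_prompt='Answer concisely and show code examples where helpful.',
)
```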
30 changes: 30 additions & 0 deletions clai/update_readme.py
@@ -0,0 +1,30 @@
import os
import re
import sys
from pathlib import Path

import pytest

from pydantic_ai._cli import cli


@pytest.mark.skipif(sys.version_info >= (3, 13), reason='slightly different output with 3.13')
def test_cli_help(capfd: pytest.CaptureFixture[str]):
"""Check README.md help output matches `clai --help`."""
os.environ['COLUMNS'] = '150'
with pytest.raises(SystemExit):
cli(['--help'], prog_name='clai')

help_output = capfd.readouterr().out.strip()
# TODO change when we reach v1
help_output = re.sub(r'(Pydantic AI CLI v).+', r'\1...', help_output)

this_dir = Path(__file__).parent
readme = this_dir / 'README.md'
content = readme.read_text(encoding='utf-8')

new_content, count = re.subn('^(## Help\n+```).+?```', rf'\1\n{help_output}\n```', content, flags=re.M | re.S)
assert count, 'help section not found'
if new_content != content:
readme.write_text(new_content, encoding='utf-8')
pytest.fail('`clai --help` output changed.')
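The `re.subn` call above anchors on the `## Help` heading, keeps the heading and opening fence in group 1, and replaces everything up to the next closing fence with the freshly captured `--help` output. A minimal sketch of the same pattern applied to a toy fragment (the fence string is built programmatically here only to avoid nesting fences inside this example):

```python
# Minimal sketch of the substitution used in update_readme.py, on a toy README fragment.
import re

fence = '`' * 3  # a literal ``` fence
content = f'## Help\n\n{fence}\nstale help text\n{fence}\n\nOther docs.\n'
help_output = 'usage: clai [-h] ...'

new_content, count = re.subn(
    rf'^(## Help\n+{fence}).+?{fence}',  # group 1: heading plus opening fence
    rf'\1\n{help_output}\n{fence}',      # splice in the fresh --help output
    content,
    flags=re.M | re.S,                   # ^ matches line starts; . spans newlines
)
assert count == 1
assert 'stale help text' not in new_content
```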
21 changes: 14 additions & 7 deletions pydantic_ai_slim/pydantic_ai/_cli/__init__.py
@@ -122,12 +122,12 @@ def cli_system_prompt() -> str:
The user is running {sys.platform}."""


def cli_exit(prog_name: str = 'pai'): # pragma: no cover
def cli_exit(prog_name: str = 'clai'): # pragma: no cover
"""Run the CLI and exit."""
sys.exit(cli(prog_name=prog_name))


def cli(args_list: Sequence[str] | None = None, *, prog_name: str = 'pai', default_model: str = 'openai:gpt-5') -> int:
def cli(args_list: Sequence[str] | None = None, *, prog_name: str = 'clai', default_model: str = 'openai:gpt-5') -> int:
"""Run the CLI and return the exit code for the process."""
# we don't want to autocomplete or list models that don't include the provider,
# e.g. we want to show `openai:gpt-4o` but not `gpt-4o`
@@ -180,8 +180,8 @@ def _cli_web(args_list: list[str], prog_name: str, default_model: str, qualified
)
parser.add_argument('--host', default='127.0.0.1', help='Host to bind server (default: 127.0.0.1)')
parser.add_argument('--port', type=int, default=7932, help='Port to bind server (default: 7932)')

argcomplete.autocomplete(parser)
if 'pytest' not in sys.modules: # pragma: no cover
argcomplete.autocomplete(parser)
args = parser.parse_args(args_list)

from .web import run_web_command
@@ -201,7 +201,14 @@ def _cli_chat(args_list: list[str], prog_name: str, default_model: str, qualifie
"""Handle the chat command (default)."""
parser = argparse.ArgumentParser(
prog=prog_name,
description=f'Pydantic AI CLI v{__version__}',
description=f"""\
Pydantic AI CLI v{__version__}

Commands:
web Start a web-based chat interface for an agent

Use "{prog_name} web --help" for more information on the web command.
""",
formatter_class=argparse.RawTextHelpFormatter,
)

@@ -237,8 +244,8 @@ def _cli_chat(args_list: list[str], prog_name: str, default_model: str, qualifie
default='dark',
)
parser.add_argument('--no-stream', action='store_true', help='Disable streaming from the model')

argcomplete.autocomplete(parser)
if 'pytest' not in sys.modules: # pragma: no cover
argcomplete.autocomplete(parser)
args = parser.parse_args(args_list)

console = Console()
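Two details in this file are easy to miss: `argcomplete.autocomplete` is now skipped when pytest is loaded (presumably so shell-completion hooks don't interfere with the new README help test — an inference, not stated in the diff), and the multi-line description depends on `RawTextHelpFormatter`, since the default formatter would re-wrap the `Commands:` block into a single paragraph. A minimal sketch of the latter, with the version string elided the same way the README test normalizes it:

```python
# Minimal sketch showing why RawTextHelpFormatter matters for the multi-line description.
import argparse

description = (
    'Pydantic AI CLI v...\n'
    '\n'
    'Commands:\n'
    '  web   Start a web-based chat interface for an agent\n'
    '\n'
    'Use "clai web --help" for more information on the web command.\n'
)

parser = argparse.ArgumentParser(
    prog='clai',
    description=description,
    formatter_class=argparse.RawTextHelpFormatter,  # preserve the newlines verbatim
)
print(parser.format_help())  # the Commands block prints exactly as written above
```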
3 changes: 1 addition & 2 deletions tests/models/anthropic/conftest.py
@@ -21,10 +21,9 @@
AnthropicModelFactory = Callable[..., AnthropicModel]


# Model factory fixture for live API tests
@pytest.fixture
def anthropic_model(anthropic_api_key: str) -> AnthropicModelFactory:
"""Factory to create Anthropic models with custom configuration."""
"""Factory to create Anthropic models. Used by VCR-recorded integration tests."""

@cache
def _create_model(
10 changes: 5 additions & 5 deletions tests/test_cli.py
@@ -31,7 +31,7 @@

def test_cli_version(capfd: CaptureFixture[str]):
assert cli(['--version']) == 0
assert capfd.readouterr().out.startswith('pai - Pydantic AI CLI')
assert capfd.readouterr().out.startswith('clai - Pydantic AI CLI')


def test_invalid_model(capfd: CaptureFixture[str]):
@@ -141,7 +141,7 @@ def test_no_command_defaults_to_chat(mocker: MockerFixture):
def test_list_models(capfd: CaptureFixture[str]):
assert cli(['--list-models']) == 0
output = capfd.readouterr().out.splitlines()
assert output[:3] == snapshot([IsStr(regex='pai - Pydantic AI CLI .*'), '', 'Available models:'])
assert output[:3] == snapshot([IsStr(regex='clai - Pydantic AI CLI .*'), '', 'Available models:'])

providers = (
'openai',
@@ -274,21 +274,21 @@ def test_code_theme_unset(mocker: MockerFixture, env: TestEnv):
env.set('OPENAI_API_KEY', 'test')
mock_run_chat = mocker.patch('pydantic_ai._cli.run_chat')
cli([])
mock_run_chat.assert_awaited_once_with(True, IsInstance(Agent), IsInstance(Console), 'monokai', 'pai')
mock_run_chat.assert_awaited_once_with(True, IsInstance(Agent), IsInstance(Console), 'monokai', 'clai')


def test_code_theme_light(mocker: MockerFixture, env: TestEnv):
env.set('OPENAI_API_KEY', 'test')
mock_run_chat = mocker.patch('pydantic_ai._cli.run_chat')
cli(['--code-theme=light'])
mock_run_chat.assert_awaited_once_with(True, IsInstance(Agent), IsInstance(Console), 'default', 'pai')
mock_run_chat.assert_awaited_once_with(True, IsInstance(Agent), IsInstance(Console), 'default', 'clai')


def test_code_theme_dark(mocker: MockerFixture, env: TestEnv):
env.set('OPENAI_API_KEY', 'test')
mock_run_chat = mocker.patch('pydantic_ai._cli.run_chat')
cli(['--code-theme=dark'])
mock_run_chat.assert_awaited_once_with(True, IsInstance(Agent), IsInstance(Console), 'monokai', 'pai')
mock_run_chat.assert_awaited_once_with(True, IsInstance(Agent), IsInstance(Console), 'monokai', 'clai')


def test_agent_to_cli_sync(mocker: MockerFixture, env: TestEnv):