diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4aa75e1f3f..8b807012ee 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,9 +27,15 @@ repos: args: ["--skip", "tests/models/cassettes/*"] additional_dependencies: - tomli - - repo: local hooks: + - id: clai-help + name: clai help output + entry: uv + args: [run, pytest, "clai/update_readme.py"] + language: system + types_or: [python, markdown] + pass_filenames: false - id: format name: Format entry: make diff --git a/clai/README.md b/clai/README.md index 99f34c2103..4c5d3c1e3d 100644 --- a/clai/README.md +++ b/clai/README.md @@ -51,7 +51,34 @@ Either way, running `clai` will start an interactive session where you can chat - `/multiline`: Toggle multiline input mode (use Ctrl+D to submit) - `/cp`: Copy the last response to clipboard -For full CLI documentation, see the [CLI documentation](https://ai.pydantic.dev/cli/). +## Help + +``` +usage: clai [-h] [-l] [--version] [-m MODEL] [-a AGENT] [-t CODE_THEME] [--no-stream] [prompt] + +Pydantic AI CLI v... + +subcommands: + web Start a web-based chat interface for an agent + Run "clai web --help" for more information + +positional arguments: + prompt AI prompt for one-shot mode. If omitted, starts interactive mode. + +options: + -h, --help show this help message and exit + -l, --list-models List all available models and exit + --version Show version and exit + -m MODEL, --model MODEL + Model to use, in format "<provider>:<model>" e.g. "openai:gpt-5" or "anthropic:claude-sonnet-4-5". Defaults to "openai:gpt-5". + -a AGENT, --agent AGENT + Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent" + -t CODE_THEME, --code-theme CODE_THEME + Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals. 
+ --no-stream Disable streaming from the model +``` + +For more information on how to use it, see the [CLI documentation](https://ai.pydantic.dev/cli/). ## Web Chat UI diff --git a/clai/update_readme.py b/clai/update_readme.py new file mode 100644 index 0000000000..6ebf5dd3ba --- /dev/null +++ b/clai/update_readme.py @@ -0,0 +1,29 @@ +import os +import re +import sys +from pathlib import Path + +import pytest + +from pydantic_ai._cli import cli + + +@pytest.mark.skipif(sys.version_info >= (3, 13), reason='slightly different output with 3.13') +def test_cli_help(capfd: pytest.CaptureFixture[str]): + """Check README.md help output matches `clai --help`.""" + os.environ['COLUMNS'] = '150' + with pytest.raises(SystemExit): + cli(['--help'], prog_name='clai') + + help_output = capfd.readouterr().out.strip() + help_output = re.sub(r'(Pydantic AI CLI v).+', r'\1...', help_output) + + this_dir = Path(__file__).parent + readme = this_dir / 'README.md' + content = readme.read_text(encoding='utf-8') + + new_content, count = re.subn('^(## Help\n+```).+?```', rf'\1\n{help_output}\n```', content, flags=re.M | re.S) + assert count, 'help section not found' + if new_content != content: + readme.write_text(new_content, encoding='utf-8') + pytest.fail('`clai --help` output changed.') diff --git a/pydantic_ai_slim/pydantic_ai/_cli/__init__.py b/pydantic_ai_slim/pydantic_ai/_cli/__init__.py index 956e0551a2..51d14ab9f7 100644 --- a/pydantic_ai_slim/pydantic_ai/_cli/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/_cli/__init__.py @@ -122,12 +122,12 @@ def cli_system_prompt() -> str: The user is running {sys.platform}.""" -def cli_exit(prog_name: str = 'pai'): # pragma: no cover +def cli_exit(prog_name: str = 'clai'): # pragma: no cover """Run the CLI and exit.""" sys.exit(cli(prog_name=prog_name)) -def cli(args_list: Sequence[str] | None = None, *, prog_name: str = 'pai', default_model: str = 'openai:gpt-5') -> int: +def cli(args_list: Sequence[str] | None = None, *, prog_name: str = 
'clai', default_model: str = 'openai:gpt-5') -> int: """Run the CLI and return the exit code for the process.""" # we don't want to autocomplete or list models that don't include the provider, # e.g. we want to show `openai:gpt-4o` but not `gpt-4o` @@ -180,7 +180,6 @@ def _cli_web(args_list: list[str], prog_name: str, default_model: str, qualified ) parser.add_argument('--host', default='127.0.0.1', help='Host to bind server (default: 127.0.0.1)') parser.add_argument('--port', type=int, default=7932, help='Port to bind server (default: 7932)') - argcomplete.autocomplete(parser) args = parser.parse_args(args_list) @@ -201,7 +200,13 @@ def _cli_chat(args_list: list[str], prog_name: str, default_model: str, qualifie """Handle the chat command (default).""" parser = argparse.ArgumentParser( prog=prog_name, - description=f'Pydantic AI CLI v{__version__}', + description=f"""\ +Pydantic AI CLI v{__version__} + +subcommands: + web Start a web-based chat interface for an agent + Run "clai web --help" for more information +""", formatter_class=argparse.RawTextHelpFormatter, ) @@ -237,7 +242,6 @@ def _cli_chat(args_list: list[str], prog_name: str, default_model: str, qualifie default='dark', ) parser.add_argument('--no-stream', action='store_true', help='Disable streaming from the model') - argcomplete.autocomplete(parser) args = parser.parse_args(args_list) diff --git a/tests/models/anthropic/conftest.py b/tests/models/anthropic/conftest.py index 6edb7d19f2..1444c9cd30 100644 --- a/tests/models/anthropic/conftest.py +++ b/tests/models/anthropic/conftest.py @@ -21,10 +21,9 @@ AnthropicModelFactory = Callable[..., AnthropicModel] -# Model factory fixture for live API tests @pytest.fixture def anthropic_model(anthropic_api_key: str) -> AnthropicModelFactory: - """Factory to create Anthropic models with custom configuration.""" + """Factory to create Anthropic models. 
Used by VCR-recorded integration tests.""" @cache def _create_model( diff --git a/tests/test_cli.py b/tests/test_cli.py index 823d12e94e..5f17a6a0b6 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -31,7 +31,7 @@ def test_cli_version(capfd: CaptureFixture[str]): assert cli(['--version']) == 0 - assert capfd.readouterr().out.startswith('pai - Pydantic AI CLI') + assert capfd.readouterr().out.startswith('clai - Pydantic AI CLI') def test_invalid_model(capfd: CaptureFixture[str]): @@ -141,7 +141,7 @@ def test_no_command_defaults_to_chat(mocker: MockerFixture): def test_list_models(capfd: CaptureFixture[str]): assert cli(['--list-models']) == 0 output = capfd.readouterr().out.splitlines() - assert output[:3] == snapshot([IsStr(regex='pai - Pydantic AI CLI .*'), '', 'Available models:']) + assert output[:3] == snapshot([IsStr(regex='clai - Pydantic AI CLI .*'), '', 'Available models:']) providers = ( 'openai', @@ -274,21 +274,21 @@ def test_code_theme_unset(mocker: MockerFixture, env: TestEnv): env.set('OPENAI_API_KEY', 'test') mock_run_chat = mocker.patch('pydantic_ai._cli.run_chat') cli([]) - mock_run_chat.assert_awaited_once_with(True, IsInstance(Agent), IsInstance(Console), 'monokai', 'pai') + mock_run_chat.assert_awaited_once_with(True, IsInstance(Agent), IsInstance(Console), 'monokai', 'clai') def test_code_theme_light(mocker: MockerFixture, env: TestEnv): env.set('OPENAI_API_KEY', 'test') mock_run_chat = mocker.patch('pydantic_ai._cli.run_chat') cli(['--code-theme=light']) - mock_run_chat.assert_awaited_once_with(True, IsInstance(Agent), IsInstance(Console), 'default', 'pai') + mock_run_chat.assert_awaited_once_with(True, IsInstance(Agent), IsInstance(Console), 'default', 'clai') def test_code_theme_dark(mocker: MockerFixture, env: TestEnv): env.set('OPENAI_API_KEY', 'test') mock_run_chat = mocker.patch('pydantic_ai._cli.run_chat') cli(['--code-theme=dark']) - mock_run_chat.assert_awaited_once_with(True, IsInstance(Agent), IsInstance(Console), 'monokai', 
'pai') + mock_run_chat.assert_awaited_once_with(True, IsInstance(Agent), IsInstance(Console), 'monokai', 'clai') def test_agent_to_cli_sync(mocker: MockerFixture, env: TestEnv):