diff --git a/.gitignore b/.gitignore index 4654cf7..4dcf45f 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ __pycache__/ .mypy_cache/ .coverage htmlcov/ +build/ diff --git a/README.md b/README.md index cd96e54..9efdf3a 100644 --- a/README.md +++ b/README.md @@ -62,7 +62,7 @@ nano ~/.pretty-release-notes/config.toml ### Configuration Format -The configuration file uses TOML format with sections for GitHub credentials, OpenAI settings, database caching, and filters. See [`config.toml.example`](config.toml.example) for the complete structure and all available options. +The configuration file uses TOML format with sections for GitHub credentials, LLM settings, database caching, and filters. The canonical section name is `[llm]`, while the legacy `[openai]` section is still accepted for backward compatibility. Plain model names still default to OpenAI, and `provider:model` targets other `any-llm` providers. See [`config.toml.example`](config.toml.example) for the complete structure and all available options. You can override the config location using the `--config-path` flag. @@ -130,7 +130,7 @@ from pretty_release_notes import ReleaseNotesBuilder client = ( ReleaseNotesBuilder() .with_github_token("ghp_your_token") - .with_openai("sk_your_key", model="gpt-4") + .with_llm("sk_your_key", model="gpt-4") # or model="anthropic:claude-sonnet-4-5" .with_database("sqlite", enabled=True) .with_filters( exclude_types={"chore", "ci", "refactor"}, @@ -196,14 +196,16 @@ curl -X POST http://localhost:8000/generate \ "tag": "v15.38.4", "previous_tag_name": "v15.38.0", "github_token": "ghp_your_token_here", - "openai_key": "sk-your_key_here", - "openai_model": "gpt-4", + "llm_key": "sk-your_key_here", + "llm_model": "gpt-4", "exclude_types": ["chore", "ci", "refactor"], "exclude_labels": ["skip-release-notes"], "exclude_authors": ["dependabot[bot]"] }' ``` +Legacy `openai_key` and `openai_model` request fields are still accepted for backward compatibility. 
+ Response: ```json { diff --git a/config.toml.example b/config.toml.example index e099e42..385da03 100644 --- a/config.toml.example +++ b/config.toml.example @@ -17,13 +17,15 @@ token = "" # If not set, must be specified via --owner flag owner = "frappe" -[openai] -# OpenAI API key (required) -# Get one at: https://platform.openai.com/api-keys +[llm] +# LLM provider API key (required) +# For plain OpenAI models, use an OpenAI key. For other providers, use that provider's key. +# Legacy [openai] is still accepted for backward compatibility. api_key = "" -# OpenAI model to use (default: "gpt-4.1") -model = "o1" +# Model to use. Plain names default to OpenAI. +# For other providers, use "provider:model" (for example: "anthropic:claude-sonnet-4-5") +model = "gpt-4.1" # Maximum patch size before fallback to commit message (default: 10000) max_patch_size = 10000 diff --git a/examples/library_usage.py b/examples/library_usage.py index 0be4977..2396d77 100644 --- a/examples/library_usage.py +++ b/examples/library_usage.py @@ -18,7 +18,7 @@ def basic_usage(): client = ( ReleaseNotesBuilder() .with_github_token("ghp_xxxxx") # Replace with your token - .with_openai("sk-xxxxx") # Replace with your API key + .with_llm("sk-xxxxx") # Replace with your API key .build() ) @@ -37,7 +37,7 @@ def advanced_usage(): client = ( ReleaseNotesBuilder() .with_github_token("ghp_xxxxx") # Replace with your token - .with_openai("sk-xxxxx", model="gpt-4", max_patch_size=15000) + .with_llm("sk-xxxxx", model="gpt-4", max_patch_size=15000) .with_database("sqlite", enabled=True) .with_filters( exclude_types={"chore", "refactor", "ci", "style", "test"}, @@ -69,14 +69,14 @@ def direct_config_usage(): DatabaseConfig, FilterConfig, GitHubConfig, - OpenAIConfig, + LLMConfig, ReleaseNotesClient, ReleaseNotesConfig, ) config = ReleaseNotesConfig( github=GitHubConfig(token="ghp_xxxxx"), # Replace with your token - openai=OpenAIConfig(api_key="sk-xxxxx", model="gpt-4.1"), # Replace with your key + 
llm=LLMConfig(api_key="sk-xxxxx", model="gpt-4.1"), # Replace with your key database=DatabaseConfig(type="sqlite", enabled=True), filters=FilterConfig( exclude_change_types={"chore", "refactor"}, @@ -101,18 +101,16 @@ def silent_usage(): client = ( ReleaseNotesBuilder() .with_github_token("ghp_xxxxx") # Replace with your token - .with_openai("sk-xxxxx") # Replace with your API key + .with_llm("sk-xxxxx") # Replace with your API key .build() ) # No progress reporter = NullProgressReporter used by default - notes = client.generate_release_notes( + return client.generate_release_notes( owner="frappe", repo="erpnext", tag="v15.38.4", ) - return notes - if __name__ == "__main__": print("Example 1: Basic Usage") diff --git a/pretty_release_notes/__init__.py b/pretty_release_notes/__init__.py index b3336e1..fd0ec8b 100644 --- a/pretty_release_notes/__init__.py +++ b/pretty_release_notes/__init__.py @@ -6,6 +6,7 @@ DatabaseConfig, FilterConfig, GitHubConfig, + LLMConfig, OpenAIConfig, ReleaseNotesConfig, ) @@ -25,6 +26,7 @@ # Configuration "ReleaseNotesConfig", "GitHubConfig", + "LLMConfig", "OpenAIConfig", "DatabaseConfig", "FilterConfig", diff --git a/pretty_release_notes/api.py b/pretty_release_notes/api.py index b945d40..5864557 100644 --- a/pretty_release_notes/api.py +++ b/pretty_release_notes/api.py @@ -7,7 +7,7 @@ FilterConfig, GitHubConfig, GroupingConfig, - OpenAIConfig, + LLMConfig, ReleaseNotesConfig, ) from .core.interfaces import NullProgressReporter, ProgressReporter @@ -72,8 +72,8 @@ class ReleaseNotesBuilder: def __init__(self): self._github_token = None - self._openai_key = None - self._openai_model = "gpt-4.1" + self._llm_key = None + self._llm_model = "gpt-4.1" self._max_patch_size = 10000 self._db_type = "sqlite" self._db_name = "stored_lines" @@ -93,13 +93,17 @@ def with_github_token(self, token: str) -> "ReleaseNotesBuilder": self._github_token = token return self - def with_openai(self, api_key: str, model: str = "gpt-4.1", max_patch_size: int = 
10000) -> "ReleaseNotesBuilder": - """Set OpenAI configuration.""" - self._openai_key = api_key - self._openai_model = model + def with_llm(self, api_key: str, model: str = "gpt-4.1", max_patch_size: int = 10000) -> "ReleaseNotesBuilder": + """Set LLM configuration.""" + self._llm_key = api_key + self._llm_model = model self._max_patch_size = max_patch_size return self + def with_openai(self, api_key: str, model: str = "gpt-4.1", max_patch_size: int = 10000) -> "ReleaseNotesBuilder": + """Backward-compatible alias for with_llm().""" + return self.with_llm(api_key=api_key, model=model, max_patch_size=max_patch_size) + def with_database( self, db_type: str = "sqlite", db_name: str = "stored_lines", enabled: bool = True ) -> "ReleaseNotesBuilder": @@ -179,14 +183,14 @@ def build(self) -> ReleaseNotesClient: """ if not self._github_token: raise ValueError("GitHub token is required") - if not self._openai_key: - raise ValueError("OpenAI API key is required") + if not self._llm_key: + raise ValueError("LLM API key is required") config = ReleaseNotesConfig( github=GitHubConfig(token=self._github_token), - openai=OpenAIConfig( - api_key=self._openai_key, - model=self._openai_model, + llm=LLMConfig( + api_key=self._llm_key, + model=self._llm_model, max_patch_size=self._max_patch_size, ), database=DatabaseConfig( diff --git a/pretty_release_notes/core/config.py b/pretty_release_notes/core/config.py index 69d677c..e89ade1 100644 --- a/pretty_release_notes/core/config.py +++ b/pretty_release_notes/core/config.py @@ -13,14 +13,17 @@ def __post_init__(self): @dataclass -class OpenAIConfig: +class LLMConfig: api_key: str model: str = "gpt-4.1" max_patch_size: int = 10000 def __post_init__(self): if not self.api_key: - raise ValueError("OpenAI API key is required") + raise ValueError("LLM API key is required") + + +OpenAIConfig = LLMConfig @dataclass @@ -87,12 +90,47 @@ def _get_default_prompt_path() -> Path: return package_dir / "prompt.txt" -@dataclass +@dataclass(init=False) 
class ReleaseNotesConfig: github: GitHubConfig - openai: OpenAIConfig + llm: LLMConfig database: DatabaseConfig = field(default_factory=DatabaseConfig) filters: FilterConfig = field(default_factory=FilterConfig) grouping: GroupingConfig = field(default_factory=GroupingConfig) prompt_path: Path = field(default_factory=_get_default_prompt_path) force_use_commits: bool = False + + def __init__( + self, + github: GitHubConfig, + llm: LLMConfig | None = None, + openai: LLMConfig | None = None, + database: DatabaseConfig | None = None, + filters: FilterConfig | None = None, + grouping: GroupingConfig | None = None, + prompt_path: Path | None = None, + force_use_commits: bool = False, + ): + if llm is not None and openai is not None and llm != openai: + raise ValueError("Pass either llm or openai configuration, not both") + + resolved_llm = llm or openai + if resolved_llm is None: + raise ValueError("LLM configuration is required") + + self.github = github + self.llm = resolved_llm + self.database = database if database is not None else DatabaseConfig() + self.filters = filters if filters is not None else FilterConfig() + self.grouping = grouping if grouping is not None else GroupingConfig() + self.prompt_path = prompt_path if prompt_path is not None else _get_default_prompt_path() + self.force_use_commits = force_use_commits + + @property + def openai(self) -> LLMConfig: + """Backward-compatible alias for llm configuration.""" + return self.llm + + @openai.setter + def openai(self, value: LLMConfig) -> None: + self.llm = value diff --git a/pretty_release_notes/core/config_loader.py b/pretty_release_notes/core/config_loader.py index 1d4952d..9e022b7 100644 --- a/pretty_release_notes/core/config_loader.py +++ b/pretty_release_notes/core/config_loader.py @@ -10,7 +10,7 @@ FilterConfig, GitHubConfig, GroupingConfig, - OpenAIConfig, + LLMConfig, ReleaseNotesConfig, _get_default_prompt_path, ) @@ -30,14 +30,18 @@ def __init__(self, config_dict: dict[str, Any]): 
self.config_dict = config_dict def load(self) -> ReleaseNotesConfig: + llm_api_key = self.config_dict.get("llm_api_key", self.config_dict.get("openai_api_key")) + if llm_api_key is None: + raise KeyError("llm_api_key") + return ReleaseNotesConfig( github=GitHubConfig( token=self.config_dict["github_token"], owner=self.config_dict.get("github_owner"), ), - openai=OpenAIConfig( - api_key=self.config_dict["openai_api_key"], - model=self.config_dict.get("openai_model", "gpt-4.1"), + llm=LLMConfig( + api_key=llm_api_key, + model=self.config_dict.get("llm_model", self.config_dict.get("openai_model", "gpt-4.1")), max_patch_size=self.config_dict.get("max_patch_size", 10000), ), database=DatabaseConfig( @@ -79,21 +83,21 @@ def __init__(self, env_path: str = ".env"): def load(self) -> ReleaseNotesConfig: config = dotenv_values(self.env_path) - # Required fields - will raise KeyError if missing + # Required fields github_token = config["GH_TOKEN"] - openai_key = config["OPENAI_API_KEY"] + llm_key = config.get("LLM_API_KEY") or config.get("OPENAI_API_KEY") - # Ensure github_token and openai_key are not None + # Ensure github_token and llm_key are not None if github_token is None: raise ValueError("GH_TOKEN is required in .env file") - if openai_key is None: - raise ValueError("OPENAI_API_KEY is required in .env file") + if llm_key is None: + raise ValueError("LLM_API_KEY is required in .env file") return ReleaseNotesConfig( github=GitHubConfig(token=github_token, owner=config.get("DEFAULT_OWNER")), - openai=OpenAIConfig( - api_key=openai_key, - model=config.get("OPENAI_MODEL") or "gpt-4.1", + llm=LLMConfig( + api_key=llm_key, + model=config.get("LLM_MODEL") or config.get("OPENAI_MODEL") or "gpt-4.1", max_patch_size=int(config.get("MAX_PATCH_SIZE") or "10000"), ), database=DatabaseConfig( @@ -155,29 +159,29 @@ def load(self) -> ReleaseNotesConfig: # Extract nested sections with defaults github_config = config.get("github", {}) - openai_config = config.get("openai", {}) + 
llm_config = {**config.get("openai", {}), **config.get("llm", {})} database_config = config.get("database", {}) filters_config = config.get("filters", {}) grouping_config = config.get("grouping", {}) # Required fields github_token = github_config.get("token") - openai_key = openai_config.get("api_key") + llm_key = llm_config.get("api_key") if not github_token: raise ValueError("github.token is required in config file") - if not openai_key: - raise ValueError("openai.api_key is required in config file") + if not llm_key: + raise ValueError("llm.api_key is required in config file") return ReleaseNotesConfig( github=GitHubConfig( token=github_token, owner=github_config.get("owner"), ), - openai=OpenAIConfig( - api_key=openai_key, - model=openai_config.get("model", "gpt-4.1"), - max_patch_size=openai_config.get("max_patch_size", 10000), + llm=LLMConfig( + api_key=llm_key, + model=llm_config.get("model", "gpt-4.1"), + max_patch_size=llm_config.get("max_patch_size", 10000), ), database=DatabaseConfig( type=database_config.get("type", "sqlite"), diff --git a/pretty_release_notes/generator.py b/pretty_release_notes/generator.py index 07b7f3b..196c9e8 100644 --- a/pretty_release_notes/generator.py +++ b/pretty_release_notes/generator.py @@ -11,7 +11,7 @@ from .database import get_db from .github_client import GitHubClient from .models import ReleaseNotes, ReleaseNotesLine, Repository -from .openai_client import get_chat_response +from .openai_client import format_model_name, get_chat_response class ReleaseNotesGenerator: @@ -32,9 +32,9 @@ def __init__( self.exclude_change_labels = config.filters.exclude_change_labels self.exclude_authors = config.filters.exclude_authors self.grouping = config.grouping - self.openai_api_key = config.openai.api_key - self.openai_model = config.openai.model - self.max_patch_size = config.openai.max_patch_size + self.llm_api_key = config.llm.api_key + self.llm_model = config.llm.model + self.max_patch_size = config.llm.max_patch_size 
self.prompt_path = config.prompt_path self.db_type = config.database.type self.db_name = config.database.name @@ -131,7 +131,7 @@ def generate(self, tag: str, previous_tag_name: str | None = None) -> str: self.exclude_change_labels, self.exclude_authors, grouping=self.grouping, - model_name=f"OpenAI {self.openai_model}", + model_name=format_model_name(self.llm_model), ) def _get_prs_for_lines(self, lines: list["ReleaseNotesLine"]) -> None: @@ -233,11 +233,11 @@ def _process_line(self, line: "ReleaseNotesLine", prompt_template: str): try: change_summary = get_chat_response( content=prompt, - model=self.openai_model, - api_key=self.openai_api_key, + model=self.llm_model, + api_key=self.llm_api_key, ) except Exception as e: - error_msg = f"OpenAI API error for {line.change}: {str(e)}" + error_msg = f"LLM API error ({format_model_name(self.llm_model)}) for {line.change}: {str(e)}" self.progress.report(ProgressEvent(type="error", message=error_msg)) return diff --git a/pretty_release_notes/openai_client.py b/pretty_release_notes/openai_client.py index 42a5b9d..10c1135 100644 --- a/pretty_release_notes/openai_client.py +++ b/pretty_release_notes/openai_client.py @@ -1,11 +1,52 @@ -from openai import OpenAI +from typing import Any + +from any_llm import AnyLLM, completion from tenacity import ( retry, stop_after_attempt, wait_random_exponential, ) -MODELS_WITH_FLEX = {"o3", "o4-mini", "gpt-5-nano", "gpt-5-mini", "gpt-5", "gpt-5.1", "gpt-5.2"} +DEFAULT_PROVIDER = "openai" +OPENAI_MODELS_WITH_FLEX = { + "o3", + "o4-mini", + "gpt-5-nano", + "gpt-5-mini", + "gpt-5", + "gpt-5.1", + "gpt-5.2", + "gpt-5.4-pro", + "gpt-5.4", +} + + +def _get_model_info(model: str) -> tuple[str, str, bool]: + model = model.strip() + try: + provider, provider_model = AnyLLM.split_model_provider(model) + except ValueError: + if ":" in model or "/" in model: + raise + return DEFAULT_PROVIDER, model, False + return provider.value, provider_model, True + + +def _get_provider_kwargs(provider: str, 
model: str) -> dict[str, object]: + if provider != DEFAULT_PROVIDER: + return {} + return { + "client_args": {"timeout": 900.0}, + "service_tier": "flex" if model in OPENAI_MODELS_WITH_FLEX else "auto", + } + + +def format_model_name(model: str) -> str: + """Format model information for user-facing output.""" + provider, provider_model, _ = _get_model_info(model) + if provider == DEFAULT_PROVIDER: + return f"OpenAI {provider_model}" + return f"{provider}:{provider_model}" @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6)) @@ -14,27 +55,32 @@ def get_chat_response( model: str, api_key: str, ) -> str: - """Get a chat response from OpenAI. + """Get a chat response through any-llm. Raises: - Exception: If OpenAI API call fails after all retry attempts - ValueError: If OpenAI API returns empty content + Exception: If the provider API call fails after all retry attempts + ValueError: If the provider API returns empty content """ - client = OpenAI(api_key=api_key, timeout=900.0) - - chat_completion = client.chat.completions.create( - messages=[ + provider, provider_model, is_provider_qualified = _get_model_info(model) + completion_kwargs: dict[str, Any] = { + "messages": [ { "role": "user", "content": content, } ], - model=model, - service_tier="flex" if model in MODELS_WITH_FLEX else "auto", - ) + "model": model.strip() if is_provider_qualified else provider_model, + "api_key": api_key, + **_get_provider_kwargs(provider, provider_model), + } + + if not is_provider_qualified: + completion_kwargs["provider"] = DEFAULT_PROVIDER + + chat_completion: Any = completion(**completion_kwargs) response_content: str | None = chat_completion.choices[0].message.content if response_content is None: - raise ValueError("OpenAI API returned empty content") + raise ValueError("LLM API returned empty content") # At this point, mypy knows response_content is str (not None) return str(response_content) diff --git a/pretty_release_notes/setup_command.py 
b/pretty_release_notes/setup_command.py index 4dc35b9..5e39853 100644 --- a/pretty_release_notes/setup_command.py +++ b/pretty_release_notes/setup_command.py @@ -73,15 +73,15 @@ def setup_config( default=existing_values.get("github_owner") or "frappe", ) - console.print("\n[bold cyan]OpenAI Configuration[/bold cyan]") - openai_key = Prompt.ask( - "OpenAI API Key", - default=existing_values.get("openai_key", ""), + console.print("\n[bold cyan]LLM Configuration[/bold cyan]") + llm_key = Prompt.ask( + "LLM API Key", + default=existing_values.get("llm_key", ""), password=True, ) - openai_model = Prompt.ask( - "OpenAI Model", - default=existing_values.get("openai_model") or "o1", + llm_model = Prompt.ask( + "Model (use provider:model for non-OpenAI providers)", + default=existing_values.get("llm_model") or "gpt-4.1", ) max_patch_size = Prompt.ask( "Maximum patch size before fallback", @@ -130,8 +130,8 @@ def setup_config( toml_content = _build_toml_content( github_token=github_token, github_owner=github_owner, - openai_key=openai_key, - openai_model=openai_model, + llm_key=llm_key, + llm_model=llm_model, max_patch_size=int(max_patch_size), db_type=db_type, db_name=db_name, @@ -148,7 +148,7 @@ def setup_config( console.print("[dim]" + "─" * 60 + "[/dim]") # Mask sensitive values in preview preview = toml_content.replace(github_token, "ghp_***" if github_token else "") - preview = preview.replace(openai_key, "sk-***" if openai_key else "") + preview = preview.replace(llm_key, "***" if llm_key else "") # Escape opening square brackets for Rich markup (closing brackets are fine) preview = preview.replace("[", r"\[") console.print(preview) @@ -175,16 +175,16 @@ def setup_config( def _flatten_toml(toml_config: dict) -> dict: """Flatten nested TOML config to simple dict for defaults.""" - flat = {} - github = toml_config.get("github", {}) - flat["github_token"] = github.get("token", "") - flat["github_owner"] = github.get("owner", "") + flat = { + "github_token": 
toml_config.get("github", {}).get("token", ""),
+        "github_owner": toml_config.get("github", {}).get("owner", ""),
+    }
 
-    openai = toml_config.get("openai", {})
-    flat["openai_key"] = openai.get("api_key", "")
-    flat["openai_model"] = openai.get("model", "")
-    flat["max_patch_size"] = openai.get("max_patch_size", 10000)
+    llm = {**toml_config.get("openai", {}), **toml_config.get("llm", {})}
+    flat["llm_key"] = llm.get("api_key", "")
+    flat["llm_model"] = llm.get("model", "")
+    flat["max_patch_size"] = llm.get("max_patch_size", 10000)
 
     database = toml_config.get("database", {})
     flat["db_type"] = database.get("type", "")
@@ -207,8 +207,8 @@ def _migrate_env_to_dict(env_values: dict) -> dict:
     return {
         "github_token": env_values.get("GH_TOKEN", ""),
         "github_owner": env_values.get("DEFAULT_OWNER", ""),
-        "openai_key": env_values.get("OPENAI_API_KEY", ""),
-        "openai_model": env_values.get("OPENAI_MODEL", ""),
+        "llm_key": env_values.get("LLM_API_KEY") or env_values.get("OPENAI_API_KEY", ""),
+        "llm_model": env_values.get("LLM_MODEL") or env_values.get("OPENAI_MODEL", ""),
         "max_patch_size": int(env_values.get("MAX_PATCH_SIZE", "10000")),
         "db_type": env_values.get("DB_TYPE", ""),
         "db_name": env_values.get("DB_NAME", ""),
@@ -222,8 +222,8 @@ def _migrate_env_to_dict(env_values: dict) -> dict:
 def _build_toml_content(
     github_token: str,
     github_owner: str,
-    openai_key: str,
-    openai_model: str,
+    llm_key: str,
+    llm_model: str,
     max_patch_size: int,
     db_type: str,
     db_name: str,
@@ -256,9 +256,11 @@ def to_toml_array(s: str) -> str:
 token = "{github_token}"
 owner = "{github_owner}"
 
-[openai]
-api_key = "{openai_key}"
-model = "{openai_model}"
+[llm]
+# Legacy [openai] is still accepted for backward compatibility.
+api_key = "{llm_key}"
+# Use plain model names for OpenAI or "provider:model" for other providers.
+model = "{llm_model}" max_patch_size = {max_patch_size} [database] diff --git a/pretty_release_notes/web/app.py b/pretty_release_notes/web/app.py index 8aac7ae..e580901 100644 --- a/pretty_release_notes/web/app.py +++ b/pretty_release_notes/web/app.py @@ -4,7 +4,7 @@ from typing import Any from fastapi import BackgroundTasks, FastAPI, HTTPException -from pydantic import BaseModel +from pydantic import AliasChoices, BaseModel, ConfigDict, Field from ..api import ReleaseNotesBuilder from ..core.interfaces import ProgressEvent, ProgressReporter @@ -18,12 +18,14 @@ class GenerateRequest(BaseModel): """Request model for generating release notes.""" + model_config = ConfigDict(populate_by_name=True) + owner: str repo: str tag: str github_token: str - openai_key: str - openai_model: str = "gpt-4.1" + llm_key: str = Field(validation_alias=AliasChoices("llm_key", "openai_key")) + llm_model: str = Field(default="gpt-4.1", validation_alias=AliasChoices("llm_model", "openai_model")) exclude_types: list[str] = [] exclude_labels: list[str] = [] exclude_authors: list[str] = [] @@ -110,7 +112,7 @@ async def process_generation(job_id: str, request: GenerateRequest) -> None: client = ( ReleaseNotesBuilder() .with_github_token(request.github_token) - .with_openai(request.openai_key, request.openai_model) + .with_llm(request.llm_key, request.llm_model) .with_filters( exclude_types=set(request.exclude_types), exclude_labels=set(request.exclude_labels), diff --git a/pyproject.toml b/pyproject.toml index 28e6a20..f0cfdf5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,7 +12,7 @@ license = {text = "MIT"} authors = [ {name = "Raffael Meyer" } ] -keywords = ["github", "release-notes", "openai", "cli"] +keywords = ["github", "release-notes", "llm", "cli"] classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", @@ -28,9 +28,9 @@ dependencies = [ "typer>=0.9.0", "rich>=13.0.0", "requests>=2.31.0", - "openai>=1.0.0", "python-dotenv>=1.0.0", 
"tenacity>=8.2.0", + "any-llm-sdk[all]", ] [project.optional-dependencies] diff --git a/tests/test_api.py b/tests/test_api.py index 5715f70..c5fb79b 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -9,7 +9,7 @@ from pretty_release_notes.core.config import ( DatabaseConfig, GitHubConfig, - OpenAIConfig, + LLMConfig, ReleaseNotesConfig, ) from pretty_release_notes.core.interfaces import ProgressEvent, ProgressReporter @@ -26,7 +26,7 @@ def test_custom_progress_reporter_receives_events(self): # Build a client with the mock reporter config = ReleaseNotesConfig( github=GitHubConfig(token="test_token"), - openai=OpenAIConfig(api_key="test_key"), + llm=LLMConfig(api_key="test_key"), ) client = ReleaseNotesClient(config, progress_reporter=mock_reporter) @@ -40,7 +40,7 @@ def test_builder_with_custom_progress_reporter(self): client = ( ReleaseNotesBuilder() .with_github_token("test_token") - .with_openai("test_key") + .with_llm("test_key") .with_progress_reporter(mock_reporter) .build() ) @@ -51,7 +51,7 @@ def test_null_progress_reporter_when_none_provided(self): """Test that NullProgressReporter is used when none provided.""" config = ReleaseNotesConfig( github=GitHubConfig(token="test_token"), - openai=OpenAIConfig(api_key="test_key"), + llm=LLMConfig(api_key="test_key"), ) client = ReleaseNotesClient(config) @@ -82,7 +82,7 @@ def report(self, event: ProgressEvent) -> None: config = ReleaseNotesConfig( github=GitHubConfig(token="test_token"), - openai=OpenAIConfig(api_key="test_key"), + llm=LLMConfig(api_key="test_key"), ) client = ReleaseNotesClient(config, progress_reporter=reporter) @@ -95,7 +95,7 @@ def report(self, event: ProgressEvent) -> None: def test_silent_operation_with_null_reporter(self): """Test that silent operation works (no progress output).""" # Build without progress reporter (should use NullProgressReporter) - client = ReleaseNotesBuilder().with_github_token("test_token").with_openai("test_key").build() + client = 
ReleaseNotesBuilder().with_github_token("test_token").with_llm("test_key").build() from pretty_release_notes.core.interfaces import NullProgressReporter @@ -111,16 +111,16 @@ class TestConfigurationValidation: def test_missing_github_token_raises_error(self): """Test that building without GitHub token raises ValueError.""" - builder = ReleaseNotesBuilder().with_openai("test_key") + builder = ReleaseNotesBuilder().with_llm("test_key") with pytest.raises(ValueError, match="GitHub token is required"): builder.build() - def test_missing_openai_key_raises_error(self): - """Test that building without OpenAI key raises ValueError.""" + def test_missing_llm_key_raises_error(self): + """Test that building without LLM key raises ValueError.""" builder = ReleaseNotesBuilder().with_github_token("test_token") - with pytest.raises(ValueError, match="OpenAI API key is required"): + with pytest.raises(ValueError, match="LLM API key is required"): builder.build() def test_invalid_database_type_raises_error(self): @@ -133,14 +133,14 @@ def test_empty_github_token_raises_error(self): with pytest.raises(ValueError, match="GitHub token is required"): GitHubConfig(token="") - def test_empty_openai_key_raises_error(self): - """Test that empty OpenAI API key raises ValueError.""" - with pytest.raises(ValueError, match="OpenAI API key is required"): - OpenAIConfig(api_key="") + def test_empty_llm_key_raises_error(self): + """Test that empty LLM API key raises ValueError.""" + with pytest.raises(ValueError, match="LLM API key is required"): + LLMConfig(api_key="") def test_valid_configuration_builds_successfully(self): """Test that valid configuration builds without errors.""" - client = ReleaseNotesBuilder().with_github_token("test_token").with_openai("test_key").build() + client = ReleaseNotesBuilder().with_github_token("test_token").with_llm("test_key").build() assert client is not None assert isinstance(client, ReleaseNotesClient) @@ -150,7 +150,7 @@ def 
test_all_builder_options_work(self): client = ( ReleaseNotesBuilder() .with_github_token("test_token") - .with_openai("test_key", model="gpt-4", max_patch_size=15000) + .with_llm("test_key", model="gpt-4", max_patch_size=15000) .with_database("sqlite", db_name="test_db", enabled=True) .with_filters( exclude_types={"chore", "ci"}, @@ -163,9 +163,9 @@ def test_all_builder_options_work(self): ) assert client.config.github.token == "test_token" - assert client.config.openai.api_key == "test_key" - assert client.config.openai.model == "gpt-4" - assert client.config.openai.max_patch_size == 15000 + assert client.config.llm.api_key == "test_key" + assert client.config.llm.model == "gpt-4" + assert client.config.llm.max_patch_size == 15000 assert client.config.database.type == "sqlite" assert client.config.database.name == "test_db" assert client.config.database.enabled is True @@ -180,18 +180,26 @@ def test_builder_returns_self_for_chaining(self): builder = ReleaseNotesBuilder() assert builder.with_github_token("token") is builder + assert builder.with_llm("key") is builder assert builder.with_openai("key") is builder assert builder.with_database() is builder assert builder.with_filters() is builder assert builder.with_prompt_file(Path("test.txt")) is builder assert builder.with_force_commits() is builder + def test_with_openai_alias_populates_llm_config(self): + """Test that with_openai remains a working alias.""" + client = ReleaseNotesBuilder().with_github_token("test_token").with_openai("test_key").build() + + assert client.config.llm.api_key == "test_key" + assert client.config.openai.api_key == "test_key" + def test_partial_filters_work(self): """Test that filters can be set partially (not all at once).""" client = ( ReleaseNotesBuilder() .with_github_token("test_token") - .with_openai("test_key") + .with_llm("test_key") .with_filters(exclude_types={"chore"}) .build() ) @@ -202,20 +210,20 @@ def test_partial_filters_work(self): def test_database_defaults(self): 
"""Test that database has sensible defaults.""" - client = ReleaseNotesBuilder().with_github_token("test_token").with_openai("test_key").build() + client = ReleaseNotesBuilder().with_github_token("test_token").with_llm("test_key").build() # Check defaults assert client.config.database.type == "sqlite" assert client.config.database.name == "stored_lines" assert client.config.database.enabled is True - def test_openai_defaults(self): - """Test that OpenAI config has sensible defaults.""" - client = ReleaseNotesBuilder().with_github_token("test_token").with_openai("test_key").build() + def test_llm_defaults(self): + """Test that LLM config has sensible defaults.""" + client = ReleaseNotesBuilder().with_github_token("test_token").with_llm("test_key").build() # Check defaults - assert client.config.openai.model == "gpt-4.1" - assert client.config.openai.max_patch_size == 10000 + assert client.config.llm.model == "gpt-4.1" + assert client.config.llm.max_patch_size == 10000 class TestClientAPI: @@ -225,7 +233,7 @@ def test_client_initializes_with_config(self): """Test that client initializes with configuration.""" config = ReleaseNotesConfig( github=GitHubConfig(token="test_token"), - openai=OpenAIConfig(api_key="test_key"), + llm=LLMConfig(api_key="test_key"), ) client = ReleaseNotesClient(config) @@ -235,7 +243,7 @@ def test_generate_release_notes_calls_generator(self): """Test that generate_release_notes calls the generator correctly.""" config = ReleaseNotesConfig( github=GitHubConfig(token="test_token"), - openai=OpenAIConfig(api_key="test_key"), + llm=LLMConfig(api_key="test_key"), ) client = ReleaseNotesClient(config) @@ -255,7 +263,7 @@ def test_update_github_release_calls_generator(self): """Test that update_github_release calls the generator correctly.""" config = ReleaseNotesConfig( github=GitHubConfig(token="test_token"), - openai=OpenAIConfig(api_key="test_key"), + llm=LLMConfig(api_key="test_key"), ) client = ReleaseNotesClient(config) diff --git 
a/tests/test_core.py b/tests/test_core.py index dd8756d..19012fb 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -9,6 +9,7 @@ FilterConfig, GitHubConfig, GroupingConfig, + LLMConfig, OpenAIConfig, ReleaseNotesConfig, ) @@ -88,24 +89,28 @@ def test_empty_token_raises_error(self): GitHubConfig(token="") -class TestOpenAIConfig: - """Test OpenAIConfig validation.""" +class TestLLMConfig: + """Test LLMConfig validation.""" def test_valid_config_with_defaults(self): - config = OpenAIConfig(api_key="test_key") + config = LLMConfig(api_key="test_key") assert config.api_key == "test_key" assert config.model == "gpt-4.1" assert config.max_patch_size == 10000 def test_valid_config_with_custom_values(self): - config = OpenAIConfig(api_key="test_key", model="gpt-4", max_patch_size=5000) + config = LLMConfig(api_key="test_key", model="gpt-4", max_patch_size=5000) assert config.api_key == "test_key" assert config.model == "gpt-4" assert config.max_patch_size == 5000 def test_empty_api_key_raises_error(self): - with pytest.raises(ValueError, match="OpenAI API key is required"): - OpenAIConfig(api_key="") + with pytest.raises(ValueError, match="LLM API key is required"): + LLMConfig(api_key="") + + def test_openai_config_alias_still_works(self): + config = OpenAIConfig(api_key="test_key") + assert config.api_key == "test_key" class TestDatabaseConfig: @@ -176,7 +181,7 @@ def test_release_notes_config_with_grouping(self): """Test ReleaseNotesConfig includes GroupingConfig.""" config = ReleaseNotesConfig( github=GitHubConfig(token="token"), - openai=OpenAIConfig(api_key="key"), + llm=LLMConfig(api_key="key"), grouping=GroupingConfig(group_by_type=True), ) assert config.grouping.group_by_type is True @@ -188,10 +193,11 @@ class TestReleaseNotesConfig: def test_minimal_config(self): config = ReleaseNotesConfig( github=GitHubConfig(token="gh_token"), - openai=OpenAIConfig(api_key="openai_key"), + llm=LLMConfig(api_key="llm_key"), ) assert config.github.token == "gh_token" - 
assert config.openai.api_key == "openai_key" + assert config.llm.api_key == "llm_key" + assert config.openai.api_key == "llm_key" assert config.database.type == "sqlite" assert config.filters.exclude_change_types == set() # Check that prompt_path is set to the package's prompt.txt @@ -202,39 +208,55 @@ def test_minimal_config(self): def test_full_config(self): config = ReleaseNotesConfig( github=GitHubConfig(token="gh_token", owner="owner"), - openai=OpenAIConfig(api_key="openai_key", model="gpt-4"), + llm=LLMConfig(api_key="llm_key", model="gpt-4"), database=DatabaseConfig(type="csv", enabled=False), filters=FilterConfig(exclude_change_types={"chore"}), prompt_path=Path("custom_prompt.txt"), force_use_commits=True, ) assert config.github.owner == "owner" - assert config.openai.model == "gpt-4" + assert config.llm.model == "gpt-4" assert config.database.type == "csv" assert config.database.enabled is False assert config.filters.exclude_change_types == {"chore"} assert config.prompt_path == Path("custom_prompt.txt") assert config.force_use_commits is True + def test_openai_constructor_alias(self): + config = ReleaseNotesConfig( + github=GitHubConfig(token="gh_token"), + openai=OpenAIConfig(api_key="legacy_key"), + ) + + assert config.llm.api_key == "legacy_key" + assert config.openai.api_key == "legacy_key" + class TestDictConfigLoader: """Test DictConfigLoader.""" def test_load_minimal_config(self): - config_dict = {"github_token": "gh_token", "openai_api_key": "openai_key"} + config_dict = {"github_token": "gh_token", "llm_api_key": "llm_key"} loader = DictConfigLoader(config_dict) config = loader.load() assert config.github.token == "gh_token" - assert config.openai.api_key == "openai_key" + assert config.llm.api_key == "llm_key" assert config.database.type == "sqlite" + def test_load_minimal_config_with_openai_alias_keys(self): + config_dict = {"github_token": "gh_token", "openai_api_key": "legacy_key"} + loader = DictConfigLoader(config_dict) + config = 
loader.load() + + assert config.llm.api_key == "legacy_key" + def test_load_full_config(self): config_dict = { "github_token": "gh_token", "github_owner": "test_owner", - "openai_api_key": "openai_key", - "openai_model": "gpt-4", + "llm_api_key": "llm_key", + "llm_model": "gpt-4", "max_patch_size": 5000, "db_type": "csv", "db_name": "custom_db", @@ -250,9 +272,9 @@ def test_load_full_config(self): assert config.github.token == "gh_token" assert config.github.owner == "test_owner" - assert config.openai.api_key == "openai_key" - assert config.openai.model == "gpt-4" - assert config.openai.max_patch_size == 5000 + assert config.llm.api_key == "llm_key" + assert config.llm.model == "gpt-4" + assert config.llm.max_patch_size == 5000 assert config.database.type == "csv" assert config.database.name == "custom_db" assert config.database.enabled is False @@ -263,7 +285,7 @@ def test_load_full_config(self): assert config.force_use_commits is True def test_missing_required_raises_error(self): - config_dict = {"openai_api_key": "openai_key"} + config_dict = {"llm_api_key": "llm_key"} loader = DictConfigLoader(config_dict) with pytest.raises(KeyError): loader.load() @@ -272,28 +294,50 @@ def test_missing_required_raises_error(self): class TestTomlConfigLoader: """Test TomlConfigLoader.""" - def test_load_minimal_config(self, tmp_path): + def _write_config(self, tmp_path, content: str): config_file = tmp_path / "config.toml" - config_file.write_text( + config_file.write_text(content) + return config_file + + def test_load_minimal_config(self, tmp_path): + config_file = self._write_config( + tmp_path, """ [github] token = "gh_token" -[openai] -api_key = "openai_key" -""" +[llm] +api_key = "llm_key" +""", ) loader = TomlConfigLoader(config_file) config = loader.load() assert config.github.token == "gh_token" - assert config.openai.api_key == "openai_key" + assert config.llm.api_key == "llm_key" assert config.database.type == "sqlite" + def 
test_load_minimal_config_with_openai_section_alias(self, tmp_path): + config_file = self._write_config( + tmp_path, + """ +[github] +token = "gh_token" + +[openai] +api_key = "legacy_key" +""", + ) + + loader = TomlConfigLoader(config_file) + config = loader.load() + + assert config.llm.api_key == "legacy_key" + def test_load_full_config(self, tmp_path): - config_file = tmp_path / "config.toml" - config_file.write_text( + config_file = self._write_config( + tmp_path, """prompt_path = "custom.txt" force_use_commits = true @@ -301,8 +345,8 @@ def test_load_full_config(self, tmp_path): token = "gh_token" owner = "test_owner" -[openai] -api_key = "openai_key" +[llm] +api_key = "llm_key" model = "gpt-4" max_patch_size = 5000 @@ -315,7 +359,7 @@ def test_load_full_config(self, tmp_path): exclude_change_types = ["chore", "refactor"] exclude_change_labels = ["skip"] exclude_authors = ["bot"] -""" +""", ) loader = TomlConfigLoader(config_file) @@ -323,9 +367,9 @@ def test_load_full_config(self, tmp_path): assert config.github.token == "gh_token" assert config.github.owner == "test_owner" - assert config.openai.api_key == "openai_key" - assert config.openai.model == "gpt-4" - assert config.openai.max_patch_size == 5000 + assert config.llm.api_key == "llm_key" + assert config.llm.model == "gpt-4" + assert config.llm.max_patch_size == 5000 assert config.database.type == "csv" assert config.database.name == "custom_db" assert config.database.enabled is False @@ -342,33 +386,33 @@ def test_missing_file_raises_error(self, tmp_path): loader.load() def test_missing_required_github_token_raises_error(self, tmp_path): - config_file = tmp_path / "config.toml" - config_file.write_text( + config_file = self._write_config( + tmp_path, """ [github] -[openai] -api_key = "openai_key" -""" +[llm] +api_key = "llm_key" +""", ) loader = TomlConfigLoader(config_file) with pytest.raises(ValueError, match="github.token is required"): loader.load() - def 
test_missing_required_openai_key_raises_error(self, tmp_path): - config_file = tmp_path / "config.toml" - config_file.write_text( + def test_missing_required_llm_key_raises_error(self, tmp_path): + config_file = self._write_config( + tmp_path, """ [github] token = "gh_token" -[openai] -""" +[llm] +""", ) loader = TomlConfigLoader(config_file) - with pytest.raises(ValueError, match="openai.api_key is required"): + with pytest.raises(ValueError, match="llm.api_key is required"): loader.load() def test_default_config_path(self): diff --git a/tests/test_openai_client.py b/tests/test_openai_client.py new file mode 100644 index 0000000..d26647e --- /dev/null +++ b/tests/test_openai_client.py @@ -0,0 +1,94 @@ +"""Tests for the any-llm-backed chat client wrapper.""" + +from types import SimpleNamespace +from unittest.mock import patch + +from pretty_release_notes.openai_client import format_model_name, get_chat_response + + +def _mock_completion_response(content: str): + return SimpleNamespace( + choices=[SimpleNamespace(message=SimpleNamespace(content=content))], + ) + + +class TestOpenAIClient: + """Test the compatibility wrapper around any-llm.""" + + @patch("pretty_release_notes.openai_client.completion") + def test_plain_model_defaults_to_openai_provider(self, mock_completion): + mock_completion.return_value = _mock_completion_response("summary") + + result = get_chat_response( + content="Write a summary", + model="gpt-4.1", + api_key="test-key", + ) + + assert result == "summary" + mock_completion.assert_called_once_with( + messages=[{"role": "user", "content": "Write a summary"}], + model="gpt-4.1", + provider="openai", + api_key="test-key", + client_args={"timeout": 900.0}, + service_tier="auto", + ) + + @patch("pretty_release_notes.openai_client.completion") + def test_openai_flex_models_keep_service_tier(self, mock_completion): + mock_completion.return_value = _mock_completion_response("summary") + + get_chat_response( + content="Write a summary", + 
model="gpt-5", + api_key="test-key", + ) + + mock_completion.assert_called_once_with( + messages=[{"role": "user", "content": "Write a summary"}], + model="gpt-5", + provider="openai", + api_key="test-key", + client_args={"timeout": 900.0}, + service_tier="flex", + ) + + @patch("pretty_release_notes.openai_client.completion") + def test_provider_prefixed_model_is_passed_through_to_any_llm(self, mock_completion): + mock_completion.return_value = _mock_completion_response("summary") + + result = get_chat_response( + content="Write a summary", + model="anthropic:claude-sonnet-4-5", + api_key="test-key", + ) + + assert result == "summary" + mock_completion.assert_called_once_with( + messages=[{"role": "user", "content": "Write a summary"}], + model="anthropic:claude-sonnet-4-5", + api_key="test-key", + ) + + def test_format_model_name_supports_provider_prefixed_models(self): + assert format_model_name("gpt-4.1") == "OpenAI gpt-4.1" + assert format_model_name("openrouter:deepseek-r1") == "openrouter:deepseek-r1" + + @patch("pretty_release_notes.openai_client.completion") + def test_prefixed_openai_models_keep_openai_specific_kwargs(self, mock_completion): + mock_completion.return_value = _mock_completion_response("summary") + + get_chat_response( + content="Write a summary", + model="openai:gpt-5", + api_key="test-key", + ) + + mock_completion.assert_called_once_with( + messages=[{"role": "user", "content": "Write a summary"}], + model="openai:gpt-5", + api_key="test-key", + client_args={"timeout": 900.0}, + service_tier="flex", + ) diff --git a/tests/test_setup_command.py b/tests/test_setup_command.py index 0e6d3ac..58b4036 100644 --- a/tests/test_setup_command.py +++ b/tests/test_setup_command.py @@ -14,7 +14,7 @@ def test_flatten_toml(self): """Test flattening nested TOML config.""" toml_config = { "github": {"token": "ghp_test", "owner": "frappe"}, - "openai": {"api_key": "sk-test", "model": "gpt-4", "max_patch_size": 5000}, + "llm": {"api_key": "sk-test", "model": 
"gpt-4", "max_patch_size": 5000}, "database": {"type": "csv", "name": "test_db", "enabled": False}, "filters": { "exclude_change_types": ["chore", "ci"], @@ -27,8 +27,8 @@ def test_flatten_toml(self): assert result["github_token"] == "ghp_test" assert result["github_owner"] == "frappe" - assert result["openai_key"] == "sk-test" - assert result["openai_model"] == "gpt-4" + assert result["llm_key"] == "sk-test" + assert result["llm_model"] == "gpt-4" assert result["max_patch_size"] == 5000 assert result["db_type"] == "csv" assert result["db_name"] == "test_db" @@ -41,15 +41,15 @@ def test_flatten_toml_with_missing_sections(self): """Test flattening TOML with missing sections.""" toml_config = { "github": {"token": "ghp_test"}, - "openai": {"api_key": "sk-test"}, + "llm": {"api_key": "sk-test"}, } result = _flatten_toml(toml_config) assert result["github_token"] == "ghp_test" assert result["github_owner"] == "" - assert result["openai_key"] == "sk-test" - assert result["openai_model"] == "" + assert result["llm_key"] == "sk-test" + assert result["llm_model"] == "" assert result["db_type"] == "" assert result["exclude_types"] == "" @@ -57,7 +57,7 @@ def test_flatten_toml_with_empty_strings(self): """Test that empty strings in TOML are preserved (caller should handle with 'or' operator).""" toml_config = { "github": {"token": "ghp_test", "owner": ""}, - "openai": {"api_key": "sk-test", "model": ""}, + "llm": {"api_key": "sk-test", "model": ""}, "database": {"type": "", "name": "", "enabled": True}, } @@ -65,10 +65,22 @@ def test_flatten_toml_with_empty_strings(self): # Empty strings should be preserved - it's the caller's job to handle them assert result["github_owner"] == "" - assert result["openai_model"] == "" + assert result["llm_model"] == "" assert result["db_type"] == "" assert result["db_name"] == "" + def test_flatten_toml_supports_openai_section_alias(self): + """Test that legacy openai section is still supported.""" + toml_config = { + "github": {"token": 
"ghp_test"}, + "openai": {"api_key": "sk-test", "model": "gpt-4"}, + } + + result = _flatten_toml(toml_config) + + assert result["llm_key"] == "sk-test" + assert result["llm_model"] == "gpt-4" + def test_migrate_env_to_dict(self): """Test migrating .env format to dict.""" env_values = { @@ -88,8 +100,8 @@ def test_migrate_env_to_dict(self): assert result["github_token"] == "ghp_test" assert result["github_owner"] == "frappe" - assert result["openai_key"] == "sk-test" - assert result["openai_model"] == "gpt-4" + assert result["llm_key"] == "sk-test" + assert result["llm_model"] == "gpt-4" assert result["max_patch_size"] == 5000 assert result["db_type"] == "sqlite" assert result["db_name"] == "test_db" @@ -109,7 +121,7 @@ def test_migrate_env_with_missing_values(self): assert result["github_token"] == "ghp_test" assert result["github_owner"] == "" - assert result["openai_key"] == "sk-test" + assert result["llm_key"] == "sk-test" assert result["max_patch_size"] == 10000 # default def test_build_toml_content(self): @@ -117,8 +129,8 @@ def test_build_toml_content(self): content = _build_toml_content( github_token="ghp_test", github_owner="frappe", - openai_key="sk-test", - openai_model="gpt-4", + llm_key="sk-test", + llm_model="gpt-4", max_patch_size=5000, db_type="sqlite", db_name="test_db", @@ -133,7 +145,7 @@ def test_build_toml_content(self): assert "[github]" in content assert 'token = "ghp_test"' in content assert 'owner = "frappe"' in content - assert "[openai]" in content + assert "[llm]" in content assert 'api_key = "sk-test"' in content assert 'model = "gpt-4"' in content assert "max_patch_size = 5000" in content @@ -153,8 +165,8 @@ def test_build_toml_content_with_empty_arrays(self): content = _build_toml_content( github_token="ghp_test", github_owner="", - openai_key="sk-test", - openai_model="gpt-4", + llm_key="sk-test", + llm_model="gpt-4", max_patch_size=10000, db_type="sqlite", db_name="stored_lines", @@ -185,8 +197,8 @@ def 
test_setup_creates_config_file(self, tmp_path, monkeypatch): [ "ghp_test", # github token "frappe", # github owner - "sk-test", # openai key - "gpt-4", # openai model + "sk-test", # llm key + "gpt-4", # llm model "10000", # max patch size "sqlite", # db type "stored_lines", # db name @@ -222,7 +234,7 @@ def mock_confirm(*args, **kwargs): content = config_path.read_text() assert "[github]" in content assert 'token = "ghp_test"' in content - assert "[openai]" in content + assert "[llm]" in content assert 'api_key = "sk-test"' in content def test_setup_migrates_from_env(self, tmp_path, monkeypatch): @@ -246,8 +258,8 @@ def test_setup_migrates_from_env(self, tmp_path, monkeypatch): [ "", # github token (accept default from .env) "", # github owner (accept default from .env) - "", # openai key (accept default from .env) - "gpt-4", # openai model + "", # llm key (accept default from .env) + "gpt-4", # llm model "10000", # max patch size "sqlite", # db type "stored_lines", # db name @@ -264,7 +276,7 @@ def test_setup_migrates_from_env(self, tmp_path, monkeypatch): def mock_prompt(*args, **kwargs): default = kwargs.get("default", "") value = next(inputs) - return value if value else default + return value or default def mock_confirm(*args, **kwargs): response = next(inputs) diff --git a/tests/test_web_api.py b/tests/test_web_api.py index b59e24e..010c0cd 100644 --- a/tests/test_web_api.py +++ b/tests/test_web_api.py @@ -27,7 +27,7 @@ def mock_generator(): # Create mock builder instance mock_builder = MagicMock() mock_builder.with_github_token.return_value = mock_builder - mock_builder.with_openai.return_value = mock_builder + mock_builder.with_llm.return_value = mock_builder mock_builder.with_filters.return_value = mock_builder mock_builder.with_progress_reporter.return_value = mock_builder mock_builder.build.return_value = mock_client @@ -64,7 +64,7 @@ def test_generate_creates_job(self, client, mock_generator): "repo": "test-repo", "tag": "v1.0.0", "github_token": 
"test-token", - "openai_key": "test-key", + "llm_key": "test-key", }, ) @@ -82,7 +82,7 @@ def test_generate_requires_owner(self, client): "repo": "test-repo", "tag": "v1.0.0", "github_token": "test-token", - "openai_key": "test-key", + "llm_key": "test-key", }, ) @@ -96,7 +96,7 @@ def test_generate_requires_repo(self, client): "owner": "test-owner", "tag": "v1.0.0", "github_token": "test-token", - "openai_key": "test-key", + "llm_key": "test-key", }, ) @@ -110,7 +110,7 @@ def test_generate_requires_tag(self, client): "owner": "test-owner", "repo": "test-repo", "github_token": "test-token", - "openai_key": "test-key", + "llm_key": "test-key", }, ) @@ -124,14 +124,14 @@ def test_generate_requires_github_token(self, client): "owner": "test-owner", "repo": "test-repo", "tag": "v1.0.0", - "openai_key": "test-key", + "llm_key": "test-key", }, ) assert response.status_code == 422 - def test_generate_requires_openai_key(self, client): - """Generate endpoint should require openai_key field.""" + def test_generate_requires_llm_key(self, client): + """Generate endpoint should require llm_key field.""" response = client.post( "/generate", json={ @@ -144,6 +144,21 @@ def test_generate_requires_openai_key(self, client): assert response.status_code == 422 + def test_generate_accepts_openai_key_alias(self, client, mock_generator): + """Generate endpoint should accept legacy openai_key field.""" + response = client.post( + "/generate", + json={ + "owner": "test-owner", + "repo": "test-repo", + "tag": "v1.0.0", + "github_token": "test-token", + "openai_key": "test-key", + }, + ) + + assert response.status_code == 200 + def test_generate_accepts_optional_parameters(self, client, mock_generator): """Generate endpoint should accept optional configuration parameters.""" response = client.post( @@ -153,8 +168,8 @@ def test_generate_accepts_optional_parameters(self, client, mock_generator): "repo": "test-repo", "tag": "v1.0.0", "github_token": "test-token", - "openai_key": "test-key", - 
"openai_model": "gpt-4", + "llm_key": "test-key", + "llm_model": "gpt-4", "exclude_types": ["chore", "ci"], "exclude_labels": ["skip-release-notes"], "exclude_authors": ["bot[bot]"], @@ -184,7 +199,7 @@ def test_jobs_returns_job_status(self, client, mock_generator): "repo": "test-repo", "tag": "v1.0.0", "github_token": "test-token", - "openai_key": "test-key", + "llm_key": "test-key", }, ) job_id = create_response.json()["job_id"] @@ -206,7 +221,7 @@ def test_job_completes_successfully(self, client, mock_generator): "repo": "test-repo", "tag": "v1.0.0", "github_token": "test-token", - "openai_key": "test-key", + "llm_key": "test-key", }, ) job_id = create_response.json()["job_id"] @@ -240,7 +255,7 @@ def test_job_captures_progress_events(self, client, mock_generator): "repo": "test-repo", "tag": "v1.0.0", "github_token": "test-token", - "openai_key": "test-key", + "llm_key": "test-key", }, ) job_id = create_response.json()["job_id"] @@ -272,7 +287,7 @@ def create_and_wait_for_job(job_num): "repo": f"repo-{job_num}", "tag": "v1.0.0", "github_token": "test-token", - "openai_key": "test-key", + "llm_key": "test-key", }, ) job_id = response.json()["job_id"] @@ -316,7 +331,7 @@ def create_job(job_num): "repo": f"repo-{job_num}", "tag": "v1.0.0", "github_token": "test-token", - "openai_key": "test-key", + "llm_key": "test-key", }, ) return response.json()["job_id"] @@ -351,7 +366,7 @@ def test_job_with_invalid_credentials_fails_gracefully(self, client): "repo": "nonexistent-repo", "tag": "v1.0.0", "github_token": "invalid-token", - "openai_key": "invalid-key", + "llm_key": "invalid-key", }, ) job_id = response.json()["job_id"]