Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion CLAUDE.md
Original file line number Diff line number Diff line change
Expand Up @@ -311,7 +311,7 @@ owner = "frappe" # Default repository owner (optional)

[openai]
api_key = "sk-xxxxx" # OpenAI API key (required)
model = "gpt-4.1" # Model to use (default: "gpt-4.1")
model = "openai:gpt-4.1" # Model to use (default: "openai:gpt-4.1")
max_patch_size = 10000 # Max patch size before fallback (default: 10000)

[database]
Expand Down
6 changes: 3 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ nano ~/.pretty-release-notes/config.toml

### Configuration Format

The configuration file uses TOML format with sections for GitHub credentials, LLM settings, database caching, and filters. The canonical section name is `[llm]`, while the legacy `[openai]` section is still accepted for backward compatibility. Plain model names still default to OpenAI, and `provider:model` targets other `any-llm` providers. See [`config.toml.example`](config.toml.example) for the complete structure and all available options.
The configuration file uses TOML format with sections for GitHub credentials, LLM settings, database caching, and filters. The canonical section name is `[llm]`, while the legacy `[openai]` section is still accepted for backward compatibility. Prefer fully qualified `provider:model` values such as `openai:gpt-4.1`; unqualified model names are still accepted and default to OpenAI for backward compatibility. See [`config.toml.example`](config.toml.example) for the complete structure and all available options.

You can override the config location using the `--config-path` flag.

Expand Down Expand Up @@ -130,7 +130,7 @@ from pretty_release_notes import ReleaseNotesBuilder
client = (
ReleaseNotesBuilder()
.with_github_token("ghp_your_token")
.with_llm("sk_your_key", model="gpt-4") # or model="anthropic:claude-sonnet-4-5"
.with_llm("sk_your_key", model="openai:gpt-4.1") # or model="anthropic:claude-sonnet-4-5"
.with_database("sqlite", enabled=True)
.with_filters(
exclude_types={"chore", "ci", "refactor"},
Expand Down Expand Up @@ -197,7 +197,7 @@ curl -X POST http://localhost:8000/generate \
"previous_tag_name": "v15.38.0",
"github_token": "ghp_your_token_here",
"llm_key": "sk-your_key_here",
"llm_model": "gpt-4",
"llm_model": "openai:gpt-4.1",
"exclude_types": ["chore", "ci", "refactor"],
"exclude_labels": ["skip-release-notes"],
"exclude_authors": ["dependabot[bot]"]
Expand Down
8 changes: 4 additions & 4 deletions config.toml.example
Original file line number Diff line number Diff line change
Expand Up @@ -19,13 +19,13 @@ owner = "frappe"

[llm]
# LLM provider API key (required)
# For plain OpenAI models, use an OpenAI key. For other providers, use that provider's key.
# Use the API key for the provider referenced by `model`.
# Legacy [openai] is still accepted for backward compatibility.
api_key = ""

# Model to use. Plain names default to OpenAI.
# For other providers, use "provider:model" (for example: "anthropic:claude-sonnet-4-5")
model = "gpt-4.1"
# Model to use. Prefer fully qualified "provider:model" values.
# Unqualified names still default to OpenAI for backward compatibility.
model = "openai:gpt-4.1"

# Maximum patch size before fallback to commit message (default: 10000)
max_patch_size = 10000
Expand Down
8 changes: 4 additions & 4 deletions docs/adr/003-toml-configuration.md
Original file line number Diff line number Diff line change
Expand Up @@ -40,9 +40,9 @@ EXCLUDE_PR_TYPES=chore,refactor,ci
token = "ghp_xxxxx"
owner = "frappe"

[openai]
[llm]
api_key = "sk-xxxxx"
model = "gpt-4.1"
model = "openai:gpt-4.1"

[filters]
exclude_change_types = ["chore", "refactor", "ci"]
Expand Down Expand Up @@ -100,9 +100,9 @@ force_use_commits = false # Force using commits over PRs
token = "" # Required
owner = "" # Optional default owner

[openai] # OpenAI API settings
[llm] # LLM API settings
api_key = "" # Required
model = "" # Optional, default: "gpt-4.1"
model = "" # Optional, default: "openai:gpt-4.1"
max_patch_size = 10000 # Optional, default: 10000

[database] # Cache configuration
Expand Down
4 changes: 2 additions & 2 deletions examples/library_usage.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ def advanced_usage():
client = (
ReleaseNotesBuilder()
.with_github_token("ghp_xxxxx") # Replace with your token
.with_llm("sk-xxxxx", model="gpt-4", max_patch_size=15000)
.with_llm("sk-xxxxx", model="openai:gpt-4.1", max_patch_size=15000)
.with_database("sqlite", enabled=True)
.with_filters(
exclude_types={"chore", "refactor", "ci", "style", "test"},
Expand Down Expand Up @@ -76,7 +76,7 @@ def direct_config_usage():

config = ReleaseNotesConfig(
github=GitHubConfig(token="ghp_xxxxx"), # Replace with your token
llm=LLMConfig(api_key="sk-xxxxx", model="gpt-4.1"), # Replace with your key
llm=LLMConfig(api_key="sk-xxxxx", model="openai:gpt-4.1"), # Replace with your key
database=DatabaseConfig(type="sqlite", enabled=True),
filters=FilterConfig(
exclude_change_types={"chore", "refactor"},
Expand Down
9 changes: 6 additions & 3 deletions pretty_release_notes/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
)
from .core.interfaces import NullProgressReporter, ProgressReporter
from .generator import ReleaseNotesGenerator
from .openai_client import DEFAULT_MODEL


class ReleaseNotesClient:
Expand Down Expand Up @@ -73,7 +74,7 @@ class ReleaseNotesBuilder:
def __init__(self):
self._github_token = None
self._llm_key = None
self._llm_model = "gpt-4.1"
self._llm_model = DEFAULT_MODEL
self._max_patch_size = 10000
self._db_type = "sqlite"
self._db_name = "stored_lines"
Expand All @@ -93,14 +94,16 @@ def with_github_token(self, token: str) -> "ReleaseNotesBuilder":
self._github_token = token
return self

def with_llm(self, api_key: str, model: str = "gpt-4.1", max_patch_size: int = 10000) -> "ReleaseNotesBuilder":
def with_llm(self, api_key: str, model: str = DEFAULT_MODEL, max_patch_size: int = 10000) -> "ReleaseNotesBuilder":
"""Set LLM configuration."""
self._llm_key = api_key
self._llm_model = model
self._max_patch_size = max_patch_size
return self

def with_openai(self, api_key: str, model: str = "gpt-4.1", max_patch_size: int = 10000) -> "ReleaseNotesBuilder":
def with_openai(
self, api_key: str, model: str = DEFAULT_MODEL, max_patch_size: int = 10000
) -> "ReleaseNotesBuilder":
"""Backward-compatible alias for with_llm()."""
return self.with_llm(api_key=api_key, model=model, max_patch_size=max_patch_size)

Expand Down
4 changes: 3 additions & 1 deletion pretty_release_notes/core/config.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
from dataclasses import dataclass, field
from pathlib import Path

from ..openai_client import DEFAULT_MODEL


@dataclass
class GitHubConfig:
Expand All @@ -15,7 +17,7 @@ def __post_init__(self):
@dataclass
class LLMConfig:
api_key: str
model: str = "gpt-4.1"
model: str = DEFAULT_MODEL
max_patch_size: int = 10000

def __post_init__(self):
Expand Down
7 changes: 4 additions & 3 deletions pretty_release_notes/core/config_loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@

from dotenv import dotenv_values

from ..openai_client import DEFAULT_MODEL
from .config import (
DatabaseConfig,
FilterConfig,
Expand Down Expand Up @@ -41,7 +42,7 @@ def load(self) -> ReleaseNotesConfig:
),
llm=LLMConfig(
api_key=llm_api_key,
model=self.config_dict.get("llm_model", self.config_dict.get("openai_model", "gpt-4.1")),
model=self.config_dict.get("llm_model", self.config_dict.get("openai_model", DEFAULT_MODEL)),
max_patch_size=self.config_dict.get("max_patch_size", 10000),
),
database=DatabaseConfig(
Expand Down Expand Up @@ -97,7 +98,7 @@ def load(self) -> ReleaseNotesConfig:
github=GitHubConfig(token=github_token, owner=config.get("DEFAULT_OWNER")),
llm=LLMConfig(
api_key=llm_key,
model=config.get("LLM_MODEL") or config.get("OPENAI_MODEL") or "gpt-4.1",
model=config.get("LLM_MODEL") or config.get("OPENAI_MODEL") or DEFAULT_MODEL,
max_patch_size=int(config.get("MAX_PATCH_SIZE") or "10000"),
),
database=DatabaseConfig(
Expand Down Expand Up @@ -180,7 +181,7 @@ def load(self) -> ReleaseNotesConfig:
),
llm=LLMConfig(
api_key=llm_key,
model=llm_config.get("model", "gpt-4.1"),
model=llm_config.get("model", DEFAULT_MODEL),
max_patch_size=llm_config.get("max_patch_size", 10000),
),
database=DatabaseConfig(
Expand Down
1 change: 1 addition & 0 deletions pretty_release_notes/openai_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
)

DEFAULT_PROVIDER = "openai"
DEFAULT_MODEL = "openai:gpt-4.1"
OPENAI_MODELS_WITH_FLEX = {
"o3",
"o4-mini",
Expand Down
9 changes: 6 additions & 3 deletions pretty_release_notes/setup_command.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@
from rich.console import Console
from rich.prompt import Confirm, Prompt

from .openai_client import DEFAULT_MODEL

console = Console()


Expand Down Expand Up @@ -80,8 +82,8 @@ def setup_config(
password=True,
)
llm_model = Prompt.ask(
"Model (use provider:model for non-OpenAI providers)",
default=existing_values.get("llm_model") or "gpt-4.1",
"Model (prefer provider:model, e.g. openai:gpt-4.1)",
default=existing_values.get("llm_model") or DEFAULT_MODEL,
)
max_patch_size = Prompt.ask(
"Maximum patch size before fallback",
Expand Down Expand Up @@ -259,7 +261,8 @@ def to_toml_array(s: str) -> str:
[llm]
# Legacy [openai] is still accepted for backward compatibility.
api_key = "{llm_key}"
# Use plain model names for OpenAI or "provider:model" for other providers.
# Prefer "provider:model" syntax (for example: "openai:gpt-4.1" or "anthropic:claude-sonnet-4-5").
# Unqualified names are still treated as OpenAI for backward compatibility.
model = "{llm_model}"
max_patch_size = {max_patch_size}

Expand Down
3 changes: 2 additions & 1 deletion pretty_release_notes/web/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@

from ..api import ReleaseNotesBuilder
from ..core.interfaces import ProgressEvent, ProgressReporter
from ..openai_client import DEFAULT_MODEL

app = FastAPI(title="Pretty Release Notes API", version="1.0.0")

Expand All @@ -25,7 +26,7 @@ class GenerateRequest(BaseModel):
tag: str
github_token: str
llm_key: str = Field(validation_alias=AliasChoices("llm_key", "openai_key"))
llm_model: str = Field(default="gpt-4.1", validation_alias=AliasChoices("llm_model", "openai_model"))
llm_model: str = Field(default=DEFAULT_MODEL, validation_alias=AliasChoices("llm_model", "openai_model"))
exclude_types: list[str] = []
exclude_labels: list[str] = []
exclude_authors: list[str] = []
Expand Down
3 changes: 2 additions & 1 deletion tests/test_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
ReleaseNotesConfig,
)
from pretty_release_notes.core.interfaces import ProgressEvent, ProgressReporter
from pretty_release_notes.openai_client import DEFAULT_MODEL


class TestProgressReporting:
Expand Down Expand Up @@ -222,7 +223,7 @@ def test_llm_defaults(self):
client = ReleaseNotesBuilder().with_github_token("test_token").with_llm("test_key").build()

# Check defaults
assert client.config.llm.model == "gpt-4.1"
assert client.config.llm.model == DEFAULT_MODEL
assert client.config.llm.max_patch_size == 10000


Expand Down
3 changes: 2 additions & 1 deletion tests/test_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
ProgressEvent,
ProgressReporter,
)
from pretty_release_notes.openai_client import DEFAULT_MODEL


class TestProgressEvent:
Expand Down Expand Up @@ -95,7 +96,7 @@ class TestLLMConfig:
def test_valid_config_with_defaults(self):
config = LLMConfig(api_key="test_key")
assert config.api_key == "test_key"
assert config.model == "gpt-4.1"
assert config.model == DEFAULT_MODEL
assert config.max_patch_size == 10000

def test_valid_config_with_custom_values(self):
Expand Down
24 changes: 22 additions & 2 deletions tests/test_openai_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
from types import SimpleNamespace
from unittest.mock import patch

from pretty_release_notes.openai_client import format_model_name, get_chat_response
from pretty_release_notes.openai_client import DEFAULT_MODEL, format_model_name, get_chat_response


def _mock_completion_response(content: str):
Expand All @@ -16,7 +16,26 @@ class TestOpenAIClient:
"""Test the compatibility wrapper around any-llm."""

@patch("pretty_release_notes.openai_client.completion")
def test_plain_model_defaults_to_openai_provider(self, mock_completion):
def test_default_model_is_passed_through_to_any_llm(self, mock_completion):
mock_completion.return_value = _mock_completion_response("summary")

result = get_chat_response(
content="Write a summary",
model=DEFAULT_MODEL,
api_key="test-key",
)

assert result == "summary"
mock_completion.assert_called_once_with(
messages=[{"role": "user", "content": "Write a summary"}],
model=DEFAULT_MODEL,
api_key="test-key",
client_args={"timeout": 900.0},
service_tier="auto",
)

@patch("pretty_release_notes.openai_client.completion")
def test_plain_model_defaults_to_openai_provider_for_backward_compatibility(self, mock_completion):
mock_completion.return_value = _mock_completion_response("summary")

result = get_chat_response(
Expand Down Expand Up @@ -72,6 +91,7 @@ def test_provider_prefixed_model_is_passed_through_to_any_llm(self, mock_complet
)

def test_format_model_name_supports_provider_prefixed_models(self):
assert format_model_name("openai:gpt-4.1") == "OpenAI gpt-4.1"
assert format_model_name("gpt-4.1") == "OpenAI gpt-4.1"
assert format_model_name("openrouter:deepseek-r1") == "openrouter:deepseek-r1"

Expand Down