diff --git a/.env.example b/.env.example
index 341d28b..61e91d6 100644
--- a/.env.example
+++ b/.env.example
@@ -35,9 +35,17 @@ ABOGEN_GID=1000
# Optional: Seed the web UI with working defaults for the LLM-powered
# text normalization features. Leave these blank to configure everything
# from the Settings page.
+
+# --- Ollama (local) ---
ABOGEN_LLM_BASE_URL=http://localhost:11434 # Supply the server root; /v1 is added automatically.
ABOGEN_LLM_API_KEY=ollama
ABOGEN_LLM_MODEL=llama3.1:8b
+
+# --- MiniMax Cloud ---
+# ABOGEN_LLM_BASE_URL=https://api.minimax.io/v1
+# ABOGEN_LLM_API_KEY=your-minimax-api-key
+# ABOGEN_LLM_MODEL=MiniMax-M2.7
+
ABOGEN_LLM_TIMEOUT=45
ABOGEN_LLM_CONTEXT_MODE=sentence
# For custom prompts, keep the text on a single line or escape newlines.
diff --git a/README.md b/README.md
index ddb3f79..1b3bb88 100644
--- a/README.md
+++ b/README.md
@@ -385,11 +385,22 @@ docker run --rm \
## `LLM-assisted text normalization`
Abogen can hand tricky apostrophes and contractions to an OpenAI-compatible large language model. Configure it from **Settings → LLM**:
-1. Enter the base URL for your endpoint (Ollama, OpenAI proxy, etc.) and an API key if required. Use the server root (for Ollama: `http://localhost:11434`)—Abogen appends `/v1/...` automatically, but it also accepts inputs that already end in `/v1`.
-2. Click **Refresh models** to load the catalog, pick a default model, and adjust the timeout or prompt template.
+1. Pick a **Provider** from the dropdown (MiniMax, OpenAI, DeepSeek, Ollama) to auto-fill the endpoint and available models, or choose *Custom endpoint* to enter any OpenAI-compatible URL manually.
+2. Enter an API key if required, then click **Refresh models** to load the catalog. Pick a default model and adjust the timeout or prompt template.
3. Use the preview box to test the prompt, then save the settings. The Normalization panel can synthesize a short audio preview with the current configuration.
-When you are running inside Docker or a CI pipeline, seed the form automatically with `ABOGEN_LLM_*` variables in your `.env` file. The `.env.example` file includes sample values for a local Ollama server.
+### Supported providers
+
+| Provider | Base URL | Models |
+|----------|----------|--------|
+| **MiniMax** | `https://api.minimax.io/v1` | MiniMax-M2.7, MiniMax-M2.5-highspeed, … |
+| **OpenAI** | `https://api.openai.com/v1` | gpt-4o, gpt-4o-mini, … |
+| **DeepSeek** | `https://api.deepseek.com/v1` | deepseek-chat, deepseek-reasoner |
+| **Ollama** | `http://localhost:11434/v1` | *(local models)* |
+
+Any service that exposes `/v1/chat/completions` (e.g. LM Studio, vLLM, text-generation-webui) also works via *Custom endpoint*.
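+
+If you are unsure whether a server is compatible, you can POST a chat
+completion to it directly. Here is a minimal sketch (the URL, key, and model
+are the local Ollama placeholders from the table above; substitute your own):
+
+```python
+import requests
+
+resp = requests.post(
+    "http://localhost:11434/v1/chat/completions",
+    headers={"Authorization": "Bearer ollama"},
+    json={
+        "model": "llama3.1:8b",
+        "messages": [{"role": "user", "content": "Expand the contraction: don't"}],
+    },
+    timeout=45,
+)
+print(resp.json()["choices"][0]["message"]["content"])
+```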
+
+When running inside Docker or a CI pipeline, seed the form automatically with `ABOGEN_LLM_*` variables in your `.env` file. The `.env.example` file includes sample values for a local Ollama server and for MiniMax Cloud.
## `Audiobookshelf integration`
Abogen can push finished audiobooks directly into Audiobookshelf. Configure this under **Settings → Integrations → Audiobookshelf** by providing:
diff --git a/abogen/llm_providers.py b/abogen/llm_providers.py
new file mode 100644
index 0000000..d590778
--- /dev/null
+++ b/abogen/llm_providers.py
@@ -0,0 +1,99 @@
+"""Built-in LLM provider presets for quick configuration.
+
+Each preset bundles the endpoint URL, a list of known models, and the
+environment variable that typically holds the API key. The Web UI
+uses these presets so users can pick a provider from a dropdown instead
+of typing the URL manually.
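+
+Typical use (a sketch)::
+
+    from abogen.llm_providers import get_provider_presets
+
+    for preset in get_provider_presets():
+        print(preset.id, preset.base_url)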
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Dict, Sequence, Tuple
+
+
+@dataclass(frozen=True)
+class LLMProviderPreset:
+ """A preconfigured cloud or local LLM endpoint."""
+
+ id: str
+ name: str
+ base_url: str
+ api_key_env: str = ""
+ api_key_hint: str = ""
+ models: Tuple[str, ...] = ()
+
+ def to_dict(self) -> Dict[str, object]:
+ return {
+ "id": self.id,
+ "name": self.name,
+ "base_url": self.base_url,
+ "api_key_env": self.api_key_env,
+ "api_key_hint": self.api_key_hint,
+ "models": list(self.models),
+ }
+
+
+_BUILTIN_PRESETS: Tuple[LLMProviderPreset, ...] = (
+ LLMProviderPreset(
+ id="minimax",
+ name="MiniMax",
+ base_url="https://api.minimax.io/v1",
+ api_key_env="MINIMAX_API_KEY",
+ api_key_hint="Get your key at https://platform.minimax.io",
+ models=(
+ "MiniMax-M1",
+ "MiniMax-Text-01",
+ "MiniMax-M2.5",
+ "MiniMax-M2.5-highspeed",
+ "MiniMax-M2.7",
+ "MiniMax-M2.7-highspeed",
+ ),
+ ),
+ LLMProviderPreset(
+ id="openai",
+ name="OpenAI",
+ base_url="https://api.openai.com/v1",
+ api_key_env="OPENAI_API_KEY",
+ api_key_hint="Get your key at https://platform.openai.com/api-keys",
+ models=(
+ "gpt-4o",
+ "gpt-4o-mini",
+ "gpt-4.1",
+ "gpt-4.1-mini",
+ "gpt-4.1-nano",
+ ),
+ ),
+ LLMProviderPreset(
+ id="deepseek",
+ name="DeepSeek",
+ base_url="https://api.deepseek.com/v1",
+ api_key_env="DEEPSEEK_API_KEY",
+ api_key_hint="Get your key at https://platform.deepseek.com",
+ models=(
+ "deepseek-chat",
+ "deepseek-reasoner",
+ ),
+ ),
+ LLMProviderPreset(
+ id="ollama",
+ name="Ollama (local)",
+ base_url="http://localhost:11434/v1",
+ api_key_env="",
+ api_key_hint='Use "ollama" or leave blank',
+ models=(),
+ ),
+)
+
+
+def get_provider_presets() -> Sequence[LLMProviderPreset]:
+ """Return all built-in provider presets."""
+ return _BUILTIN_PRESETS
+
+
+def get_provider_by_id(provider_id: str) -> LLMProviderPreset | None:
+    """Look up a single preset by its identifier.
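+
+    Example::
+
+        >>> get_provider_by_id("ollama").base_url
+        'http://localhost:11434/v1'
+        >>> get_provider_by_id("unknown") is None
+        True
+    """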
+ for preset in _BUILTIN_PRESETS:
+ if preset.id == provider_id:
+ return preset
+ return None
diff --git a/abogen/normalization_settings.py b/abogen/normalization_settings.py
index 4242265..10abfc3 100644
--- a/abogen/normalization_settings.py
+++ b/abogen/normalization_settings.py
@@ -27,6 +27,7 @@
)
_SETTINGS_DEFAULTS: Dict[str, Any] = {
+ "llm_provider": "",
"llm_base_url": "",
"llm_api_key": "",
"llm_model": "",
diff --git a/abogen/webui/routes/settings.py b/abogen/webui/routes/settings.py
index d0bb991..bf92d13 100644
--- a/abogen/webui/routes/settings.py
+++ b/abogen/webui/routes/settings.py
@@ -23,6 +23,7 @@
from abogen.webui.debug_tts_runner import run_debug_tts_wavs
from abogen.debug_tts_samples import DEBUG_TTS_SAMPLES
from abogen.utils import get_user_output_path, load_config
+from abogen.llm_providers import get_provider_presets
settings_bp = Blueprint("settings", __name__)
@@ -216,6 +217,7 @@ def settings_page() -> str | ResponseReturnValue:
save_locations=save_locations,
default_output_dir=default_output_dir,
llm_ready=llm_ready(load_settings()),
+ llm_provider_presets=[p.to_dict() for p in get_provider_presets()],
debug_samples=DEBUG_TTS_SAMPLES,
debug_manifest=debug_manifest,
)
diff --git a/abogen/webui/routes/utils/settings.py b/abogen/webui/routes/utils/settings.py
index c96a66c..4eb5067 100644
--- a/abogen/webui/routes/utils/settings.py
+++ b/abogen/webui/routes/utils/settings.py
@@ -196,6 +196,7 @@ def settings_defaults() -> Dict[str, Any]:
"speaker_analysis_threshold": _DEFAULT_ANALYSIS_THRESHOLD,
"speaker_pronunciation_sentence": "This is {{name}} speaking.",
"speaker_random_languages": [],
+ "llm_provider": "",
"llm_base_url": llm_env_defaults.get("llm_base_url", ""),
"llm_api_key": llm_env_defaults.get("llm_api_key", ""),
"llm_model": llm_env_defaults.get("llm_model", ""),
@@ -344,7 +345,7 @@ def normalize_setting_value(key: str, value: Any, defaults: Dict[str, Any]) -> A
if key == "llm_prompt":
candidate = str(value or "").strip()
return candidate if candidate else defaults[key]
- if key in {"llm_base_url", "llm_api_key", "llm_model"}:
+ if key in {"llm_provider", "llm_base_url", "llm_api_key", "llm_model"}:
return str(value or "").strip()
if key == "speaker_random_languages":
if isinstance(value, (list, tuple, set)):
diff --git a/abogen/webui/static/settings.js b/abogen/webui/static/settings.js
index 4a3e187..b752d72 100644
--- a/abogen/webui/static/settings.js
+++ b/abogen/webui/static/settings.js
@@ -367,6 +367,7 @@ function collectLLMFields() {
const prompt = form.querySelector('#llm_prompt');
const timeout = form.querySelector('#llm_timeout');
const context = form.querySelector('input[name="llm_context_mode"]:checked');
+ const provider = form.querySelector('#llm_provider');
return {
base_url: baseUrl ? baseUrl.value.trim() : '',
api_key: apiKey ? apiKey.value.trim() : '',
@@ -374,9 +375,59 @@ function collectLLMFields() {
prompt: prompt ? prompt.value : '',
context_mode: context ? context.value : 'sentence',
timeout: timeout ? parseNumber(timeout.value, 30) : 30,
+ provider: provider ? provider.value : '',
};
}
+function getProviderPresets() {
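+  // The template serializes the provider presets into the select's
+  // data-presets attribute; parse them here, returning [] on bad JSON.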
+ const select = form.querySelector('#llm_provider');
+ if (!select || !select.dataset.presets) {
+ return [];
+ }
+ try {
+ return JSON.parse(select.dataset.presets);
+ } catch (_) {
+ return [];
+ }
+}
+
+function applyProviderPreset(providerId) {
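+  // Fill the base URL, API-key hint, and model list from the chosen preset.
+  // Unknown ids (e.g. the custom-endpoint option) just restore the default hint.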
+ const presets = getProviderPresets();
+ const preset = presets.find((p) => p.id === providerId);
+ const baseUrlInput = form.querySelector('#llm_base_url');
+ const apiKeyInput = form.querySelector('#llm_api_key');
+ const apiKeyHint = document.querySelector('#llm_api_key_hint');
+
+ if (!preset) {
+ if (apiKeyHint) {
+      apiKeyHint.textContent = 'Leave blank or use "ollama" for local servers that do not require keys.';
+ }
+ return;
+ }
+
+ if (baseUrlInput) {
+ baseUrlInput.value = preset.base_url;
+ }
+ if (apiKeyHint && preset.api_key_hint) {
+ apiKeyHint.textContent = preset.api_key_hint;
+ }
+ if (preset.models && preset.models.length) {
+ const models = preset.models.map((id) => ({ id, label: id }));
+ updateModelOptions(models);
+ }
+ updateLLMNavState();
+}
+
+function initProviderDropdown() {
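+  // Re-apply the matching preset whenever the provider selection changes.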
+ const providerSelect = form.querySelector('#llm_provider');
+ if (!providerSelect) {
+ return;
+ }
+ providerSelect.addEventListener('change', () => {
+ applyProviderPreset(providerSelect.value);
+ });
+}
+
function updateModelOptions(models) {
const select = form.querySelector('#llm_model');
if (!select) {
@@ -879,4 +930,5 @@ if (form) {
initFolderPicker();
initContractionModal();
initLLMStateWatchers();
+ initProviderDropdown();
}
diff --git a/abogen/webui/templates/settings.html b/abogen/webui/templates/settings.html
index 7981834..6538efe 100644
--- a/abogen/webui/templates/settings.html
+++ b/abogen/webui/templates/settings.html
@@ -276,6 +276,16 @@