Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,10 @@ ACESTEP_INIT_LLM=auto
# API key for authentication (optional)
# ACESTEP_API_KEY=sk-your-secret-key

# Forge external LM settings (optional)
# FORGE_API_KEY=your-forge-api-key
# FORGE_API_BASE=https://api.forge.tensorblock.co/v1
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Description: Search for FORGE_API_BASE usage in the codebase
# Expected: Find code that reads FORGE_API_BASE env var, or confirm it's unused

echo "=== Searching for FORGE_API_BASE usage ==="
rg -n "FORGE_API_BASE" --type py

echo ""
echo "=== Searching for os.getenv/environ patterns with FORGE ==="
rg -n "getenv.*FORGE|environ.*FORGE" --type py

Repository: ace-step/ACE-Step-1.5

Length of output: 168


Remove FORGE_API_BASE from .env.example or implement support for it.

FORGE_API_BASE is documented in this file but is not consumed by the application. The code defaults to profile.default_base_url instead of checking this environment variable, making this documentation misleading to users.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In @.env.example at line 63, the .env.example entry FORGE_API_BASE is misleading
because the app uses profile.default_base_url; either remove the FORGE_API_BASE
line from .env.example, or implement support by reading
os.getenv("FORGE_API_BASE") where the base URL is resolved (replace or override
profile.default_base_url). Update the code path that computes the base URL (the
place referencing profile.default_base_url) to prefer the FORGE_API_BASE
environment variable when set, and adjust .env.example accordingly if you choose
to implement support.


# ==================== Gradio UI Settings ====================
# Server port (default: 7860)
# PORT=7860
Expand All @@ -75,4 +79,4 @@ ACESTEP_INIT_LLM=auto
# ==================== Startup Settings ====================
# By default models are lazy-loaded on first request (fast server startup).
# Set to false to force eager model loading at startup.
# ACESTEP_NO_INIT=true
# ACESTEP_NO_INIT=true
2 changes: 1 addition & 1 deletion acestep/text_tasks/external_ai_request_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ def build_request_for_protocol(
"max_tokens": max_tokens or int(os.getenv("ACESTEP_OPENAI_MAX_TOKENS", "3072")),
"temperature": 0.4,
}
if require_json_output and provider in {"openai", "zai"}:
if require_json_output and provider in {"openai", "forge", "zai"}:
payload["response_format"] = {"type": "json_object"}
payload["stop"] = ["```"]
if disable_thinking and provider == "zai":
Expand Down
18 changes: 18 additions & 0 deletions acestep/text_tasks/external_ai_request_helpers_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,24 @@ def test_build_request_for_protocol_requests_json_output_for_openai_format(self)
self.assertEqual(payload["stop"], ["```"])
self.assertNotIn("thinking", payload)

def test_build_request_for_protocol_requests_json_output_for_forge_format(self) -> None:
"""Forge format-mode requests should use OpenAI-compatible JSON output flags."""

payload, _headers = build_request_for_protocol(
protocol="openai_chat",
provider="forge",
api_key="test-key",
model="OpenAI/gpt-4o-mini",
messages=[{"role": "system", "content": "s"}, {"role": "user", "content": "u"}],
base_url="https://api.forge.tensorblock.co/v1/chat/completions",
max_tokens=768,
require_json_output=True,
)

self.assertEqual(payload["response_format"], {"type": "json_object"})
self.assertEqual(payload["stop"], ["```"])
self.assertNotIn("thinking", payload)

def test_build_request_for_protocol_disables_zai_thinking_and_requests_json(self) -> None:
"""Z.ai format calls should disable thinking and request JSON output."""

Expand Down
16 changes: 15 additions & 1 deletion acestep/text_tasks/external_lm_providers.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,20 @@ class ExternalProviderProfile:
("OpenAI chat completions", "https://api.openai.com/v1/chat/completions"),
),
),
"forge": ExternalProviderProfile(
provider_id="forge",
label="Forge",
protocol="openai_chat",
default_model="OpenAI/gpt-4o-mini",
default_base_url="https://api.forge.tensorblock.co/v1/chat/completions",
api_key_env="FORGE_API_KEY",
api_key_required=True,
secret_path_env="ACESTEP_FORGE_SECRET_PATH",
secret_file_name="forge_api_key.enc",
base_url_presets=(
("Forge chat completions", "https://api.forge.tensorblock.co/v1/chat/completions"),
),
),
"claude": ExternalProviderProfile(
provider_id="claude",
label="Anthropic Claude",
Expand Down Expand Up @@ -101,7 +115,7 @@ def get_external_provider_profile(provider: str | None) -> ExternalProviderProfi
def get_external_provider_choices() -> list[tuple[str, str]]:
"""Return provider dropdown choices as ``(label, value)`` pairs."""

order = ("zai", "openai", "claude", "ollama")
order = ("zai", "openai", "forge", "claude", "ollama")
return [
(_EXTERNAL_PROVIDER_PROFILES[provider_id].label, provider_id)
for provider_id in order
Expand Down
29 changes: 29 additions & 0 deletions acestep/text_tasks/external_lm_providers_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,21 +6,50 @@

from acestep.text_tasks.external_lm_providers import (
CUSTOM_BASE_URL_PRESET,
build_external_model_choice,
get_external_base_url_preset_choices,
get_external_base_url_preset_value,
get_external_provider_choices,
get_external_provider_profile,
)


class ExternalLmProvidersTests(unittest.TestCase):
"""Verify provider lookup and base-URL preset helpers stay explicit."""

def test_get_external_provider_profile_returns_forge_defaults(self) -> None:
"""Forge should use its dedicated OpenAI-compatible defaults."""

profile = get_external_provider_profile("forge")

self.assertEqual(profile.protocol, "openai_chat")
self.assertEqual(profile.default_model, "OpenAI/gpt-4o-mini")
self.assertEqual(
profile.default_base_url,
"https://api.forge.tensorblock.co/v1/chat/completions",
)
self.assertEqual(profile.api_key_env, "FORGE_API_KEY")

def test_get_external_provider_profile_rejects_unknown_provider(self) -> None:
"""Unknown providers should fail fast instead of silently defaulting."""

with self.assertRaises(ValueError):
get_external_provider_profile("mystery")

def test_get_external_provider_choices_includes_forge(self) -> None:
"""Provider choices should expose Forge to the external LM picker."""

choices = get_external_provider_choices()

self.assertIn(("Forge", "forge"), choices)

def test_build_external_model_choice_defaults_forge_model(self) -> None:
"""Missing Forge model names should fall back to the provider default."""

choice = build_external_model_choice("forge", "")

self.assertEqual(choice, "external:forge:OpenAI/gpt-4o-mini")

def test_base_url_preset_helpers_use_shared_custom_token(self) -> None:
"""Custom base-URL selection should use the centralized custom token."""

Expand Down