11 changes: 10 additions & 1 deletion backend/agent_factory.py
@@ -15,7 +15,7 @@

from agents._config import set_default_openai_api
from agents.tracing import set_tracing_disabled
# from .utils.logger import log
from .utils.logger import log

# def load_env_config():
# """Load environment variables from .env.llm file"""
@@ -37,6 +37,8 @@
def create_agent(**kwargs) -> Agent:
# Read from the user config, falling back to environment variables
config = kwargs.pop("config") if "config" in kwargs else {}
log.info(f"Agent Factory - Received config: {config}")

# Avoid writing None into headers
session_id = (config or {}).get("session_id")
default_headers = {}
@@ -53,11 +55,17 @@ def create_agent(**kwargs) -> Agent:
if config.get("openai_api_key") and config.get("openai_api_key") != "":
api_key = config.get("openai_api_key")

log.info(f"Agent Factory - Using base_url: {base_url}")
log.info(f"Agent Factory - Using api_key: {'***' if api_key else 'None'}")

# Check if this is LMStudio and adjust API key handling
is_lmstudio = is_lmstudio_url(base_url)
log.info(f"Agent Factory - Is LMStudio: {is_lmstudio}")

if is_lmstudio and not api_key:
# LMStudio typically doesn't require an API key; use a placeholder
api_key = "lmstudio-local"
log.info("Agent Factory - Set LMStudio placeholder API key")

client = AsyncOpenAI(
api_key=api_key,
Expand All @@ -67,6 +75,7 @@ def create_agent(**kwargs) -> Agent:

default_model_name = os.environ.get("OPENAI_MODEL", "gemini-2.5-flash")
model_name = kwargs.pop("model") or default_model_name
log.info(f"Agent Factory - Using model: {model_name}")
model = OpenAIChatCompletionsModel(model_name, openai_client=client)

if config.get("max_tokens"):
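Taken together, the changes to this file amount to the following client-construction flow. This is a minimal sketch rather than the file's exact code: `is_lmstudio_url` is simplified to a local stand-in, and the `OPENAI_BASE_URL`/`OPENAI_API_KEY` environment names and the `X-Session-Id` header are assumptions, not values shown in the diff.

```python
import os
from openai import AsyncOpenAI


def is_lmstudio_url(url: str | None) -> bool:
    # Simplified stand-in for the helper in backend/utils/globals.py.
    return bool(url) and "localhost:1234" in url


def build_openai_client(config: dict) -> AsyncOpenAI:
    """Sketch of the resolution order used by create_agent: user config first, then env."""
    base_url = config.get("openai_base_url") or os.environ.get("OPENAI_BASE_URL")
    api_key = config.get("openai_api_key") or os.environ.get("OPENAI_API_KEY")

    # LMStudio's local server ignores the key, but the SDK refuses an empty one,
    # so substitute a harmless placeholder.
    if is_lmstudio_url(base_url) and not api_key:
        api_key = "lmstudio-local"

    # Only attach a session header when a session id actually exists,
    # so None is never written into the headers.
    default_headers = {}
    if config.get("session_id"):
        default_headers["X-Session-Id"] = config["session_id"]  # placeholder header name

    return AsyncOpenAI(api_key=api_key, base_url=base_url, default_headers=default_headers)
```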
47 changes: 45 additions & 2 deletions backend/controller/conversation_api.py
@@ -132,11 +132,35 @@ async def invoke_chat(request):
set_language(language)

# Build the request configuration
openai_api_key = request.headers.get('Openai-Api-Key')
openai_base_url = request.headers.get('Openai-Base-Url')

# If the base URL header is missing, fall back to the last successful one (e.g., LMStudio) or to the LMStudio default
if not openai_base_url:
from ..utils.globals import get_last_openai_base_url, LMSTUDIO_DEFAULT_BASE_URL
remembered = get_last_openai_base_url()
if remembered:
openai_base_url = remembered
log.info(f"Using remembered base URL for chat: {openai_base_url}")
else:
openai_base_url = LMSTUDIO_DEFAULT_BASE_URL
log.info(f"No base URL provided; defaulting to LMStudio: {openai_base_url}")

# If neither API key nor base URL is provided, assume local LMStudio by default
# This enables out-of-the-box local usage without requiring a cloud key
if not openai_api_key and not openai_base_url:
from ..utils.globals import LMSTUDIO_DEFAULT_BASE_URL
openai_base_url = LMSTUDIO_DEFAULT_BASE_URL
log.info(f"No OpenAI credentials provided; defaulting base URL to LMStudio: {openai_base_url}")

log.info(f"Request headers - Openai-Api-Key: {'***' if openai_api_key else 'None'}")
log.info(f"Request headers - Openai-Base-Url: {openai_base_url}")

config = {
"session_id": session_id,
"workflow_checkpoint_id": workflow_checkpoint_id,
"openai_api_key": request.headers.get('Openai-Api-Key'),
"openai_base_url": request.headers.get('Openai-Base-Url'),
"openai_api_key": openai_api_key,
"openai_base_url": openai_base_url,
"model_select": next((x['data'][0] for x in ext if x['type'] == 'model_select' and x.get('data')), None)
}

@@ -165,6 +189,25 @@ async def invoke_chat(request):
# No longer need to create and store user messages on the backend; the frontend owns message storage

try:
# Validate API key presence unless using LMStudio/local base URL
from ..utils.globals import is_lmstudio_url
if not openai_api_key and not (openai_base_url and is_lmstudio_url(openai_base_url)):
warning_msg = (
"No OpenAI API key provided. Please click the gear icon (⚙️) to configure your key, "
"or set a local LMStudio base URL in settings."
)
log.error(warning_msg)
chat_response = ChatResponse(
session_id=session_id,
text=warning_msg,
finished=True,
type="message",
format="text",
ext=None
)
await response.write(json.dumps(chat_response).encode() + b"\n")
await response.write_eof()
return response
# Call the MCP client to get streaming response with historical messages and image support
# Pass OpenAI-formatted messages and processed images to comfyui_agent_invoke
accumulated_text = ""
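The fallback order introduced here (request header, then the last URL that successfully listed models, then the LMStudio default) reduces to a small pure function. A sketch, with a module-level variable standing in for the `get_last_openai_base_url()` helper added later in this PR:

```python
from typing import Optional

LMSTUDIO_DEFAULT_BASE_URL = "http://localhost:1234/v1"
_last_base_url: Optional[str] = None  # stand-in for get_last_openai_base_url()


def resolve_openai_base_url(header_value: Optional[str]) -> str:
    """Header wins; otherwise reuse a previously working URL; otherwise assume local LMStudio."""
    if header_value:
        return header_value
    if _last_base_url:
        return _last_base_url
    return LMSTUDIO_DEFAULT_BASE_URL
```

With this ordering the base URL is always populated before the later "neither key nor base URL" check runs, so that branch acts only as a defensive fallback.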
7 changes: 6 additions & 1 deletion backend/controller/llm_api.py
@@ -12,7 +12,7 @@
import json
from typing import List, Dict, Any
from aiohttp import web
from ..utils.globals import LLM_DEFAULT_BASE_URL, LMSTUDIO_DEFAULT_BASE_URL, is_lmstudio_url
from ..utils.globals import LLM_DEFAULT_BASE_URL, LMSTUDIO_DEFAULT_BASE_URL, is_lmstudio_url, set_last_openai_base_url
import server
import requests
from ..utils.logger import log
@@ -57,6 +57,11 @@ async def list_models(request):
"name": model['id'],
"image_enable": True
})
# Remember a working base URL for later chat calls
set_last_openai_base_url(openai_base_url)
else:
# Surface error info to logs to help users diagnose (e.g., 401)
log.error(f"Model list request failed: HTTP {response.status_code} - {response.text}")

return web.json_response({
"models": llm_config
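A sketch of the listing path this change touches, assuming the standard OpenAI-compatible `GET {base_url}/models` endpoint with bearer-token auth; the module-level variable stands in for the `set_last_openai_base_url()` helper added in `backend/utils/globals.py`:

```python
import logging
import requests

log = logging.getLogger(__name__)
_remembered_base_url: str | None = None  # stand-in for the globals helper added in this PR


def list_model_ids(base_url: str, api_key: str = "") -> list[str]:
    """Probe the OpenAI-compatible /models endpoint; remember the URL only on success."""
    global _remembered_base_url
    resp = requests.get(
        f"{base_url.rstrip('/')}/models",
        headers={"Authorization": f"Bearer {api_key}"} if api_key else {},
        timeout=10,
    )
    if resp.status_code == 200:
        _remembered_base_url = base_url  # the real code calls set_last_openai_base_url(base_url)
        return [m["id"] for m in resp.json().get("data", [])]
    # Surface the failure (e.g., a 401 from a bad key) instead of returning silently.
    log.error(f"Model list request failed: HTTP {resp.status_code} - {resp.text}")
    return []
```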
16 changes: 12 additions & 4 deletions backend/service/debug_agent.py
@@ -209,6 +209,10 @@ async def debug_workflow_errors(workflow_data: Dict[str, Any]):
if not session_id:
session_id = str(uuid.uuid4()) # Fallback if no context

# Use user's selected model if available, otherwise fall back to WORKFLOW_MODEL_NAME
selected_model = config.get('model_select') if config else None
model_to_use = selected_model if selected_model else WORKFLOW_MODEL_NAME

# 1. Save the workflow data to the database
log.info(f"Saving workflow data for session {session_id}")
save_result = save_workflow_data(
@@ -252,16 +256,20 @@ async def debug_workflow_errors(workflow_data: Dict[str, Any]):
**Note**: The workflow validation is done using ComfyUI's internal functions, not actual execution, so it's fast and safe.

Start by validating the workflow to see its current state.""",
model=WORKFLOW_MODEL_NAME,
model=model_to_use,
tools=[run_workflow, analyze_error_type, save_current_workflow],
config={
"max_tokens": 8192
}
)

# Use user's selected model if available, otherwise fall back to WORKFLOW_MODEL_NAME
selected_model = config.get('model_select') if config else None
model_to_use = selected_model if selected_model else WORKFLOW_MODEL_NAME

workflow_bugfix_default_agent = create_agent(
name="Workflow Bugfix Default Agent",
model=WORKFLOW_MODEL_NAME,
model=model_to_use,
handoff_description="""
I am the Workflow Bugfix Default Agent. I specialize in fixing structural issues in ComfyUI workflows.

@@ -310,7 +318,7 @@ async def debug_workflow_errors(workflow_data: Dict[str, Any]):

link_agent = create_agent(
name="Link Agent",
model=WORKFLOW_MODEL_NAME,
model=model_to_use,
handoff_description="""
I am the Link Agent. I specialize in analyzing and fixing workflow connection issues.

@@ -401,7 +409,7 @@ async def debug_workflow_errors(workflow_data: Dict[str, Any]):

parameter_agent = create_agent(
name="Parameter Agent",
model=WORKFLOW_MODEL_NAME,
model=model_to_use,
handoff_description="""
I am the Parameter Agent. I specialize in handling parameter-related errors in ComfyUI workflows.

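The same two-line selection pattern repeats for every agent in this file, and again in the rewrite agents below. As a sketch, it could be factored into a single helper; `WORKFLOW_MODEL_NAME` is the project default defined in `backend/utils/globals.py`, while `pick_model` is a hypothetical name:

```python
from typing import Any, Optional

WORKFLOW_MODEL_NAME = "us.anthropic.claude-sonnet-4-20250514-v1:0"  # project default


def pick_model(config: Optional[dict[str, Any]]) -> str:
    """Prefer the model the user selected in the UI; otherwise use the workflow default."""
    selected = config.get("model_select") if config else None
    return selected or WORKFLOW_MODEL_NAME
```

Each `create_agent(...)` call would then pass `model=pick_model(config)` instead of repeating the fallback.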
7 changes: 7 additions & 0 deletions backend/service/mcp_client.py
@@ -169,6 +169,7 @@ async def on_handoff(ctx: RunContextWrapper[None], input_data: HandoffRewriteDat
# Enhanced retry mechanism for OpenAI streaming errors
max_retries = 3
retry_count = 0
last_non_retryable_error_message: Optional[str] = None

async def process_stream_events(stream_result):
"""Process stream events with enhanced error handling"""
@@ -335,6 +336,8 @@ async def process_stream_events(stream_result):
else:
log.error(f"Non-retryable streaming error or max retries reached: {error_msg}")
log.error(f"Traceback: {traceback.format_exc()}")
# Capture the error to surface it to the caller if no content was streamed
last_non_retryable_error_message = error_msg
if isinstance(stream_error, RateLimitError):
default_error_msg = 'Rate limit exceeded, please try again later.'
error_body = stream_error.body
@@ -359,6 +362,10 @@
await asyncio.sleep(1)
continue

# If we encountered a non-retryable error and produced no text, surface the error message
if last_non_retryable_error_message and not current_text:
current_text = last_non_retryable_error_message

# Add detailed debugging info about tool results
log.info(f"Total tool results: {len(tool_results)}")
for tool_name, result in tool_results.items():
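The intent of the two additions, recording the last non-retryable error and promoting it to the response only when nothing was streamed, is easier to see in isolation. A sketch in which `start_stream` and `is_retryable` are hypothetical stand-ins for the real streaming call and error classification:

```python
import asyncio
from typing import AsyncIterator, Callable, Optional


async def stream_with_fallback(
    start_stream: Callable[[], AsyncIterator[str]],
    is_retryable: Callable[[Exception], bool],
    max_retries: int = 3,
) -> str:
    """Retry transient streaming failures; if a non-retryable error ends the loop
    before any text arrived, return the error message so the user sees something."""
    current_text = ""
    last_non_retryable_error: Optional[str] = None

    for attempt in range(max_retries):
        try:
            async for chunk in start_stream():
                current_text += chunk
            break
        except Exception as exc:
            if is_retryable(exc) and attempt < max_retries - 1:
                await asyncio.sleep(1)
                continue
            last_non_retryable_error = str(exc)
            break

    # Surface the error only when no text was produced at all.
    if last_non_retryable_error and not current_text:
        current_text = last_non_retryable_error
    return current_text
```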
18 changes: 12 additions & 6 deletions backend/service/workflow_rewrite_agent.py
@@ -19,7 +19,7 @@

from ..agent_factory import create_agent
from ..utils.globals import WORKFLOW_MODEL_NAME, get_language
from ..utils.request_context import get_session_id
from ..utils.request_context import get_session_id, get_config

from ..service.workflow_rewrite_tools import *

@@ -43,10 +43,19 @@ def create_workflow_rewrite_agent():

language = get_language()
session_id = get_session_id() or "unknown_session"
config = get_config() or {}

# Use user's selected model if available, otherwise fall back to WORKFLOW_MODEL_NAME
selected_model = config.get('model_select') if config else None
model_to_use = selected_model if selected_model else WORKFLOW_MODEL_NAME

# Merge max_tokens into config
agent_config = {**config, "max_tokens": 8192} if config else {"max_tokens": 8192}

return create_agent(
name="Workflow Rewrite Agent",
model=WORKFLOW_MODEL_NAME,
model=model_to_use,
config=agent_config,
handoff_description="""
I am the Workflow Rewrite Agent, responsible for modifying and optimizing the ComfyUI workflow on the current canvas according to the user's requirements.
""",
@@ -104,10 +113,7 @@ def create_workflow_rewrite_agent():

Always stay oriented to the user's actual needs and provide professional, accurate, and efficient workflow rewriting.
""",
tools=[get_rewrite_expert_by_name, get_current_workflow, get_node_info, update_workflow, remove_node],
config={
"max_tokens": 8192
}
tools=[get_rewrite_expert_by_name, get_current_workflow, get_node_info, update_workflow, remove_node]
)

# Note: the workflow rewrite agent now needs to be created in an environment with a session context
7 changes: 6 additions & 1 deletion backend/service/workflow_rewrite_agent_simple.py
@@ -88,9 +88,14 @@ def rewrite_workflow_simple(rewrite_context: RewriteContext) -> str:
api_key = get_comfyui_copilot_api_key() or ""
)

# Use user's selected model if available, otherwise fall back to WORKFLOW_MODEL_NAME
config = get_config()
selected_model = config.get('model_select') if config else None
model_to_use = selected_model if selected_model else WORKFLOW_MODEL_NAME

# Call the LLM
completion = client.chat.completions.parse(
model=WORKFLOW_MODEL_NAME,
model=model_to_use,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": context_info}
11 changes: 11 additions & 0 deletions backend/utils/globals.py
@@ -21,6 +21,7 @@ def __init__(self):
self._lock = threading.RLock()
self._state: Dict[str, Any] = {
'LANGUAGE': 'en', # Default language
'LAST_OPENAI_BASE_URL': None,
}

def get(self, key: str, default: Any = None) -> Any:
@@ -91,6 +92,16 @@ def set_comfyui_copilot_api_key(api_key: str) -> None:
_global_state.set('comfyui_copilot_api_key', api_key)


def set_last_openai_base_url(base_url: Optional[str]) -> None:
"""Remember the last used OpenAI base URL (e.g., LMStudio), for fallback in chat."""
_global_state.set('LAST_OPENAI_BASE_URL', base_url)


def get_last_openai_base_url() -> Optional[str]:
"""Get the last remembered OpenAI base URL."""
return _global_state.get('LAST_OPENAI_BASE_URL')


BACKEND_BASE_URL = "https://comfyui-copilot-server.onrender.com"
LMSTUDIO_DEFAULT_BASE_URL = "http://localhost:1234/v1"
WORKFLOW_MODEL_NAME = "us.anthropic.claude-sonnet-4-20250514-v1:0"
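Assuming `GlobalState.get`/`set` behave as plain dictionary access guarded by the `RLock`, the two helpers added above round-trip as follows:

```python
# Round-trip of the new helpers (the value shown matches the LMStudIO default above).
set_last_openai_base_url("http://localhost:1234/v1")
assert get_last_openai_base_url() == "http://localhost:1234/v1"

# Clearing is allowed; callers treat None as "nothing remembered yet".
set_last_openai_base_url(None)
assert get_last_openai_base_url() is None
```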