diff --git a/IMPLEMENTATION_GUARDRAILS.md b/IMPLEMENTATION_GUARDRAILS.md
new file mode 100644
index 000000000..10edba15d
--- /dev/null
+++ b/IMPLEMENTATION_GUARDRAILS.md
@@ -0,0 +1,10 @@
+# Implementation Guardrails
+
+- When the live prompt surface is active, `agent_share_your_reasoning` must render through the structured `AGENT REASONING` path, not as low-level `Calling ... token(s)` tool progress.
+- When the live prompt surface is active, mutable tool progress that upstream code prints and then clears must render in the prompt-local ephemeral status strip, not as transcript output and not via above-prompt prints.
+- When the live prompt surface is active, streamed `TextPart` content may appear only in the prompt-local ephemeral preview; the permanent transcript must still come only from the final `AGENT RESPONSE`.
+- When the live prompt surface is active, shell output with carriage-return progress must use the prompt-local ephemeral status strip; ordinary shell lines remain on the durable shell output path.
+- Durable structured outputs such as `AGENT REASONING` and `DIRECTORY LISTING` should still render above the prompt.
+- Prompt-surface stream fixes must not duplicate the final `AGENT RESPONSE`.
+- The prompt-local ephemeral status/preview is foreground-only; session-tagged sub-agent messages must never write to it or clear it.
+- Terminal/emulator-specific behavior must flow through the shared terminal-capability helper in `terminal_utils` rather than adding new scattered env checks.
diff --git a/code_puppy/agents/base_agent.py b/code_puppy/agents/base_agent.py
index 8bd383fb0..776e4d70b 100644
--- a/code_puppy/agents/base_agent.py
+++ b/code_puppy/agents/base_agent.py
@@ -1960,7 +1960,7 @@ async def run_agent_task():
                 "Try disabling any malfunctioning MCP servers", group_id=group_id
             )
         except* asyncio.exceptions.CancelledError:
-            emit_info("Cancelled")
+            # Intentionally no "Cancelled" print here; staying silent keeps smooth interjections clean.
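+            # The DBOS workflow cancellation below still runs; only the console message goes away.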
if get_use_dbos(): await DBOS.cancel_workflow_async(group_id) except* InterruptedError as ie: diff --git a/code_puppy/agents/event_stream_handler.py b/code_puppy/agents/event_stream_handler.py index c21aff552..2052619ce 100644 --- a/code_puppy/agents/event_stream_handler.py +++ b/code_puppy/agents/event_stream_handler.py @@ -2,6 +2,7 @@ import asyncio import logging +import sys from collections.abc import AsyncIterable from typing import Any, Optional @@ -85,6 +86,108 @@ def _should_suppress_output() -> bool: return is_subagent() and not get_subagent_verbose() +def _get_active_prompt_runtime() -> Any | None: + """Return the active interactive runtime, if available.""" + try: + from code_puppy.command_line.interactive_runtime import ( + get_active_interactive_runtime, + ) + + return get_active_interactive_runtime() + except Exception: + return None + + +def _has_active_prompt_surface() -> bool: + """Return True when the always-on prompt surface is mounted.""" + runtime = _get_active_prompt_runtime() + return runtime.has_prompt_surface() if runtime is not None else False + + +def _set_prompt_ephemeral_status(text: str | None) -> None: + """Update transient prompt-local status for mutable stream output.""" + runtime = _get_active_prompt_runtime() + if runtime is None: + return + try: + runtime.set_prompt_ephemeral_status(text) + except Exception: + pass + + +def _clear_prompt_ephemeral_status() -> None: + """Clear transient prompt-local status.""" + runtime = _get_active_prompt_runtime() + if runtime is None: + return + try: + runtime.clear_prompt_ephemeral_status() + except Exception: + pass + + +def _set_prompt_ephemeral_preview(text: str | None) -> None: + """Update transient prompt-local preview for live response text.""" + runtime = _get_active_prompt_runtime() + if runtime is None: + return + try: + runtime.set_prompt_ephemeral_preview(text) + except Exception: + pass + + +def _merge_tool_name(current_name: str, tool_name_delta: str) -> str: + """Merge a streamed tool name delta without duplicating already-known names.""" + if not tool_name_delta: + return current_name + if not current_name: + return tool_name_delta + if tool_name_delta.startswith(current_name): + return tool_name_delta + if tool_name_delta in current_name: + return current_name + for overlap in range(min(len(current_name), len(tool_name_delta)), 0, -1): + if current_name.endswith(tool_name_delta[:overlap]): + return current_name + tool_name_delta[overlap:] + return current_name + tool_name_delta + + +def _is_reasoning_tool_name(tool_name: str) -> bool: + """Return True for the reasoning tool, including streamed prefixes.""" + reasoning_tool = "agent_share_your_reasoning" + return bool(tool_name) and ( + reasoning_tool.startswith(tool_name) or tool_name.startswith(reasoning_tool) + ) + + +def _build_prompt_safe_console(source_console: Console) -> Console: + """Create a console that writes to the real terminal above the prompt.""" + return Console( + file=sys.__stdout__, + force_terminal=source_console.is_terminal, + width=source_console.width, + color_system=source_console.color_system, + soft_wrap=source_console.soft_wrap, + legacy_windows=source_console.legacy_windows, + ) + + +async def _print_stream_output( + console: Console, *args: Any, **kwargs: Any +) -> None: + """Render stream output above the prompt when the prompt surface is mounted.""" + runtime = _get_active_prompt_runtime() + if runtime is not None and runtime.has_prompt_surface(): + prompt_safe_console = _build_prompt_safe_console(console) + rendered = 
await runtime.run_above_prompt_async( + lambda: prompt_safe_console.print(*args, **kwargs) + ) + if rendered: + return + console.print(*args, **kwargs) + + async def event_stream_handler( ctx: RunContext, events: AsyncIterable[Any], @@ -119,6 +222,8 @@ async def event_stream_handler( token_count: dict[int, int] = {} # Track token count per text/tool part tool_names: dict[int, str] = {} # Track tool name per tool part index did_stream_anything = False # Track if we streamed any content + spinner_paused = False + prompt_surface_response_preview = "" # Termflow streaming state for text parts termflow_parsers: dict[int, TermflowParser] = {} @@ -127,16 +232,22 @@ async def event_stream_handler( async def _print_thinking_banner() -> None: """Print the THINKING banner with spinner pause and line clear.""" - nonlocal did_stream_anything - - pause_all_spinners() - await asyncio.sleep(0.1) # Delay to let spinner fully clear - # Clear line and print newline before banner - console.print(" " * 50, end="\r") - console.print() # Newline before banner + nonlocal did_stream_anything, spinner_paused + + prompt_surface_active = _has_active_prompt_surface() + if not spinner_paused: + pause_all_spinners() + spinner_paused = True + await asyncio.sleep(0.02) + if prompt_surface_active: + await _print_stream_output(console) + else: + await _print_stream_output(console, " " * 50, end="\r") + await _print_stream_output(console) # Newline before banner # Bold banner with configurable color and lightning bolt thinking_color = get_banner_color("thinking") - console.print( + await _print_stream_output( + console, Text.from_markup( f"[bold white on {thinking_color}] THINKING [/bold white on {thinking_color}] [dim]\u26a1 " ), @@ -146,15 +257,21 @@ async def _print_thinking_banner() -> None: async def _print_response_banner() -> None: """Print the AGENT RESPONSE banner with spinner pause and line clear.""" - nonlocal did_stream_anything - - pause_all_spinners() - await asyncio.sleep(0.1) # Delay to let spinner fully clear - # Clear line and print newline before banner - console.print(" " * 50, end="\r") - console.print() # Newline before banner + nonlocal did_stream_anything, spinner_paused + + prompt_surface_active = _has_active_prompt_surface() + if not spinner_paused: + pause_all_spinners() + spinner_paused = True + await asyncio.sleep(0.02) + if prompt_surface_active: + await _print_stream_output(console) + else: + await _print_stream_output(console, " " * 50, end="\r") + await _print_stream_output(console) # Newline before banner response_color = get_banner_color("agent_response") - console.print( + await _print_stream_output( + console, Text.from_markup( f"[bold white on {response_color}] AGENT RESPONSE [/bold white on {response_color}]" ) @@ -182,32 +299,33 @@ async def _print_response_banner() -> None: if part.content and part.content.strip(): await _print_thinking_banner() escaped = escape(part.content) - console.print(f"[dim]{escaped}[/dim]", end="") + await _print_stream_output(console, f"[dim]{escaped}[/dim]", end="") banner_printed.add(event.index) elif isinstance(part, TextPart): streaming_parts.add(event.index) text_parts.add(event.index) - # Initialize termflow streaming for this text part - termflow_parsers[event.index] = TermflowParser() - termflow_renderers[event.index] = TermflowRenderer( - output=console.file, width=console.width - ) - termflow_line_buffers[event.index] = "" - # Handle initial content if present - if part.content and part.content.strip(): - await _print_response_banner() - 
banner_printed.add(event.index) - termflow_line_buffers[event.index] = part.content + if _has_active_prompt_surface(): + if part.content: + prompt_surface_response_preview += part.content + _set_prompt_ephemeral_preview(prompt_surface_response_preview) + else: + # Initialize termflow streaming for this text part + termflow_parsers[event.index] = TermflowParser() + termflow_renderers[event.index] = TermflowRenderer( + output=console.file, width=console.width + ) + termflow_line_buffers[event.index] = "" + # Handle initial content if present + if part.content and part.content.strip(): + await _print_response_banner() + banner_printed.add(event.index) + termflow_line_buffers[event.index] = part.content elif isinstance(part, ToolCallPart): streaming_parts.add(event.index) tool_parts.add(event.index) token_count[event.index] = 0 # Initialize token counter # Capture tool name from the start event tool_names[event.index] = part.tool_name or "" - # Track tool name for display - banner_printed.add( - event.index - ) # Use banner_printed to track if we've shown tool info # PartDeltaEvent - stream the content as it arrives elif isinstance(event, PartDeltaEvent): @@ -227,6 +345,12 @@ async def _print_response_banner() -> None: if delta.content_delta: # For text parts, stream markdown with termflow if event.index in text_parts: + if _has_active_prompt_surface(): + prompt_surface_response_preview += delta.content_delta + _set_prompt_ephemeral_preview( + prompt_surface_response_preview + ) + continue # Print banner on first content if event.index not in banner_printed: await _print_response_banner() @@ -252,8 +376,11 @@ async def _print_response_banner() -> None: await _print_thinking_banner() banner_printed.add(event.index) escaped = escape(delta.content_delta) - console.print(f"[dim]{escaped}[/dim]", end="") + await _print_stream_output( + console, f"[dim]{escaped}[/dim]", end="" + ) elif isinstance(delta, ToolCallPartDelta): + prompt_surface_active = _has_active_prompt_surface() # For tool calls, estimate tokens from args_delta content # args_delta contains the streaming JSON arguments args_delta = getattr(delta, "args_delta", "") or "" @@ -268,21 +395,35 @@ async def _print_response_banner() -> None: # Update tool name if delta provides more of it tool_name_delta = getattr(delta, "tool_name_delta", "") or "" if tool_name_delta: - tool_names[event.index] = ( - tool_names.get(event.index, "") + tool_name_delta + tool_names[event.index] = _merge_tool_name( + tool_names.get(event.index, ""), tool_name_delta ) # Use stored tool name for display tool_name = tool_names.get(event.index, "") + if prompt_surface_active: + if not _is_reasoning_tool_name(tool_name): + count = token_count[event.index] + if tool_name: + _set_prompt_ephemeral_status( + f"\U0001f527 Calling {tool_name}... {count} token(s)" + ) + else: + _set_prompt_ephemeral_status( + f"\U0001f527 Calling tool... {count} token(s)" + ) + continue count = token_count[event.index] # Display with tool wrench icon and tool name if tool_name: - console.print( + await _print_stream_output( + console, f" \U0001f527 Calling {tool_name}... {count} token(s) ", end="\r", ) else: - console.print( + await _print_stream_output( + console, f" \U0001f527 Calling tool... 
{count} token(s) ", end="\r", ) @@ -322,11 +463,14 @@ async def _print_response_banner() -> None: del termflow_line_buffers[event.index] # For tool parts, clear the chunk counter line elif event.index in tool_parts: - # Clear the chunk counter line by printing spaces and returning - console.print(" " * 50, end="\r") + if _has_active_prompt_surface(): + _clear_prompt_ephemeral_status() + else: + # Clear the chunk counter line by printing spaces and returning + await _print_stream_output(console, " " * 50, end="\r") # For thinking parts, just print newline elif event.index in banner_printed: - console.print() # Final newline after streaming + await _print_stream_output(console) # Final newline after streaming # Clean up token count and tool names token_count.pop(event.index, None) @@ -344,5 +488,8 @@ async def _print_response_banner() -> None: next_kind = getattr(event, "next_part_kind", None) if next_kind not in ("text", "thinking", "tool-call"): resume_all_spinners() + spinner_paused = False # Spinner is resumed in PartEndEvent when appropriate (based on next_part_kind) + if spinner_paused: + resume_all_spinners() diff --git a/code_puppy/cli_runner.py b/code_puppy/cli_runner.py index 3f8d2c028..b50d88390 100644 --- a/code_puppy/cli_runner.py +++ b/code_puppy/cli_runner.py @@ -9,6 +9,9 @@ apply_all_patches() import argparse +from typing import Any, Literal + + import asyncio import os import sys @@ -23,12 +26,20 @@ from code_puppy.agents import get_current_agent from code_puppy.command_line.attachments import parse_prompt_attachments from code_puppy.command_line.clipboard import get_clipboard_manager +from code_puppy.command_line.interactive_command import BackgroundInteractiveCommand +from code_puppy.command_line.interactive_runtime import ( + PromptRuntimeState, + QueuedPrompt, + clear_active_interactive_runtime, + register_active_interactive_runtime, +) from code_puppy.config import ( AUTOSAVE_DIR, COMMAND_HISTORY_FILE, DBOS_DATABASE_URL, ensure_config_exists, finalize_autosave_session, + get_queue_limit, get_use_dbos, initialize_command_history_file, save_command_to_history, @@ -49,9 +60,200 @@ from code_puppy.tools.common import console from code_puppy.version_checker import default_version_mismatch_behavior +try: + from code_puppy.debug_capture import ( + get_active_capture, + log_event, + set_active_capture, + start_capture_session, + ) +except ImportError: + # Keep CLI usable in checkouts that don't include debug_capture. 
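+    # No-op stand-ins that mirror the debug_capture call signatures used below.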
+ def get_active_capture(): + return None + + def log_event(*args, **kwargs): + return None + + def set_active_capture(*args, **kwargs): + return None + + def start_capture_session(): + return None + + plugins.load_plugin_callbacks() +def emit_interject_queue_lifecycle( + runtime_state: PromptRuntimeState, + action: str, + *, + item: QueuedPrompt | None = None, + reason: str | None = None, + position: int | None = None, + level: str = "info", +) -> dict[str, Any]: + """Emit interject/queue lifecycle to UI, debug log, and frontend emitter.""" + payload: dict[str, Any] = { + "action": action, + "kind": item.kind if item else None, + "text": item.text if item else None, + "reason": reason, + "position": position, + "queue_size": len(runtime_state.queue), + "running": runtime_state.running, + } + try: + from code_puppy.plugins.frontend_emitter.emitter import emit_event + + emit_event("interject_queue", payload) + except Exception: + pass + + log_event("interject_queue", **payload) + + try: + from code_puppy.messaging import MessageLevel, TextMessage, get_message_bus + + text = _format_queue_lifecycle_text( + action, + item=item, + reason=reason, + position=position, + ) + if text is None: + return payload + + level_map = { + "error": MessageLevel.ERROR, + "warning": MessageLevel.WARNING, + "success": MessageLevel.SUCCESS, + "info": MessageLevel.INFO, + } + get_message_bus().emit( + TextMessage(level=level_map.get(level, MessageLevel.INFO), text=text) + ) + except Exception: + pass + return payload + + +def _format_queue_lifecycle_text( + action: str, + *, + item: QueuedPrompt | None = None, + reason: str | None = None, + position: int | None = None, +) -> str | None: + """Translate internal queue lifecycle steps into user-facing copy.""" + if action == "dequeued": + return None + + if item is None: + if action == "rejected": + return "[QUEUE] couldn't save that prompt" + return f"[QUEUE] {action}" + + if item.kind == "interject": + if action in {"started", "completed"}: + return None + if action == "cancelled" and reason == "run_cancelled": + return None + action_text = { + "queued": "stopping current work", + "cancelled": "cancelled", + "failed": "failed", + "rejected": "couldn't apply", + }.get(action, action.replace("_", " ")) + return f"[INTERJECT] {action_text}: {item.text}" + + if action == "queued": + if position is not None: + return f"[Queued][{position}] {item.text}" + return f"[Queued] {item.text}" + + if action == "started": + return None + if action == "completed" and reason is None: + return None + if action == "cancelled" and reason == "run_cancelled": + return None + + action_text = { + "completed": "finished", + "cancelled": "cancelled", + "failed": "failed", + "rejected": "couldn't save", + }.get(action, action.replace("_", " ")) + return f"[QUEUE] {action_text}: {item.text}" + + +def _build_interject_submission_text(text: str) -> str: + """Wrap an interjected prompt so the agent resumes the interrupted task.""" + return ( + "user interjects - " + f"{text} - " + "please affirm you've seen this interjection, continue the interrupted task, " + "and proceed with that in mind" + ) + + +def _seed_spinner_context(agent, prompt: str, *, link_attachments: list[str]) -> None: + """Seed context usage immediately so the prompt status line is not stale.""" + from code_puppy.messaging.spinner import ( + SpinnerBase, + clear_spinner_context, + update_spinner_context, + ) + + clear_spinner_context() + + try: + history = ( + agent.get_message_history() if hasattr(agent, 
"get_message_history") else [] + ) or [] + estimate_tokens_for_message = getattr( + agent, "estimate_tokens_for_message", None + ) + estimate_context_overhead_tokens = getattr( + agent, "estimate_context_overhead_tokens", None + ) + estimate_token_count = getattr(agent, "estimate_token_count", None) + get_model_context_length = getattr(agent, "get_model_context_length", None) + + if not callable(estimate_token_count) or not callable(get_model_context_length): + return + + history_tokens = 0 + if callable(estimate_tokens_for_message): + history_tokens = sum( + estimate_tokens_for_message(message) for message in history + ) + + overhead_tokens = ( + estimate_context_overhead_tokens() + if callable(estimate_context_overhead_tokens) + else 0 + ) + prompt_tokens = estimate_token_count(prompt) if prompt else 0 + link_tokens = sum(estimate_token_count(url) for url in link_attachments) + total_tokens = max( + 0, history_tokens + overhead_tokens + prompt_tokens + link_tokens + ) + capacity = max(1, int(get_model_context_length())) + + update_spinner_context( + SpinnerBase.format_context_info( + total_tokens, + capacity, + total_tokens / capacity, + ) + ) + except Exception: + return + + async def main(): """Main async entry point for Code Puppy CLI.""" parser = argparse.ArgumentParser(description="Code Puppy - A code generation agent") @@ -86,6 +288,11 @@ async def main(): type=str, help="Specify which model to use (e.g., --model gpt-5)", ) + parser.add_argument( + "--debug-capture", + action="store_true", + help="Write timestamped interactive terminal capture artifacts", + ) parser.add_argument( "command", nargs="*", help="Run a single command (deprecated, use -p instead)" ) @@ -93,21 +300,34 @@ async def main(): from code_puppy.messaging import ( RichConsoleRenderer, - SynchronousInteractiveRenderer, get_global_queue, get_message_bus, ) + from code_puppy.messaging.legacy_bridge import LegacyQueueToBusBridge + + capture_session = None + if args.debug_capture: + capture_session = start_capture_session() + if capture_session is not None: + set_active_capture(capture_session) + log_event( + "debug_capture_enabled", session_dir=str(capture_session.session_dir) + ) + else: + print( + "Warning: --debug-capture requested but debug_capture module is unavailable." + ) - # Create a shared console for both renderers + # Create one shared console to avoid multi-renderer race conditions. display_console = Console() - # Legacy renderer for backward compatibility (emits via get_global_queue) + # Bridge legacy queue emitters into the structured bus. message_queue = get_global_queue() - message_renderer = SynchronousInteractiveRenderer(message_queue, display_console) - message_renderer.start() - - # New MessageBus renderer for structured messages (tools emit here) message_bus = get_message_bus() + legacy_bridge = LegacyQueueToBusBridge(message_queue, message_bus) + legacy_bridge.start() + + # Single UI renderer in interactive mode. 
bus_renderer = RichConsoleRenderer(message_bus, display_console) bus_renderer.start() @@ -328,15 +548,18 @@ def _uvx_protective_sigint_handler(_sig, _frame): prompt_only_mode = False if prompt_only_mode: - await execute_single_prompt(initial_command, message_renderer) + await execute_single_prompt(initial_command, bus_renderer) else: # Default to interactive mode (no args = same as -i) - await interactive_mode(message_renderer, initial_command=initial_command) + await interactive_mode(bus_renderer, initial_command=initial_command) finally: - if message_renderer: - message_renderer.stop() if bus_renderer: bus_renderer.stop() + if legacy_bridge: + legacy_bridge.stop() + if capture_session: + capture_session.stop(exit_reason="shutdown") + set_active_capture(None) await callbacks.on_shutdown() if get_use_dbos(): DBOS.destroy() @@ -346,573 +569,1173 @@ async def interactive_mode(message_renderer, initial_command: str = None) -> Non """Run the agent in interactive mode.""" from code_puppy.command_line.command_handler import handle_command - display_console = message_renderer.console - from code_puppy.messaging import emit_info, emit_system_message - - emit_system_message( - "Type '/exit', '/quit', or press Ctrl+D to exit the interactive mode." - ) - emit_system_message("Type 'clear' to reset the conversation history.") - emit_system_message("Type /help to view all commands") - emit_system_message( - "Type @ for path completion, or /model to pick a model. Toggle multiline with Alt+M or F2; newline: Ctrl+J." - ) - emit_system_message("Paste images: Ctrl+V (even on Mac!), F3, or /paste command.") - import platform + runtime = PromptRuntimeState() + register_active_interactive_runtime(runtime) + runtime.mark_idle() + try: + display_console = message_renderer.console + from code_puppy.messaging import emit_info, emit_system_message - if platform.system() == "Darwin": emit_system_message( - "šŸ’” macOS tip: Use Ctrl+V (not Cmd+V) to paste images in terminal." + "Type '/exit', '/quit', or press Ctrl+D to exit the interactive mode." ) - cancel_key = get_cancel_agent_display_name() - emit_system_message( - f"Press {cancel_key} during processing to cancel the current task or inference. Use Ctrl+X to interrupt running shell commands." - ) - emit_system_message( - "Use /autosave_load to manually load a previous autosave session." - ) - emit_system_message( - "Use /diff to configure diff highlighting colors for file changes." - ) - emit_system_message("To re-run the tutorial, use /tutorial.") - emit_system_message( - "! to run shell commands directly (e.g., !git status)", - ) - try: - from code_puppy.command_line.motd import print_motd - - print_motd(console, force=False) - except Exception as e: - from code_puppy.messaging import emit_warning - - emit_warning(f"MOTD error: {e}") - - # Print truecolor warning LAST so it's the most visible thing on startup - # Big ugly red box should be impossible to miss! šŸ”“ - print_truecolor_warning(display_console) + log_event("interactive_mode_start") + emit_system_message("Type 'clear' to reset the conversation history.") + emit_system_message("Type /help to view all commands") + emit_system_message( + "Type @ for path completion, or /model to pick a model. Toggle multiline with Alt+M or F2; newline: Ctrl+J." + ) + emit_system_message( + "Paste images: Ctrl+V (even on Mac!), F3, or /paste command." + ) + import platform - # Shell pass-through for initial_command: ! 
bypasses the agent - if initial_command: - from code_puppy.command_line.shell_passthrough import ( - execute_shell_passthrough, - is_shell_passthrough, + if platform.system() == "Darwin": + emit_system_message( + "šŸ’” macOS tip: Use Ctrl+V (not Cmd+V) to paste images in terminal." + ) + cancel_key = get_cancel_agent_display_name() + emit_system_message( + f"Press {cancel_key} during processing to cancel the current task or inference. Use Ctrl+X to interrupt running shell commands." + ) + emit_system_message( + "Use /autosave_load to manually load a previous autosave session." + ) + emit_system_message( + "Use /diff to configure diff highlighting colors for file changes." + ) + emit_system_message("To re-run the tutorial, use /tutorial.") + emit_system_message( + "! to run shell commands directly (e.g., !git status)" ) + try: + from code_puppy.command_line.motd import print_motd - if is_shell_passthrough(initial_command): - execute_shell_passthrough(initial_command) - initial_command = None + print_motd(console, force=False) + except Exception as e: + from code_puppy.messaging import emit_warning - # Initialize the runtime agent manager - if initial_command: - from code_puppy.agents import get_current_agent - from code_puppy.messaging import emit_info, emit_success, emit_system_message + emit_warning(f"MOTD error: {e}") - agent = get_current_agent() - emit_info(f"Processing initial command: {initial_command}") + # Print truecolor warning LAST so it's the most visible thing on startup + # Big ugly red box should be impossible to miss! šŸ”“ + print_truecolor_warning(display_console) - try: - # Check if any tool is waiting for user input before showing spinner - try: - from code_puppy.tools.command_runner import is_awaiting_user_input + # Shell pass-through for initial_command: ! bypasses the agent. 
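+        # e.g. an initial command of "!git status" runs in the shell and never reaches the model.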
+ if initial_command: + from code_puppy.command_line.shell_passthrough import ( + execute_shell_passthrough, + is_shell_passthrough, + ) - awaiting_input = is_awaiting_user_input() - except ImportError: - awaiting_input = False + if is_shell_passthrough(initial_command): + execute_shell_passthrough(initial_command) + initial_command = None - # Run with or without spinner based on whether we're awaiting input - response, agent_task = await run_prompt_with_attachments( - agent, - initial_command, - spinner_console=display_console, - use_spinner=not awaiting_input, + # Initialize the runtime agent manager + if initial_command: + from code_puppy.agents import get_current_agent + from code_puppy.messaging import ( + emit_info, + emit_success, + emit_system_message, ) - if response is not None: - agent_response = response.output - # Update the agent's message history with the complete conversation - # including the final assistant response - if hasattr(response, "all_messages"): - agent.set_message_history(list(response.all_messages())) + agent = get_current_agent() + emit_info(f"Processing initial command: {initial_command}") - # Emit structured message for proper markdown rendering - from code_puppy.messaging import get_message_bus - from code_puppy.messaging.messages import AgentResponseMessage + try: + # Check if any tool is waiting for user input before showing spinner + try: + from code_puppy.tools.command_runner import is_awaiting_user_input - response_msg = AgentResponseMessage( - content=agent_response, - is_markdown=True, + awaiting_input = is_awaiting_user_input() + except ImportError: + awaiting_input = False + + # Run with or without spinner based on whether we're awaiting input + response, agent_task = await run_prompt_with_attachments( + agent, + initial_command, + spinner_console=display_console, + use_spinner=not awaiting_input, ) - get_message_bus().emit(response_msg) + if response is not None: + agent_response = response.output - emit_success("🐶 Continuing in Interactive Mode") - emit_system_message( - "Your command and response are preserved in the conversation history." - ) + # Update the agent's message history with the complete conversation + # including the final assistant response + if hasattr(response, "all_messages"): + agent.set_message_history(list(response.all_messages())) - except Exception as e: - from code_puppy.messaging import emit_error + # Emit structured message for proper markdown rendering + from code_puppy.messaging import get_message_bus + from code_puppy.messaging.messages import AgentResponseMessage - emit_error(f"Error processing initial command: {str(e)}") + response_msg = AgentResponseMessage( + content=agent_response, + is_markdown=True, + ) + get_message_bus().emit(response_msg) - # Check if prompt_toolkit is installed - try: - from code_puppy.command_line.prompt_toolkit_completion import ( - get_input_with_combined_completion, - get_prompt_with_active_model, - ) - except ImportError: - from code_puppy.messaging import emit_warning + emit_success("🐶 Continuing in Interactive Mode") + emit_system_message( + "Your command and response are preserved in the conversation history." + ) - emit_warning("Warning: prompt_toolkit not installed. 
Installing now...") - try: - import subprocess + except Exception as e: + from code_puppy.messaging import emit_error - subprocess.check_call( - [sys.executable, "-m", "pip", "install", "--quiet", "prompt_toolkit"] - ) - from code_puppy.messaging import emit_success + emit_error(f"Error processing initial command: {str(e)}") - emit_success("Successfully installed prompt_toolkit") + # Check if prompt_toolkit is installed + try: from code_puppy.command_line.prompt_toolkit_completion import ( - get_input_with_combined_completion, get_prompt_with_active_model, + prompt_for_submission, ) - except Exception as e: - from code_puppy.messaging import emit_error, emit_warning - - emit_error(f"Error installing prompt_toolkit: {e}") - emit_warning("Falling back to basic input without tab completion") - - # Autosave loading is now manual - use /autosave_load command - - # Auto-run tutorial on first startup - try: - from code_puppy.command_line.onboarding_wizard import should_show_onboarding - - if should_show_onboarding(): - import concurrent.futures - - from code_puppy.command_line.onboarding_wizard import run_onboarding_wizard - from code_puppy.config import set_model_name - from code_puppy.messaging import emit_info - - with concurrent.futures.ThreadPoolExecutor() as executor: - future = executor.submit(lambda: asyncio.run(run_onboarding_wizard())) - result = future.result(timeout=300) - - if result == "chatgpt": - emit_info("šŸ” Starting ChatGPT OAuth flow...") - from code_puppy.plugins.chatgpt_oauth.oauth_flow import run_oauth_flow + except ImportError: + from code_puppy.messaging import emit_warning - run_oauth_flow() - set_model_name("chatgpt-gpt-5.4") - elif result == "claude": - emit_info("šŸ” Starting Claude Code OAuth flow...") - from code_puppy.plugins.claude_code_oauth.register_callbacks import ( - _perform_authentication, + emit_warning("Warning: prompt_toolkit not installed. Installing now...") + try: + import subprocess + + subprocess.check_call( + [ + sys.executable, + "-m", + "pip", + "install", + "--quiet", + "prompt_toolkit", + ] ) + from code_puppy.messaging import emit_success - _perform_authentication() - set_model_name("claude-code-claude-opus-4-6") - elif result == "completed": - emit_info("šŸŽ‰ Tutorial complete! Happy coding!") - elif result == "skipped": - emit_info("ā­ļø Tutorial skipped. 
Run /tutorial anytime!") - except Exception as e: - from code_puppy.messaging import emit_warning - - emit_warning(f"Tutorial auto-start failed: {e}") - - # Track the current agent task for cancellation on quit - current_agent_task = None + emit_success("Successfully installed prompt_toolkit") + from code_puppy.command_line.prompt_toolkit_completion import ( + get_prompt_with_active_model, + prompt_for_submission, + ) + except Exception as e: + from code_puppy.messaging import emit_error, emit_warning - while True: - from code_puppy.agents.agent_manager import get_current_agent - from code_puppy.messaging import emit_info + emit_error(f"Error installing prompt_toolkit: {e}") + emit_warning("Falling back to basic input without tab completion") - # Get the custom prompt from the current agent, or use default - current_agent = get_current_agent() - user_prompt = current_agent.get_user_prompt() or "Enter your coding task:" + # Autosave loading is now manual - use /autosave_load command - emit_info(f"{user_prompt}\n") + startup_oauth_command: str | None = None + # Auto-run tutorial on first startup try: - # Use prompt_toolkit for enhanced input with path completion - try: - # Windows-specific: Reset terminal state before prompting - reset_windows_terminal_ansi() + from code_puppy.command_line.onboarding_wizard import should_show_onboarding + + if should_show_onboarding(): + import concurrent.futures - # Use the async version of get_input_with_combined_completion - task = await get_input_with_combined_completion( - get_prompt_with_active_model(), history_file=COMMAND_HISTORY_FILE + from code_puppy.command_line.onboarding_wizard import ( + run_onboarding_wizard, ) + from code_puppy.messaging import emit_info - # Windows+uvx: Re-disable Ctrl+C after prompt_toolkit - # (prompt_toolkit restores console mode which re-enables Ctrl+C) - try: - from code_puppy.terminal_utils import ensure_ctrl_c_disabled + with concurrent.futures.ThreadPoolExecutor() as executor: + future = executor.submit( + lambda: asyncio.run(run_onboarding_wizard()) + ) + result = future.result(timeout=300) + + if result == "chatgpt": + emit_info("šŸ” Starting ChatGPT OAuth flow...") + startup_oauth_command = "/chatgpt-auth" + elif result == "claude": + emit_info("šŸ” Starting Claude Code OAuth flow...") + startup_oauth_command = "/claude-code-auth" + elif result == "completed": + emit_info("šŸŽ‰ Tutorial complete! Happy coding!") + elif result == "skipped": + emit_info("ā­ļø Tutorial skipped. 
Run /tutorial anytime!") + except Exception as e: + from code_puppy.messaging import emit_warning - ensure_ctrl_c_disabled() - except ImportError: - pass - except ImportError: - # Fall back to basic input if prompt_toolkit is not available - task = input(">>> ") + emit_warning(f"Tutorial auto-start failed: {e}") - except KeyboardInterrupt: - # Handle Ctrl+C - cancel input and continue - # Windows-specific: Reset terminal state after interrupt to prevent - # the terminal from becoming unresponsive (can't type characters) - reset_windows_terminal_full() - # Stop wiggum mode on Ctrl+C + queue_start_lock = asyncio.Lock() + shutdown_requested = False + suppress_next_input_cancel_message = False + active_cancel_state = {"reason": None} + + def stop_wiggum_with_notice(message: str) -> bool: + nonlocal suppress_next_input_cancel_message from code_puppy.command_line.wiggum_state import ( is_wiggum_active, stop_wiggum, ) from code_puppy.messaging import emit_warning - if is_wiggum_active(): - stop_wiggum() - emit_warning("\nšŸ© Wiggum loop stopped!") - else: - emit_warning("\nInput cancelled") - continue - except EOFError: - # Handle Ctrl+D - exit the application - from code_puppy.messaging import emit_success - - emit_success("\nGoodbye! (Ctrl+D)") - - # Cancel any running agent task for clean shutdown - if current_agent_task and not current_agent_task.done(): - emit_info("Cancelling running agent task...") - current_agent_task.cancel() - try: - await current_agent_task - except asyncio.CancelledError: - pass # Expected when cancelling + if not is_wiggum_active(): + return False + stop_wiggum() + suppress_next_input_cancel_message = True + emit_warning(message) + return True + + async def cancel_active_run(reason: str) -> None: + """Aggressively stop shell + agent execution and wait for cancellation.""" + try: + from code_puppy.tools.command_runner import ( + get_running_shell_process_count, + kill_all_running_shell_processes, + ) + except ImportError: - break + def get_running_shell_process_count() -> int: + return 0 - # Shell pass-through: ! executes directly, bypassing the agent - from code_puppy.command_line.shell_passthrough import ( - execute_shell_passthrough, - is_shell_passthrough, - ) + def kill_all_running_shell_processes() -> None: + return None - if is_shell_passthrough(task): - execute_shell_passthrough(task) - continue + active_task = runtime.bg_task + active_cancel_hook = runtime.active_cancel_hook - # Check for exit commands (plain text or command form) - if task.strip().lower() in ["exit", "quit"] or task.strip().lower() in [ - "/exit", - "/quit", - ]: - from code_puppy.messaging import emit_success + if active_task is None or active_task.done(): + runtime.mark_idle_if_task(active_task) + return - emit_success("Goodbye!") + runtime.cancelling = True + active_cancel_state["reason"] = reason + if is_manual_cancel_reason(reason): + runtime.suppress_queue_autodrain() + log_event("cancel_start", reason=reason) - # Cancel any running agent task for clean shutdown - if current_agent_task and not current_agent_task.done(): - emit_info("Cancelling running agent task...") - current_agent_task.cancel() + if active_cancel_hook is not None: + try: + active_cancel_hook() + except Exception: + pass try: - await current_agent_task + await asyncio.wait_for(asyncio.shield(active_task), timeout=1.5) + except asyncio.TimeoutError: + pass except asyncio.CancelledError: - pass # Expected when cancelling - - # The renderer is stopped in the finally block of main(). 
- break + pass + except Exception: + pass + if active_task.done(): + runtime.mark_idle_if_task(active_task) + log_event("cancel_done", reason=reason) + return + + # First kill nested shell activity, repeating briefly if needed. + for _ in range(3): + kill_all_running_shell_processes() + if get_running_shell_process_count() == 0: + break + await asyncio.sleep(0.15) - # Check for clear command (supports both `clear` and `/clear`) - if task.strip().lower() in ("clear", "/clear"): - from code_puppy.command_line.clipboard import get_clipboard_manager - from code_puppy.messaging import ( - emit_info, - emit_system_message, - emit_warning, - ) + # Then cancel the active background agent task and await completion. + if active_task.done(): + runtime.mark_idle_if_task(active_task) + log_event("cancel_done", reason=reason) + return - agent = get_current_agent() - new_session_id = finalize_autosave_session() - agent.clear_message_history() - emit_warning("Conversation history cleared!") - emit_system_message("The agent will not remember previous interactions.") - emit_info(f"Auto-save session rotated to: {new_session_id}") - - # Also clear pending clipboard images - clipboard_manager = get_clipboard_manager() - clipboard_count = clipboard_manager.get_pending_count() - clipboard_manager.clear_pending() - if clipboard_count > 0: - emit_info(f"Cleared {clipboard_count} pending clipboard image(s)") - continue - - # Parse attachments first so leading paths aren't misread as commands - processed_for_commands = parse_prompt_attachments(task) - cleaned_for_commands = (processed_for_commands.prompt or "").strip() - - # Handle / commands based on cleaned prompt (after stripping attachments) - if cleaned_for_commands.startswith("/"): + active_task.cancel() try: - command_result = handle_command(cleaned_for_commands) - except Exception as e: - from code_puppy.messaging import emit_error + await asyncio.wait_for(active_task, timeout=6.0) + except asyncio.CancelledError: + pass + except TimeoutError: + pass + except Exception: + pass + finally: + runtime.mark_idle_if_task(active_task) + log_event("cancel_done", reason=reason) + except Exception: + clear_active_interactive_runtime(runtime) + raise + + async def shutdown_interactive_session(message: str, *, reason: str) -> None: + """Exit interactive mode and cancel active work if needed.""" + nonlocal shutdown_requested + from code_puppy.messaging import emit_info, emit_success + + shutdown_requested = True + emit_success(message) + if ( + runtime.running + and runtime.bg_task is not None + and not runtime.bg_task.done() + ): + emit_info("Cancelling running task...") + await cancel_active_run(reason) + + runtime.set_active_cancel_requester( + lambda reason: asyncio.create_task(cancel_active_run(reason)) + ) - emit_error(f"Command error: {e}") - # Continue interactive loop instead of exiting - continue - if command_result is True: - continue - elif isinstance(command_result, str): - if command_result == "__AUTOSAVE_LOAD__": - # Handle async autosave loading - try: - # Check if we're in a real interactive terminal - # (not pexpect/tests) - interactive picker requires proper TTY - use_interactive_picker = ( - sys.stdin.isatty() and sys.stdout.isatty() - ) + async def restore_autosave_state() -> None: + """Handle the /autosave_load command.""" + try: + # Check if we're in a real interactive terminal + # (not pexpect/tests) - interactive picker requires proper TTY + use_interactive_picker = sys.stdin.isatty() and sys.stdout.isatty() + + # Allow environment variable 
override for tests + if os.getenv("CODE_PUPPY_NO_TUI") == "1": + use_interactive_picker = False + + if use_interactive_picker: + # Use interactive picker for terminal sessions + from code_puppy.agents.agent_manager import get_current_agent + from code_puppy.command_line.autosave_menu import ( + interactive_autosave_picker, + ) + from code_puppy.config import ( + set_current_autosave_from_session_name, + ) + from code_puppy.messaging import emit_error, emit_success, emit_warning + from code_puppy.session_storage import ( + load_session, + restore_autosave_interactively, + ) - # Allow environment variable override for tests - if os.getenv("CODE_PUPPY_NO_TUI") == "1": - use_interactive_picker = False + chosen_session = await interactive_autosave_picker() - if use_interactive_picker: - # Use interactive picker for terminal sessions - from code_puppy.agents.agent_manager import ( - get_current_agent, - ) - from code_puppy.command_line.autosave_menu import ( - interactive_autosave_picker, - ) - from code_puppy.config import ( - set_current_autosave_from_session_name, - ) - from code_puppy.messaging import ( - emit_error, - emit_success, - emit_warning, - ) - from code_puppy.session_storage import ( - load_session, - restore_autosave_interactively, - ) - - chosen_session = await interactive_autosave_picker() + if not chosen_session: + emit_warning("Autosave load cancelled") + return - if not chosen_session: - emit_warning("Autosave load cancelled") - continue + # Load the session + base_dir = Path(AUTOSAVE_DIR) + history = load_session(chosen_session, base_dir) - # Load the session - base_dir = Path(AUTOSAVE_DIR) - history = load_session(chosen_session, base_dir) + agent = get_current_agent() + agent.set_message_history(history) - agent = get_current_agent() - agent.set_message_history(history) + # Set current autosave session + set_current_autosave_from_session_name(chosen_session) - # Set current autosave session - set_current_autosave_from_session_name(chosen_session) + total_tokens = sum( + agent.estimate_tokens_for_message(msg) for msg in history + ) + session_path = base_dir / f"{chosen_session}.pkl" - total_tokens = sum( - agent.estimate_tokens_for_message(msg) - for msg in history - ) - session_path = base_dir / f"{chosen_session}.pkl" + emit_success( + f"āœ… Autosave loaded: {len(history)} messages ({total_tokens} tokens)\n" + f"šŸ“ From: {session_path}" + ) - emit_success( - f"āœ… Autosave loaded: {len(history)} messages ({total_tokens} tokens)\n" - f"šŸ“ From: {session_path}" - ) + # Display recent message history for context + from code_puppy.command_line.autosave_menu import ( + display_resumed_history, + ) - # Display recent message history for context - from code_puppy.command_line.autosave_menu import ( - display_resumed_history, - ) + display_resumed_history(history) + else: + # Fall back to old text-based picker for tests/non-TTY environments + from code_puppy.session_storage import restore_autosave_interactively - display_resumed_history(history) - else: - # Fall back to old text-based picker for tests/non-TTY environments - await restore_autosave_interactively(Path(AUTOSAVE_DIR)) + await restore_autosave_interactively(Path(AUTOSAVE_DIR)) - except Exception as e: - from code_puppy.messaging import emit_error + except Exception as e: + from code_puppy.messaging import emit_error - emit_error(f"Failed to load autosave: {e}") - continue - else: - # Command returned a prompt to execute - task = command_result - elif command_result is False: - # Command not recognized, continue 
with normal processing - pass + emit_error(f"Failed to load autosave: {e}") - if task.strip(): - # Write to the secret file for permanent history with timestamp - save_command_to_history(task) + async def clear_conversation_history() -> None: + """Reset the current session history and clipboard attachments.""" + from code_puppy.agents.agent_manager import get_current_agent + from code_puppy.command_line.clipboard import get_clipboard_manager + from code_puppy.messaging import emit_info, emit_system_message, emit_warning + agent = get_current_agent() + new_session_id = finalize_autosave_session() + agent.clear_message_history() + emit_warning("Conversation history cleared!") + emit_system_message("The agent will not remember previous interactions.") + emit_info(f"Auto-save session rotated to: {new_session_id}") + + clipboard_manager = get_clipboard_manager() + clipboard_count = clipboard_manager.get_pending_count() + clipboard_manager.clear_pending() + if clipboard_count > 0: + emit_info(f"Cleared {clipboard_count} pending clipboard image(s)") + + def is_exit_text(text: str) -> bool: + """Check if text should terminate interactive mode.""" + return text.strip().lower() in {"exit", "quit", "/exit", "/quit"} + + def is_manual_cancel_reason(reason: str) -> bool: + """Return whether a cancel reason should pause queue autodrain.""" + return reason in {"ctrl_c", "ctrl+k", "ctrl+q"} + + def queue_level(item: QueuedPrompt) -> str: + """Return the lifecycle level for a queued item.""" + return "warning" if item.kind == "interject" else "success" + + async def emit_queue_dispatch(item: QueuedPrompt) -> None: + """Emit UI markers before a queued/interjected item is dispatched.""" + if item.kind == "queued": try: - # No need to get agent directly - use manager's run methods - - # Use our custom helper to enable attachment handling with spinner support - result, current_agent_task = await run_prompt_with_attachments( - current_agent, - task, - spinner_console=message_renderer.console, + from code_puppy.command_line.prompt_toolkit_completion import ( + render_transcript_notice, ) - # Check if the task was cancelled (but don't show message if we just killed processes) - if result is None: - # Windows-specific: Reset terminal state after cancellation - reset_windows_terminal_ansi() - # Re-disable Ctrl+C if needed (uvx mode) - try: - from code_puppy.terminal_utils import ensure_ctrl_c_disabled + except Exception: + render_transcript_notice = None - ensure_ctrl_c_disabled() - except ImportError: - pass - # Stop wiggum mode on cancellation - from code_puppy.command_line.wiggum_state import ( - is_wiggum_active, - stop_wiggum, + notice_text = f"[QUEUE TRIGGERED] {item.text.strip()}" + if notice_text.strip() and render_transcript_notice is not None: + if runtime.has_prompt_surface(): + rendered = await runtime.run_above_prompt_async( + lambda: render_transcript_notice(notice_text) ) + if not rendered: + render_transcript_notice(notice_text) + else: + render_transcript_notice(notice_text) + emit_interject_queue_lifecycle( + runtime, + "dequeued", + item=item, + level=queue_level(item), + ) + + async def echo_dispatched_prompt(item: QueuedPrompt) -> None: + """Echo queued/interjected prompts into the transcript before launch.""" + try: + from code_puppy.command_line.prompt_toolkit_completion import ( + render_submitted_prompt_echo, + ) + except Exception: + return - if is_wiggum_active(): - stop_wiggum() - from code_puppy.messaging import emit_warning + prompt_text = item.text.strip() + if not prompt_text: + 
return - emit_warning("šŸ© Wiggum loop stopped due to cancellation") - continue - # Get the structured response - agent_response = result.output + if runtime.has_prompt_surface(): + if await runtime.run_above_prompt_async( + lambda: render_submitted_prompt_echo(prompt_text) + ): + return + + render_submitted_prompt_echo(prompt_text) + + async def echo_direct_prompt_if_needed( + prompt_text: str, *, echo_in_transcript: bool + ) -> None: + """Echo direct submissions only when the prompt line was erased.""" + if not echo_in_transcript: + return + try: + from code_puppy.command_line.prompt_toolkit_completion import ( + render_submitted_prompt_echo, + ) + except Exception: + return + + visible_text = prompt_text.strip() + if not visible_text: + return - # Emit structured message for proper markdown rendering - from code_puppy.messaging import get_message_bus - from code_puppy.messaging.messages import AgentResponseMessage + if runtime.has_prompt_surface(): + if await runtime.run_above_prompt_async( + lambda: render_submitted_prompt_echo(visible_text) + ): + return + + render_submitted_prompt_echo(visible_text) + + def complete_queue_item(item: QueuedPrompt, reason: str) -> None: + """Mark a queued item as handled without launching the agent.""" + emit_interject_queue_lifecycle( + runtime, + "completed", + item=item, + reason=reason, + level=queue_level(item), + ) - response_msg = AgentResponseMessage( - content=agent_response, - is_markdown=True, + async def run_agent_bg(task_text, agent, source_item: QueuedPrompt | None = None): + try: + log_event("agent_start", prompt=task_text) + if source_item: + emit_interject_queue_lifecycle( + runtime, + "started", + item=source_item, + level="warning" if source_item.kind == "interject" else "success", ) - get_message_bus().emit(response_msg) + result, _ = await run_prompt_with_attachments( + agent, + task_text, + spinner_console=message_renderer.console, + ) + if result is None: + cancel_reason = active_cancel_state["reason"] + reset_windows_terminal_ansi() + try: + from code_puppy.terminal_utils import ensure_ctrl_c_disabled - # Update the agent's message history with the complete conversation - # including the final assistant response. The history_processors callback - # may not capture the final message, so we use result.all_messages() - # to ensure the autosave includes the complete conversation. 
- if hasattr(result, "all_messages"): - current_agent.set_message_history(list(result.all_messages())) + ensure_ctrl_c_disabled() + except ImportError: + pass + if cancel_reason != "interject" and stop_wiggum_with_notice( + "šŸ© Wiggum loop stopped due to cancellation" + ): + log_event("wiggum_stopped", reason="cancelled_active_run") + if source_item: + emit_interject_queue_lifecycle( + runtime, + "cancelled", + item=source_item, + reason="run_cancelled", + level="warning", + ) + return + agent_response = result.output - # Ensure console output is flushed before next prompt - # This fixes the issue where prompt doesn't appear after agent response - if hasattr(display_console.file, "flush"): - display_console.file.flush() + from code_puppy.messaging import get_message_bus + from code_puppy.messaging.messages import AgentResponseMessage - await asyncio.sleep( - 0.1 - ) # Brief pause to ensure all messages are rendered + response_msg = AgentResponseMessage( + content=agent_response, + is_markdown=True, + ) + get_message_bus().emit(response_msg) - except Exception: - from code_puppy.messaging.queue_console import get_queue_console + if hasattr(result, "all_messages"): + agent.set_message_history(list(result.all_messages())) - get_queue_console().print_exception() + if hasattr(display_console.file, "flush"): + display_console.file.flush() - # Auto-save session if enabled (moved outside the try block to avoid being swallowed) + await asyncio.sleep(0.1) from code_puppy.config import auto_save_session_if_enabled auto_save_session_if_enabled() + if source_item: + emit_interject_queue_lifecycle( + runtime, + "completed", + item=source_item, + level="success", + ) - # ================================================================ - # WIGGUM LOOP: Re-run prompt if wiggum mode is active - # ================================================================ - from code_puppy.command_line.wiggum_state import ( - get_wiggum_prompt, - increment_wiggum_count, - is_wiggum_active, - stop_wiggum, + except Exception: + from code_puppy.messaging.queue_console import get_queue_console + + get_queue_console().print_exception() + if source_item: + emit_interject_queue_lifecycle( + runtime, + "failed", + item=source_item, + reason="exception", + level="error", + ) + finally: + active_task = asyncio.current_task() + owns_runtime = runtime.is_active_task(active_task) + was_cancelling = runtime.cancelling if owns_runtime else False + runtime.mark_idle_if_task(active_task) + if owns_runtime: + active_cancel_state["reason"] = None + log_event("agent_end", prompt=task_text) + if not owns_runtime: + return + if was_cancelling: + if shutdown_requested: + log_event( + "queue_autodrain_skipped", + reason="shutdown_requested", + remaining=len(runtime.queue), + ) + return + if runtime.is_queue_autodrain_suppressed(): + log_event( + "queue_autodrain_skipped", + reason="manual_cancel_pause", + remaining=len(runtime.queue), + ) + return + log_event( + "queue_autodrain_skipped", + reason="cancelling", + remaining=len(runtime.queue), + ) + asyncio.create_task( + kick_drain_after_cancel_boundary( + origin="cancel_boundary_fallback", + ) + ) + return + await drain_pending_work_if_idle(origin="run_complete") + + async def run_interactive_command_bg( + command_result: BackgroundInteractiveCommand, + command_text: str, + *, + source_item: QueuedPrompt | None = None, + ) -> None: + """Run a long-lived interactive command without blocking the composer.""" + try: + log_event("interactive_command_start", command=command_text) + if 
source_item: + emit_interject_queue_lifecycle( + runtime, + "started", + item=source_item, + level="warning" if source_item.kind == "interject" else "success", + ) + + completed = await asyncio.to_thread( + command_result.run, command_result.cancel_event ) - while is_wiggum_active(): - wiggum_prompt = get_wiggum_prompt() - if not wiggum_prompt: - stop_wiggum() - break + if source_item and completed and not command_result.cancel_event.is_set(): + emit_interject_queue_lifecycle( + runtime, + "completed", + item=source_item, + level="success", + ) + except asyncio.CancelledError: + if source_item: + emit_interject_queue_lifecycle( + runtime, + "cancelled", + item=source_item, + reason="run_cancelled", + level="warning", + ) + except Exception: + from code_puppy.messaging.queue_console import get_queue_console + + get_queue_console().print_exception() + if source_item: + emit_interject_queue_lifecycle( + runtime, + "failed", + item=source_item, + reason="exception", + level="error", + ) + finally: + active_task = asyncio.current_task() + owns_runtime = runtime.is_active_task(active_task) + was_cancelling = runtime.cancelling if owns_runtime else False + runtime.mark_idle_if_task(active_task) + if owns_runtime: + active_cancel_state["reason"] = None + log_event("interactive_command_end", command=command_text) + if not owns_runtime: + return + if was_cancelling: + if shutdown_requested: + log_event( + "queue_autodrain_skipped", + reason="shutdown_requested", + remaining=len(runtime.queue), + ) + return + if runtime.is_queue_autodrain_suppressed(): + log_event( + "queue_autodrain_skipped", + reason="manual_cancel_pause", + remaining=len(runtime.queue), + ) + return + log_event( + "queue_autodrain_skipped", + reason="cancelling", + remaining=len(runtime.queue), + ) + asyncio.create_task( + kick_drain_after_cancel_boundary( + origin="cancel_boundary_fallback", + ) + ) + return + await drain_pending_work_if_idle(origin="interactive_command_complete") + + async def dispatch_submission( + task_text: str, + *, + requested_action: Literal["submit", "queue", "interject"] = "submit", + source_item: QueuedPrompt | None = None, + echo_in_transcript: bool = False, + save_history: bool = True, + allow_command_dispatch: bool = True, + ) -> str: + """Normalize a submitted prompt into exit, command handling, or agent work.""" + raw_task = task_text + stripped_task = raw_task.strip() + if not stripped_task: + if source_item: + complete_queue_item(source_item, "empty") + return "noop" + + if source_item is None and is_exit_text(stripped_task): + await shutdown_interactive_session("Goodbye!", reason="user_exit") + return "exit" + + if source_item is None and requested_action in {"queue", "interject"}: + from code_puppy.messaging import emit_warning - # Increment and show debug message - loop_num = increment_wiggum_count() - from code_puppy.messaging import emit_system_message, emit_warning + save_command_to_history(raw_task) + log_event("interject_choice", action=requested_action, prompt=stripped_task) - emit_warning(f"\nšŸ© WIGGUM RELOOPING! (Loop #{loop_num})") - emit_system_message(f"Re-running prompt: {wiggum_prompt}") + if requested_action == "interject": + log_event("interject_banner", text=stripped_task) + ok, position, item = runtime.request_interject( + stripped_task, + allow_command_dispatch=allow_command_dispatch, + ) + if not ok: + emit_warning( + f"Queue full ({get_queue_limit()}). Cannot interject right now." 
+ ) + emit_interject_queue_lifecycle( + runtime, + "rejected", + reason="full_interject", + level="error", + ) + log_event( + "queue_reject", + prompt=stripped_task, + reason="full_interject", + ) + return "consumed" + + await cancel_active_run("interject") + emit_interject_queue_lifecycle( + runtime, + "queued", + item=item, + position=position, + level="warning", + ) + log_event( + "queued_interject", + text=stripped_task, + position=1, + size=len(runtime.queue), + ) + handled = await drain_pending_work_if_idle(origin="interject_enqueued") + log_event( + "interject_queue_kick_attempted", + remaining=len(runtime.queue), + running=runtime.running, + handled=handled, + ) + return "consumed" - # Reset context/history for fresh start - new_session_id = finalize_autosave_session() - current_agent.clear_message_history() - emit_system_message( - f"Context cleared. Session rotated to: {new_session_id}" + ok, position, item = runtime.request_queue( + stripped_task, + allow_command_dispatch=allow_command_dispatch, + ) + if not ok: + emit_warning( + f"Queue full ({get_queue_limit()}). Prompt was not queued." + ) + emit_interject_queue_lifecycle( + runtime, + "rejected", + reason="full", + level="error", ) + log_event("queue_reject", prompt=stripped_task, reason="full") + return "consumed" + + emit_interject_queue_lifecycle( + runtime, + "queued", + item=item, + position=position, + level="info", + ) + log_event( + "queued_prompt", + text=stripped_task, + position=position, + size=len(runtime.queue), + ) + await drain_pending_work_if_idle(origin="queue_enqueued") + return "consumed" + + if source_item is None and runtime.running: + log_event("busy_submission_ignored", prompt=stripped_task) + return "consumed" + + if source_item: + await emit_queue_dispatch(source_item) + await echo_dispatched_prompt(source_item) + else: + await echo_direct_prompt_if_needed( + raw_task, + echo_in_transcript=echo_in_transcript, + ) + + if allow_command_dispatch and stripped_task.lower() in {"clear", "/clear"}: + await clear_conversation_history() + if source_item: + complete_queue_item(source_item, "clear") + return "consumed" - # Small delay to let user see the debug message + candidate_task = raw_task + if allow_command_dispatch: + processed_for_commands = parse_prompt_attachments(raw_task) + cleaned_for_commands = (processed_for_commands.prompt or "").strip() - await asyncio.sleep(0.5) + if source_item and is_exit_text(cleaned_for_commands or stripped_task): + from code_puppy.messaging import emit_warning + emit_warning("Skipping queued exit command. 
Use /exit directly.") + complete_queue_item(source_item, "exit_skipped") + return "consumed" + + if cleaned_for_commands.startswith("/"): try: - # Re-run the wiggum prompt - result, current_agent_task = await run_prompt_with_attachments( - current_agent, - wiggum_prompt, - spinner_console=message_renderer.console, + command_result = handle_command(cleaned_for_commands) + except Exception as e: + from code_puppy.messaging import emit_error + + emit_error(f"Command error: {e}") + if source_item: + complete_queue_item(source_item, "command_error") + return "consumed" + + if command_result is True: + if source_item: + complete_queue_item(source_item, "command_consumed") + return "consumed" + + if isinstance(command_result, str): + if command_result == "__AUTOSAVE_LOAD__": + await restore_autosave_state() + if source_item: + complete_queue_item(source_item, "autosave_load") + return "consumed" + candidate_task = command_result + elif isinstance(command_result, BackgroundInteractiveCommand): + if save_history: + save_command_to_history(raw_task) + runtime.mark_running( + asyncio.create_task( + run_interactive_command_bg( + command_result, + cleaned_for_commands, + source_item=source_item, + ) + ), + kind="interactive_command", + cancel_hook=command_result.request_cancel, ) + return "launched" - if result is None: - # Cancelled - stop wiggum mode - emit_warning("Wiggum loop cancelled by user") - stop_wiggum() - break + candidate_task = candidate_task.strip() + if not candidate_task: + if source_item: + complete_queue_item(source_item, "empty") + return "noop" - # Get the structured response - agent_response = result.output + if save_history: + save_command_to_history(raw_task) - # Emit structured message for proper markdown rendering - response_msg = AgentResponseMessage( - content=agent_response, - is_markdown=True, + from code_puppy.agents.agent_manager import get_current_agent + + runtime.mark_running( + asyncio.create_task( + run_agent_bg( + candidate_task, + get_current_agent(), + source_item=source_item, + ) + ) + ) + return "launched" + + async def dispatch_wiggum_if_idle() -> str: + """Start the next wiggum loop iteration when no queued work exists.""" + from code_puppy.command_line.wiggum_state import ( + get_wiggum_prompt, + increment_wiggum_count, + is_wiggum_active, + stop_wiggum, + ) + from code_puppy.messaging import emit_system_message, emit_warning + + if not is_wiggum_active(): + return "noop" + + wiggum_prompt = get_wiggum_prompt() + if not wiggum_prompt: + stop_wiggum() + return "consumed" + + loop_num = increment_wiggum_count() + emit_warning(f"\nšŸ© WIGGUM RELOOPING! (Loop #{loop_num})") + emit_system_message(f"Re-running prompt: {wiggum_prompt}") + + current_agent = get_current_agent() + new_session_id = finalize_autosave_session() + current_agent.clear_message_history() + emit_system_message(f"Context cleared. 
Session rotated to: {new_session_id}") + await asyncio.sleep(0.5) + + try: + return await dispatch_submission( + wiggum_prompt, + save_history=False, + allow_command_dispatch=False, + ) + except KeyboardInterrupt: + runtime.suppress_queue_autodrain() + stop_wiggum_with_notice("\nšŸ© Wiggum loop stopped!") + return "consumed" + except Exception as e: + from code_puppy.messaging import emit_error + + emit_error(f"Wiggum loop error: {e}") + stop_wiggum() + return "consumed" + + async def drain_pending_work_if_idle(*, origin: str) -> bool: + """Single-flight idle drain for queued prompts and wiggum reruns.""" + handled_any = False + + async with queue_start_lock: + while True: + if runtime.has_pending_submission(): + log_event( + "queue_autodrain_noop", + origin=origin, + reason="pending_submission", ) - get_message_bus().emit(response_msg) + return handled_any + + if runtime.is_queue_autodrain_suppressed(): + log_event( + "queue_autodrain_noop", + origin=origin, + reason="manual_cancel_pause", + ) + return handled_any + + if runtime.running: + active_task = runtime.bg_task + if active_task is None or active_task.done(): + runtime.mark_idle() + log_event( + "queue_autodrain_reconciled", + origin=origin, + had_task=active_task is not None, + task_done=active_task.done() + if active_task is not None + else None, + ) + else: + log_event( + "queue_autodrain_noop", origin=origin, reason="running" + ) + return handled_any + + from code_puppy.command_line.wiggum_state import is_wiggum_active + + if is_wiggum_active(): + next_item = runtime.dequeue_next_interject() + if next_item is not None: + outcome = await dispatch_submission( + _build_interject_submission_text(next_item.text), + source_item=next_item, + save_history=False, + allow_command_dispatch=next_item.allow_command_dispatch, + ) + handled_any = True + if outcome == "launched": + log_event( + "queue_autodrain_triggered", + origin=origin, + remaining=len(runtime.queue), + kind=next_item.kind, + text=next_item.text, + ) + return True + continue + + outcome = await dispatch_wiggum_if_idle() + if outcome == "launched": + log_event( + "queue_autodrain_triggered", origin=origin, kind="wiggum" + ) + return True + if outcome == "consumed": + log_event( + "queue_autodrain_consumed", origin=origin, kind="wiggum" + ) + return True - # Update message history - if hasattr(result, "all_messages"): - current_agent.set_message_history(list(result.all_messages())) + log_event( + "queue_autodrain_noop", origin=origin, reason="wiggum_idle" + ) + return handled_any + + next_item = runtime.dequeue() + if next_item is not None: + outcome = await dispatch_submission( + next_item.text + if next_item.kind == "queued" + else _build_interject_submission_text(next_item.text), + source_item=next_item, + save_history=False, + allow_command_dispatch=next_item.allow_command_dispatch, + ) + handled_any = True + if outcome == "launched": + log_event( + "queue_autodrain_triggered", + origin=origin, + remaining=len(runtime.queue), + kind=next_item.kind, + text=next_item.text, + ) + return True + continue - # Flush console - if hasattr(display_console.file, "flush"): - display_console.file.flush() - await asyncio.sleep(0.1) + outcome = await dispatch_wiggum_if_idle() + if outcome == "launched": + log_event("queue_autodrain_triggered", origin=origin, kind="wiggum") + return True + if outcome == "consumed": + log_event("queue_autodrain_consumed", origin=origin, kind="wiggum") + return True - # Auto-save - auto_save_session_if_enabled() + log_event("queue_autodrain_noop", 
origin=origin, reason="empty") + return handled_any - except KeyboardInterrupt: - emit_warning("\nšŸ© Wiggum loop interrupted by Ctrl+C") - stop_wiggum() - break - except Exception as e: - from code_puppy.messaging import emit_error + async def kick_drain_after_cancel_boundary(*, origin: str) -> bool: + """Yield once before draining, so cancellation state fully settles.""" + await asyncio.sleep(0) + return await drain_pending_work_if_idle(origin=origin) - emit_error(f"Wiggum loop error: {e}") - stop_wiggum() - break + try: + if startup_oauth_command: + startup_outcome = await dispatch_submission( + startup_oauth_command, + save_history=False, + allow_command_dispatch=True, + ) + if startup_outcome == "exit": + return + + while True: + from code_puppy.agents.agent_manager import get_current_agent + from code_puppy.messaging import emit_info + + # Get the custom prompt from the current agent, or use default + current_agent = get_current_agent() + user_prompt = current_agent.get_user_prompt() or "Enter your coding task:" + if not runtime.running: + handled = await drain_pending_work_if_idle(origin="loop_idle_check") + if handled: + continue + + if not runtime.running and not runtime.has_pending_submission(): + emit_info(f"{user_prompt}\n") - # Re-disable Ctrl+C if needed (uvx mode) - must be done after - # each iteration as various operations may restore console mode try: - from code_puppy.terminal_utils import ensure_ctrl_c_disabled + # Use prompt_toolkit for enhanced input with path completion + try: + # Windows-specific: Reset terminal state before prompting + reset_windows_terminal_ansi() - ensure_ctrl_c_disabled() - except ImportError: - pass + submission = await prompt_for_submission( + get_prompt_with_active_model, + history_file=COMMAND_HISTORY_FILE, + erase_when_done=runtime.running, + ) + log_event( + "input_received", + action=submission.action, + text=submission.text, + ) + if submission.text.strip(): + suppress_next_input_cancel_message = False + + # Windows+uvx: Re-disable Ctrl+C after prompt_toolkit + # (prompt_toolkit restores console mode which re-enables Ctrl+C) + try: + from code_puppy.terminal_utils import ensure_ctrl_c_disabled + + ensure_ctrl_c_disabled() + except ImportError: + pass + except ImportError: + # Fall back to basic input if prompt_toolkit is not available + from code_puppy.command_line.prompt_toolkit_completion import ( + PromptSubmission, + ) + + submission = PromptSubmission(action="submit", text=input(">>> ")) + + except KeyboardInterrupt: + # Handle Ctrl+C - cancel input and continue + # Windows-specific: Reset terminal state after interrupt to prevent + # the terminal from becoming unresponsive (can't type characters) + reset_windows_terminal_full() + from code_puppy.messaging import emit_warning + + if stop_wiggum_with_notice("\nšŸ© Wiggum loop stopped!"): + runtime.suppress_queue_autodrain() + continue + if suppress_next_input_cancel_message: + suppress_next_input_cancel_message = False + else: + emit_warning("\nInput cancelled") + continue + except EOFError: + # Handle Ctrl+D - exit the application + await shutdown_interactive_session( + "\nGoodbye! (Ctrl+D)", reason="ctrl_d" + ) + break + + # Shell pass-through: ! executes directly, bypassing the agent. 
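+            # For example, a submission like "!git status" would run directly in
+            # the shell and return to the composer without invoking the agent
+            # (illustrative input; any "!"-prefixed line takes this path).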
+ from code_puppy.command_line.shell_passthrough import ( + execute_shell_passthrough, + is_shell_passthrough, + ) + + if is_shell_passthrough(submission.text): + if submission.text.strip(): + runtime.clear_queue_autodrain_suppression() + execute_shell_passthrough(submission.text) + continue + + if submission.text.strip() and runtime.is_queue_autodrain_suppressed(): + runtime.clear_queue_autodrain_suppression() + log_event( + "queue_autodrain_resumed", + reason="explicit_submission", + text=submission.text.strip(), + ) + + outcome = await dispatch_submission( + submission.text, + requested_action=submission.action, + echo_in_transcript=submission.echo_in_transcript, + allow_command_dispatch=submission.allow_command_dispatch, + ) + if outcome == "exit": + break + if outcome == "launched": + await asyncio.sleep(0) + finally: + clear_active_interactive_runtime(runtime) async def run_prompt_with_attachments( @@ -931,6 +1754,9 @@ async def run_prompt_with_attachments( import re from code_puppy.messaging import emit_system_message, emit_warning + from code_puppy.command_line.interactive_runtime import ( + get_active_interactive_runtime, + ) processed_prompt = parse_prompt_attachments(raw_prompt) @@ -980,6 +1806,11 @@ async def run_prompt_with_attachments( from code_puppy.agents.event_stream_handler import set_streaming_console set_streaming_console(spinner_console) + _seed_spinner_context( + agent, + cleaned_prompt, + link_attachments=link_attachments, + ) # Create the agent task first so we can track and cancel it agent_task = asyncio.create_task( @@ -990,7 +1821,10 @@ async def run_prompt_with_attachments( ) ) - if use_spinner and spinner_console is not None: + runtime = get_active_interactive_runtime() + spinner_allowed = not (runtime is not None and runtime.running) + + if use_spinner and spinner_console is not None and spinner_allowed: from code_puppy.messaging.spinner import ConsoleSpinner with ConsoleSpinner(console=spinner_console): @@ -1011,7 +1845,10 @@ async def run_prompt_with_attachments( async def execute_single_prompt(prompt: str, message_renderer) -> None: """Execute a single prompt and exit (for -p flag).""" - # Shell pass-through: ! 
bypasses the agent even in -p mode + from code_puppy.messaging import emit_info + + emit_info(f"Executing prompt: {prompt}") + from code_puppy.command_line.shell_passthrough import ( execute_shell_passthrough, is_shell_passthrough, @@ -1021,22 +1858,18 @@ async def execute_single_prompt(prompt: str, message_renderer) -> None: execute_shell_passthrough(prompt) return - from code_puppy.messaging import emit_info - - emit_info(f"Executing prompt: {prompt}") - try: # Get agent through runtime manager and use helper for attachments agent = get_current_agent() - result, _agent_task = await run_prompt_with_attachments( + response, _task = await run_prompt_with_attachments( agent, prompt, spinner_console=message_renderer.console, ) - if result is None: + if response is None: return - agent_response = result.output + agent_response = response.output # Emit structured message for proper markdown rendering from code_puppy.messaging import get_message_bus diff --git a/code_puppy/command_line/command_handler.py b/code_puppy/command_line/command_handler.py index 642ff2424..7ccba1650 100644 --- a/code_puppy/command_line/command_handler.py +++ b/code_puppy/command_line/command_handler.py @@ -181,6 +181,7 @@ def handle_command(command: str): """ from rich.text import Text + from code_puppy.command_line.interactive_command import BackgroundInteractiveCommand from code_puppy.command_line.command_registry import get_command from code_puppy.messaging import emit_info, emit_warning @@ -256,6 +257,8 @@ def handle_command(command: str): for res in results: if res is True: return True + if isinstance(res, BackgroundInteractiveCommand): + return res if MarkdownCommandResult and isinstance(res, MarkdownCommandResult): # Special case: markdown command that should be processed as input # Replace the command with the markdown content and let it be processed diff --git a/code_puppy/command_line/config_commands.py b/code_puppy/command_line/config_commands.py index 8724ef24e..d013b7560 100644 --- a/code_puppy/command_line/config_commands.py +++ b/code_puppy/command_line/config_commands.py @@ -42,6 +42,7 @@ def handle_show_command(command: str) -> bool: get_owner_name, get_protected_token_count, get_puppy_name, + get_queue_limit, get_resume_message_count, get_temperature, get_use_dbos, @@ -77,6 +78,7 @@ def handle_show_command(command: str) -> bool: [bold]YOLO_MODE:[/bold] {"[red]ON[/red]" if yolo_mode else "[yellow]off[/yellow]"} [bold]DBOS:[/bold] {"[green]enabled[/green]" if get_use_dbos() else "[yellow]disabled[/yellow]"} (toggle: /set enable_dbos true|false) [bold]auto_save_session:[/bold] {"[green]enabled[/green]" if auto_save else "[yellow]disabled[/yellow]"} +[bold]queue_limit:[/bold] [cyan]{get_queue_limit()}[/cyan] queued prompts/interjects max [bold]protected_tokens:[/bold] [cyan]{protected_tokens:,}[/cyan] recent tokens preserved [bold]compaction_threshold:[/bold] [cyan]{compaction_threshold:.1%}[/cyan] context usage triggers compaction [bold]compaction_strategy:[/bold] [cyan]{compaction_strategy}[/cyan] (summarization or truncation) @@ -242,6 +244,17 @@ def handle_set_command(command: str) -> bool: ) ) + if key == "queue_limit": + try: + normalized_limit = int(value.strip()) + except ValueError: + emit_error("Invalid queue_limit. Enter a whole number >= 1.") + return True + if normalized_limit < 1: + emit_error("Invalid queue_limit. 
Enter a whole number >= 1.") + return True + value = str(normalized_limit) + set_config_value(key, value) emit_success(f'Set {key} = "{value}" in puppy.cfg!') diff --git a/code_puppy/command_line/core_commands.py b/code_puppy/command_line/core_commands.py index afd7bdfbc..dd0f7863c 100644 --- a/code_puppy/command_line/core_commands.py +++ b/code_puppy/command_line/core_commands.py @@ -173,7 +173,7 @@ def handle_paste_command(command: str) -> bool: usage="/tutorial", category="core", ) -def handle_tutorial_command(command: str) -> bool: +def handle_tutorial_command(command: str): """Run the interactive tutorial wizard. Usage: @@ -182,11 +182,11 @@ def handle_tutorial_command(command: str) -> bool: import asyncio import concurrent.futures + from code_puppy.command_line.interactive_command import BackgroundInteractiveCommand from code_puppy.command_line.onboarding_wizard import ( reset_onboarding, run_onboarding_wizard, ) - from code_puppy.model_switching import set_model_and_reload_agent # Always reset so user can re-run the tutorial anytime reset_onboarding() @@ -198,18 +198,18 @@ def handle_tutorial_command(command: str) -> bool: if result == "chatgpt": emit_info("šŸ” Starting ChatGPT OAuth flow...") - from code_puppy.plugins.chatgpt_oauth.oauth_flow import run_oauth_flow + from code_puppy.plugins.chatgpt_oauth.register_callbacks import ( + start_chatgpt_oauth_setup, + ) - run_oauth_flow() - set_model_and_reload_agent("chatgpt-gpt-5.4") + return BackgroundInteractiveCommand(run=start_chatgpt_oauth_setup) elif result == "claude": emit_info("šŸ” Starting Claude Code OAuth flow...") from code_puppy.plugins.claude_code_oauth.register_callbacks import ( - _perform_authentication, + start_claude_code_oauth_setup, ) - _perform_authentication() - set_model_and_reload_agent("claude-code-claude-opus-4-6") + return BackgroundInteractiveCommand(run=start_claude_code_oauth_setup) elif result == "completed": emit_info("šŸŽ‰ Tutorial complete! 
Happy coding!") elif result == "skipped": diff --git a/code_puppy/command_line/interactive_command.py b/code_puppy/command_line/interactive_command.py new file mode 100644 index 000000000..052995d9c --- /dev/null +++ b/code_puppy/command_line/interactive_command.py @@ -0,0 +1,19 @@ +"""Helpers for long-running interactive commands that need cooperative cancel.""" + +from __future__ import annotations + +import threading +from dataclasses import dataclass, field +from typing import Callable + + +@dataclass +class BackgroundInteractiveCommand: + """Background command work that should keep the composer alive.""" + + run: Callable[[threading.Event], object | None] + cancel_event: threading.Event = field(default_factory=threading.Event) + + def request_cancel(self) -> None: + """Signal the background command to stop cooperatively.""" + self.cancel_event.set() diff --git a/code_puppy/command_line/interactive_runtime.py b/code_puppy/command_line/interactive_runtime.py new file mode 100644 index 000000000..8d032c433 --- /dev/null +++ b/code_puppy/command_line/interactive_runtime.py @@ -0,0 +1,440 @@ +"""Shared runtime state for interactive prompt, queue, and shell coordination.""" + +from __future__ import annotations + +import asyncio +import contextvars +import time +from dataclasses import dataclass, field +from datetime import datetime, timezone +from typing import Callable, Literal + +from code_puppy.config import get_queue_limit + +DEFAULT_PROMPT_QUEUE_LIMIT = 25 +PROMPT_STATUS_FRAME_INTERVAL = 0.09 +PROMPT_STATUS_BACKOFF_WINDOW = 0.045 +_ABOVE_PROMPT_RENDER_ACTIVE: contextvars.ContextVar[bool] = contextvars.ContextVar( + "above_prompt_render_active", + default=False, +) + + +@dataclass +class QueuedPrompt: + """Normalized queued prompt payload.""" + + kind: Literal["queued", "interject"] + text: str + allow_command_dispatch: bool = True + created_at: str = field( + default_factory=lambda: datetime.now(timezone.utc).isoformat() + ) + + def preview_text(self) -> str: + if self.kind == "interject": + return f"[INTERJECT] {self.text}" + return self.text + + +@dataclass +class PromptRuntimeState: + """Single source of truth for interactive prompt state.""" + + queue: list[QueuedPrompt] = field(default_factory=list) + running: bool = False + cancelling: bool = False + bg_task: asyncio.Task | None = None + shell_depth: int = 0 + queue_view_offset: int = 0 + pending_submission: str | None = None + pending_submission_allow_command_dispatch: bool = True + prompt_surface_kind: Literal["main"] | None = None + prompt_session: object | None = None + prompt_status_started_at: float | None = None + prompt_status_task: asyncio.Task | None = None + prompt_ephemeral_status: str | None = None + prompt_ephemeral_preview: str | None = None + above_prompt_lock: asyncio.Lock | None = field(default=None, init=False, repr=False) + above_prompt_lock_loop: asyncio.AbstractEventLoop | None = field( + default=None, + init=False, + repr=False, + ) + last_prompt_invalidation_at: float = 0.0 + last_spinner_invalidation_at: float = 0.0 + active_run_kind: Literal["agent", "interactive_command"] | None = None + active_cancel_hook: Callable[[], None] | None = None + active_cancel_requester: Callable[[str], None] | None = None + queue_autodrain_suppressed: bool = False + + def mark_running( + self, + task: asyncio.Task, + *, + kind: Literal["agent", "interactive_command"] = "agent", + cancel_hook: Callable[[], None] | None = None, + ) -> None: + self.running = True + self.cancelling = False + self.bg_task = task + 
self.active_run_kind = kind + self.active_cancel_hook = cancel_hook + self.prompt_status_started_at = time.monotonic() + self._ensure_prompt_status_task() + self.invalidate_prompt() + + def mark_idle(self) -> None: + self.running = False + self.cancelling = False + self.bg_task = None + self.active_run_kind = None + self.active_cancel_hook = None + self.prompt_status_started_at = None + self.prompt_ephemeral_status = None + self.prompt_ephemeral_preview = None + self._stop_prompt_status_task() + self.invalidate_prompt() + + def is_active_task(self, task: asyncio.Task | None) -> bool: + return task is not None and self.bg_task is task + + def mark_idle_if_task(self, task: asyncio.Task | None) -> bool: + if not self.is_active_task(task): + return False + self.mark_idle() + return True + + def _can_enqueue(self) -> bool: + return len(self.queue) < get_queue_limit(default=DEFAULT_PROMPT_QUEUE_LIMIT) + + def _clamp_queue_view_offset(self, *, max_visible: int = 3) -> None: + max_start = max(0, len(self.queue) - max_visible) + self.queue_view_offset = max(0, min(self.queue_view_offset, max_start)) + + def request_queue( + self, prompt: str, *, allow_command_dispatch: bool = True + ) -> tuple[bool, int, QueuedPrompt | None]: + if not self._can_enqueue(): + return False, len(self.queue), None + item = QueuedPrompt( + kind="queued", + text=prompt, + allow_command_dispatch=allow_command_dispatch, + ) + self.queue.append(item) + self._clamp_queue_view_offset() + self.invalidate_prompt() + return True, len(self.queue), item + + def request_interject( + self, prompt: str, *, allow_command_dispatch: bool = True + ) -> tuple[bool, int, QueuedPrompt | None]: + if not self._can_enqueue(): + return False, len(self.queue), None + item = QueuedPrompt( + kind="interject", + text=prompt, + allow_command_dispatch=allow_command_dispatch, + ) + self.queue.insert(0, item) + self._clamp_queue_view_offset() + self.invalidate_prompt() + return True, 1, item + + def dequeue(self) -> QueuedPrompt | None: + if not self.queue: + return None + value = self.queue.pop(0) + self._clamp_queue_view_offset() + self.invalidate_prompt() + return value + + def dequeue_next_interject(self) -> QueuedPrompt | None: + for index, item in enumerate(self.queue): + if item.kind != "interject": + continue + value = self.queue.pop(index) + self._clamp_queue_view_offset() + self.invalidate_prompt() + return value + return None + + def queue_preview_texts(self) -> list[str]: + return [item.preview_text() for item in self.queue] + + def has_pending_submission(self) -> bool: + return bool(self.pending_submission) + + def set_pending_submission( + self, text: str | None, *, allow_command_dispatch: bool = True + ) -> None: + self.pending_submission = text + self.pending_submission_allow_command_dispatch = allow_command_dispatch + self.invalidate_prompt() + + def take_pending_submission(self) -> str | None: + text, _ = self.take_pending_submission_with_policy() + return text + + def take_pending_submission_with_policy(self) -> tuple[str | None, bool]: + text = self.pending_submission + allow_command_dispatch = self.pending_submission_allow_command_dispatch + self.pending_submission = None + self.pending_submission_allow_command_dispatch = True + self.invalidate_prompt() + return text, allow_command_dispatch + + def has_active_shell(self) -> bool: + return self.shell_depth > 0 + + def notify_shell_started(self) -> None: + self.shell_depth += 1 + self.invalidate_prompt() + + def notify_shell_finished(self) -> None: + if self.shell_depth > 0: + 
self.shell_depth -= 1 + self.invalidate_prompt() + + def has_active_interactive_command(self) -> bool: + return self.active_run_kind == "interactive_command" and self.running + + def set_prompt_ephemeral_status(self, text: str | None) -> None: + normalized = text.strip() if text and text.strip() else None + if normalized == self.prompt_ephemeral_status: + return + self.prompt_ephemeral_status = normalized + self.invalidate_prompt_for_spinner() + + def clear_prompt_ephemeral_status(self) -> None: + self.set_prompt_ephemeral_status(None) + + def set_prompt_ephemeral_preview(self, text: str | None) -> None: + normalized = text if text and text.strip() else None + if normalized == self.prompt_ephemeral_preview: + return + self.prompt_ephemeral_preview = normalized + self.invalidate_prompt_for_spinner() + + def clear_prompt_ephemeral_preview(self) -> None: + self.set_prompt_ephemeral_preview(None) + + def set_active_cancel_requester( + self, requester: Callable[[str], None] | None + ) -> None: + self.active_cancel_requester = requester + + def request_active_cancel(self, reason: str) -> bool: + if self.active_cancel_requester is None: + return False + self.active_cancel_requester(reason) + return True + + def suppress_queue_autodrain(self) -> None: + self.queue_autodrain_suppressed = True + + def clear_queue_autodrain_suppression(self) -> None: + self.queue_autodrain_suppressed = False + + def is_queue_autodrain_suppressed(self) -> bool: + return self.queue_autodrain_suppressed + + def shift_queue_view_offset(self, delta: int, *, max_visible: int = 3) -> bool: + old_offset = self.queue_view_offset + self._clamp_queue_view_offset(max_visible=max_visible) + max_start = max(0, len(self.queue) - max_visible) + self.queue_view_offset = max(0, min(self.queue_view_offset + delta, max_start)) + changed = self.queue_view_offset != old_offset + if changed: + self.invalidate_prompt() + return changed + + def register_prompt_surface( + self, session: object, kind: Literal["main"] = "main" + ) -> None: + self.prompt_surface_kind = kind + self.prompt_session = session + self._ensure_prompt_status_task() + self.invalidate_prompt() + + def clear_prompt_surface(self, session: object | None = None) -> None: + if session is not None and self.prompt_session is not session: + return + self.prompt_surface_kind = None + self.prompt_session = None + self._stop_prompt_status_task() + + def has_prompt_surface(self) -> bool: + return self.prompt_session is not None + + def is_rendering_above_prompt(self) -> bool: + return _ABOVE_PROMPT_RENDER_ACTIVE.get() + + def get_prompt_status_frame(self) -> str: + from code_puppy.messaging.spinner.spinner_base import SpinnerBase + + if self.prompt_status_started_at is None: + return SpinnerBase.FRAMES[0] + + elapsed = max(0.0, time.monotonic() - self.prompt_status_started_at) + frame_index = int(elapsed / PROMPT_STATUS_FRAME_INTERVAL) % len( + SpinnerBase.FRAMES + ) + return SpinnerBase.FRAMES[frame_index] + + def invalidate_prompt(self) -> None: + self._invalidate_prompt(low_priority=False) + + def invalidate_prompt_for_spinner(self) -> None: + self._invalidate_prompt(low_priority=True) + + def _invalidate_prompt(self, *, low_priority: bool) -> None: + app = getattr(self.prompt_session, "app", None) + if app is None: + return + + now = time.monotonic() + if low_priority: + if now - self.last_prompt_invalidation_at < PROMPT_STATUS_BACKOFF_WINDOW: + return + if ( + self.last_spinner_invalidation_at > 0 + and now - self.last_spinner_invalidation_at + < PROMPT_STATUS_FRAME_INTERVAL + 
): + return + + try: + app.invalidate() + if low_priority: + self.last_spinner_invalidation_at = now + else: + self.last_prompt_invalidation_at = now + except Exception: + pass + + def _should_refresh_prompt_status(self) -> bool: + return self.running and self.has_prompt_surface() + + def _get_above_prompt_lock(self, loop: asyncio.AbstractEventLoop) -> asyncio.Lock: + if self.above_prompt_lock is None or self.above_prompt_lock_loop is not loop: + self.above_prompt_lock = asyncio.Lock() + self.above_prompt_lock_loop = loop + return self.above_prompt_lock + + async def _run_above_prompt_serialized(self, func: Callable[[], None]) -> None: + from prompt_toolkit.application import run_in_terminal + + loop = asyncio.get_running_loop() + lock = self._get_above_prompt_lock(loop) + async with lock: + token = _ABOVE_PROMPT_RENDER_ACTIVE.set(True) + try: + await run_in_terminal(func) + finally: + _ABOVE_PROMPT_RENDER_ACTIVE.reset(token) + + def _ensure_prompt_status_task(self) -> None: + if not self._should_refresh_prompt_status(): + return + if self.prompt_status_task is not None and not self.prompt_status_task.done(): + return + try: + loop = asyncio.get_running_loop() + except RuntimeError: + return + self.prompt_status_task = loop.create_task(self._prompt_status_loop()) + + def _stop_prompt_status_task(self) -> None: + task = self.prompt_status_task + if task is None: + return + self.prompt_status_task = None + if not task.done(): + task.cancel() + + async def _prompt_status_loop(self) -> None: + current_task = asyncio.current_task() + try: + while self._should_refresh_prompt_status(): + self.invalidate_prompt_for_spinner() + await asyncio.sleep(PROMPT_STATUS_FRAME_INTERVAL) + except asyncio.CancelledError: + pass + finally: + if self.prompt_status_task is current_task: + self.prompt_status_task = None + self.invalidate_prompt() + + def run_above_prompt( + self, func: Callable[[], None], *, timeout: float = 5.0 + ) -> bool: + """Run a synchronous callback above the mounted prompt surface.""" + app = getattr(self.prompt_session, "app", None) + loop = getattr(app, "loop", None) + if app is None or loop is None or not loop.is_running(): + return False + + try: + current_loop = asyncio.get_running_loop() + except RuntimeError: + current_loop = None + if current_loop is loop: + return False + + async def _runner() -> None: + await self._run_above_prompt_serialized(func) + + future = asyncio.run_coroutine_threadsafe(_runner(), loop) + try: + future.result(timeout=timeout) + return True + except Exception: + future.cancel() + return False + + async def run_above_prompt_async(self, func: Callable[[], None]) -> bool: + """Run a synchronous callback above the mounted prompt from async code.""" + app = getattr(self.prompt_session, "app", None) + loop = getattr(app, "loop", None) + if app is None or loop is None or not loop.is_running(): + return False + + try: + current_loop = asyncio.get_running_loop() + except RuntimeError: + return False + + async def _runner() -> None: + await self._run_above_prompt_serialized(func) + + try: + if current_loop is loop: + await _runner() + return True + + future = asyncio.run_coroutine_threadsafe(_runner(), loop) + await asyncio.wrap_future(future) + return True + except Exception: + return False + + +_ACTIVE_RUNTIME: PromptRuntimeState | None = None + + +def register_active_interactive_runtime(runtime: PromptRuntimeState) -> None: + global _ACTIVE_RUNTIME + _ACTIVE_RUNTIME = runtime + + +def get_active_interactive_runtime() -> PromptRuntimeState | None: + return 
_ACTIVE_RUNTIME + + +def clear_active_interactive_runtime(runtime: PromptRuntimeState | None = None) -> None: + global _ACTIVE_RUNTIME + if runtime is not None and _ACTIVE_RUNTIME is not runtime: + return + _ACTIVE_RUNTIME = None diff --git a/code_puppy/command_line/prompt_toolkit_completion.py b/code_puppy/command_line/prompt_toolkit_completion.py index 11b16d070..8730c3557 100644 --- a/code_puppy/command_line/prompt_toolkit_completion.py +++ b/code_puppy/command_line/prompt_toolkit_completion.py @@ -9,16 +9,25 @@ import asyncio import os import sys -from typing import Optional +from dataclasses import dataclass +from typing import Literal, Optional from prompt_toolkit import PromptSession -from prompt_toolkit.completion import Completer, Completion, merge_completers -from prompt_toolkit.filters import is_searching +from prompt_toolkit.patch_stdout import patch_stdout +from prompt_toolkit.completion import ( + Completer, + Completion, + ConditionalCompleter, + merge_completers, +) +from prompt_toolkit.document import Document +from prompt_toolkit.filters import Condition, is_searching from prompt_toolkit.formatted_text import FormattedText from prompt_toolkit.history import FileHistory from prompt_toolkit.key_binding import KeyBindings from prompt_toolkit.keys import Keys from prompt_toolkit.layout.processors import Processor, Transformation +from prompt_toolkit.shortcuts import print_formatted_text from prompt_toolkit.styles import Style from code_puppy.command_line.attachments import ( @@ -33,6 +42,10 @@ ) from code_puppy.command_line.command_registry import get_unique_commands from code_puppy.command_line.file_path_completion import FilePathCompleter +from code_puppy.command_line.interactive_runtime import ( + PromptRuntimeState, + get_active_interactive_runtime, +) from code_puppy.command_line.load_context_completion import LoadContextCompleter from code_puppy.command_line.mcp_completion import MCPCompleter from code_puppy.command_line.model_picker_completion import ( @@ -48,6 +61,173 @@ get_puppy_name, get_value, ) +from code_puppy.messaging.spinner.spinner_base import SpinnerBase + + +@dataclass(frozen=True) +class PromptSubmission: + action: Literal["submit", "queue", "interject"] + text: str + echo_in_transcript: bool = False + allow_command_dispatch: bool = True + + +def _get_runtime() -> PromptRuntimeState | None: + return get_active_interactive_runtime() + + +def _get_current_agent_for_prompt(): + """Best-effort current-agent lookup for prompt rendering. + + Prompt painting should not fail just because optional agent dependencies + are unavailable in the current environment. 
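+
+    Returns None instead of raising when the agent manager cannot be imported
+    or the current agent cannot be resolved.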
+ """ + try: + from code_puppy.agents.agent_manager import get_current_agent + except Exception: + return None + + try: + return get_current_agent() + except Exception: + return None + + +def register_active_prompt_surface( + kind: Literal["main", "interject"], session: PromptSession +) -> None: + runtime = _get_runtime() + if runtime is None: + return + runtime.register_prompt_surface(session, kind="main") + + +def clear_active_prompt_surface(session: PromptSession | None = None) -> None: + runtime = _get_runtime() + if runtime is None: + return + runtime.clear_prompt_surface(session) + + +def get_active_prompt_surface_kind() -> Literal["main", "interject"] | None: + runtime = _get_runtime() + if runtime is None: + return None + if runtime.prompt_surface_kind == "main": + return "main" + return None + + +def has_active_prompt_surface() -> bool: + runtime = _get_runtime() + return runtime.has_prompt_surface() if runtime is not None else False + + +def is_shell_prompt_suspended() -> bool: + runtime = _get_runtime() + return runtime.has_active_shell() if runtime is not None else False + + +def set_shell_prompt_suspended(suspended: bool) -> None: + """Compatibility shim for tests that now maps to shell-active state.""" + runtime = _get_runtime() + if runtime is None: + return + if suspended: + if not runtime.has_active_shell(): + runtime.notify_shell_started() + return + while runtime.has_active_shell(): + runtime.notify_shell_finished() + + +def _interrupt_shell_from_prompt(label: str) -> None: + from code_puppy.messaging import emit_warning + from code_puppy.tools.command_runner import kill_all_running_shell_processes + + emit_warning(f"\nšŸ›‘ {label} detected! Interrupting shell command...") + kill_all_running_shell_processes() + + +def _truncate_queue_line(text: str, max_len: int) -> str: + if max_len <= 2: + return ".." + if len(text) <= max_len: + return text + return text[: max_len - 2] + ".." 
+ + +def _get_queue_preview( + prompts: list[str], term_width: int, max_visible: int = 3 +) -> tuple[list[str], int]: + """Return visible queue lines and hidden count from current offset.""" + runtime = _get_runtime() + queue_offset = runtime.queue_view_offset if runtime is not None else 0 + if not prompts: + if runtime is not None: + runtime.queue_view_offset = 0 + return [], 0 + + max_start = max(0, len(prompts) - max_visible) + queue_offset = max(0, min(queue_offset, max_start)) + if runtime is not None: + runtime.queue_view_offset = queue_offset + start = queue_offset + visible = prompts[start : start + max_visible] + lines: list[str] = [] + line_room = max(8, term_width - 8) + for idx, prompt in enumerate(visible, start=start + 1): + lines.append(f" [{idx}] {_truncate_queue_line(prompt, line_room)}") + hidden = max(0, len(prompts) - (start + len(visible))) + return lines, hidden + + +def _is_exit_text(text: str) -> bool: + return text.strip().lower() in {"exit", "quit", "/exit", "/quit"} + + +def _allows_busy_command_dispatch(text: str) -> bool: + stripped = text.strip() + return not stripped.startswith("/") or _is_exit_text(stripped) + + +def _run_text_clipboard_command(command: list[str]) -> str | None: + import subprocess + + try: + result = subprocess.run(command, capture_output=True, text=True, timeout=2) + except FileNotFoundError: + return None + + if result.returncode == 0: + return result.stdout + return None + + +def _read_text_clipboard_fallback() -> str | None: + import platform + + system = platform.system() + if system == "Darwin": + return _run_text_clipboard_command(["pbpaste"]) + if system == "Windows": + for command in ( + ["pwsh", "-NoProfile", "-Command", "Get-Clipboard -Raw"], + ["powershell", "-NoProfile", "-Command", "Get-Clipboard -Raw"], + ): + text = _run_text_clipboard_command(command) + if text is not None: + return text + return None + + for command in ( + ["xclip", "-selection", "clipboard", "-o"], + ["xsel", "--clipboard", "--output"], + ): + text = _run_text_clipboard_command(command) + if text is not None: + return text + return None def _sanitize_for_encoding(text: str) -> str: @@ -175,6 +355,10 @@ class AttachmentPlaceholderProcessor(Processor): _MAX_TEXT_LENGTH_FOR_REALTIME = 500 def apply_transformation(self, transformation_input): + runtime = _get_runtime() + if runtime is not None and runtime.has_pending_submission(): + return Transformation(list(transformation_input.fragments)) + document = transformation_input.document text = document.text if not text: @@ -512,14 +696,98 @@ def get_completions(self, document, complete_event): ) -def get_prompt_with_active_model(base: str = ">>> "): - from code_puppy.agents.agent_manager import get_current_agent +def get_prompt_with_active_model(base: str = ">>> ", is_interject: bool = False): + return FormattedText( + _build_prompt_parts( + is_interject=is_interject, + include_queue_preview=True, + include_pending_hint=True, + ) + ) + +def _build_prompt_style() -> Style: + return Style.from_dict( + { + # Keys must AVOID the 'class:' prefix – that prefix is used only when + # tagging tokens in `FormattedText`. See prompt_toolkit docs. 
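+            # For example, a fragment tagged ("class:puppy", ...) in the
+            # prompt's FormattedText picks up the "puppy" rule below.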
+ "puppy": "bold ansibrightcyan", + "owner": "bold ansibrightblue", + "agent": "bold ansibrightblue", + "model": "bold ansibrightcyan", + "cwd": "bold ansibrightgreen", + "arrow": "bold ansibrightcyan", + "separator": "bold ansigray", + "attachment-placeholder": "italic ansicyan", + "queue-item": "italic ansiyellow", + "thinking": "bold ansibrightcyan", + "thinking-context": "bold white", + } + ) + + +def _truncate_prompt_preview( + text: str, *, max_chars: int = 1000, max_lines: int = 6 +) -> str: + normalized = text.rstrip("\n") + if not normalized.strip(): + return "" + + char_trimmed = len(normalized) > max_chars + if char_trimmed: + normalized = normalized[-max_chars:] + + lines = normalized.splitlines() + if not lines: + lines = [normalized] + + line_trimmed = len(lines) > max_lines + if line_trimmed: + lines = lines[-max_lines:] + + preview = "\n".join(lines) + if char_trimmed or line_trimmed: + preview = f"...\n{preview}" + return preview + + +def _build_prompt_status_parts(runtime: PromptRuntimeState) -> list[tuple[str, str]]: + """Build the lightweight thinking line shown above the prompt separator.""" + parts: list[tuple[str, str]] = [ + ("class:thinking", f"{get_puppy_name().title()} is thinking... "), + ("class:thinking", runtime.get_prompt_status_frame()), + ] + context_info = SpinnerBase.get_context_info() + if context_info: + parts.append(("", " ")) + parts.append(("class:thinking-context", context_info)) + parts.append(("", "\n")) + if runtime.prompt_ephemeral_status: + parts.append(("class:thinking-context", runtime.prompt_ephemeral_status)) + parts.append(("", "\n")) + if runtime.prompt_ephemeral_preview: + parts.append( + ( + "class:thinking-context", + _truncate_prompt_preview(runtime.prompt_ephemeral_preview), + ) + ) + parts.append(("", "\n")) + return parts + + +def _build_prompt_parts( + *, + is_interject: bool, + include_queue_preview: bool, + include_pending_hint: bool, +) -> list[tuple[str, str]]: puppy = get_puppy_name() global_model = get_active_model() or "(default)" + runtime = _get_runtime() # Get current agent information - current_agent = get_current_agent() + current_agent = _get_current_agent_for_prompt() agent_display = current_agent.display_name if current_agent else "code-puppy" # Check if current agent has a pinned model @@ -544,54 +812,274 @@ def get_prompt_with_active_model(base: str = ">>> "): cwd_display = "~" + cwd[len(home) :] else: cwd_display = cwd - return FormattedText( + + # We add a visual top border using terminal width + import shutil + + term_width = shutil.get_terminal_size().columns + sep_line = "─" * term_width + + parts = [("", "\n")] + + if runtime is not None and runtime.running: + parts.extend(_build_prompt_status_parts(runtime)) + + parts.append(("class:separator", f"{sep_line}\n")) + + queue_preview = runtime.queue_preview_texts() if runtime is not None else [] + if include_queue_preview and queue_preview: + preview_lines, hidden = _get_queue_preview(queue_preview, term_width=term_width) + for line in preview_lines: + parts.append(("class:queue-item", f"{line}\n")) + if hidden: + parts.append(("class:queue-item", f" ... 
and {hidden} more\n")) + + parts.extend( [ + ("class:separator", "╭─ "), ("bold", "🐶 "), ("class:puppy", f"{puppy}"), ("", " "), ("class:agent", f"[{agent_display}] "), ("class:model", model_display + " "), - ("class:cwd", "(" + str(cwd_display) + ") "), - ("class:arrow", str(base)), + ("class:cwd", "(" + str(cwd_display) + ") \n"), + ] + ) + + if include_pending_hint and ( + is_interject or (runtime is not None and runtime.has_pending_submission()) + ): + # Add hint above the prompt line to keep the cursor position consistent + parts.append( + ( + "class:queue-item", + " [i]nterject [q]ueue [e]dit [esc]ape\n", + ) + ) + + parts.extend( + [ + ("class:separator", "╰─"), + ("class:arrow", "āÆ "), ] ) + return parts -async def get_input_with_combined_completion( - prompt_str=">>> ", history_file: Optional[str] = None -) -> str: + +def render_submitted_prompt_echo(text: str) -> None: + """Print a submitted prompt using the same prompt chrome as the composer.""" + echo_text = text.rstrip("\n") + if not echo_text: + return + + parts = _build_prompt_parts( + is_interject=False, + include_queue_preview=False, + include_pending_hint=False, + ) + parts.append(("", echo_text)) + parts.append(("", "\n")) + formatted = FormattedText(parts) + style = _build_prompt_style() + runtime = _get_runtime() + + def _print_echo() -> None: + from prompt_toolkit.output.defaults import create_output + + out = create_output(stdout=sys.__stdout__) + if hasattr(out, "enable_cpr"): + out.enable_cpr = False + print_formatted_text(formatted, style=style, output=out) + + if ( + runtime is not None + and runtime.has_prompt_surface() + and not runtime.is_rendering_above_prompt() + ): + try: + if runtime.run_above_prompt(_print_echo): + return + except Exception: + pass + + _print_echo() + + +def render_transcript_notice(text: str) -> None: + """Print a plain transcript line above the composer without prompt chrome.""" + notice_text = text.rstrip("\n") + if not notice_text: + return + + formatted = FormattedText([("", notice_text), ("", "\n")]) + runtime = _get_runtime() + + def _print_notice() -> None: + from prompt_toolkit.output.defaults import create_output + + out = create_output(stdout=sys.__stdout__) + if hasattr(out, "enable_cpr"): + out.enable_cpr = False + print_formatted_text(formatted, output=out) + + if ( + runtime is not None + and runtime.has_prompt_surface() + and not runtime.is_rendering_above_prompt() + ): + try: + if runtime.run_above_prompt(_print_notice): + return + except Exception: + pass + + _print_notice() + + +async def prompt_for_submission( + prompt_str=">>> ", history_file: Optional[str] = None, erase_when_done: bool = False +) -> PromptSubmission: # Use SafeFileHistory to handle encoding errors gracefully on Windows history = SafeFileHistory(history_file) if history_file else None - # Build the base completer list, then bolt on any plugin completers. 
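+    # Every completer below is wrapped in ConditionalCompleter: slash-command
+    # completions are suppressed while a run is active or a queue/interject
+    # decision is pending, and only the "@" attachment completer stays
+    # available during a busy run.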
+ runtime = _get_runtime() from code_puppy.plugins.ollama_setup.completer import OllamaSetupCompleter + # Add custom key bindings and multiline toggle + bindings = KeyBindings() + recalled_queue_item = {"item": None} + recalled_queue_allow_command_dispatch = {"value": True} + pending_decision_filter = Condition( + lambda: runtime is not None and runtime.has_pending_submission() + ) + busy_run_filter = Condition(lambda: runtime is not None and runtime.running) + command_completion_filter = Condition( + lambda: runtime is None + or not (runtime.running or runtime.has_pending_submission()) + ) + attachment_completion_filter = Condition( + lambda: runtime is None or not runtime.has_pending_submission() + ) completer = merge_completers( [ - FilePathCompleter(symbol="@"), - ModelNameCompleter(trigger="/model"), - ModelNameCompleter(trigger="/m"), - CDCompleter(trigger="/cd"), - SetCompleter(trigger="/set"), - LoadContextCompleter(trigger="/load_context"), - PinCompleter(trigger="/pin_model"), - UnpinCompleter(trigger="/unpin"), - AgentCompleter(trigger="/agent"), - AgentCompleter(trigger="/a"), - MCPCompleter(trigger="/mcp"), - SkillsCompleter(trigger="/skills"), - OllamaSetupCompleter(), - SlashCompleter(), + ConditionalCompleter( + FilePathCompleter(symbol="@"), + filter=attachment_completion_filter, + ), + ConditionalCompleter( + ModelNameCompleter(trigger="/model"), + filter=command_completion_filter, + ), + ConditionalCompleter( + ModelNameCompleter(trigger="/m"), + filter=command_completion_filter, + ), + ConditionalCompleter( + CDCompleter(trigger="/cd"), + filter=command_completion_filter, + ), + ConditionalCompleter( + SetCompleter(trigger="/set"), + filter=command_completion_filter, + ), + ConditionalCompleter( + LoadContextCompleter(trigger="/load_context"), + filter=command_completion_filter, + ), + ConditionalCompleter( + PinCompleter(trigger="/pin_model"), + filter=command_completion_filter, + ), + ConditionalCompleter( + UnpinCompleter(trigger="/unpin"), + filter=command_completion_filter, + ), + ConditionalCompleter( + AgentCompleter(trigger="/agent"), + filter=command_completion_filter, + ), + ConditionalCompleter( + AgentCompleter(trigger="/a"), + filter=command_completion_filter, + ), + ConditionalCompleter( + MCPCompleter(trigger="/mcp"), + filter=command_completion_filter, + ), + ConditionalCompleter( + SkillsCompleter(trigger="/skills"), + filter=command_completion_filter, + ), + ConditionalCompleter( + OllamaSetupCompleter(), + filter=command_completion_filter, + ), + ConditionalCompleter( + SlashCompleter(), + filter=command_completion_filter, + ), ] ) - # Add custom key bindings and multiline toggle - bindings = KeyBindings() # Multiline mode state multiline = {"enabled": False} - # Ctrl+X keybinding - exit with KeyboardInterrupt for shell command cancellation + def awaiting_decision() -> bool: + return runtime is not None and runtime.has_pending_submission() + + def clear_chooser_input(event) -> None: + try: + event.app.current_buffer.reset() + except Exception: + pass + + def restore_pending_submission_to_buffer(event) -> None: + if runtime is None: + return + + text = runtime.take_pending_submission() or "" + try: + event.app.current_buffer.document = Document( + text=text, + cursor_position=len(text), + ) + except Exception: + try: + event.app.current_buffer.text = text + except Exception: + pass + + def recall_next_paused_queue_to_buffer(event) -> bool: + if runtime is None: + return False + if runtime.running or runtime.has_pending_submission(): + return False + if 
not runtime.is_queue_autodrain_suppressed(): + return False + if not runtime.queue: + return False + + item = runtime.queue[0] + recalled_queue_item["item"] = item + recalled_queue_allow_command_dispatch["value"] = item.allow_command_dispatch + try: + event.app.current_buffer.document = Document( + text=item.text, + cursor_position=len(item.text), + ) + except Exception: + try: + event.app.current_buffer.text = item.text + except Exception: + return False + return True + + # Ctrl+X keybinding - exit with KeyboardInterrupt for input cancellation @bindings.add(Keys.ControlX) def _(event): + if runtime is not None and runtime.has_active_shell(): + _interrupt_shell_from_prompt("Ctrl-X") + return try: event.app.exit(exception=KeyboardInterrupt) except Exception: @@ -599,23 +1087,103 @@ def _(event): # This happens when user presses multiple exit keys in quick succession pass + @bindings.add("c-c", filter=busy_run_filter, eager=True) + def _(event): + if runtime is not None and runtime.has_active_shell(): + _interrupt_shell_from_prompt("Ctrl-C") + runtime.suppress_queue_autodrain() + runtime.set_pending_submission(None) + clear_chooser_input(event) + return + if runtime is not None: + runtime.set_pending_submission(None) + clear_chooser_input(event) + if runtime.request_active_cancel("ctrl_c"): + return + try: + event.app.exit(exception=KeyboardInterrupt) + except Exception: + pass + + configured_cancel_key = str(get_value("cancel_agent_key") or "ctrl+c").lower() + configured_binding = {"ctrl+k": "c-k", "ctrl+q": "c-q"}.get(configured_cancel_key) + if configured_binding is not None: + + @bindings.add(configured_binding, filter=busy_run_filter, eager=True) + def _(event): + if runtime is not None and runtime.has_active_shell(): + _interrupt_shell_from_prompt(configured_cancel_key.upper()) + runtime.suppress_queue_autodrain() + runtime.set_pending_submission(None) + clear_chooser_input(event) + return + if runtime is not None: + runtime.set_pending_submission(None) + clear_chooser_input(event) + if runtime.request_active_cancel(configured_cancel_key): + return + try: + event.app.exit(exception=KeyboardInterrupt) + except Exception: + pass + # Escape keybinding - exit with KeyboardInterrupt @bindings.add(Keys.Escape) def _(event): + if awaiting_decision(): + runtime.set_pending_submission(None) + clear_chooser_input(event) + return + if runtime is not None and runtime.has_active_shell(): + return try: event.app.exit(exception=KeyboardInterrupt) except Exception: # Ignore "Return value already set" errors when exit was already called pass - # NOTE: We intentionally do NOT override Ctrl+C here. - # prompt_toolkit's default Ctrl+C handler properly resets the terminal state on Windows. - # Overriding it with event.app.exit(exception=KeyboardInterrupt) can leave the terminal - # in a bad state where characters cannot be typed. Let prompt_toolkit handle Ctrl+C natively. + # Idle Ctrl+C is still left to prompt_toolkit. + # We only intercept it while work is actively running so busy-state cancel stays local + # to the interactive runtime instead of tearing down the terminal session. 
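+    # While a submission is pending, the bindings below form the modal chooser:
+    # "i" interjects ahead of the queue, "q" appends to the queue, "e"/"up"
+    # restores the text to the buffer for editing, and Escape (handled above)
+    # discards the pending submission.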
+ + @bindings.add("i", filter=pending_decision_filter, eager=True) + @bindings.add("I", filter=pending_decision_filter, eager=True) + def _(event): + text, allow_command_dispatch = runtime.take_pending_submission_with_policy() + clear_chooser_input(event) + event.app.exit( + result=PromptSubmission( + action="interject", + text=text or "", + allow_command_dispatch=allow_command_dispatch, + ) + ) + + @bindings.add("q", filter=pending_decision_filter, eager=True) + @bindings.add("Q", filter=pending_decision_filter, eager=True) + def _(event): + text, allow_command_dispatch = runtime.take_pending_submission_with_policy() + clear_chooser_input(event) + event.app.exit( + result=PromptSubmission( + action="queue", + text=text or "", + allow_command_dispatch=allow_command_dispatch, + ) + ) + + @bindings.add("e", filter=pending_decision_filter, eager=True) + @bindings.add("E", filter=pending_decision_filter, eager=True) + @bindings.add("up", filter=pending_decision_filter, eager=True) + def _(event): + clear_chooser_input(event) + restore_pending_submission_to_buffer(event) # Toggle multiline with Alt+M @bindings.add(Keys.Escape, "m") def _(event): + if awaiting_decision(): + return multiline["enabled"] = not multiline["enabled"] status = "ON" if multiline["enabled"] else "OFF" # Print status for user feedback (version-agnostic) @@ -626,6 +1194,8 @@ def _(event): # Also toggle multiline with F2 (more reliable across platforms) @bindings.add("f2") def _(event): + if awaiting_decision(): + return multiline["enabled"] = not multiline["enabled"] status = "ON" if multiline["enabled"] else "OFF" sys.stdout.write(f"[multiline] {status}\n") @@ -635,6 +1205,8 @@ def _(event): # Ctrl+J (line feed) works in virtually all terminals; mark eager so it wins @bindings.add("c-j", eager=True) def _(event): + if awaiting_decision(): + return event.app.current_buffer.insert_text("\n") # Also allow Ctrl+Enter for newline (terminal-dependent) @@ -642,6 +1214,8 @@ def _(event): @bindings.add("c-enter", eager=True) def _(event): + if awaiting_decision(): + return event.app.current_buffer.insert_text("\n") except Exception: pass @@ -649,6 +1223,34 @@ def _(event): # Enter behavior depends on multiline mode @bindings.add("enter", filter=~is_searching, eager=True) def _(event): + if awaiting_decision(): + choice = event.app.current_buffer.text.strip() + if _is_exit_text(choice): + runtime.set_pending_submission(None) + clear_chooser_input(event) + event.app.exit( + result=PromptSubmission( + action="submit", + text=choice, + allow_command_dispatch=True, + ) + ) + return + text = event.app.current_buffer.text + if not text.strip() and recall_next_paused_queue_to_buffer(event): + return + if ( + runtime is not None + and runtime.running + and text.strip() + and not _is_exit_text(text) + ): + runtime.set_pending_submission( + text, + allow_command_dispatch=_allows_busy_command_dispatch(text), + ) + clear_chooser_input(event) + return if multiline["enabled"]: event.app.current_buffer.insert_text("\n") else: @@ -660,24 +1262,40 @@ def _(event): @bindings.add("c-h", eager=True) # Backspace (Ctrl+H) @bindings.add("backspace", eager=True) def handle_backspace_with_completion(event): + if awaiting_decision(): + return buffer = event.app.current_buffer # Perform the deletion first buffer.delete_before_cursor(count=1) # Then trigger completion if text starts with '/' text = buffer.text.lstrip() - if text.startswith("/"): + if text.startswith("/") and command_completion_filter(): buffer.start_completion(select_first=False) 
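+    # The delete handler below mirrors backspace: apply the edit first, then
+    # re-open "/" command completion only when the completion filter allows it.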
@bindings.add("delete", eager=True) def handle_delete_with_completion(event): + if awaiting_decision(): + return buffer = event.app.current_buffer # Perform the deletion first buffer.delete(count=1) # Then trigger completion if text starts with '/' text = buffer.text.lstrip() - if text.startswith("/"): + if text.startswith("/") and command_completion_filter(): buffer.start_completion(select_first=False) + @bindings.add("c-up", eager=True) + def handle_queue_scroll_up(event): + if runtime is None or len(runtime.queue) <= 3: + return + runtime.shift_queue_view_offset(-1) + + @bindings.add("c-down", eager=True) + def handle_queue_scroll_down(event): + if runtime is None or len(runtime.queue) <= 3: + return + runtime.shift_queue_view_offset(1) + # Handle bracketed paste - smart detection for text vs images. # Most terminals (Windows included!) send Ctrl+V through bracketed paste. # - If there's meaningful text content → paste as text (drag-and-drop file paths, copied text) @@ -685,6 +1303,8 @@ def handle_delete_with_completion(event): @bindings.add(Keys.BracketedPaste) def handle_bracketed_paste(event): """Handle bracketed paste - smart text vs image detection.""" + if awaiting_decision(): + return pasted_data = event.data # If we have meaningful text content, paste it (don't check for images) @@ -715,6 +1335,8 @@ def handle_bracketed_paste(event): @bindings.add("c-v", eager=True) def handle_smart_paste(event): """Handle Ctrl+V - auto-detect image vs text in clipboard.""" + if awaiting_decision(): + return try: # Check for image first if has_image_in_clipboard(): @@ -731,43 +1353,7 @@ def handle_smart_paste(event): # No image (or error) - do normal text paste # prompt_toolkit doesn't have built-in paste, so we handle it manually try: - import platform - import subprocess - - text = None - system = platform.system() - - if system == "Darwin": # macOS - result = subprocess.run( - ["pbpaste"], capture_output=True, text=True, timeout=2 - ) - if result.returncode == 0: - text = result.stdout - elif system == "Windows": - # Windows - use powershell - result = subprocess.run( - ["powershell", "-command", "Get-Clipboard"], - capture_output=True, - text=True, - timeout=2, - ) - if result.returncode == 0: - text = result.stdout - else: # Linux - # Try xclip first, then xsel - for cmd in [ - ["xclip", "-selection", "clipboard", "-o"], - ["xsel", "--clipboard", "--output"], - ]: - try: - result = subprocess.run( - cmd, capture_output=True, text=True, timeout=2 - ) - if result.returncode == 0: - text = result.stdout - break - except FileNotFoundError: - continue + text = _read_text_clipboard_fallback() if text: # Normalize Windows line endings to Unix style @@ -782,6 +1368,8 @@ def handle_smart_paste(event): @bindings.add("f3") def handle_image_paste_f3(event): """Handle F3 - paste image from clipboard (image-only, shows error if none).""" + if awaiting_decision(): + return try: if has_image_in_clipboard(): placeholder = capture_clipboard_image_to_pending() @@ -798,37 +1386,91 @@ def handle_image_paste_f3(event): event.app.current_buffer.insert_text("[āŒ clipboard error] ") event.app.output.bell() + from prompt_toolkit.output.defaults import create_output + + out = create_output(stdout=sys.stdout) + if hasattr(out, "enable_cpr"): + out.enable_cpr = False session = PromptSession( completer=completer, history=history, complete_while_typing=True, key_bindings=bindings, input_processors=[AttachmentPlaceholderProcessor()], + output=out, + erase_when_done=erase_when_done, ) + # Keep the chooser truly modal: 
while a pending submission exists, only the + # explicit chooser bindings should work and the buffer should reject edits. + session.default_buffer.read_only = pending_decision_filter # If they pass a string, backward-compat: convert it to formatted_text if isinstance(prompt_str, str): from prompt_toolkit.formatted_text import FormattedText prompt_str = FormattedText([(None, prompt_str)]) - style = Style.from_dict( - { - # Keys must AVOID the 'class:' prefix – that prefix is used only when - # tagging tokens in `FormattedText`. See prompt_toolkit docs. - "puppy": "bold ansibrightcyan", - "owner": "bold ansibrightblue", - "agent": "bold ansibrightblue", - "model": "bold ansibrightcyan", - "cwd": "bold ansibrightgreen", - "arrow": "bold ansibrightblue", - "attachment-placeholder": "italic ansicyan", - } - ) - text = await session.prompt_async(prompt_str, style=style) + style = _build_prompt_style() + register_active_prompt_surface("main", session) + try: + with patch_stdout(): + result = await session.prompt_async(prompt_str, style=style) + except (KeyboardInterrupt, EOFError): + if runtime is not None: + runtime.set_pending_submission(None) + raise + finally: + clear_active_prompt_surface(session) + if isinstance(result, PromptSubmission): + return PromptSubmission( + action=result.action, + text=result.text, + echo_in_transcript=erase_when_done, + allow_command_dispatch=result.allow_command_dispatch, + ) + allow_command_dispatch = True + recalled_item = recalled_queue_item["item"] + if recalled_item is not None: + allow_command_dispatch = recalled_queue_allow_command_dispatch["value"] + if ( + runtime is not None + and result.strip() + and runtime.queue + and runtime.queue[0] is recalled_item + ): + runtime.dequeue() # NOTE: We used to call update_model_in_input(text) here to handle /model and /m # commands at the prompt level, but that prevented the command handler from running # and emitting success messages. Now we let all /model commands fall through to # the command handler in main.py for consistent handling. - return text + return PromptSubmission( + action="submit", + text=result, + echo_in_transcript=erase_when_done, + allow_command_dispatch=allow_command_dispatch, + ) + + +async def get_input_with_combined_completion( + prompt_str=">>> ", history_file: Optional[str] = None, erase_when_done: bool = False +) -> str: + submission = await prompt_for_submission( + prompt_str=prompt_str, + history_file=history_file, + erase_when_done=erase_when_done, + ) + return submission.text + + +async def get_interject_action() -> str: + """Compatibility shim for tests; interactive_mode no longer uses this.""" + submission = await prompt_for_submission( + prompt_str=lambda: get_prompt_with_active_model(is_interject=True), + erase_when_done=True, + ) + if submission.action == "interject": + return "i" + if submission.action == "queue": + return "q" + return "" if __name__ == "__main__": diff --git a/code_puppy/config.py b/code_puppy/config.py index 61d2a1c4a..c06a17a33 100644 --- a/code_puppy/config.py +++ b/code_puppy/config.py @@ -284,6 +284,7 @@ def get_config_keys(): "protected_token_count", "compaction_threshold", "message_limit", + "queue_limit", "allow_recursion", "openai_reasoning_effort", "openai_reasoning_summary", @@ -1236,6 +1237,20 @@ def get_message_limit(default: int = 1000) -> int: return default +def get_queue_limit(default: int = 25) -> int: + """ + Returns the user-configured interactive prompt queue limit. + Defaults to 25 if unset or misconfigured. 
+ Configurable by 'queue_limit' key. + """ + val = get_value("queue_limit") + try: + parsed = int(val) if val else default + except (ValueError, TypeError): + return default + return max(1, parsed) + + def save_command_to_history(command: str): """Save a command to the history file with an ISO format timestamp. diff --git a/code_puppy/messaging/legacy_bridge.py b/code_puppy/messaging/legacy_bridge.py new file mode 100644 index 000000000..4b4fdce0c --- /dev/null +++ b/code_puppy/messaging/legacy_bridge.py @@ -0,0 +1,50 @@ +"""Bridge legacy MessageQueue output into the structured MessageBus.""" + +from __future__ import annotations + +import logging + +from .bus import MessageBus +from .message_queue import MessageQueue, MessageType, UIMessage +from .messages import LegacyQueueMessage + +logger = logging.getLogger(__name__) + + +class LegacyQueueToBusBridge: + """Forward legacy queue messages into the structured bus.""" + + def __init__(self, queue: MessageQueue, bus: MessageBus) -> None: + self._queue = queue + self._bus = bus + self._started = False + + def start(self) -> None: + if self._started: + return + self._started = True + + for message in self._queue.get_buffered_messages(): + self._forward_message(message) + self._queue.clear_startup_buffer() + self._queue.add_listener(self._forward_message) + + def stop(self) -> None: + if not self._started: + return + self._started = False + self._queue.remove_listener(self._forward_message) + + def _forward_message(self, message: UIMessage) -> None: + if message.type == MessageType.HUMAN_INPUT_REQUEST: + logger.debug("Skipping legacy human-input queue message in bridge") + return + + self._bus.emit( + LegacyQueueMessage( + legacy_type=message.type.value, + content=message.content, + legacy_metadata=dict(message.metadata or {}), + legacy_timestamp=message.timestamp, + ) + ) diff --git a/code_puppy/messaging/messages.py b/code_puppy/messaging/messages.py index 9efa4f394..e53c61a9e 100644 --- a/code_puppy/messaging/messages.py +++ b/code_puppy/messaging/messages.py @@ -7,7 +7,7 @@ from datetime import datetime, timezone from enum import Enum -from typing import Dict, List, Literal, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union from uuid import uuid4 from pydantic import BaseModel, Field @@ -77,6 +77,28 @@ class TextMessage(BaseMessage): text: str = Field(description="Plain text content - NO Rich markup allowed") +class LegacyQueueMessage(BaseMessage): + """Wrapped legacy MessageQueue output for prompt-safe bus rendering.""" + + category: MessageCategory = MessageCategory.SYSTEM + legacy_type: str = Field(description="Original legacy MessageType value") + content: Any = Field(description="Original legacy content object") + legacy_metadata: Dict[str, Any] = Field( + default_factory=dict, + description="Original legacy UIMessage metadata", + ) + legacy_timestamp: Optional[datetime] = Field( + default=None, + description="Original legacy UIMessage timestamp", + ) + + model_config = { + "frozen": False, + "extra": "forbid", + "arbitrary_types_allowed": True, + } + + # ============================================================================= # File Operation Messages # ============================================================================= @@ -456,6 +478,13 @@ class SkillEntry(BaseModel): model_config = {"frozen": True, "extra": "forbid"} +class AgentListMessage(BaseMessage): + """Summary banner for list_agents tool output.""" + + category: MessageCategory = MessageCategory.TOOL_OUTPUT + agent_count: int = 
Field(ge=0, description="Total number of available agents") + + class SkillListMessage(BaseMessage): """Result of listing or searching skills.""" @@ -491,6 +520,7 @@ class SkillActivateMessage(BaseMessage): # All concrete message types (excludes BaseMessage itself) AnyMessage = Union[ TextMessage, + LegacyQueueMessage, FileListingMessage, FileContentMessage, GrepResultMessage, @@ -511,6 +541,7 @@ class SkillActivateMessage(BaseMessage): DividerMessage, StatusPanelMessage, VersionCheckMessage, + AgentListMessage, SkillListMessage, SkillActivateMessage, ] @@ -529,6 +560,7 @@ class SkillActivateMessage(BaseMessage): "BaseMessage", # Text "TextMessage", + "LegacyQueueMessage", # File operations "FileEntry", "FileListingMessage", @@ -560,6 +592,7 @@ class SkillActivateMessage(BaseMessage): # Status "StatusPanelMessage", "VersionCheckMessage", + "AgentListMessage", # Skills "SkillEntry", "SkillListMessage", diff --git a/code_puppy/messaging/renderers.py b/code_puppy/messaging/renderers.py index b20570079..2caed282d 100644 --- a/code_puppy/messaging/renderers.py +++ b/code_puppy/messaging/renderers.py @@ -17,6 +17,81 @@ from .message_queue import MessageQueue, MessageType, UIMessage +def render_legacy_ui_message( + console: Console, message: UIMessage, *, allow_human_input: bool = True +) -> None: + """Render a legacy UIMessage using the old interactive semantics.""" + if message.type == MessageType.HUMAN_INPUT_REQUEST: + if not allow_human_input: + safe_content = escape_rich_markup(str(message.content)) + console.print(f"[bold cyan]INPUT REQUESTED:[/bold cyan] {safe_content}") + if hasattr(console.file, "flush"): + console.file.flush() + return + + prompt_id = message.metadata.get("prompt_id") if message.metadata else None + if not prompt_id: + console.print("[bold red]Error: Invalid human input request[/bold red]") + return + + safe_content = escape_rich_markup(str(message.content)) + console.print(f"[bold cyan]{safe_content}[/bold cyan]") + if hasattr(console.file, "flush"): + console.file.flush() + + from .message_queue import provide_prompt_response + + try: + response = input(">>> ") + provide_prompt_response(prompt_id, response) + except (EOFError, KeyboardInterrupt): + provide_prompt_response(prompt_id, "") + except Exception as e: + console.print(f"[bold red]Error getting input: {e}[/bold red]") + provide_prompt_response(prompt_id, "") + return + + if message.type == MessageType.ERROR: + style = "bold red" + elif message.type == MessageType.WARNING: + style = "yellow" + elif message.type == MessageType.SUCCESS: + style = "green" + elif message.type == MessageType.TOOL_OUTPUT: + style = "blue" + elif message.type == MessageType.AGENT_REASONING: + style = None + elif message.type == MessageType.PLANNED_NEXT_STEPS: + style = None + elif message.type == MessageType.AGENT_RESPONSE: + style = None + elif message.type == MessageType.SYSTEM: + style = "dim" + else: + style = None + + if isinstance(message.content, str) and ( + "Current version:" in message.content or "Latest version:" in message.content + ): + style = "dim" + + if isinstance(message.content, str): + if message.type == MessageType.AGENT_RESPONSE: + try: + console.print(Markdown(message.content)) + except Exception: + console.print(escape_rich_markup(message.content)) + elif style: + console.print(escape_rich_markup(message.content), style=style) + else: + console.print(escape_rich_markup(message.content)) + else: + console.print(message.content) + + if hasattr(console.file, "flush"): + console.file.flush() + + class 
MessageRenderer(ABC): """Base class for message renderers.""" @@ -84,66 +159,7 @@ def __init__(self, queue: MessageQueue, console: Optional[Console] = None): async def render_message(self, message: UIMessage): """Render a message using Rich console.""" - # Handle human input requests - if message.type == MessageType.HUMAN_INPUT_REQUEST: - await self._handle_human_input_request(message) - return - - # Convert message type to appropriate Rich styling - if message.type == MessageType.ERROR: - style = "bold red" - elif message.type == MessageType.WARNING: - style = "yellow" - elif message.type == MessageType.SUCCESS: - style = "green" - elif message.type == MessageType.TOOL_OUTPUT: - style = "blue" - elif message.type == MessageType.AGENT_REASONING: - style = None - elif message.type == MessageType.PLANNED_NEXT_STEPS: - style = None - elif message.type == MessageType.AGENT_RESPONSE: - # Special handling for agent responses - they'll be rendered as markdown - style = None - elif message.type == MessageType.SYSTEM: - style = "dim" - else: - style = None - - # Make version messages dim regardless of message type - if isinstance(message.content, str): - if ( - "Current version:" in message.content - or "Latest version:" in message.content - ): - style = "dim" - - # Render the content - if isinstance(message.content, str): - if message.type == MessageType.AGENT_RESPONSE: - # Render agent responses as markdown - try: - markdown = Markdown(message.content) - self.console.print(markdown) - except Exception: - # Fallback to plain text if markdown parsing fails - safe_content = escape_rich_markup(message.content) - self.console.print(safe_content) - elif style: - # Escape Rich markup to prevent crashes from malformed tags - safe_content = escape_rich_markup(message.content) - self.console.print(safe_content, style=style) - else: - safe_content = escape_rich_markup(message.content) - self.console.print(safe_content) - else: - # For complex Rich objects (Tables, Markdown, Text, etc.) 
- self.console.print(message.content) - - # Ensure output is immediately flushed to the terminal - # This fixes the issue where messages don't appear until user input - if hasattr(self.console.file, "flush"): - self.console.file.flush() + render_legacy_ui_message(self.console, message, allow_human_input=False) async def _handle_human_input_request(self, message: UIMessage): """Handle a human input request in async mode.""" @@ -218,94 +234,4 @@ def _consume_messages(self): def _render_message(self, message: UIMessage): """Render a message using Rich console.""" - # Handle human input requests - if message.type == MessageType.HUMAN_INPUT_REQUEST: - self._handle_human_input_request(message) - return - - # Convert message type to appropriate Rich styling - if message.type == MessageType.ERROR: - style = "bold red" - elif message.type == MessageType.WARNING: - style = "yellow" - elif message.type == MessageType.SUCCESS: - style = "green" - elif message.type == MessageType.TOOL_OUTPUT: - style = "blue" - elif message.type == MessageType.AGENT_REASONING: - style = None - elif message.type == MessageType.AGENT_RESPONSE: - # Special handling for agent responses - they'll be rendered as markdown - style = None - elif message.type == MessageType.SYSTEM: - style = "dim" - else: - style = None - - # Make version messages dim regardless of message type - if isinstance(message.content, str): - if ( - "Current version:" in message.content - or "Latest version:" in message.content - ): - style = "dim" - - # Render the content - if isinstance(message.content, str): - if message.type == MessageType.AGENT_RESPONSE: - # Render agent responses as markdown - try: - markdown = Markdown(message.content) - self.console.print(markdown) - except Exception: - # Fallback to plain text if markdown parsing fails - safe_content = escape_rich_markup(message.content) - self.console.print(safe_content) - elif style: - # Escape Rich markup to prevent crashes from malformed tags - # in shell output or other user-provided content - safe_content = escape_rich_markup(message.content) - self.console.print(safe_content, style=style) - else: - safe_content = escape_rich_markup(message.content) - self.console.print(safe_content) - else: - # For complex Rich objects (Tables, Markdown, Text, etc.) 
- self.console.print(message.content) - - # Ensure output is immediately flushed to the terminal - # This fixes the issue where messages don't appear until user input - if hasattr(self.console.file, "flush"): - self.console.file.flush() - - def _handle_human_input_request(self, message: UIMessage): - """Handle a human input request in interactive mode.""" - prompt_id = message.metadata.get("prompt_id") if message.metadata else None - if not prompt_id: - self.console.print( - "[bold red]Error: Invalid human input request[/bold red]" - ) - return - - # Display the prompt - escape to prevent markup injection - safe_content = escape_rich_markup(str(message.content)) - self.console.print(f"[bold cyan]{safe_content}[/bold cyan]") - if hasattr(self.console.file, "flush"): - self.console.file.flush() - - # Get user input - try: - # Use basic input for now - could be enhanced with prompt_toolkit later - response = input(">>> ") - - # Provide the response back to the queue - from .message_queue import provide_prompt_response - - provide_prompt_response(prompt_id, response) - - except (EOFError, KeyboardInterrupt): - # Handle Ctrl+C or Ctrl+D - provide_prompt_response(prompt_id, "") - except Exception as e: - self.console.print(f"[bold red]Error getting input: {e}[/bold red]") - provide_prompt_response(prompt_id, "") + render_legacy_ui_message(self.console, message, allow_human_input=True) diff --git a/code_puppy/messaging/rich_renderer.py b/code_puppy/messaging/rich_renderer.py index 2b43e562c..474e0065e 100644 --- a/code_puppy/messaging/rich_renderer.py +++ b/code_puppy/messaging/rich_renderer.py @@ -7,6 +7,8 @@ only structured data with no formatting hints. """ +import sys + from typing import Dict, Optional, Protocol, runtime_checkable from rich.console import Console @@ -19,6 +21,7 @@ from rich.table import Table from code_puppy.config import get_subagent_verbose +from code_puppy.terminal_utils import supports_live_terminal_updates from code_puppy.tools.common import format_diff_with_colors from code_puppy.tools.subagent_context import is_subagent @@ -29,6 +32,7 @@ UserInputResponse, ) from .messages import ( + AgentListMessage, AgentReasoningMessage, AgentResponseMessage, AnyMessage, @@ -38,6 +42,7 @@ FileContentMessage, FileListingMessage, GrepResultMessage, + LegacyQueueMessage, MessageLevel, SelectionRequest, ShellLineMessage, @@ -54,6 +59,8 @@ UserInputRequest, VersionCheckMessage, ) +from .message_queue import MessageType, UIMessage +from .renderers import render_legacy_ui_message # Note: Text and Tree were removed - no longer used in this implementation @@ -172,6 +179,124 @@ def _should_suppress_subagent_output(self) -> bool: """ return is_subagent() and not get_subagent_verbose() + def _get_prompt_runtime(self): + try: + from code_puppy.command_line.interactive_runtime import ( + get_active_interactive_runtime, + ) + + return get_active_interactive_runtime() + except Exception: + return None + + def _is_background_session_message(self, message: AnyMessage | None) -> bool: + return bool(getattr(message, "session_id", None)) + + def _set_prompt_ephemeral_status( + self, text: str | None, message: AnyMessage | None = None + ) -> None: + if self._is_background_session_message(message): + return + runtime = self._get_prompt_runtime() + if runtime is None: + return + try: + runtime.set_prompt_ephemeral_status(text) + except Exception: + pass + + def _clear_prompt_ephemeral_status( + self, message: AnyMessage | None = None + ) -> None: + self._set_prompt_ephemeral_status(None, message=message) 
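As an illustrative aside, the ephemeral-status helpers here enforce the guardrail that the prompt-local strip is foreground-only. A small runnable sketch of that guard follows, with toy stand-ins for the runtime and message types (the real objects carry much more state).

from dataclasses import dataclass
from typing import Optional


@dataclass
class ToyRuntime:
    """Hypothetical runtime exposing only the ephemeral-status slot."""

    ephemeral_status: Optional[str] = None

    def set_prompt_ephemeral_status(self, text: Optional[str]) -> None:
        self.ephemeral_status = text


@dataclass
class ToyMessage:
    session_id: Optional[str] = None  # set only for background sub-agent sessions


def set_status(runtime: ToyRuntime, msg: ToyMessage, text: Optional[str]) -> None:
    # Foreground-only: messages tagged with a background session are dropped.
    if msg.session_id:
        return
    runtime.set_prompt_ephemeral_status(text)


rt = ToyRuntime()
set_status(rt, ToyMessage(session_id="subagent-1"), "building...")
assert rt.ephemeral_status is None  # background message did not write
set_status(rt, ToyMessage(), "building...")
assert rt.ephemeral_status == "building..."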
+ + def _clear_prompt_ephemeral_preview( + self, message: AnyMessage | None = None + ) -> None: + if self._is_background_session_message(message): + return + runtime = self._get_prompt_runtime() + if runtime is None: + return + try: + runtime.clear_prompt_ephemeral_preview() + except Exception: + pass + + def _should_render_agent_response(self) -> bool: + """Render final agent responses when the interactive prompt is mounted.""" + runtime = self._get_prompt_runtime() + return runtime.has_prompt_surface() if runtime is not None else False + + def _build_prompt_safe_console(self) -> Console: + """Create a console that writes to the real terminal, not patched stdout.""" + return Console( + file=sys.__stdout__, + force_terminal=self._console.is_terminal, + width=self._console.width, + color_system=self._console.color_system, + soft_wrap=self._console.soft_wrap, + legacy_windows=self._console.legacy_windows, + ) + + def _should_render_above_prompt(self, message: AnyMessage) -> bool: + """Render styled structured output above the live prompt surface.""" + runtime = self._get_prompt_runtime() + if runtime is None or not runtime.has_prompt_surface(): + return False + + return not isinstance( + message, + ( + ConfirmationRequest, + SelectionRequest, + ShellLineMessage, + SpinnerControl, + UserInputRequest, + ), + ) + + def _render_message_with_console( + self, message: AnyMessage, console: Console + ) -> None: + """Temporarily swap consoles so direct renderers can reuse their logic.""" + original_console = self._console + self._console = console + try: + self._do_render_direct(message) + finally: + self._console = original_console + + def _render_message_above_prompt(self, message: AnyMessage) -> bool: + """Render a structured message above the live prompt.""" + runtime = self._get_prompt_runtime() + if runtime is None or not runtime.has_prompt_surface(): + return False + + if isinstance(message, AgentResponseMessage): + self._clear_prompt_ephemeral_preview(message=message) + + console = self._build_prompt_safe_console() + return runtime.run_above_prompt( + lambda: self._render_message_with_console(message, console) + ) + + def _render_agent_response_to_console( + self, console: Console, msg: AgentResponseMessage + ) -> None: + """Render the final agent response using the supplied console.""" + banner = self._format_banner("agent_response", "AGENT RESPONSE") + console.print(f"\n{banner}\n") + + if msg.is_markdown: + console.print(Markdown(msg.content)) + else: + console.print(msg.content) + + def _render_agent_response_above_prompt(self, msg: AgentResponseMessage) -> bool: + """Render above the mounted prompt so Rich markup is not escaped.""" + return self._render_message_above_prompt(msg) + # ========================================================================= # Lifecycle (Synchronous - for compatibility with main.py) # ========================================================================= @@ -258,12 +383,8 @@ async def stop_async(self) -> None: # Main Dispatch # ========================================================================= - def _do_render(self, message: AnyMessage) -> None: - """Synchronously render a message by dispatching to the appropriate handler. - - Note: User input requests are skipped in sync mode as they require async. 
- """ - # Dispatch based on message type + def _do_render_direct(self, message: AnyMessage) -> None: + """Synchronously render a message without prompt-surface handoff.""" if isinstance(message, TextMessage): self._render_text(message) elif isinstance(message, FileListingMessage): @@ -283,8 +404,7 @@ def _do_render(self, message: AnyMessage) -> None: elif isinstance(message, AgentReasoningMessage): self._render_agent_reasoning(message) elif isinstance(message, AgentResponseMessage): - # Skip rendering - we now stream agent responses via event_stream_handler - pass + self._render_agent_response(message) elif isinstance(message, SubAgentInvocationMessage): self._render_subagent_invocation(message) elif isinstance(message, SubAgentResponseMessage): @@ -309,6 +429,10 @@ def _do_render(self, message: AnyMessage) -> None: self._render_status_panel(message) elif isinstance(message, VersionCheckMessage): self._render_version_check(message) + elif isinstance(message, AgentListMessage): + self._render_agent_list(message) + elif isinstance(message, LegacyQueueMessage): + self._render_legacy_queue_message(message) elif isinstance(message, SkillListMessage): self._render_skill_list(message) elif isinstance(message, SkillActivateMessage): @@ -317,6 +441,17 @@ def _do_render(self, message: AnyMessage) -> None: # Unknown message type - render as debug self._console.print(f"[dim]Unknown message: {type(message).__name__}[/dim]") + def _do_render(self, message: AnyMessage) -> None: + """Synchronously render a message by dispatching to the appropriate handler. + + Note: User input requests are skipped in sync mode as they require async. + """ + if self._should_render_above_prompt(message): + if self._render_message_above_prompt(message): + return + + self._do_render_direct(message) + async def render(self, message: AnyMessage) -> None: """Render a message asynchronously (supports user input requests).""" # Handle async-only message types @@ -352,6 +487,27 @@ def _render_text(self, msg: TextMessage) -> None: safe_text = escape_rich_markup(msg.text) self._console.print(f"{prefix}{safe_text}", style=style) + def _render_legacy_queue_message(self, msg: LegacyQueueMessage) -> None: + """Render wrapped legacy queue output with old semantics.""" + try: + legacy_type = MessageType(msg.legacy_type) + except ValueError: + legacy_type = MessageType.DEBUG + + legacy_message = UIMessage( + type=legacy_type, + content=msg.content, + metadata=dict(msg.legacy_metadata or {}), + ) + if msg.legacy_timestamp is not None: + legacy_message.timestamp = msg.legacy_timestamp + + render_legacy_ui_message( + self._console, + legacy_message, + allow_human_input=False, + ) + def _get_level_prefix(self, level: MessageLevel) -> str: """Get a prefix icon for the message level.""" prefixes = { @@ -693,12 +849,30 @@ def _render_shell_line(self, msg: ShellLineMessage) -> None: from rich.text import Text + runtime = self._get_prompt_runtime() + if runtime is not None and runtime.has_prompt_surface(): + if "\r" in msg.line: + normalized = Text.from_ansi(msg.line.split("\r")[-1]).plain + normalized = normalized.replace("\n", " ") + normalized = "".join( + char for char in normalized if char == "\t" or char.isprintable() + ).strip() + self._set_prompt_ephemeral_status(normalized or None, message=msg) + return + sys.stdout.write(msg.line + "\n") + sys.stdout.flush() + return + # Check if line contains carriage return (progress bar style output) if "\r" in msg.line: - # Bypass Rich entirely - write directly to stdout so terminal interprets \r - # 
Apply dim styling manually via ANSI codes - sys.stdout.write(f"\033[2m{msg.line}\033[0m") - sys.stdout.flush() + if supports_live_terminal_updates(self._console): + # Bypass Rich entirely - write directly to stdout so terminal interprets \r + # Apply dim styling manually via ANSI codes + sys.stdout.write(f"\033[2m{msg.line}\033[0m") + sys.stdout.flush() + else: + normalized = Text.from_ansi(msg.line.split("\r")[-1]) + self._console.print(normalized, style="dim") else: # Normal line: use Rich for nice formatting text = Text.from_ansi(msg.line) @@ -710,6 +884,7 @@ def _render_shell_output(self, msg: ShellOutputMessage) -> None: Shell command results are already returned to the LLM via tool responses, so we don't need to clutter the UI with redundant output. """ + self._clear_prompt_ephemeral_status(message=msg) # Just print trailing newline for spinner separation self._console.print() @@ -740,16 +915,7 @@ def _render_agent_reasoning(self, msg: AgentReasoningMessage) -> None: def _render_agent_response(self, msg: AgentResponseMessage) -> None: """Render agent response with header and markdown formatting.""" - # Header - banner = self._format_banner("agent_response", "AGENT RESPONSE") - self._console.print(f"\n{banner}\n") - - # Content (markdown or plain) - if msg.is_markdown: - md = Markdown(msg.content) - self._console.print(md) - else: - self._console.print(msg.content) + self._render_agent_response_to_console(self._console, msg) def _render_subagent_invocation(self, msg: SubAgentInvocationMessage) -> None: """Render sub-agent invocation header with nice formatting.""" @@ -1068,6 +1234,18 @@ def _get_file_icon(self, file_path: str) -> str: } return icons.get(ext, "šŸ“„") + # ========================================================================= + # Agent Lists + # ========================================================================= + + def _render_agent_list(self, msg: AgentListMessage) -> None: + """Render the list_agents summary banner.""" + if self._should_suppress_subagent_output(): + return + + banner = self._format_banner("list_agents", "LIST AGENTS") + self._console.print(f"\n{banner} [dim]Found {msg.agent_count} agent(s).[/dim]") + # ========================================================================= # Skills # ========================================================================= diff --git a/code_puppy/messaging/spinner/__init__.py b/code_puppy/messaging/spinner/__init__.py index c6880a762..2fcd1a201 100644 --- a/code_puppy/messaging/spinner/__init__.py +++ b/code_puppy/messaging/spinner/__init__.py @@ -64,11 +64,31 @@ def resume_all_spinners(): def update_spinner_context(info: str) -> None: """Update the shared context information displayed beside active spinners.""" SpinnerBase.set_context_info(info) + try: + from code_puppy.command_line.interactive_runtime import ( + get_active_interactive_runtime, + ) + + runtime = get_active_interactive_runtime() + if runtime is not None: + runtime.invalidate_prompt() + except Exception: + pass def clear_spinner_context() -> None: """Clear any context information displayed beside active spinners.""" SpinnerBase.clear_context_info() + try: + from code_puppy.command_line.interactive_runtime import ( + get_active_interactive_runtime, + ) + + runtime = get_active_interactive_runtime() + if runtime is not None: + runtime.invalidate_prompt() + except Exception: + pass __all__ = [ diff --git a/code_puppy/messaging/spinner/console_spinner.py b/code_puppy/messaging/spinner/console_spinner.py index 114c41800..a5930d46a 100644 --- 
a/code_puppy/messaging/spinner/console_spinner.py +++ b/code_puppy/messaging/spinner/console_spinner.py @@ -10,6 +10,13 @@ from rich.live import Live from rich.text import Text +from code_puppy.terminal_utils import ( + clear_live_terminal_line, + flush_windows_keyboard_buffer, + reset_windows_terminal_ansi, + supports_live_terminal_updates, +) + from .spinner_base import SpinnerBase @@ -29,6 +36,7 @@ def __init__(self, console=None): self._stop_event = threading.Event() self._paused = False self._live = None + self._last_render_at = 0.0 # Register this spinner for global management from . import register_spinner @@ -40,18 +48,21 @@ def start(self): super().start() self._stop_event.clear() + if not supports_live_terminal_updates(self.console): + return + # Don't start a new thread if one is already running if self._thread and self._thread.is_alive(): return - # Print blank line before spinner for visual separation from content + # Print blank line before spinner for visual separation. self.console.print() # Create a Live display for the spinner self._live = Live( self._generate_spinner_panel(), console=self.console, - refresh_per_second=20, + refresh_per_second=10, transient=True, # Clear the spinner line when stopped (no puppy litter!) auto_refresh=False, # Don't auto-refresh to avoid wiping out user input ) @@ -79,32 +90,10 @@ def stop(self): self._thread = None - # Windows-specific cleanup: Rich's Live display can leave terminal in corrupted state if platform.system() == "Windows": - import sys - - try: - # Reset ANSI formatting for both stdout and stderr - sys.stdout.write("\x1b[0m") # Reset all attributes - sys.stdout.flush() - sys.stderr.write("\x1b[0m") - sys.stderr.flush() - - # Clear the line and reposition cursor - sys.stdout.write("\r") # Return to start of line - sys.stdout.write("\x1b[K") # Clear to end of line - sys.stdout.flush() - - # Flush keyboard input buffer to clear any stuck keys - try: - import msvcrt - - while msvcrt.kbhit(): - msvcrt.getch() - except ImportError: - pass # msvcrt not available (not Windows or different Python impl) - except Exception: - pass # Fail silently if cleanup doesn't work + reset_windows_terminal_ansi() + clear_live_terminal_line() + flush_windows_keyboard_buffer() # Unregister this spinner from global management from . import unregister_spinner @@ -151,9 +140,13 @@ def _update_spinner(self): # Update the live display only if not paused and not awaiting input if self._live and not self._paused and not awaiting_input: - # Manually refresh instead of auto-refresh to avoid wiping input - self._live.update(self._generate_spinner_panel()) - self._live.refresh() + # Throttle refresh to reduce redraw churn/flicker. 
+ now = time.time() + if now - self._last_render_at >= 0.09: + # Manually refresh instead of auto-refresh to avoid wiping input + self._live.update(self._generate_spinner_panel()) + self._live.refresh() + self._last_render_at = now # Short sleep to control animation speed time.sleep(0.05) @@ -173,12 +166,7 @@ def pause(self): try: self._live.stop() self._live = None - # Clear the line to remove any artifacts - import sys - - sys.stdout.write("\r") # Return to start of line - sys.stdout.write("\x1b[K") # Clear to end of line - sys.stdout.flush() + clear_live_terminal_line(console=self.console) except Exception: pass @@ -192,23 +180,20 @@ def resume(self): if self._is_spinning and self._paused: self._paused = False + if not supports_live_terminal_updates(self.console): + return # Restart the live display if it was stopped during pause if not self._live: try: - # Clear any leftover artifacts before starting - import sys - - sys.stdout.write("\r") # Return to start of line - sys.stdout.write("\x1b[K") # Clear to end of line - sys.stdout.flush() + clear_live_terminal_line(console=self.console) - # Print blank line before spinner for visual separation + # Print blank line before spinner for visual separation. self.console.print() self._live = Live( self._generate_spinner_panel(), console=self.console, - refresh_per_second=20, + refresh_per_second=10, transient=True, # Clear spinner line when stopped auto_refresh=False, ) @@ -220,10 +205,7 @@ def resume(self): try: # Force Rich to reset any cached console state if hasattr(self.console, "_buffer"): - # Clear Rich's internal buffer to prevent artifacts - self.console.file.write("\r") # Return to start - self.console.file.write("\x1b[K") # Clear line - self.console.file.flush() + clear_live_terminal_line(stream=self.console.file, console=self.console) self._live.update(self._generate_spinner_panel()) self._live.refresh() diff --git a/code_puppy/plugins/antigravity_oauth/register_callbacks.py b/code_puppy/plugins/antigravity_oauth/register_callbacks.py index c170038f3..8e7e742f1 100644 --- a/code_puppy/plugins/antigravity_oauth/register_callbacks.py +++ b/code_puppy/plugins/antigravity_oauth/register_callbacks.py @@ -14,10 +14,12 @@ from urllib.parse import parse_qs, urlparse from code_puppy.callbacks import register_callback +from code_puppy.command_line.interactive_command import BackgroundInteractiveCommand from code_puppy.messaging import emit_error, emit_info, emit_success, emit_warning from code_puppy.model_switching import set_model_and_reload_agent from ..oauth_puppy_html import oauth_failure_html, oauth_success_html +from ..oauth_control import wait_for_event_or_cancel from .accounts import AccountManager from .config import ( ANTIGRAVITY_OAUTH_CONFIG, @@ -125,7 +127,9 @@ def run_server(server=server) -> None: return None -def _await_callback(context: Any) -> Optional[Tuple[str, str, str]]: +def _await_callback( + context: Any, cancel_event: threading.Event | None = None +) -> Optional[Tuple[str, str, str]]: """Wait for OAuth callback and return (code, state, redirect_uri).""" timeout = ANTIGRAVITY_OAUTH_CONFIG["callback_timeout"] @@ -157,7 +161,17 @@ def _await_callback(context: Any) -> Optional[Tuple[str, str, str]]: emit_info(f"ā³ Waiting for callback on {redirect_uri}") - if not event.wait(timeout=timeout): + wait_result = wait_for_event_or_cancel( + event, + timeout=timeout, + cancel_event=cancel_event, + ) + if wait_result == "cancelled": + emit_info("Antigravity OAuth authentication cancelled.") + server.shutdown() + return None + + if wait_result == "timeout": emit_error("OAuth callback timed out. Please try again.") server.shutdown() return None @@ -174,6 +188,7 @@ def _await_callback(context: Any) -> Optional[Tuple[str, str, str]]: def _perform_authentication( add_account: bool = False, reload_agent: bool = True, + cancel_event: threading.Event | None = None, ) -> bool: """Run the OAuth authentication flow. @@ -182,10 +197,13 @@ def _perform_authentication( reload_agent: Whether to reload the current agent after auth. """ context = prepare_oauth_context() - callback_result = _await_callback(context) + callback_result = _await_callback(context, cancel_event=cancel_event) if not callback_result: return False + if cancel_event is not None and cancel_event.is_set(): + emit_info("Antigravity OAuth authentication cancelled.") + return False code, state, redirect_uri = callback_result @@ -195,6 +213,9 @@ def _perform_authentication( if not isinstance(result, TokenExchangeSuccess): emit_error(f"Token exchange failed: {result.error}") return False + if cancel_event is not None and cancel_event.is_set(): + emit_info("Antigravity OAuth authentication cancelled.") + return False # Save tokens tokens = { @@ -231,6 +252,9 @@ def _perform_authentication( # Add models emit_info("šŸ“¦ Configuring available models…") + if cancel_event is not None and cancel_event.is_set(): + emit_info("Antigravity OAuth authentication cancelled.") + return False if add_models_to_config(result.access_token, result.project_id): model_count = len(ANTIGRAVITY_MODELS) emit_success(f"āœ… {model_count} Antigravity models configured!") @@ -239,12 +263,26 @@ def _perform_authentication( ) else: emit_warning("Failed to configure models. Try running /antigravity-auth again.") + return False if reload_agent: reload_current_agent() return True +def start_antigravity_oauth_setup( + cancel_event: threading.Event, *, add_account: bool = False +) -> bool: + success = _perform_authentication( + add_account=add_account, + reload_agent=False, + cancel_event=cancel_event, + ) + if success and not cancel_event.is_set() and not add_account: + set_model_and_reload_agent("antigravity-gemini-3-pro-high") + return success + + def _custom_help() -> List[Tuple[str, str]]: """Return help entries for Antigravity commands.""" return [ @@ -379,7 +417,7 @@ def _handle_logout() -> None: emit_success("šŸ‘‹ Antigravity logout complete") -def _handle_custom_command(command: str, name: str) -> Optional[bool]: +def _handle_custom_command(command: str, name: str) -> object | None: """Handle Antigravity custom commands.""" if not name: return None @@ -391,17 +429,18 @@ def _handle_custom_command(command: str, name: str) -> Optional[bool]: emit_warning( "Existing tokens found. This will refresh your authentication." ) - - if _perform_authentication(reload_agent=False): - set_model_and_reload_agent("antigravity-gemini-3-pro-high") - return True + return BackgroundInteractiveCommand(run=start_antigravity_oauth_setup) if name == "antigravity-add": emit_info("āž• Adding another Google account…") manager = AccountManager.load_from_disk() emit_info(f"Current accounts: {manager.account_count}") - _perform_authentication(add_account=True) - return True + return BackgroundInteractiveCommand( + run=lambda cancel_event: start_antigravity_oauth_setup( + cancel_event, + add_account=True, + ) + ) if name == "antigravity-status": _handle_status() diff --git a/code_puppy/plugins/chatgpt_oauth/oauth_flow.py b/code_puppy/plugins/chatgpt_oauth/oauth_flow.py index 216b40966..27d95deef 100644 --- a/code_puppy/plugins/chatgpt_oauth/oauth_flow.py +++ b/code_puppy/plugins/chatgpt_oauth/oauth_flow.py @@ -15,6 +15,7 @@ from code_puppy.messaging import emit_error, emit_info, emit_success, emit_warning from ..oauth_puppy_html import oauth_failure_html, oauth_success_html +from ..oauth_control import wait_for_predicate_or_cancel from .config import CHATGPT_OAUTH_CONFIG from .utils import ( add_models_to_extra_config, @@ -248,7 +249,7 @@ def _later() -> None: threading.Thread(target=_later, daemon=True).start() -def run_oauth_flow() -> None: +def run_oauth_flow(cancel_event: threading.Event | None = None) -> bool: existing_tokens = load_stored_tokens() if existing_tokens and existing_tokens.get("access_token"): emit_warning("Existing ChatGPT tokens will be overwritten.") @@ -258,7 +259,7 @@ except OSError as exc: emit_error(f"Could not start OAuth server on port {REQUIRED_PORT}: {exc}") emit_info(f"Use `lsof -ti:{REQUIRED_PORT} | xargs kill` to free the port.") - return + return False auth_url = server.auth_url() emit_info(f"Open this URL in your browser: {auth_url}") @@ -284,26 +285,31 @@ emit_info("Waiting for authentication callback…") - elapsed = 0.0 - timeout = CHATGPT_OAUTH_CONFIG["callback_timeout"] - interval = 0.25 - while elapsed < timeout: - time.sleep(interval) - elapsed += interval - if server.exit_code == 0: - break + wait_result = wait_for_predicate_or_cancel( + lambda: server.exit_code == 0, + timeout=CHATGPT_OAUTH_CONFIG["callback_timeout"], + cancel_event=cancel_event, + poll_interval=0.25, + ) server.shutdown() server_thread.join(timeout=5) - if server.exit_code != 0: + if wait_result == "cancelled": + emit_info("ChatGPT OAuth authentication cancelled.") + return False + + if wait_result == "timeout" or server.exit_code != 0: emit_error("Authentication failed or timed out.") - return + return False + if cancel_event is not None and cancel_event.is_set(): + emit_info("ChatGPT OAuth authentication cancelled.") + return False tokens = load_stored_tokens() if not tokens: emit_error("Tokens saved during OAuth flow could not be loaded.") - return + return False api_key = tokens.get("api_key") if api_key: @@ -323,7 +329,11 @@ account_id = tokens.get("account_id", "") models = fetch_chatgpt_models(api_key, account_id) if models: + if cancel_event is not None and cancel_event.is_set(): + emit_info("ChatGPT OAuth authentication cancelled.") + return False if add_models_to_extra_config(models): emit_success( "ChatGPT models registered. Use the `chatgpt-` prefix in /model." 
) + return True diff --git a/code_puppy/plugins/chatgpt_oauth/register_callbacks.py b/code_puppy/plugins/chatgpt_oauth/register_callbacks.py index 53d32a508..d90bed3ad 100644 --- a/code_puppy/plugins/chatgpt_oauth/register_callbacks.py +++ b/code_puppy/plugins/chatgpt_oauth/register_callbacks.py @@ -7,9 +7,11 @@ from __future__ import annotations import os -from typing import Any, Dict, List, Optional, Tuple +import threading +from typing import Any, Dict, List, Tuple from code_puppy.callbacks import register_callback +from code_puppy.command_line.interactive_command import BackgroundInteractiveCommand from code_puppy.messaging import emit_info, emit_success, emit_warning from code_puppy.model_switching import set_model_and_reload_agent @@ -79,14 +81,19 @@ def _handle_chatgpt_logout() -> None: emit_success("ChatGPT logout complete") -def _handle_custom_command(command: str, name: str) -> Optional[bool]: +def start_chatgpt_oauth_setup(cancel_event: threading.Event) -> bool: + success = run_oauth_flow(cancel_event=cancel_event) + if success and not cancel_event.is_set(): + set_model_and_reload_agent("chatgpt-gpt-5.3-codex") + return success + + +def _handle_custom_command(command: str, name: str) -> object | None: if not name: return None if name == "chatgpt-auth": - run_oauth_flow() - set_model_and_reload_agent("chatgpt-gpt-5.4") - return True + return BackgroundInteractiveCommand(run=start_chatgpt_oauth_setup) if name == "chatgpt-status": _handle_chatgpt_status() diff --git a/code_puppy/plugins/claude_code_oauth/register_callbacks.py b/code_puppy/plugins/claude_code_oauth/register_callbacks.py index 10deaadb0..f2233d627 100644 --- a/code_puppy/plugins/claude_code_oauth/register_callbacks.py +++ b/code_puppy/plugins/claude_code_oauth/register_callbacks.py @@ -15,6 +15,7 @@ from urllib.parse import parse_qs, urlparse from code_puppy.callbacks import register_callback +from code_puppy.command_line.interactive_command import BackgroundInteractiveCommand from code_puppy.messaging import emit_error, emit_info, emit_success, emit_warning from code_puppy.model_switching import set_model_and_reload_agent from code_puppy.provider_identity import ( @@ -23,6 +24,7 @@ ) from ..oauth_puppy_html import oauth_failure_html, oauth_success_html +from ..oauth_control import wait_for_event_or_cancel from .config import CLAUDE_CODE_OAUTH_CONFIG, get_token_storage_path from .utils import ( OAuthContext, @@ -116,7 +118,9 @@ def run_server(server=server) -> None: return None -def _await_callback(context: OAuthContext) -> Optional[str]: +def _await_callback( + context: OAuthContext, cancel_event: threading.Event | None = None +) -> Optional[str]: timeout = CLAUDE_CODE_OAUTH_CONFIG["callback_timeout"] started = _start_callback_server(context) @@ -157,7 +161,17 @@ def _await_callback(context: OAuthContext) -> Optional[str]: "and paste it back into Code Puppy." ) - if not event.wait(timeout=timeout): + wait_result = wait_for_event_or_cancel( + event, + timeout=timeout, + cancel_event=cancel_event, + ) + if wait_result == "cancelled": + emit_info("Claude Code OAuth authentication cancelled.") + server.shutdown() + return None + + if wait_result == "timeout": emit_error("OAuth callback timed out. 
Please try again.") server.shutdown() return None @@ -189,30 +203,36 @@ def _custom_help() -> List[Tuple[str, str]]: ] -def _perform_authentication() -> None: +def _perform_authentication(cancel_event: threading.Event | None = None) -> bool: context = prepare_oauth_context() - code = _await_callback(context) + code = _await_callback(context, cancel_event=cancel_event) if not code: - return + return False + if cancel_event is not None and cancel_event.is_set(): + emit_info("Claude Code OAuth authentication cancelled.") + return False emit_info("Exchanging authorization code for tokens…") tokens = exchange_code_for_tokens(code, context) if not tokens: emit_error("Token exchange failed. Please retry the authentication flow.") - return + return False + if cancel_event is not None and cancel_event.is_set(): + emit_info("Claude Code OAuth authentication cancelled.") + return False if not save_tokens(tokens): emit_error( "Tokens retrieved but failed to save locally. Check file permissions." ) - return + return False emit_success("Claude Code OAuth authentication successful!") access_token = tokens.get("access_token") if not access_token: emit_warning("No access token returned; skipping model discovery.") - return + return True emit_info("Fetching available Claude Code models…") models = fetch_claude_code_models(access_token) @@ -220,16 +240,27 @@ def _perform_authentication() -> None: emit_warning( "Claude Code authentication succeeded but no models were returned." ) - return + return True + if cancel_event is not None and cancel_event.is_set(): + emit_info("Claude Code OAuth authentication cancelled.") + return False emit_info(f"Discovered {len(models)} models: {', '.join(models)}") if add_models_to_extra_config(models): emit_success( "Claude Code models added to your configuration. Use the `claude-code-` prefix!" ) + return True + + +def start_claude_code_oauth_setup(cancel_event: threading.Event) -> bool: + success = _perform_authentication(cancel_event=cancel_event) + if success and not cancel_event.is_set(): + set_model_and_reload_agent("claude-code-claude-opus-4-6") + return success -def _handle_custom_command(command: str, name: str) -> Optional[bool]: +def _handle_custom_command(command: str, name: str) -> object | None: if not name: return None @@ -240,9 +271,7 @@ def _handle_custom_command(command: str, name: str) -> Optional[bool]: emit_warning( "Existing Claude Code tokens found. Continuing will overwrite them." 
) - _perform_authentication() - set_model_and_reload_agent("claude-code-claude-opus-4-6") - return True + return BackgroundInteractiveCommand(run=start_claude_code_oauth_setup) if name == "claude-code-status": tokens = load_stored_tokens() diff --git a/code_puppy/plugins/oauth_control.py b/code_puppy/plugins/oauth_control.py new file mode 100644 index 000000000..9f52a07ea --- /dev/null +++ b/code_puppy/plugins/oauth_control.py @@ -0,0 +1,49 @@ +"""Shared cooperative-cancel helpers for OAuth callback waits.""" + +from __future__ import annotations + +import threading +import time +from typing import Callable, Literal + + +WaitResult = Literal["completed", "timeout", "cancelled"] + + +def wait_for_event_or_cancel( + event: threading.Event, + *, + timeout: float, + cancel_event: threading.Event | None, + poll_interval: float = 0.1, +) -> WaitResult: + """Wait for an event while also honoring cooperative cancellation.""" + deadline = time.monotonic() + timeout + while True: + if cancel_event is not None and cancel_event.is_set(): + return "cancelled" + if event.wait( + timeout=min(poll_interval, max(0.0, deadline - time.monotonic())) + ): + return "completed" + if time.monotonic() >= deadline: + return "timeout" + + +def wait_for_predicate_or_cancel( + predicate: Callable[[], bool], + *, + timeout: float, + cancel_event: threading.Event | None, + poll_interval: float = 0.25, +) -> WaitResult: + """Poll a predicate while also honoring cooperative cancellation.""" + deadline = time.monotonic() + timeout + while True: + if cancel_event is not None and cancel_event.is_set(): + return "cancelled" + if predicate(): + return "completed" + if time.monotonic() >= deadline: + return "timeout" + time.sleep(min(poll_interval, max(0.0, deadline - time.monotonic()))) diff --git a/code_puppy/terminal_utils.py b/code_puppy/terminal_utils.py index 6efb37e02..14d250347 100644 --- a/code_puppy/terminal_utils.py +++ b/code_puppy/terminal_utils.py @@ -7,6 +7,7 @@ import platform import subprocess import sys +from dataclasses import dataclass from typing import TYPE_CHECKING, Callable, Optional if TYPE_CHECKING: @@ -15,6 +16,144 @@ # Store the original console ctrl handler so we can restore it if needed _original_ctrl_handler: Optional[Callable] = None +_TRUECOLOR_TERM_MARKERS = ( + "xterm-direct", + "xterm-ghostty", + "xterm-truecolor", + "iterm2", + "vte-256color", +) + + +@dataclass(frozen=True) +class TerminalProfile: + terminal_family: str + supports_truecolor: bool + live_updates_safe: bool + + +def _get_terminal_stream(console: Optional["Console"] = None): + if console is not None and getattr(console, "file", None) is not None: + return console.file + return getattr(sys, "__stdout__", None) or sys.stdout + + +def _stream_is_tty(stream) -> bool: + try: + isatty = getattr(stream, "isatty", None) + return bool(isatty and isatty()) + except Exception: + return False + + +def _detect_terminal_family() -> str: + term = os.environ.get("TERM", "").lower() + term_program = os.environ.get("TERM_PROGRAM", "").lower() + + if os.environ.get("WT_SESSION"): + return "windows_terminal" + if term_program == "apple_terminal": + return "terminal_app" + if ( + term_program == "ghostty" + or "xterm-ghostty" in term + or os.environ.get("GHOSTTY_RESOURCES_DIR") + ): + return "ghostty" + if ( + os.environ.get("ITERM_SESSION_ID") + or term_program == "iterm.app" + or "iterm2" in term + ): + return "iterm2" + if os.environ.get("KITTY_WINDOW_ID"): + return "kitty" + if os.environ.get("ALACRITTY_SOCKET"): + return "alacritty" + return 
"unknown" + + +def _detect_rich_truecolor(console: Optional["Console"] = None) -> bool: + stream = _get_terminal_stream(console) + if not _stream_is_tty(stream): + return False + + try: + if console is None: + from rich.console import Console + + console = Console(file=stream) + except Exception: + return False + + return (console.color_system or "").lower() == "truecolor" + + +def get_terminal_profile(console: Optional["Console"] = None) -> TerminalProfile: + """Return the best-effort terminal profile for the active session.""" + terminal_family = _detect_terminal_family() + stream = _get_terminal_stream(console) + is_interactive = _stream_is_tty(stream) + + colorterm = os.environ.get("COLORTERM", "").lower() + term = os.environ.get("TERM", "").lower() + + supports_truecolor = False + if colorterm in ("truecolor", "24bit"): + supports_truecolor = True + elif any(marker in term for marker in _TRUECOLOR_TERM_MARKERS): + supports_truecolor = True + elif terminal_family in {"ghostty", "iterm2"}: + supports_truecolor = True + elif any( + os.environ.get(var) + for var in ( + "ITERM_SESSION_ID", + "KITTY_WINDOW_ID", + "ALACRITTY_SOCKET", + "WT_SESSION", + ) + ): + supports_truecolor = True + else: + supports_truecolor = _detect_rich_truecolor(console) + + live_updates_safe = is_interactive and os.environ.get("CI", "").lower() not in { + "1", + "true", + "yes", + } + if platform.system() == "Windows": + live_updates_safe = live_updates_safe and terminal_family == "windows_terminal" + + return TerminalProfile( + terminal_family=terminal_family, + supports_truecolor=supports_truecolor, + live_updates_safe=live_updates_safe, + ) + + +def supports_live_terminal_updates(console: Optional["Console"] = None) -> bool: + """Return whether live CR/ANSI redraws are safe for the active terminal.""" + return get_terminal_profile(console).live_updates_safe + + +def clear_live_terminal_line( + stream=None, console: Optional["Console"] = None +) -> bool: + """Clear the current live terminal line when CR-based redraw is supported.""" + target = stream or _get_terminal_stream(console) + if not supports_live_terminal_updates(console) or not _stream_is_tty(target): + return False + + try: + target.write("\r") + target.write("\x1b[K") + target.flush() + return True + except Exception: + return False + def reset_windows_terminal_ansi() -> None: """Reset ANSI formatting on Windows stdout/stderr. @@ -298,51 +437,19 @@ def ensure_ctrl_c_disabled() -> bool: def detect_truecolor_support() -> bool: """Detect if the terminal supports truecolor (24-bit color). - Checks multiple indicators: - 1. COLORTERM environment variable (most reliable) - 2. TERM environment variable patterns - 3. Rich's Console color_system detection as fallback - Returns: True if truecolor is supported, False otherwise. 
""" - # Check COLORTERM - this is the most reliable indicator - colorterm = os.environ.get("COLORTERM", "").lower() - if colorterm in ("truecolor", "24bit"): - return True - - # Check TERM for known truecolor-capable terminals - term = os.environ.get("TERM", "").lower() - truecolor_terms = ( - "xterm-direct", - "xterm-truecolor", - "iterm2", - "vte-256color", # Many modern terminals set this - ) - if any(t in term for t in truecolor_terms): - return True - - # Some terminals like iTerm2, Kitty, Alacritty set specific env vars - if os.environ.get("ITERM_SESSION_ID"): - return True - if os.environ.get("KITTY_WINDOW_ID"): - return True - if os.environ.get("ALACRITTY_SOCKET"): - return True - if os.environ.get("WT_SESSION"): # Windows Terminal - return True + return get_terminal_profile().supports_truecolor - # Use Rich's detection as a fallback - try: - from rich.console import Console - - console = Console(force_terminal=True) - color_system = console.color_system - return color_system == "truecolor" - except Exception: - pass - return False +def _terminal_display_name(terminal_family: str) -> str: + return { + "ghostty": "Ghostty", + "iterm2": "iTerm2", + "terminal_app": "Terminal.app", + "windows_terminal": "Windows Terminal", + }.get(terminal_family, terminal_family) def print_truecolor_warning(console: Optional["Console"] = None) -> None: @@ -353,66 +460,85 @@ def print_truecolor_warning(console: Optional["Console"] = None) -> None: """ if detect_truecolor_support(): return # All good, no warning needed + profile = get_terminal_profile(console) if console is None: try: from rich.console import Console - console = Console() + console = Console(file=_get_terminal_stream()) except ImportError: # Rich not available, fall back to plain print print("\n" + "=" * 70) - print("āš ļø WARNING: TERMINAL DOES NOT SUPPORT TRUECOLOR (24-BIT COLOR)") - print("=" * 70) - print("Code Puppy looks best with truecolor support.") - print("Consider using a modern terminal like:") - print(" • iTerm2 (macOS)") - print(" • Windows Terminal (Windows)") - print(" • Kitty, Alacritty, or any modern terminal emulator") - print("") - print("You can also try setting: export COLORTERM=truecolor") - print("") - print("Note: The built-in macOS Terminal.app does not support truecolor") - print("(Sequoia and earlier). 
You'll need a different terminal app.") + if profile.terminal_family == "terminal_app": + print("NOTICE: Terminal.app works, but colors will be reduced.") + print("=" * 70) + print("Consider iTerm2 or Ghostty for full color fidelity on macOS.") + else: + print("āš ļø WARNING: TERMINAL DOES NOT SUPPORT TRUECOLOR (24-BIT COLOR)") + print("=" * 70) + print("Code Puppy looks best with truecolor support.") + print("Consider using a modern terminal like:") + print(" • iTerm2 (macOS)") + print(" • Ghostty (macOS)") + print(" • Windows Terminal (Windows)") + print(" • Kitty, Alacritty, or Warp") + print("") + print("You can also try setting: export COLORTERM=truecolor") print("=" * 70 + "\n") return # Get detected color system for diagnostic info color_system = console.color_system or "unknown" - # Build the warning box - warning_lines = [ - "", - "[bold bright_red on red]" + "━" * 72 + "[/]", - "[bold bright_red on red]ā”ƒ[/][bold bright_white on red]" - + " " * 70 - + "[/][bold bright_red on red]ā”ƒ[/]", - "[bold bright_red on red]ā”ƒ[/][bold bright_white on red] āš ļø WARNING: TERMINAL DOES NOT SUPPORT TRUECOLOR (24-BIT COLOR) āš ļø [/][bold bright_red on red]ā”ƒ[/]", - "[bold bright_red on red]ā”ƒ[/][bold bright_white on red]" - + " " * 70 - + "[/][bold bright_red on red]ā”ƒ[/]", - "[bold bright_red on red]" + "━" * 72 + "[/]", - "", - f"[yellow]Detected color system:[/] [bold]{color_system}[/]", - "", - "[bold white]Code Puppy uses rich colors and will look degraded without truecolor.[/]", - "", - "[cyan]Consider using a modern terminal emulator:[/]", - " [green]•[/] [bold]iTerm2[/] (macOS) - https://iterm2.com", - " [green]•[/] [bold]Windows Terminal[/] (Windows) - Built into Windows 11", - " [green]•[/] [bold]Kitty[/] - https://sw.kovidgoyal.net/kitty", - " [green]•[/] [bold]Alacritty[/] - https://alacritty.org", - " [green]•[/] [bold]Warp[/] (macOS) - https://warp.dev", - "", - "[cyan]Or try setting the COLORTERM environment variable:[/]", - " [dim]export COLORTERM=truecolor[/]", - "", - "[dim italic]Note: The built-in macOS Terminal.app does not support truecolor (Sequoia and earlier).[/]", - "[dim italic]Setting COLORTERM=truecolor won't help - you'll need a different terminal app.[/]", - "", - "[bold bright_red]" + "─" * 72 + "[/]", - "", - ] + if profile.terminal_family == "terminal_app": + warning_lines = [ + "", + "[bold yellow]" + "━" * 72 + "[/]", + "[bold yellow]NOTICE: TERMINAL.APP COLORS WILL BE REDUCED[/]", + "", + f"[yellow]Detected terminal:[/] [bold]{_terminal_display_name(profile.terminal_family)}[/]", + f"[yellow]Detected color system:[/] [bold]{color_system}[/]", + "", + "[bold white]Code Puppy should work normally here, but Terminal.app does not advertise truecolor.[/]", + "", + "[cyan]For full color fidelity on macOS, consider iTerm2 or Ghostty.[/]", + "", + "[bold yellow]" + "─" * 72 + "[/]", + "", + ] + else: + warning_lines = [ + "", + "[bold bright_red on red]" + "━" * 72 + "[/]", + "[bold bright_red on red]ā”ƒ[/][bold bright_white on red]" + + " " * 70 + + "[/][bold bright_red on red]ā”ƒ[/]", + "[bold bright_red on red]ā”ƒ[/][bold bright_white on red] āš ļø WARNING: TERMINAL DOES NOT SUPPORT TRUECOLOR (24-BIT COLOR) āš ļø [/][bold bright_red on red]ā”ƒ[/]", + "[bold bright_red on red]ā”ƒ[/][bold bright_white on red]" + + " " * 70 + + "[/][bold bright_red on red]ā”ƒ[/]", + "[bold bright_red on red]" + "━" * 72 + "[/]", + "", + f"[yellow]Detected terminal:[/] [bold]{_terminal_display_name(profile.terminal_family)}[/]", + f"[yellow]Detected color system:[/] 
[bold]{color_system}[/]", + "", + "[bold white]Code Puppy uses rich colors and will look degraded without truecolor.[/]", + "", + "[cyan]Consider using a modern terminal emulator:[/]", + " [green]•[/] [bold]iTerm2[/] (macOS) - https://iterm2.com", + " [green]•[/] [bold]Ghostty[/] (macOS) - https://ghostty.org", + " [green]•[/] [bold]Windows Terminal[/] (Windows) - Built into Windows 11", + " [green]•[/] [bold]Kitty[/] - https://sw.kovidgoyal.net/kitty", + " [green]•[/] [bold]Alacritty[/] - https://alacritty.org", + " [green]•[/] [bold]Warp[/] (macOS) - https://warp.dev", + "", + "[cyan]Or try setting the COLORTERM environment variable:[/]", + " [dim]export COLORTERM=truecolor[/]", + "", + "[bold bright_red]" + "─" * 72 + "[/]", + "", + ] for line in warning_lines: console.print(line) diff --git a/code_puppy/tools/agent_tools.py b/code_puppy/tools/agent_tools.py index 56879cc8f..305147cc5 100644 --- a/code_puppy/tools/agent_tools.py +++ b/code_puppy/tools/agent_tools.py @@ -25,21 +25,27 @@ get_value, ) from code_puppy.messaging import ( + MessageLevel, SubAgentInvocationMessage, SubAgentResponseMessage, + TextMessage, emit_error, - emit_info, + emit_info as _emit_info, emit_success, get_message_bus, get_session_context, set_session_context, ) +from code_puppy.messaging.messages import AgentListMessage from code_puppy.tools.common import generate_group_id from code_puppy.tools.subagent_context import subagent_context # Set to track active subagent invocation tasks _active_subagent_tasks: Set[asyncio.Task] = set() +# Preserve the historical module-level symbol for tests and call sites that patch it. +emit_info = _emit_info + # Atomic counter for DBOS workflow IDs - ensures uniqueness even in rapid back-to-back calls # itertools.count() is thread-safe for next() calls _dbos_workflow_counter = itertools.count() @@ -240,15 +246,6 @@ def register_list_agents(agent): @agent.tool def list_agents(context: RunContext) -> ListAgentsOutput: """List all available sub-agents that can be invoked.""" - # Generate a group ID for this tool execution - group_id = generate_group_id("list_agents") - - from rich.text import Text - - from code_puppy.config import get_banner_color - - list_agents_color = get_banner_color("list_agents") - try: from code_puppy.agents import get_agent_descriptions, get_available_agents @@ -266,21 +263,15 @@ def list_agents(context: RunContext) -> ListAgentsOutput: for name, display_name in agents_dict.items() ] - # Quiet output - banner and count on same line - agent_count = len(agents) - emit_info( - Text.from_markup( - f"[bold white on {list_agents_color}] LIST AGENTS [/bold white on {list_agents_color}] " - f"[dim]Found {agent_count} agent(s).[/dim]" - ), - message_group=group_id, - ) + get_message_bus().emit(AgentListMessage(agent_count=len(agents))) return ListAgentsOutput(agents=agents) except Exception as e: error_msg = f"Error listing agents: {str(e)}" - emit_error(error_msg, message_group=group_id) + get_message_bus().emit( + TextMessage(level=MessageLevel.ERROR, text=error_msg) + ) return ListAgentsOutput(agents=[], error=error_msg) return list_agents @@ -299,6 +290,13 @@ async def invoke_agent( ) -> AgentInvokeOutput: """Invoke a specific sub-agent with a given prompt. + Args: + agent_name: The name of the agent to invoke + prompt: The prompt to send to the agent + session_id: Optional session ID for maintaining conversation memory across invocations. + Must be kebab-case. Hash suffix auto-appended for new sessions. 
+ To continue a session, use the full session_id from the previous response. + Returns: AgentInvokeOutput: Contains response, agent_name, session_id, and error fields. """ diff --git a/code_puppy/tools/command_runner.py b/code_puppy/tools/command_runner.py index aa773acea..23b0838aa 100644 --- a/code_puppy/tools/command_runner.py +++ b/code_puppy/tools/command_runner.py @@ -285,6 +285,16 @@ def set_awaiting_user_input(awaiting=True): pass # Spinner functionality not available +def _normalize_shell_cwd(cwd: str | None) -> str | None: + """Normalize empty shell cwd values to None.""" + if cwd is None: + return None + normalized = cwd.strip() + if not normalized: + return None + return cwd + + class ShellCommandOutput(BaseModel): success: bool command: str | None @@ -338,6 +348,12 @@ def _listen_for_ctrl_x_windows( # Note: msvcrt.getwch() returns unicode string on Windows key = msvcrt.getwch() + if key in {"\x00", "\xe0"}: + # Discard the follow-up code for special keys. + if msvcrt.kbhit(): + msvcrt.getwch() + continue + # Check for Ctrl+X (\x18) or other interrupt keys # Some terminals might not send \x18, so also check for 'x' with modifier if key == "\x18": # Standard Ctrl+X @@ -347,6 +363,7 @@ def _listen_for_ctrl_x_windows( emit_warning( "Ctrl+X handler raised unexpectedly; Ctrl+C still works." ) + continue # Note: In some Windows terminals, Ctrl+X might not be captured # Users can use Ctrl+C as alternative, which is handled by signal handler except (OSError, ValueError): @@ -399,8 +416,12 @@ def _listen_for_ctrl_x_posix( emit_warning( "Ctrl+X handler raised unexpectedly; Ctrl+C still works." ) + continue finally: - termios.tcsetattr(fd, termios.TCSADRAIN, original_attrs) + try: + termios.tcsetattr(fd, termios.TCSADRAIN, original_attrs) + except Exception: + pass def _spawn_ctrl_x_key_listener( @@ -934,10 +955,12 @@ def nuclear_kill(proc): async def run_shell_command( context: RunContext, command: str, - cwd: str = None, + cwd: str | None = None, timeout: int = 60, background: bool = False, ) -> ShellCommandOutput: + cwd = _normalize_shell_cwd(cwd) + # Generate unique group_id for this command execution group_id = generate_group_id("shell_command", command) @@ -1163,6 +1186,8 @@ async def _execute_shell_command( Returns: ShellCommandOutput with execution results """ + cwd = _normalize_shell_cwd(cwd) + # Always emit the ShellStartMessage banner (even for sub-agents) bus = get_message_bus() bus.emit( @@ -1178,14 +1203,44 @@ async def _execute_shell_command( pause_all_spinners() - # Acquire shared keyboard context - Ctrl-X/Ctrl-C will kill ALL running commands - # This is reference-counted: listener starts on first command, stops on last - _acquire_keyboard_context() + interactive_runtime = None + release_keyboard_context = False + + try: + from code_puppy.command_line.interactive_runtime import ( + get_active_interactive_runtime, + ) + except ImportError: + + def get_active_interactive_runtime(): # type: ignore[no-redef] + return None + + interactive_runtime = get_active_interactive_runtime() + if interactive_runtime is None: + # Acquire shared keyboard context - Ctrl-X/Ctrl-C will kill ALL running commands + # This is reference-counted: listener starts on first command, stops on last + _acquire_keyboard_context() + release_keyboard_context = True + try: + if interactive_runtime is not None: + try: + interactive_runtime.notify_shell_started() + except Exception: + interactive_runtime = None + _acquire_keyboard_context() + release_keyboard_context = True return await 
_run_command_inner(command, cwd, timeout, group_id, silent=silent) finally: - _release_keyboard_context() - resume_all_spinners() + try: + if interactive_runtime is not None: + interactive_runtime.notify_shell_finished() + except Exception: + pass + finally: + if release_keyboard_context: + _release_keyboard_context() + resume_all_spinners() def _run_command_sync( @@ -1319,7 +1374,7 @@ def register_agent_run_shell_command(agent): async def agent_run_shell_command( context: RunContext, command: str = "", - cwd: str = None, + cwd: str | None = None, timeout: int = 60, background: bool = False, ) -> ShellCommandOutput: diff --git a/code_puppy/tools/file_modifications.py b/code_puppy/tools/file_modifications.py index f3fa8643d..e0ca06eb5 100644 --- a/code_puppy/tools/file_modifications.py +++ b/code_puppy/tools/file_modifications.py @@ -829,10 +829,28 @@ def replace_in_file( Replacements are applied sequentially. Prefer this over full file rewrites. """ group_id = generate_group_id("replace_in_file", file_path) - # replacements arrive as plain dicts — pass them straight through - replacements_dict = [ - {"old_str": r["old_str"], "new_str": r["new_str"]} for r in replacements - ] + invalid_payload_result = { + "success": False, + "path": os.path.abspath(file_path) if file_path else file_path, + "message": ( + "Invalid replacements payload: each replacement must include " + "string 'old_str' and 'new_str' fields." + ), + "changed": False, + } + try: + replacements_dict = [ + {"old_str": r["old_str"], "new_str": r["new_str"]} + for r in replacements + ] + except (KeyError, TypeError): + return invalid_payload_result + if any( + not isinstance(r["old_str"], str) or not isinstance(r["new_str"], str) + for r in replacements_dict + ): + return invalid_payload_result + result = _replace_in_file_helper( context, file_path, replacements_dict, message_group=group_id ) @@ -844,7 +862,7 @@ def replace_in_file( file_path=file_path, replacements=[ Replacement(old_str=r["old_str"], new_str=r["new_str"]) - for r in replacements + for r in replacements_dict ], ) enhanced_results = on_edit_file(context, result, payload) diff --git a/docs/INTERACTIVE_REGRESSION_CHECKLIST.md b/docs/INTERACTIVE_REGRESSION_CHECKLIST.md new file mode 100644 index 000000000..3c26fcfae --- /dev/null +++ b/docs/INTERACTIVE_REGRESSION_CHECKLIST.md @@ -0,0 +1,11 @@ +# Interactive Regression Checklist + +- Prompt-surface runs show `AGENT REASONING` above the textbox when `agent_share_your_reasoning` is used. +- Prompt-surface runs do not show `Calling agent_share_your_reasoning... N token(s)` above the textbox or in the prompt-local ephemeral status strip. +- Prompt-surface runs show ordinary mutable tool progress in the prompt-local ephemeral status strip without moving the prompt box or adding transcript spam; structured tool outputs like `DIRECTORY LISTING` still render normally. +- Prompt-surface tool progress must not leak raw ANSI or flash the prompt surface on each delta. +- Prompt-surface runs may show live response text only in the prompt-local ephemeral preview, and the final `AGENT RESPONSE` still renders once. +- Prompt-surface shell carriage-return progress updates in place in the prompt-local ephemeral status strip and clears on completion without transcript spam. +- Parallel sub-agents must not overwrite or clear the foreground prompt-local ephemeral status/preview. +- Windows text clipboard fallback must work through `pwsh` first and then Windows PowerShell without changing paste semantics. 
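Reviewer note: `_execute_shell_command` only duck-types the object returned by `get_active_interactive_runtime()`. A sketch of the implied contract, expressed as a hypothetical `typing.Protocol`; the class name is mine, and only the two method names come from the hunk above.

```python
from typing import Protocol


class ShellAwareRuntime(Protocol):
    """Hypothetical shape of the active interactive runtime used above."""

    def notify_shell_started(self) -> None:
        """Called before a shell command runs; if this raises, the caller
        falls back to acquiring the shared keyboard context."""

    def notify_shell_finished(self) -> None:
        """Called after the command completes; exceptions are swallowed."""
```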
diff --git a/code_puppy/tools/file_modifications.py b/code_puppy/tools/file_modifications.py
index f3fa8643d..e0ca06eb5 100644
--- a/code_puppy/tools/file_modifications.py
+++ b/code_puppy/tools/file_modifications.py
@@ -829,10 +829,28 @@ def replace_in_file(
         Replacements are applied sequentially. Prefer this over full file rewrites.
     """
     group_id = generate_group_id("replace_in_file", file_path)
-    # replacements arrive as plain dicts — pass them straight through
-    replacements_dict = [
-        {"old_str": r["old_str"], "new_str": r["new_str"]} for r in replacements
-    ]
+    invalid_payload_result = {
+        "success": False,
+        "path": os.path.abspath(file_path) if file_path else file_path,
+        "message": (
+            "Invalid replacements payload: each replacement must include "
+            "string 'old_str' and 'new_str' fields."
+        ),
+        "changed": False,
+    }
+    try:
+        replacements_dict = [
+            {"old_str": r["old_str"], "new_str": r["new_str"]}
+            for r in replacements
+        ]
+    except (KeyError, TypeError):
+        return invalid_payload_result
+    if any(
+        not isinstance(r["old_str"], str) or not isinstance(r["new_str"], str)
+        for r in replacements_dict
+    ):
+        return invalid_payload_result
+
     result = _replace_in_file_helper(
         context, file_path, replacements_dict, message_group=group_id
     )
@@ -844,7 +862,7 @@
             file_path=file_path,
             replacements=[
                 Replacement(old_str=r["old_str"], new_str=r["new_str"])
-                for r in replacements
+                for r in replacements_dict
             ],
         )
         enhanced_results = on_edit_file(context, result, payload)
diff --git a/docs/INTERACTIVE_REGRESSION_CHECKLIST.md b/docs/INTERACTIVE_REGRESSION_CHECKLIST.md
new file mode 100644
index 000000000..3c26fcfae
--- /dev/null
+++ b/docs/INTERACTIVE_REGRESSION_CHECKLIST.md
@@ -0,0 +1,11 @@
+# Interactive Regression Checklist
+
+- Prompt-surface runs show `AGENT REASONING` above the textbox when `agent_share_your_reasoning` is used.
+- Prompt-surface runs do not show `Calling agent_share_your_reasoning... N token(s)` above the textbox or in the prompt-local ephemeral status strip.
+- Prompt-surface runs show ordinary mutable tool progress in the prompt-local ephemeral status strip without moving the prompt box or adding transcript spam; structured tool outputs like `DIRECTORY LISTING` still render normally.
+- Prompt-surface tool progress must not leak raw ANSI or flash the prompt surface on each delta.
+- Prompt-surface runs may show live response text only in the prompt-local ephemeral preview, and the final `AGENT RESPONSE` still renders once.
+- Prompt-surface shell carriage-return progress must update in place in the prompt-local ephemeral status strip and clear on completion without transcript spam.
+- Parallel sub-agents must not overwrite or clear the foreground prompt-local ephemeral status/preview.
+- Windows text clipboard fallback must work through `pwsh` first and then Windows PowerShell without changing paste semantics.
+- Unknown, CI, or non-interactive terminals must degrade away from risky live CR/ANSI redraw behavior instead of leaking raw control sequences.
diff --git a/tests/agents/test_event_stream_handler.py b/tests/agents/test_event_stream_handler.py
index 2b9fa4222..b60c316f7 100644
--- a/tests/agents/test_event_stream_handler.py
+++ b/tests/agents/test_event_stream_handler.py
@@ -10,7 +10,7 @@
 """
 
 from io import StringIO
-from unittest.mock import MagicMock, patch
+from unittest.mock import AsyncMock, MagicMock, patch
 
 import pytest
 from pydantic_ai import PartDeltaEvent, PartEndEvent, PartStartEvent, RunContext
@@ -522,6 +522,299 @@
         # Should have printed something with token(s)
         assert any("token(s)" in str(call) for call in call_args_list)
 
+    @pytest.mark.asyncio
+    async def test_tool_call_prompt_surface_mode_hides_tool_progress(
+        self, mock_ctx
+    ):
+        """Prompt-surface mode should route tool progress into transient prompt state."""
+        tool_part = ToolCallPart(tool_call_id="tool_1", tool_name="test_tool", args={})
+        start_event = PartStartEvent(index=0, part=tool_part)
+        delta_event = PartDeltaEvent(
+            index=0,
+            delta=ToolCallPartDelta(tool_name_delta="test_tool", args_delta="{}"),
+        )
+        end_event = PartEndEvent(index=0, part=tool_part, next_part_kind=None)
+
+        async def event_stream():
+            yield start_event
+            yield delta_event
+            yield end_event
+
+        console = MagicMock(spec=Console)
+        set_streaming_console(console)
+        safe_console = MagicMock(spec=Console)
+        runtime = MagicMock()
+        runtime.has_prompt_surface.return_value = True
+        runtime.set_prompt_ephemeral_status = MagicMock()
+        runtime.clear_prompt_ephemeral_status = MagicMock()
+
+        async def _run_above_prompt(func):
+            func()
+            return True
+
+        runtime.run_above_prompt_async = AsyncMock(side_effect=_run_above_prompt)
+
+        with patch(
+            "code_puppy.agents.event_stream_handler._get_active_prompt_runtime",
+            return_value=runtime,
+        ):
+            with patch(
+                "code_puppy.agents.event_stream_handler._build_prompt_safe_console",
+                return_value=safe_console,
+            ):
+                with patch("code_puppy.agents.event_stream_handler.pause_all_spinners"):
+                    with patch(
+                        "code_puppy.agents.event_stream_handler.resume_all_spinners"
+                    ):
+                        await event_stream_handler(mock_ctx, event_stream())
+
+        console.print.assert_not_called()
+        safe_console.print.assert_not_called()
+        runtime.run_above_prompt_async.assert_not_awaited()
+        runtime.set_prompt_ephemeral_status.assert_called_once_with(
+            "šŸ”§ Calling test_tool... 1 token(s)"
+        )
+        runtime.clear_prompt_ephemeral_status.assert_called_once()
+
+    @pytest.mark.asyncio
+    async def test_reasoning_tool_prompt_surface_mode_hides_tool_progress(
+        self, mock_ctx
+    ):
+        """Prompt-surface mode should suppress reasoning-tool status spam."""
+        tool_part = ToolCallPart(
+            tool_call_id="tool_1", tool_name="agent_share_your_reasoning", args={}
+        )
+        start_event = PartStartEvent(index=0, part=tool_part)
+        delta_event = PartDeltaEvent(
+            index=0,
+            delta=ToolCallPartDelta(
+                tool_name_delta="agent_share_your_reasoning",
+                args_delta='{"reasoning":"thinking"}',
+            ),
+        )
+        end_event = PartEndEvent(index=0, part=tool_part, next_part_kind=None)
+
+        async def event_stream():
+            yield start_event
+            yield delta_event
+            yield end_event
+
+        console = MagicMock(spec=Console)
+        set_streaming_console(console)
+        safe_console = MagicMock(spec=Console)
+        runtime = MagicMock()
+        runtime.has_prompt_surface.return_value = True
+        runtime.set_prompt_ephemeral_status = MagicMock()
+        runtime.clear_prompt_ephemeral_status = MagicMock()
+        runtime.run_above_prompt_async = AsyncMock(return_value=True)
+
+        with patch(
+            "code_puppy.agents.event_stream_handler._get_active_prompt_runtime",
+            return_value=runtime,
+        ):
+            with patch(
+                "code_puppy.agents.event_stream_handler._build_prompt_safe_console",
+                return_value=safe_console,
+            ):
+                with patch("code_puppy.agents.event_stream_handler.pause_all_spinners"):
+                    with patch(
+                        "code_puppy.agents.event_stream_handler.resume_all_spinners"
+                    ):
+                        await event_stream_handler(mock_ctx, event_stream())
+
+        console.print.assert_not_called()
+        safe_console.print.assert_not_called()
+        runtime.run_above_prompt_async.assert_not_awaited()
+        runtime.set_prompt_ephemeral_status.assert_not_called()
+        runtime.clear_prompt_ephemeral_status.assert_called_once()
+
+    def test_merge_tool_name_ignores_repeated_prefix_delta(self):
+        from code_puppy.agents.event_stream_handler import _merge_tool_name
+
+        assert (
+            _merge_tool_name("agent_share_your_reasoning", "agent_share")
+            == "agent_share_your_reasoning"
+        )
+
+    @pytest.mark.asyncio
+    async def test_reasoning_tool_partial_prefix_stays_suppressed_on_prompt_surface(
+        self, mock_ctx
+    ):
+        tool_part = ToolCallPart(tool_call_id="tool_1", tool_name="", args={})
+        start_event = PartStartEvent(index=0, part=tool_part)
+        delta_event = PartDeltaEvent(
+            index=0,
+            delta=ToolCallPartDelta(tool_name_delta="agent_share", args_delta="{}"),
+        )
+        end_event = PartEndEvent(index=0, part=tool_part, next_part_kind=None)
+
+        async def event_stream():
+            yield start_event
+            yield delta_event
+            yield end_event
+
+        console = MagicMock(spec=Console)
+        set_streaming_console(console)
+        safe_console = MagicMock(spec=Console)
+        runtime = MagicMock()
+        runtime.has_prompt_surface.return_value = True
+        runtime.set_prompt_ephemeral_status = MagicMock()
+        runtime.clear_prompt_ephemeral_status = MagicMock()
+        runtime.run_above_prompt_async = AsyncMock(return_value=True)
+
+        with patch(
+            "code_puppy.agents.event_stream_handler._get_active_prompt_runtime",
+            return_value=runtime,
+        ):
+            with patch(
+                "code_puppy.agents.event_stream_handler._build_prompt_safe_console",
+                return_value=safe_console,
+            ):
+                with patch("code_puppy.agents.event_stream_handler.pause_all_spinners"):
+                    with patch(
+                        "code_puppy.agents.event_stream_handler.resume_all_spinners"
+                    ):
+                        await event_stream_handler(mock_ctx, event_stream())
+
+        runtime.set_prompt_ephemeral_status.assert_not_called()
+        runtime.clear_prompt_ephemeral_status.assert_called_once()
+
+    @pytest.mark.asyncio
+    async def test_thinking_prompt_surface_mode_renders_above_prompt(self, mock_ctx):
+        """Prompt-surface mode should show thinking output above the prompt."""
+        thinking_part = ThinkingPart(content="")
+        start_event = PartStartEvent(index=0, part=thinking_part)
+        delta_event = PartDeltaEvent(
+            index=0, delta=ThinkingPartDelta(content_delta="Think...")
+        )
+        end_event = PartEndEvent(index=0, part=thinking_part, next_part_kind=None)
+
+        async def event_stream():
+            yield start_event
+            yield delta_event
+            yield end_event
+
+        console = MagicMock(spec=Console)
+        set_streaming_console(console)
+        safe_console = MagicMock(spec=Console)
+        runtime = MagicMock()
+        runtime.has_prompt_surface.return_value = True
+
+        async def _run_above_prompt(func):
+            func()
+            return True
+
+        runtime.run_above_prompt_async = AsyncMock(side_effect=_run_above_prompt)
+
+        with patch(
+            "code_puppy.agents.event_stream_handler._get_active_prompt_runtime",
+            return_value=runtime,
+        ):
+            with patch(
+                "code_puppy.agents.event_stream_handler._build_prompt_safe_console",
+                return_value=safe_console,
+            ):
+                with patch("code_puppy.agents.event_stream_handler.pause_all_spinners"):
+                    with patch(
+                        "code_puppy.agents.event_stream_handler.resume_all_spinners"
+                    ):
+                        with patch(
+                            "code_puppy.agents.event_stream_handler.get_banner_color",
+                            return_value="blue",
+                        ):
+                            await event_stream_handler(mock_ctx, event_stream())
+
+        call_args_list = [str(call) for call in safe_console.print.call_args_list]
+        assert any("THINKING" in call for call in call_args_list)
+        assert any("Think..." in call for call in call_args_list)
+        console.print.assert_not_called()
+        assert safe_console.print.called
+        assert runtime.run_above_prompt_async.await_count >= 2
+
+    @pytest.mark.asyncio
+    async def test_text_banner_prompt_surface_mode_skips_clear_line(self, mock_ctx):
+        """Prompt-surface mode should not emit response banners from the stream handler."""
+        text_part = TextPart(content="hello")
+        start_event = PartStartEvent(index=0, part=text_part)
+        end_event = PartEndEvent(index=0, part=text_part, next_part_kind=None)
+
+        async def event_stream():
+            yield start_event
+            yield end_event
+
+        console = MagicMock(spec=Console, width=80)
+        console.file = StringIO()
+        set_streaming_console(console)
+
+        runtime = MagicMock()
+        runtime.has_prompt_surface.return_value = True
+        runtime.run_above_prompt_async = AsyncMock(return_value=True)
+
+        with patch(
+            "code_puppy.agents.event_stream_handler._get_active_prompt_runtime",
+            return_value=runtime,
+        ):
+            with patch("code_puppy.agents.event_stream_handler.pause_all_spinners"):
+                with patch(
+                    "code_puppy.agents.event_stream_handler.resume_all_spinners"
+                ):
+                    with patch(
+                        "code_puppy.agents.event_stream_handler.get_banner_color",
+                        return_value="blue",
+                    ):
+                        with patch("termflow.Parser") as mock_parser_cls:
+                            mock_parser = MagicMock()
+                            mock_parser.parse_line.return_value = []
+                            mock_parser.finalize.return_value = []
+                            mock_parser_cls.return_value = mock_parser
+
+                            with patch("termflow.Renderer"):
+                                await event_stream_handler(mock_ctx, event_stream())
+
+        console.print.assert_not_called()
+        runtime.run_above_prompt_async.assert_not_awaited()
+
+    @pytest.mark.asyncio
+    async def test_prompt_surface_mode_streams_plain_text_response(self, mock_ctx):
+        """Prompt-surface mode should route live text into transient preview state."""
+        text_part = TextPart(content="")
+        start_event = PartStartEvent(index=0, part=text_part)
+        delta_event = PartDeltaEvent(
+            index=0, delta=TextPartDelta(content_delta="hello")
+        )
+        end_event = PartEndEvent(index=0, part=text_part, next_part_kind=None)
+
+        async def event_stream():
+            yield start_event
+            yield delta_event
+            yield end_event
+
+        console = MagicMock(spec=Console, width=80)
+        console.file = StringIO()
+        set_streaming_console(console)
+
+        runtime = MagicMock()
+        runtime.has_prompt_surface.return_value = True
+        runtime.run_above_prompt_async = AsyncMock(return_value=True)
+        runtime.set_prompt_ephemeral_preview = MagicMock()
+
+        with patch(
+            "code_puppy.agents.event_stream_handler._get_active_prompt_runtime",
+            return_value=runtime,
+        ):
+            with patch("code_puppy.agents.event_stream_handler.pause_all_spinners"):
+                with patch(
+                    "code_puppy.agents.event_stream_handler.resume_all_spinners"
+                ):
+                    with patch("termflow.Parser") as mock_parser_cls:
+                        with patch("termflow.Renderer"):
+                            await event_stream_handler(mock_ctx, event_stream())
+
+        console.print.assert_not_called()
+        mock_parser_cls.assert_not_called()
+        runtime.run_above_prompt_async.assert_not_awaited()
+        runtime.set_prompt_ephemeral_preview.assert_called_once_with("hello")
+
     @pytest.mark.asyncio
     async def test_thinking_part_without_initial_content_defers_banner(self, mock_ctx):
         """Test that thinking banner is deferred if no initial content."""
diff --git a/tests/command_line/test_add_model_menu_coverage.py b/tests/command_line/test_add_model_menu_coverage.py
index f62e12b1a..2682cacd5 100644
--- a/tests/command_line/test_add_model_menu_coverage.py
+++ b/tests/command_line/test_add_model_menu_coverage.py
@@ -840,6 +840,7 @@ def test_run_pending_credentials_success(
         menu = _make_menu_with_providers([p])
         mock_app = MagicMock()
         mock_app_cls.return_value = mock_app
+        mock_input.return_value = "test-credential"
 
         def run_side_effect(**kwargs):
             menu.result = "pending_credentials"
diff --git a/tests/command_line/test_config_commands_full_coverage.py b/tests/command_line/test_config_commands_full_coverage.py
index 9e0b15603..912f70ad7 100644
--- a/tests/command_line/test_config_commands_full_coverage.py
+++ b/tests/command_line/test_config_commands_full_coverage.py
@@ -42,6 +42,7 @@ def _show_patches(self, effective_temp=0.7, global_temp=0.7, yolo=True, dbos=Fal
             ),
             patch("code_puppy.config.get_default_agent", return_value="code-puppy"),
             patch("code_puppy.config.get_use_dbos", return_value=dbos),
+            patch("code_puppy.config.get_queue_limit", return_value=25),
            patch("code_puppy.config.get_resume_message_count", return_value=50),
             patch(
                 "code_puppy.config.get_openai_reasoning_effort", return_value="medium"
@@ -76,6 +77,7 @@ def test_show_command(self):
             patches[15],
             patches[16],
             patches[17],
+            patches[18],
         ):
             assert handle_show_command("/show") is True
@@ -102,6 +104,7 @@ def test_show_effective_temp_none(self):
             patches[15],
             patches[16],
             patches[17],
+            patches[18],
         ):
             assert handle_show_command("/show") is True
@@ -259,6 +262,26 @@ def test_cancel_agent_key_invalid(self):
             assert handle_set_command("/set cancel_agent_key bad_key") is True
             err.assert_called_once()
 
+    def test_queue_limit_valid(self):
+        from code_puppy.command_line.config_commands import handle_set_command
+
+        mock_agent = MagicMock()
+        with (
+            patch("code_puppy.config.set_config_value") as set_value,
+            patch("code_puppy.messaging.emit_success"),
+            patch("code_puppy.messaging.emit_info"),
+            patch("code_puppy.agents.get_current_agent", return_value=mock_agent),
+        ):
+            assert handle_set_command("/set queue_limit 7") is True
+            set_value.assert_called_once_with("queue_limit", "7")
+
+    def test_queue_limit_invalid(self):
+        from code_puppy.command_line.config_commands import handle_set_command
+
+        with patch("code_puppy.messaging.emit_error") as err:
+            assert handle_set_command("/set queue_limit 0") is True
+            err.assert_called_once()
+
     def test_agent_reload_failure(self):
         from code_puppy.command_line.config_commands import handle_set_command
diff --git a/tests/command_line/test_core_commands_full_coverage.py b/tests/command_line/test_core_commands_full_coverage.py
index 41e115a69..3decc4830 100644
--- a/tests/command_line/test_core_commands_full_coverage.py
+++ b/tests/command_line/test_core_commands_full_coverage.py
@@ -168,6 +168,12 @@ def test_failure(self):
 class TestHandleTutorialCommand:
     def test_chatgpt(self):
         from code_puppy.command_line.core_commands import handle_tutorial_command
+        from code_puppy.command_line.interactive_command import (
+            BackgroundInteractiveCommand,
+        )
+        from code_puppy.plugins.chatgpt_oauth.register_callbacks import (
+            start_chatgpt_oauth_setup,
+        )
 
         with (
             patch("code_puppy.command_line.onboarding_wizard.reset_onboarding"),
@@ -181,10 +187,19 @@
             pool.return_value.__enter__ = MagicMock(return_value=pool.return_value)
             pool.return_value.__exit__ = MagicMock(return_value=False)
             pool.return_value.submit.return_value = mock_future
-            assert handle_tutorial_command("/tutorial") is True
+            result = handle_tutorial_command("/tutorial")
+
+            assert isinstance(result, BackgroundInteractiveCommand)
+            assert result.run is start_chatgpt_oauth_setup
 
     def test_claude(self):
         from code_puppy.command_line.core_commands import handle_tutorial_command
+        from code_puppy.command_line.interactive_command import (
+            BackgroundInteractiveCommand,
+        )
+        from code_puppy.plugins.claude_code_oauth.register_callbacks import (
+            start_claude_code_oauth_setup,
+        )
 
         with (
             patch("code_puppy.command_line.onboarding_wizard.reset_onboarding"),
@@ -200,7 +215,10 @@
             pool.return_value.__enter__ = MagicMock(return_value=pool.return_value)
             pool.return_value.__exit__ = MagicMock(return_value=False)
             pool.return_value.submit.return_value = mock_future
-            assert handle_tutorial_command("/tutorial") is True
+            result = handle_tutorial_command("/tutorial")
+
+            assert isinstance(result, BackgroundInteractiveCommand)
+            assert result.run is start_claude_code_oauth_setup
 
     def test_completed(self):
         from code_puppy.command_line.core_commands import handle_tutorial_command
diff --git a/tests/command_line/test_prompt_toolkit_coverage.py b/tests/command_line/test_prompt_toolkit_coverage.py
index 3b87313a8..72cfbb739 100644
--- a/tests/command_line/test_prompt_toolkit_coverage.py
+++ b/tests/command_line/test_prompt_toolkit_coverage.py
@@ -865,10 +865,47 @@ def test_ctrl_v_no_image_windows(self, captured_bindings):
                 return_value=False,
             ),
             patch("platform.system", return_value="Windows"),
-            patch("subprocess.run", return_value=mock_result),
+            patch("subprocess.run", return_value=mock_result) as mock_run,
         ):
             handler(event)
             event.app.current_buffer.insert_text.assert_called()
+            assert mock_run.call_args.args[0] == [
+                "pwsh",
+                "-NoProfile",
+                "-Command",
+                "Get-Clipboard -Raw",
+            ]
+
+    def test_ctrl_v_no_image_windows_falls_back_to_powershell(
+        self, captured_bindings
+    ):
+        handler = self._find_handler(captured_bindings, "c-v")
+        event = MagicMock()
+
+        def run_side_effect(cmd, **kwargs):
+            if cmd[0] == "pwsh":
+                raise FileNotFoundError()
+            result = MagicMock()
+            result.returncode = 0
+            result.stdout = "windows text\r\n"
+            return result
+
+        with (
+            patch(
+                "code_puppy.command_line.prompt_toolkit_completion.has_image_in_clipboard",
+                return_value=False,
+            ),
+            patch("platform.system", return_value="Windows"),
+            patch("subprocess.run", side_effect=run_side_effect) as mock_run,
+        ):
+            handler(event)
+            event.app.current_buffer.insert_text.assert_called_with("windows text")
+            assert mock_run.call_args.args[0] == [
+                "powershell",
+                "-NoProfile",
+                "-Command",
+                "Get-Clipboard -Raw",
+            ]
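Reviewer note: the two clipboard tests above pin down a pwsh-first, Windows-PowerShell-second fallback. A sketch of the behavior under test; the helper name is hypothetical, and the real key binding lives in `code_puppy/command_line/prompt_toolkit_completion.py`.

```python
import subprocess


def read_windows_clipboard_text() -> str | None:
    """Try PowerShell 7+ ("pwsh") first, then classic Windows PowerShell."""
    for shell in ("pwsh", "powershell"):
        try:
            result = subprocess.run(
                [shell, "-NoProfile", "-Command", "Get-Clipboard -Raw"],
                capture_output=True,
                text=True,
                timeout=5,
            )
        except FileNotFoundError:
            continue  # this shell is not installed; try the next one
        if result.returncode == 0:
            return result.stdout.rstrip("\r\n")
    return None
```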
 
     def test_ctrl_v_no_image_linux(self, captured_bindings):
         handler = self._find_handler(captured_bindings, "c-v")
diff --git a/tests/command_line/test_tutorial.py b/tests/command_line/test_tutorial.py
index 93cab4f5d..9171f01ae 100644
--- a/tests/command_line/test_tutorial.py
+++ b/tests/command_line/test_tutorial.py
@@ -7,8 +7,6 @@
 
 import pytest
 
-from code_puppy.command_line.core_commands import handle_tutorial_command
-
 
 def _mock_tutorial_result(mock_executor_class: Any, result: str) -> None:
     mock_future = MagicMock()
@@ -22,6 +20,12 @@
 
 def test_tutorial_chatgpt_flow() -> None:
     """Test tutorial triggers ChatGPT OAuth and model switch."""
+    from code_puppy.command_line.interactive_command import BackgroundInteractiveCommand
+    from code_puppy.command_line.core_commands import handle_tutorial_command
+    from code_puppy.plugins.chatgpt_oauth.register_callbacks import (
+        start_chatgpt_oauth_setup,
+    )
+
     with patch("concurrent.futures.ThreadPoolExecutor") as mock_executor_class:
         _mock_tutorial_result(mock_executor_class, "chatgpt")
@@ -37,14 +41,21 @@
             with patch("code_puppy.command_line.core_commands.emit_info"):
                 result = handle_tutorial_command("/tutorial")
 
-                assert result is True
+                assert isinstance(result, BackgroundInteractiveCommand)
+                assert result.run is start_chatgpt_oauth_setup
                 mock_reset.assert_called_once()
-                mock_oauth.assert_called_once()
-                mock_set_model.assert_called_once_with("chatgpt-gpt-5.4")
+                mock_oauth.assert_not_called()
+                mock_set_model.assert_not_called()
 
 
 def test_tutorial_claude_flow() -> None:
     """Test tutorial triggers Claude Code OAuth and model switch."""
+    from code_puppy.command_line.interactive_command import BackgroundInteractiveCommand
+    from code_puppy.command_line.core_commands import handle_tutorial_command
+    from code_puppy.plugins.claude_code_oauth.register_callbacks import (
+        start_claude_code_oauth_setup,
+    )
+
     with patch("concurrent.futures.ThreadPoolExecutor") as mock_executor_class:
         _mock_tutorial_result(mock_executor_class, "claude")
@@ -60,10 +71,11 @@
             with patch("code_puppy.command_line.core_commands.emit_info"):
                 result = handle_tutorial_command("/tutorial")
 
-                assert result is True
+                assert isinstance(result, BackgroundInteractiveCommand)
+                assert result.run is start_claude_code_oauth_setup
                 mock_reset.assert_called_once()
-                mock_auth.assert_called_once()
-                mock_set_model.assert_called_once_with("claude-code-claude-opus-4-6")
+                mock_auth.assert_not_called()
+                mock_set_model.assert_not_called()
 
 
 @pytest.mark.parametrize(
@@ -75,6 +87,8 @@
 )
 def test_tutorial_terminal_paths(wizard_result: str, expected_message: str) -> None:
     """Test tutorial completion and skip paths."""
+    from code_puppy.command_line.core_commands import handle_tutorial_command
+
     with patch("concurrent.futures.ThreadPoolExecutor") as mock_executor_class:
         _mock_tutorial_result(mock_executor_class, wizard_result)
diff --git a/tests/messaging/spinner/test_spinner_init.py b/tests/messaging/spinner/test_spinner_init.py
index 2db72814b..0f13ce1e2 100644
--- a/tests/messaging/spinner/test_spinner_init.py
+++ b/tests/messaging/spinner/test_spinner_init.py
@@ -8,6 +8,11 @@
 
 import pytest
 
+from code_puppy.command_line.interactive_runtime import (
+    PromptRuntimeState,
+    clear_active_interactive_runtime,
+    register_active_interactive_runtime,
+)
 from code_puppy.messaging.spinner import (
     _active_spinners,
     clear_spinner_context,
@@ -234,3 +239,32 @@
         update_spinner_context("Second")
 
         assert SpinnerBase.get_context_info() == "Second"
+
+    def test_update_spinner_context_invalidates_active_prompt(self):
+        runtime = PromptRuntimeState()
+        register_active_interactive_runtime(runtime)
+        session = MagicMock()
+        session.app = MagicMock()
+        runtime.register_prompt_surface(session)
+
+        try:
+            session.app.invalidate.reset_mock()
+            update_spinner_context("Tokens: 42/100 (42.0% used)")
+            session.app.invalidate.assert_called_once()
+        finally:
+            clear_active_interactive_runtime(runtime)
+
+    def test_clear_spinner_context_invalidates_active_prompt(self):
+        runtime = PromptRuntimeState()
+        register_active_interactive_runtime(runtime)
+        session = MagicMock()
+        session.app = MagicMock()
+        runtime.register_prompt_surface(session)
+
+        try:
+            update_spinner_context("Tokens: 42/100 (42.0% used)")
+            session.app.invalidate.reset_mock()
+            clear_spinner_context()
+            session.app.invalidate.assert_called_once()
+        finally:
+            clear_active_interactive_runtime(runtime)
diff --git a/tests/messaging/test_legacy_bridge.py b/tests/messaging/test_legacy_bridge.py
new file mode 100644
index 000000000..967268eea
--- /dev/null
+++ b/tests/messaging/test_legacy_bridge.py
@@ -0,0 +1,72 @@
+from io import StringIO
+from unittest.mock import MagicMock
+
+from rich.console import Console
+from rich.table import Table
+
+from code_puppy.messaging.bus import MessageBus
+from code_puppy.messaging.legacy_bridge import LegacyQueueToBusBridge
+from code_puppy.messaging.message_queue import MessageQueue, MessageType, UIMessage
+from code_puppy.messaging.messages import LegacyQueueMessage
+from code_puppy.messaging.rich_renderer import RichConsoleRenderer
+
+
+def _console():
+    return Console(file=StringIO(), force_terminal=False, width=100)
+
+
+def test_legacy_bridge_replays_buffered_messages_into_bus():
+    queue = MessageQueue()
+    bus = MagicMock(spec=MessageBus)
+    bridge = LegacyQueueToBusBridge(queue, bus)
+
+    queue.emit_simple(MessageType.INFO, "hello")
+    queue.emit_simple(MessageType.DIVIDER, "---")
+
+    bridge.start()
+
+    emitted = [call.args[0] for call in bus.emit.call_args_list]
+    assert len(emitted) == 2
+    assert all(isinstance(message, LegacyQueueMessage) for message in emitted)
+    assert emitted[0].legacy_type == "info"
+    assert emitted[0].content == "hello"
+    assert emitted[1].legacy_type == "divider"
+
+
+def test_legacy_bridge_skips_human_input_messages():
+    queue = MessageQueue()
+    bus = MagicMock(spec=MessageBus)
+    bridge = LegacyQueueToBusBridge(queue, bus)
+
+    queue.emit(
+        UIMessage(
+            type=MessageType.HUMAN_INPUT_REQUEST,
+            content="Enter value",
+            metadata={"prompt_id": "p1"},
+        )
+    )
+
+    bridge.start()
+
+    bus.emit.assert_not_called()
+
+
+def test_rich_renderer_renders_wrapped_legacy_renderables():
+    console = _console()
+    renderer = RichConsoleRenderer(MessageBus(), console=console)
+
+    table = Table()
+    table.add_column("Col")
+    table.add_row("value")
+
+    renderer._render_sync(
+        LegacyQueueMessage(
+            legacy_type="info",
+            content=table,
+            legacy_metadata={},
+        )
+    )
+
+    output = console.file.getvalue()
+    assert "Col" in output
+    assert "value" in output
diff --git a/tests/messaging/test_rich_renderer.py b/tests/messaging/test_rich_renderer.py
index 33955641c..917a59628 100644
--- a/tests/messaging/test_rich_renderer.py
+++ b/tests/messaging/test_rich_renderer.py
@@ -10,6 +10,7 @@
 
 from code_puppy.messaging.bus import MessageBus
 from code_puppy.messaging.messages import (
+    AgentListMessage,
     AgentReasoningMessage,
     AgentResponseMessage,
     ConfirmationRequest,
@@ -374,11 +375,76 @@ def test_render_shell_line(renderer, console):
     assert "hello output" in out
 
 
+def test_render_shell_line_with_prompt_surface_uses_plain_stdout(renderer):
+    msg = ShellLineMessage(line="hello output", stream="stdout")
+    runtime = MagicMock()
+    runtime.has_prompt_surface.return_value = True
+    mock_stdout = MagicMock()
+
+    with (
+        patch.object(renderer, "_get_prompt_runtime", return_value=runtime),
+        patch("sys.stdout", mock_stdout),
+    ):
+        renderer._render_shell_line(msg)
+
+    mock_stdout.write.assert_called_once_with("hello output\n")
+    mock_stdout.flush.assert_called_once()
+
+
 def test_render_shell_line_with_cr(renderer, console):
     msg = ShellLineMessage(line="progress\r50%", stream="stdout")
     renderer._render_shell_line(msg)
 
 
+def test_render_shell_line_with_cr_without_live_updates_uses_console(renderer, console):
+    msg = ShellLineMessage(line="progress\r50%", stream="stdout")
+    with patch(
+        "code_puppy.messaging.rich_renderer.supports_live_terminal_updates",
+        return_value=False,
+    ):
+        renderer._render_shell_line(msg)
+    out = output(console)
+    assert "50%" in out
+
+
+def test_render_shell_line_with_cr_and_prompt_surface_uses_ephemeral_status(renderer):
+    msg = ShellLineMessage(line="\x1b[2Kprogress\r50%", stream="stdout")
+    runtime = MagicMock()
+    runtime.has_prompt_surface.return_value = True
+    mock_stdout = MagicMock()
+
+    with (
+        patch.object(renderer, "_get_prompt_runtime", return_value=runtime),
+        patch("sys.stdout", mock_stdout),
+    ):
+        renderer._render_shell_line(msg)
+
+    runtime.set_prompt_ephemeral_status.assert_called_once_with("50%")
+    mock_stdout.write.assert_not_called()
+    mock_stdout.flush.assert_not_called()
+
+
+def test_render_shell_line_with_cr_and_prompt_surface_skips_subagent_ephemeral_status(
+    renderer,
+):
+    msg = ShellLineMessage(
+        line="\x1b[2Kprogress\r50%", stream="stdout", session_id="subagent-1"
+    )
+    runtime = MagicMock()
+    runtime.has_prompt_surface.return_value = True
+    mock_stdout = MagicMock()
+
+    with (
+        patch.object(renderer, "_get_prompt_runtime", return_value=runtime),
+        patch("sys.stdout", mock_stdout),
+    ):
+        renderer._render_shell_line(msg)
+
+    runtime.set_prompt_ephemeral_status.assert_not_called()
+    mock_stdout.write.assert_not_called()
+    mock_stdout.flush.assert_not_called()
+
+
 def test_render_shell_output(renderer, console):
     msg = ShellOutputMessage(
         command="ls", exit_code=0, stdout="", stderr="", duration_seconds=0.5
@@ -386,6 +452,39 @@
     renderer._render_shell_output(msg)
 
 
+def test_render_shell_output_clears_ephemeral_status_when_prompt_surface_active(
+    renderer,
+):
+    msg = ShellOutputMessage(
+        command="ls", exit_code=0, stdout="", stderr="", duration_seconds=0.5
+    )
+    runtime = MagicMock()
+
+    with patch.object(renderer, "_get_prompt_runtime", return_value=runtime):
+        renderer._render_shell_output(msg)
+
+    runtime.set_prompt_ephemeral_status.assert_called_once_with(None)
+
+
+def test_render_shell_output_with_session_id_does_not_clear_foreground_ephemeral_status(
+    renderer,
+):
+    msg = ShellOutputMessage(
+        command="ls",
+        exit_code=0,
+        stdout="",
+        stderr="",
+        duration_seconds=0.5,
+        session_id="subagent-1",
+    )
+    runtime = MagicMock()
+
+    with patch.object(renderer, "_get_prompt_runtime", return_value=runtime):
+        renderer._render_shell_output(msg)
+
+    runtime.set_prompt_ephemeral_status.assert_not_called()
+
+
 # =========================================================================
 # Agent Messages
 # =========================================================================
@@ -415,6 +514,93 @@ def test_render_agent_response_plain(renderer, console):
     renderer._render_agent_response(msg)
 
 
+def test_do_render_agent_response_when_prompt_surface_active(renderer, console):
+    msg = AgentResponseMessage(content="plain text", is_markdown=False)
+
+    with patch.object(renderer, "_should_render_above_prompt", return_value=False):
+        renderer._do_render(msg)
+
+    out = output(console)
+    assert "AGENT RESPONSE" in out
+
+
+def test_do_render_agent_response_uses_prompt_runtime(renderer):
+    msg = AgentResponseMessage(content="plain text", is_markdown=False)
+    runtime = MagicMock()
+    runtime.has_prompt_surface.return_value = True
+    runtime.run_above_prompt.return_value = True
+
+    with (
+        patch.object(renderer, "_get_prompt_runtime", return_value=runtime),
+        patch.object(renderer, "_render_agent_response") as mock_render,
+    ):
+        renderer._do_render(msg)
+
+    runtime.run_above_prompt.assert_called_once()
+    runtime.clear_prompt_ephemeral_preview.assert_called_once()
+    mock_render.assert_not_called()
+
+
+def test_do_render_session_tagged_agent_response_does_not_clear_foreground_preview(
+    renderer,
+):
+    msg = AgentResponseMessage(
+        content="plain text", is_markdown=False, session_id="subagent-1"
+    )
+    runtime = MagicMock()
+    runtime.has_prompt_surface.return_value = True
+    runtime.run_above_prompt.return_value = True
+
+    with (
+        patch.object(renderer, "_get_prompt_runtime", return_value=runtime),
+        patch.object(renderer, "_render_agent_response") as mock_render,
+    ):
+        renderer._do_render(msg)
+
+    runtime.run_above_prompt.assert_called_once()
+    runtime.clear_prompt_ephemeral_preview.assert_not_called()
+    mock_render.assert_not_called()
+
+
+def test_do_render_agent_reasoning_uses_prompt_runtime(renderer):
+    msg = AgentReasoningMessage(reasoning="Because", next_steps="Do X")
+    runtime = MagicMock()
+    runtime.has_prompt_surface.return_value = True
+    runtime.run_above_prompt.return_value = True
+
+    with (
+        patch.object(renderer, "_get_prompt_runtime", return_value=runtime),
+        patch.object(renderer, "_render_agent_reasoning") as mock_render,
+    ):
+        renderer._do_render(msg)
+
+    runtime.run_above_prompt.assert_called_once()
+    mock_render.assert_not_called()
+
+
+def test_do_render_file_listing_uses_prompt_runtime(renderer):
+    msg = FileListingMessage(
+        directory="/tmp",
+        files=[],
+        recursive=False,
+        total_size=0,
+        dir_count=0,
+        file_count=0,
+    )
+    runtime = MagicMock()
+    runtime.has_prompt_surface.return_value = True
+    runtime.run_above_prompt.return_value = True
+
+    with (
+        patch.object(renderer, "_get_prompt_runtime", return_value=runtime),
+        patch.object(renderer, "_render_file_listing") as mock_render,
+    ):
+        renderer._do_render(msg)
+
+    runtime.run_above_prompt.assert_called_once()
+    mock_render.assert_not_called()
+
+
 @patch("code_puppy.messaging.rich_renderer.is_subagent", return_value=False)
 def test_render_subagent_invocation(mock_sub, renderer, console):
     msg = SubAgentInvocationMessage(
@@ -676,6 +862,36 @@ def test_render_version_check_current(renderer, console):
     assert "latest" in out
 
 
+# =========================================================================
+# Agent Lists
+# =========================================================================
+
+
+@patch("code_puppy.messaging.rich_renderer.is_subagent", return_value=False)
+def test_render_agent_list(mock_sub, renderer, console):
+    msg = AgentListMessage(agent_count=3)
+    renderer._render_agent_list(msg)
+    out = output(console)
+    assert "LIST AGENTS" in out
+    assert "Found 3 agent(s)." in out
+
+
+@patch("code_puppy.messaging.rich_renderer.is_subagent", return_value=False)
+def test_render_agent_list_before_subagent_invocation(mock_sub, renderer, console):
+    renderer._render_agent_list(AgentListMessage(agent_count=2))
+    renderer._render_subagent_invocation(
+        SubAgentInvocationMessage(
+            agent_name="python-programmer",
+            session_id="python-programmer-session-1",
+            prompt="hello",
+            is_new_session=True,
+            message_count=0,
+        )
+    )
+    out = output(console)
+    assert out.index("LIST AGENTS") < out.index("INVOKE AGENT")
+
+
 # =========================================================================
 # Skills
 # =========================================================================
@@ -858,6 +1074,12 @@ def test_do_render_skill_list_dispatch(mock_sub, renderer, console):
     renderer._do_render(msg)
 
 
+@patch("code_puppy.messaging.rich_renderer.is_subagent", return_value=False)
+def test_do_render_agent_list_dispatch(mock_sub, renderer, console):
+    msg = AgentListMessage(agent_count=4)
+    renderer._do_render(msg)
+
+
 @patch("code_puppy.messaging.rich_renderer.is_subagent", return_value=False)
 def test_do_render_skill_activate_dispatch(mock_sub, renderer, console):
     msg = SkillActivateMessage(
diff --git a/tests/plugins/conftest.py b/tests/plugins/conftest.py
index 25dfb7109..a16acabe4 100644
--- a/tests/plugins/conftest.py
+++ b/tests/plugins/conftest.py
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 import sys
+from types import ModuleType
 from unittest.mock import AsyncMock, MagicMock
 
 import pytest
@@ -13,15 +14,59 @@
 # Skip antigravity tests if pydantic/MCP conflict is detected
 def pytest_configure(config):
     """Configure pytest with compatibility workarounds."""
-    # Pre-patch sys.modules to provide a mock mcp.types during collection
-    # This prevents the ValueError in pydantic's RootModel metaclass
+    # Pre-patch sys.modules to provide a lightweight MCP package during
+    # collection. Some plugin tests only need the names to import cleanly, but
+    # later suites may import pydantic_ai.mcp, which expects package-shaped
+    # modules such as mcp.client.sse to exist.
     if "mcp" not in sys.modules:
-        mcp_mock = MagicMock()
-        mcp_mock.types = MagicMock()
-        sys.modules["mcp"] = mcp_mock
-        sys.modules["mcp.types"] = mcp_mock.types
-        sys.modules["mcp.client"] = MagicMock()
-        sys.modules["mcp.client.session"] = MagicMock()
+        mcp_pkg = ModuleType("mcp")
+        types_mod = ModuleType("mcp.types")
+        client_pkg = ModuleType("mcp.client")
+        session_mod = ModuleType("mcp.client.session")
+        sse_mod = ModuleType("mcp.client.sse")
+        stdio_mod = ModuleType("mcp.client.stdio")
+        streamable_http_mod = ModuleType("mcp.client.streamable_http")
+        shared_pkg = ModuleType("mcp.shared")
+        exceptions_mod = ModuleType("mcp.shared.exceptions")
+        context_mod = ModuleType("mcp.shared.context")
+        message_mod = ModuleType("mcp.shared.message")
+        session_shared_mod = ModuleType("mcp.shared.session")
+
+        session_mod.ClientSession = MagicMock()
+        session_mod.ElicitationFnT = MagicMock()
+        session_mod.LoggingFnT = MagicMock()
+        sse_mod.sse_client = MagicMock()
+        stdio_mod.StdioServerParameters = MagicMock()
+        stdio_mod.stdio_client = MagicMock()
+        streamable_http_mod.streamable_http_client = MagicMock()
+        context_mod.RequestContext = MagicMock()
+        message_mod.SessionMessage = MagicMock()
+        session_shared_mod.RequestResponder = MagicMock()
+
+        mcp_pkg.types = types_mod
+        mcp_pkg.client = client_pkg
+        mcp_pkg.shared = shared_pkg
+        client_pkg.session = session_mod
+        client_pkg.sse = sse_mod
+        client_pkg.stdio = stdio_mod
+        client_pkg.streamable_http = streamable_http_mod
+        shared_pkg.exceptions = exceptions_mod
+        shared_pkg.context = context_mod
+        shared_pkg.message = message_mod
+        shared_pkg.session = session_shared_mod
+
+        sys.modules["mcp"] = mcp_pkg
+        sys.modules["mcp.types"] = types_mod
+        sys.modules["mcp.client"] = client_pkg
+        sys.modules["mcp.client.session"] = session_mod
+        sys.modules["mcp.client.sse"] = sse_mod
+        sys.modules["mcp.client.stdio"] = stdio_mod
+        sys.modules["mcp.client.streamable_http"] = streamable_http_mod
+        sys.modules["mcp.shared"] = shared_pkg
+        sys.modules["mcp.shared.exceptions"] = exceptions_mod
+        sys.modules["mcp.shared.context"] = context_mod
+        sys.modules["mcp.shared.message"] = message_mod
+        sys.modules["mcp.shared.session"] = session_shared_mod
 
 
 class ClientShim:
diff --git a/tests/plugins/test_antigravity_callbacks_coverage.py b/tests/plugins/test_antigravity_callbacks_coverage.py
index e1fd80276..ece3bee9b 100644
--- a/tests/plugins/test_antigravity_callbacks_coverage.py
+++ b/tests/plugins/test_antigravity_callbacks_coverage.py
@@ -470,7 +470,7 @@ def test_success_no_email(self):
                 "code_puppy.plugins.antigravity_oauth.register_callbacks.emit_warning"
             ),
         ):
-            assert _perform_authentication(reload_agent=False) is True
+            assert _perform_authentication(reload_agent=False) is False
 
 
 class TestAntigravityHandleStatus:
@@ -656,6 +656,9 @@ def test_unknown(self):
         assert _handle_custom_command("/x", "unknown") is None
 
     def test_auth_success(self):
+        from code_puppy.command_line.interactive_command import (
+            BackgroundInteractiveCommand,
+        )
         from code_puppy.plugins.antigravity_oauth.register_callbacks import (
             _handle_custom_command,
         )
@@ -675,13 +678,18 @@
             ),
             patch(
                 "code_puppy.plugins.antigravity_oauth.register_callbacks.set_model_and_reload_agent"
-            ),
+            ) as mock_set_model,
         ):
-            assert (
-                _handle_custom_command("/antigravity-auth", "antigravity-auth") is True
-            )
+            result = _handle_custom_command("/antigravity-auth", "antigravity-auth")
+            assert isinstance(result, BackgroundInteractiveCommand)
+            cancel_event = threading.Event()
+            assert result.run(cancel_event) is True
+            mock_set_model.assert_called_once_with("antigravity-gemini-3-pro-high")
 
     def test_auth_failure(self):
+        from code_puppy.command_line.interactive_command import (
+            BackgroundInteractiveCommand,
+        )
         from code_puppy.plugins.antigravity_oauth.register_callbacks import (
             _handle_custom_command,
         )
@@ -695,13 +703,26 @@
             patch(
                 "code_puppy.plugins.antigravity_oauth.register_callbacks._perform_authentication",
                 return_value=False,
-            ),
+            ) as mock_auth,
+            patch(
+                "code_puppy.plugins.antigravity_oauth.register_callbacks.set_model_and_reload_agent"
+            ) as mock_set_model,
         ):
-            assert (
-                _handle_custom_command("/antigravity-auth", "antigravity-auth") is True
+            result = _handle_custom_command("/antigravity-auth", "antigravity-auth")
+            assert isinstance(result, BackgroundInteractiveCommand)
+            cancel_event = threading.Event()
+            assert result.run(cancel_event) is False
+            mock_auth.assert_called_once_with(
+                add_account=False,
+                reload_agent=False,
+                cancel_event=cancel_event,
             )
+            mock_set_model.assert_not_called()
 
     def test_add(self):
+        from code_puppy.command_line.interactive_command import (
+            BackgroundInteractiveCommand,
+        )
         from code_puppy.plugins.antigravity_oauth.register_callbacks import (
             _handle_custom_command,
         )
@@ -716,9 +737,18 @@
             ),
             patch(
                 "code_puppy.plugins.antigravity_oauth.register_callbacks._perform_authentication"
-            ),
+            ) as mock_auth,
         ):
-            assert _handle_custom_command("/antigravity-add", "antigravity-add") is True
+            mock_auth.return_value = True
+            result = _handle_custom_command("/antigravity-add", "antigravity-add")
+            assert isinstance(result, BackgroundInteractiveCommand)
+            cancel_event = threading.Event()
+            assert result.run(cancel_event) is True
+            mock_auth.assert_called_once_with(
+                add_account=True,
+                reload_agent=False,
+                cancel_event=cancel_event,
+            )
 
     def test_status(self):
         from code_puppy.plugins.antigravity_oauth.register_callbacks import (
diff --git a/tests/plugins/test_antigravity_register_callbacks.py b/tests/plugins/test_antigravity_register_callbacks.py
index 2630c4154..e06ffe364 100644
--- a/tests/plugins/test_antigravity_register_callbacks.py
+++ b/tests/plugins/test_antigravity_register_callbacks.py
@@ -427,6 +427,59 @@ def test_perform_authentication_add_account_flag(
         calls_args = [str(call) for call in mock_emit_success.call_args_list]
         assert any("Added account" in str(c) for c in calls_args)
 
+    @patch("code_puppy.plugins.antigravity_oauth.register_callbacks.emit_warning")
+    @patch("code_puppy.plugins.antigravity_oauth.register_callbacks.save_tokens")
+    @patch(
+        "code_puppy.plugins.antigravity_oauth.register_callbacks.add_models_to_config"
+    )
+    @patch(
+        "code_puppy.plugins.antigravity_oauth.register_callbacks.exchange_code_for_tokens"
+    )
+    @patch("code_puppy.plugins.antigravity_oauth.register_callbacks._await_callback")
+    @patch(
+        "code_puppy.plugins.antigravity_oauth.register_callbacks.prepare_oauth_context"
+    )
+    def test_perform_authentication_model_registration_failure(
+        self,
+        mock_prepare_context,
+        mock_await_callback,
+        mock_exchange_code,
+        mock_add_models,
+        mock_save_tokens,
+        mock_emit_warning,
+    ):
+        """Authentication should fail if model registration fails."""
+        from code_puppy.plugins.antigravity_oauth.oauth import TokenExchangeSuccess
+
+        mock_context = MagicMock()
+        mock_prepare_context.return_value = mock_context
+        mock_await_callback.return_value = (
+            "code_123",
+            "state_456",
+            "http://localhost:51121/oauth-callback",
+        )
+        mock_exchange_code.return_value = TokenExchangeSuccess(
+            access_token="access_token_123",
+            refresh_token="refresh_token_456",
+            expires_at=time.time() + 3600,
+            email="test@example.com",
+            project_id="project_123",
+        )
+        mock_save_tokens.return_value = True
+        mock_add_models.return_value = False
+
+        with patch(
+            "code_puppy.plugins.antigravity_oauth.register_callbacks.AccountManager"
+        ) as mock_manager_class:
+            mock_manager = MagicMock()
+            mock_manager.account_count = 0
+            mock_manager_class.load_from_disk.return_value = mock_manager
+
+            result = _perform_authentication(reload_agent=False)
+
+        assert result is False
+        mock_emit_warning.assert_called_once()
+
 
 # ============================================================================
 # CUSTOM HELP TESTS
@@ -752,13 +805,23 @@ def test_handle_custom_command_auth(
         mock_set_model,
     ):
         """Test antigravity-auth command."""
+        from code_puppy.command_line.interactive_command import (
+            BackgroundInteractiveCommand,
+        )
+
         mock_load_tokens.return_value = {}
         mock_perform_auth.return_value = True
 
         result = _handle_custom_command("custom_command", "antigravity-auth")
 
-        assert result is True
-        mock_perform_auth.assert_called_once_with(reload_agent=False)
+        assert isinstance(result, BackgroundInteractiveCommand)
+        cancel_event = threading.Event()
+        assert result.run(cancel_event) is True
+        mock_perform_auth.assert_called_once_with(
+            add_account=False,
+            reload_agent=False,
+            cancel_event=cancel_event,
+        )
         mock_set_model.assert_called_once_with("antigravity-gemini-3-pro-high")
 
     @patch(
@@ -771,6 +834,10 @@ def test_handle_custom_command_add(
         mock_perform_auth,
     ):
         """Test antigravity-add command."""
+        from code_puppy.command_line.interactive_command import (
+            BackgroundInteractiveCommand,
+        )
+
         mock_perform_auth.return_value = True
 
         with patch(
@@ -782,10 +849,14 @@
 
             result = _handle_custom_command("custom_command", "antigravity-add")
 
-            assert result is True
-            # Verify add_account=True was passed
-            call_kwargs = mock_perform_auth.call_args
-            assert call_kwargs[1].get("add_account") is True
+            assert isinstance(result, BackgroundInteractiveCommand)
+            cancel_event = threading.Event()
+            assert result.run(cancel_event) is True
+            mock_perform_auth.assert_called_once_with(
+                add_account=True,
+                reload_agent=False,
+                cancel_event=cancel_event,
+            )
 
     @patch("code_puppy.plugins.antigravity_oauth.register_callbacks._handle_status")
     def test_handle_custom_command_status(self, mock_handle_status):
diff --git a/tests/plugins/test_chatgpt_oauth_coverage.py b/tests/plugins/test_chatgpt_oauth_coverage.py
index 5fb95ef3a..3865fd217 100644
--- a/tests/plugins/test_chatgpt_oauth_coverage.py
+++ b/tests/plugins/test_chatgpt_oauth_coverage.py
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 import os
+import threading
 from unittest.mock import MagicMock, patch
 
 
@@ -146,17 +147,28 @@ def test_unknown(self):
         assert _handle_custom_command("/x", "unknown") is None
 
     def test_auth(self):
+        from code_puppy.command_line.interactive_command import (
+            BackgroundInteractiveCommand,
+        )
         from code_puppy.plugins.chatgpt_oauth.register_callbacks import (
             _handle_custom_command,
         )
 
         with (
-            patch("code_puppy.plugins.chatgpt_oauth.register_callbacks.run_oauth_flow"),
+            patch(
+                "code_puppy.plugins.chatgpt_oauth.register_callbacks.run_oauth_flow",
+                return_value=True,
+            ) as mock_oauth,
             patch(
                 "code_puppy.plugins.chatgpt_oauth.register_callbacks.set_model_and_reload_agent"
-            ),
+            ) as mock_set_model,
         ):
-            assert _handle_custom_command("/chatgpt-auth", "chatgpt-auth") is True
+            result = _handle_custom_command("/chatgpt-auth", "chatgpt-auth")
+            assert isinstance(result, BackgroundInteractiveCommand)
+            cancel_event = threading.Event()
+            assert result.run(cancel_event) is True
+            mock_oauth.assert_called_once_with(cancel_event=cancel_event)
+            mock_set_model.assert_called_once_with("chatgpt-gpt-5.3-codex")
 
     def test_status(self):
         from code_puppy.plugins.chatgpt_oauth.register_callbacks import (
diff --git a/tests/plugins/test_chatgpt_oauth_integration.py b/tests/plugins/test_chatgpt_oauth_integration.py
index b9b5b0812..78d51919b 100644
--- a/tests/plugins/test_chatgpt_oauth_integration.py
+++ b/tests/plugins/test_chatgpt_oauth_integration.py
@@ -9,6 +9,7 @@
 
 import json
 import os
+import threading
 from unittest.mock import Mock, patch
 
 import requests
@@ -288,11 +289,18 @@
     )
     def test_handle_custom_command_auth(self, mock_set_model, mock_oauth):
         """Test chatgpt-auth command triggers OAuth flow."""
+        from code_puppy.command_line.interactive_command import (
+            BackgroundInteractiveCommand,
+        )
+
+        mock_oauth.return_value = True
         result = _handle_custom_command("custom_command", "chatgpt-auth")
 
-        assert result is True
-        mock_oauth.assert_called_once()
-        mock_set_model.assert_called_once_with("chatgpt-gpt-5.4")
+        assert isinstance(result, BackgroundInteractiveCommand)
+        cancel_event = threading.Event()
+        assert result.run(cancel_event) is True
+        mock_oauth.assert_called_once_with(cancel_event=cancel_event)
+        mock_set_model.assert_called_once_with("chatgpt-gpt-5.3-codex")
 
     @patch("code_puppy.plugins.chatgpt_oauth.register_callbacks.load_stored_tokens")
     def test_handle_custom_command_status(self, mock_load):
diff --git a/tests/plugins/test_claude_code_oauth_callbacks.py b/tests/plugins/test_claude_code_oauth_callbacks.py
index 5e80cbf7a..73cc734ac 100644
--- a/tests/plugins/test_claude_code_oauth_callbacks.py
+++ b/tests/plugins/test_claude_code_oauth_callbacks.py
@@ -382,12 +382,22 @@
     def test_auth_with_existing_tokens(
         self, mock_warn, mock_info, mock_set, mock_auth, mock_tokens
    ):
+        from code_puppy.command_line.interactive_command import (
+            BackgroundInteractiveCommand,
+        )
         from code_puppy.plugins.claude_code_oauth.register_callbacks import (
             _handle_custom_command,
         )
 
-        assert _handle_custom_command("/claude-code-auth", "claude-code-auth") is True
+        mock_auth.return_value = True
+        result = _handle_custom_command("/claude-code-auth", "claude-code-auth")
+
+        assert isinstance(result, BackgroundInteractiveCommand)
+        cancel_event = threading.Event()
+        assert result.run(cancel_event) is True
         mock_warn.assert_called()  # warns about overwriting
+        mock_auth.assert_called_once_with(cancel_event=cancel_event)
+        mock_set.assert_called_once_with("claude-code-claude-opus-4-6")
 
     @patch(
         f"{MOD}.load_stored_tokens",
diff --git a/tests/plugins/test_claude_code_oauth_coverage.py b/tests/plugins/test_claude_code_oauth_coverage.py
index 74d5ecd13..4999f2862 100644
--- a/tests/plugins/test_claude_code_oauth_coverage.py
+++ b/tests/plugins/test_claude_code_oauth_coverage.py
@@ -539,6 +539,9 @@ def test_unknown(self):
         assert _handle_custom_command("/x", "unknown") is None
 
     def test_auth(self):
+        from code_puppy.command_line.interactive_command import (
+            BackgroundInteractiveCommand,
+        )
         from code_puppy.plugins.claude_code_oauth.register_callbacks import (
             _handle_custom_command,
         )
@@ -554,16 +557,23 @@
             ),
             patch(
                 "code_puppy.plugins.claude_code_oauth.register_callbacks._perform_authentication"
- ), + ) as mock_auth, patch( "code_puppy.plugins.claude_code_oauth.register_callbacks.set_model_and_reload_agent" - ), + ) as mock_set_model, ): - assert ( - _handle_custom_command("/claude-code-auth", "claude-code-auth") is True - ) + mock_auth.return_value = True + result = _handle_custom_command("/claude-code-auth", "claude-code-auth") + assert isinstance(result, BackgroundInteractiveCommand) + cancel_event = threading.Event() + assert result.run(cancel_event) is True + mock_auth.assert_called_once_with(cancel_event=cancel_event) + mock_set_model.assert_called_once_with("claude-code-claude-opus-4-6") def test_auth_no_existing_tokens(self): + from code_puppy.command_line.interactive_command import ( + BackgroundInteractiveCommand, + ) from code_puppy.plugins.claude_code_oauth.register_callbacks import ( _handle_custom_command, ) @@ -576,14 +586,18 @@ def test_auth_no_existing_tokens(self): ), patch( "code_puppy.plugins.claude_code_oauth.register_callbacks._perform_authentication" - ), + ) as mock_auth, patch( "code_puppy.plugins.claude_code_oauth.register_callbacks.set_model_and_reload_agent" - ), + ) as mock_set_model, ): - assert ( - _handle_custom_command("/claude-code-auth", "claude-code-auth") is True - ) + mock_auth.return_value = True + result = _handle_custom_command("/claude-code-auth", "claude-code-auth") + assert isinstance(result, BackgroundInteractiveCommand) + cancel_event = threading.Event() + assert result.run(cancel_event) is True + mock_auth.assert_called_once_with(cancel_event=cancel_event) + mock_set_model.assert_called_once_with("claude-code-claude-opus-4-6") def test_status_authenticated(self): from code_puppy.plugins.claude_code_oauth.register_callbacks import ( diff --git a/tests/test_agent_tools_coverage.py b/tests/test_agent_tools_coverage.py index 00615df6b..dff481cbf 100644 --- a/tests/test_agent_tools_coverage.py +++ b/tests/test_agent_tools_coverage.py @@ -14,6 +14,7 @@ import pytest +from code_puppy.messaging.messages import AgentListMessage, MessageLevel, TextMessage from code_puppy.tools.agent_tools import ( AgentInfo, AgentInvokeOutput, @@ -278,17 +279,10 @@ def capture_tool(func): register_list_agents(mock_agent) assert registered_func is not None - # Mock the agent manager functions and config - # Note: get_banner_color is imported from code_puppy.config inside the function + mock_bus = MagicMock() with ( patch( - "code_puppy.config.get_banner_color", - return_value="blue", - ), - patch("code_puppy.tools.agent_tools.emit_info"), - patch( - "code_puppy.tools.agent_tools.generate_group_id", - return_value="test-group", + "code_puppy.tools.agent_tools.get_message_bus", return_value=mock_bus ), patch("code_puppy.agents.get_available_agents") as mock_available, patch("code_puppy.agents.get_agent_descriptions") as mock_descriptions, @@ -314,6 +308,10 @@ def capture_tool(func): agent_names = [a.name for a in result.agents] assert "code-reviewer" in agent_names assert "qa-expert" in agent_names + mock_bus.emit.assert_called_once() + emitted = mock_bus.emit.call_args.args[0] + assert isinstance(emitted, AgentListMessage) + assert emitted.agent_count == 2 def test_list_agents_handles_exception(self): """Test that list_agents handles exceptions gracefully.""" @@ -330,17 +328,10 @@ def capture_tool(func): mock_agent.tool = capture_tool register_list_agents(mock_agent) - # Mock to raise an exception + mock_bus = MagicMock() with ( patch( - "code_puppy.config.get_banner_color", - return_value="blue", - ), - patch("code_puppy.tools.agent_tools.emit_info"), - 
patch("code_puppy.tools.agent_tools.emit_error") as mock_emit_error, - patch( - "code_puppy.tools.agent_tools.generate_group_id", - return_value="test-group", + "code_puppy.tools.agent_tools.get_message_bus", return_value=mock_bus ), patch( "code_puppy.agents.get_available_agents", @@ -353,7 +344,11 @@ def capture_tool(func): assert isinstance(result, ListAgentsOutput) assert len(result.agents) == 0 assert "Database connection failed" in result.error - assert mock_emit_error.called + mock_bus.emit.assert_called_once() + emitted = mock_bus.emit.call_args.args[0] + assert isinstance(emitted, TextMessage) + assert emitted.level == MessageLevel.ERROR + assert emitted.text == "Error listing agents: Database connection failed" def test_list_agents_with_missing_description(self): """Test that list_agents handles missing descriptions.""" @@ -370,15 +365,10 @@ def capture_tool(func): mock_agent.tool = capture_tool register_list_agents(mock_agent) + mock_bus = MagicMock() with ( patch( - "code_puppy.config.get_banner_color", - return_value="blue", - ), - patch("code_puppy.tools.agent_tools.emit_info"), - patch( - "code_puppy.tools.agent_tools.generate_group_id", - return_value="test-group", + "code_puppy.tools.agent_tools.get_message_bus", return_value=mock_bus ), patch("code_puppy.agents.get_available_agents") as mock_available, patch("code_puppy.agents.get_agent_descriptions") as mock_descriptions, @@ -394,6 +384,10 @@ def capture_tool(func): # Should use default description assert len(result.agents) == 1 assert result.agents[0].description == "No description available" + mock_bus.emit.assert_called_once() + emitted = mock_bus.emit.call_args.args[0] + assert isinstance(emitted, AgentListMessage) + assert emitted.agent_count == 1 class TestRegisterInvokeAgentExecution: @@ -709,13 +703,14 @@ async def test_too_long_session_id_rejected(self): assert "128 characters or less" in result.error -class TestListAgentsEmitsBannerAndInfo: - """Test that list_agents properly emits banner and info messages.""" +class TestListAgentsStructuredOutput: + """Test that list_agents emits structured output for the bus renderer.""" - def test_emits_banner_message(self): - """Test that list_agents emits a banner message.""" + def test_emits_agent_list_message(self): + """Test that list_agents emits a structured agent list summary.""" mock_agent = MagicMock() mock_context = MagicMock() + mock_bus = MagicMock() registered_func = None @@ -729,27 +724,19 @@ def capture_tool(func): with ( patch( - "code_puppy.config.get_banner_color", - return_value="green", - ) as mock_banner_color, - patch("code_puppy.tools.agent_tools.emit_info") as mock_emit_info, - patch( - "code_puppy.tools.agent_tools.generate_group_id", - return_value="banner-group", + "code_puppy.tools.agent_tools.get_message_bus", return_value=mock_bus ), patch( "code_puppy.agents.get_available_agents", - return_value={}, + return_value={"python-programmer": "Python Programmer"}, ), patch( "code_puppy.agents.get_agent_descriptions", - return_value={}, + return_value={"python-programmer": "Writes Python"}, ), ): registered_func(mock_context) - - # Verify banner color was fetched - mock_banner_color.assert_called_once_with("list_agents") - - # Verify emit_info was called (at least for banner) - assert mock_emit_info.called + mock_bus.emit.assert_called_once() + emitted = mock_bus.emit.call_args.args[0] + assert isinstance(emitted, AgentListMessage) + assert emitted.agent_count == 1 diff --git a/tests/test_cli_runner_coverage.py b/tests/test_cli_runner_coverage.py 
index 49e82e81b..79811c71c 100644 --- a/tests/test_cli_runner_coverage.py +++ b/tests/test_cli_runner_coverage.py @@ -8,6 +8,11 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest +from code_puppy.command_line.interactive_runtime import ( + PromptRuntimeState, + clear_active_interactive_runtime, + register_active_interactive_runtime, +) class TestRunPromptWithAttachments: @@ -169,6 +174,84 @@ async def test_clipboard_placeholder_cleaned(self): call_args = mock_agent.run_with_mcp.call_args assert "clipboard image" not in call_args[0][0] + @pytest.mark.anyio + async def test_interactive_runtime_disables_spinner(self): + from code_puppy.cli_runner import run_prompt_with_attachments + + runtime = PromptRuntimeState(running=True) + register_active_interactive_runtime(runtime) + + mock_agent = MagicMock() + mock_result = MagicMock() + mock_agent.run_with_mcp = AsyncMock(return_value=mock_result) + + try: + with ( + patch("code_puppy.cli_runner.parse_prompt_attachments") as mock_parse, + patch("code_puppy.cli_runner.get_clipboard_manager") as mock_clip, + patch("code_puppy.agents.event_stream_handler.set_streaming_console"), + patch("code_puppy.messaging.spinner.ConsoleSpinner") as mock_spinner, + ): + mock_parse.return_value = MagicMock( + prompt="do stuff", + warnings=[], + attachments=[], + link_attachments=[], + ) + clip_mgr = MagicMock() + clip_mgr.get_pending_images.return_value = [] + clip_mgr.get_pending_count.return_value = 0 + mock_clip.return_value = clip_mgr + + console = MagicMock() + result, _task = await run_prompt_with_attachments( + mock_agent, "do stuff", spinner_console=console, use_spinner=True + ) + + assert result is mock_result + mock_spinner.assert_not_called() + finally: + clear_active_interactive_runtime(runtime) + + @pytest.mark.anyio + async def test_seeds_spinner_context_before_agent_updates(self): + from code_puppy.cli_runner import run_prompt_with_attachments + + mock_agent = MagicMock() + mock_result = MagicMock() + mock_agent.run_with_mcp = AsyncMock(return_value=mock_result) + mock_agent.get_message_history.return_value = ["m1", "m2"] + mock_agent.estimate_tokens_for_message.side_effect = [100, 200] + mock_agent.estimate_context_overhead_tokens.return_value = 400 + mock_agent.estimate_token_count.side_effect = lambda text: len(text) * 10 + mock_agent.get_model_context_length.return_value = 10000 + + with ( + patch("code_puppy.cli_runner.parse_prompt_attachments") as mock_parse, + patch("code_puppy.cli_runner.get_clipboard_manager") as mock_clip, + patch("code_puppy.agents.event_stream_handler.set_streaming_console"), + patch("code_puppy.messaging.spinner.clear_spinner_context") as mock_clear, + patch("code_puppy.messaging.spinner.update_spinner_context") as mock_update, + ): + mock_parse.return_value = MagicMock( + prompt="do stuff", + warnings=[], + attachments=[], + link_attachments=[MagicMock(url_part="https://example.com")], + ) + clip_mgr = MagicMock() + clip_mgr.get_pending_images.return_value = [] + clip_mgr.get_pending_count.return_value = 0 + mock_clip.return_value = clip_mgr + + result, _task = await run_prompt_with_attachments( + mock_agent, "do stuff", use_spinner=False + ) + + assert result is mock_result + mock_clear.assert_called_once() + mock_update.assert_called_once_with("Tokens: 970/10,000 (9.7% used)") + class TestExecuteSinglePrompt: @pytest.mark.anyio @@ -207,7 +290,7 @@ async def test_none_response(self): ) as mock_run, patch("code_puppy.cli_runner.emit_info"), ): - mock_run.return_value = None + mock_run.return_value = 
(None, None) await execute_single_prompt("hello", mock_renderer) @pytest.mark.anyio diff --git a/tests/test_cli_runner_full_coverage.py b/tests/test_cli_runner_full_coverage.py index 0e2d3c985..e62be8b25 100644 --- a/tests/test_cli_runner_full_coverage.py +++ b/tests/test_cli_runner_full_coverage.py @@ -11,6 +11,8 @@ import pytest +from code_puppy.command_line.prompt_toolkit_completion import PromptSubmission + # --------------------------------------------------------------------------- # Helpers # --------------------------------------------------------------------------- @@ -45,6 +47,20 @@ def _mock_clipboard(images=None): return mgr +def _submission( + text: str, + action: str = "submit", + echo_in_transcript: bool = False, + allow_command_dispatch: bool = True, +) -> PromptSubmission: + return PromptSubmission( + action=action, + text=text, + echo_in_transcript=echo_in_transcript, + allow_command_dispatch=allow_command_dispatch, + ) + + def _apply_patches(stack, patches_dict): """Apply a dict of patches using an ExitStack.""" for target, value in patches_dict.items(): @@ -109,14 +125,28 @@ async def _run_interactive( agent = MagicMock() agent.get_user_prompt.return_value = "task:" + from code_puppy.command_line.prompt_toolkit_completion import PromptSubmission + + async def prompt_side_effect(*args, **kwargs): + if isinstance(input_fn, AsyncMock): + value = await input_fn(*args, **kwargs) + elif callable(input_fn): + value = input_fn(*args, **kwargs) + if asyncio.iscoroutine(value): + value = await value + else: + value = input_fn + + if value is None or isinstance(value, PromptSubmission): + return value + return PromptSubmission(action="submit", text=value) + with ExitStack() as stack: _apply_patches(stack, patches_dict) stack.enter_context( patch( - "code_puppy.command_line.prompt_toolkit_completion.get_input_with_combined_completion", - side_effect=input_fn - if callable(input_fn) and not isinstance(input_fn, AsyncMock) - else input_fn, + "code_puppy.command_line.prompt_toolkit_completion.prompt_for_submission", + side_effect=prompt_side_effect, ) ) stack.enter_context( @@ -139,6 +169,173 @@ async def _run_interactive( await interactive_mode(renderer, initial_command=initial_command) +def test_emit_interject_queue_lifecycle_uses_friendly_interject_copy(): + from code_puppy.cli_runner import ( + PromptRuntimeState, + QueuedPrompt, + emit_interject_queue_lifecycle, + ) + + message_bus = MagicMock() + + with patch("code_puppy.messaging.get_message_bus", return_value=message_bus): + emit_interject_queue_lifecycle( + PromptRuntimeState(), + "queued", + item=QueuedPrompt(kind="interject", text="steer now"), + position=2, + level="warning", + ) + + emitted = message_bus.emit.call_args[0][0] + assert emitted.text == "[INTERJECT] stopping current work: steer now" + + +def test_emit_interject_queue_lifecycle_uses_friendly_queue_copy(): + from code_puppy.cli_runner import ( + PromptRuntimeState, + QueuedPrompt, + emit_interject_queue_lifecycle, + ) + + message_bus = MagicMock() + + with patch("code_puppy.messaging.get_message_bus", return_value=message_bus): + emit_interject_queue_lifecycle( + PromptRuntimeState(), + "queued", + item=QueuedPrompt(kind="queued", text="follow up"), + position=2, + level="info", + ) + + emitted = message_bus.emit.call_args[0][0] + assert emitted.text == "[Queued][2] follow up" + + +def test_emit_interject_queue_lifecycle_skips_dequeued_user_message(): + from code_puppy.cli_runner import ( + PromptRuntimeState, + QueuedPrompt, + emit_interject_queue_lifecycle, + 
) + + message_bus = MagicMock() + + with patch("code_puppy.messaging.get_message_bus", return_value=message_bus): + emit_interject_queue_lifecycle( + PromptRuntimeState(), + "dequeued", + item=QueuedPrompt(kind="queued", text="follow up"), + level="success", + ) + + message_bus.emit.assert_not_called() + + +def test_emit_interject_queue_lifecycle_skips_started_queue_user_message(): + from code_puppy.cli_runner import ( + PromptRuntimeState, + QueuedPrompt, + emit_interject_queue_lifecycle, + ) + + message_bus = MagicMock() + + with patch("code_puppy.messaging.get_message_bus", return_value=message_bus): + emit_interject_queue_lifecycle( + PromptRuntimeState(), + "started", + item=QueuedPrompt(kind="queued", text="follow up"), + level="success", + ) + + message_bus.emit.assert_not_called() + + +def test_emit_interject_queue_lifecycle_skips_started_interject_user_message(): + from code_puppy.cli_runner import ( + PromptRuntimeState, + QueuedPrompt, + emit_interject_queue_lifecycle, + ) + + message_bus = MagicMock() + + with patch("code_puppy.messaging.get_message_bus", return_value=message_bus): + emit_interject_queue_lifecycle( + PromptRuntimeState(), + "started", + item=QueuedPrompt(kind="interject", text="steer now"), + level="warning", + ) + + message_bus.emit.assert_not_called() + + +def test_emit_interject_queue_lifecycle_skips_completed_launch_message(): + from code_puppy.cli_runner import ( + PromptRuntimeState, + QueuedPrompt, + emit_interject_queue_lifecycle, + ) + + message_bus = MagicMock() + + with patch("code_puppy.messaging.get_message_bus", return_value=message_bus): + emit_interject_queue_lifecycle( + PromptRuntimeState(), + "completed", + item=QueuedPrompt(kind="queued", text="follow up"), + level="success", + ) + + message_bus.emit.assert_not_called() + + +def test_emit_interject_queue_lifecycle_skips_run_cancelled_message(): + from code_puppy.cli_runner import ( + PromptRuntimeState, + QueuedPrompt, + emit_interject_queue_lifecycle, + ) + + message_bus = MagicMock() + + with patch("code_puppy.messaging.get_message_bus", return_value=message_bus): + emit_interject_queue_lifecycle( + PromptRuntimeState(), + "cancelled", + item=QueuedPrompt(kind="queued", text="follow up"), + reason="run_cancelled", + level="warning", + ) + + message_bus.emit.assert_not_called() + + +def test_emit_interject_queue_lifecycle_keeps_command_completion_message(): + from code_puppy.cli_runner import ( + PromptRuntimeState, + QueuedPrompt, + emit_interject_queue_lifecycle, + ) + + message_bus = MagicMock() + + with patch("code_puppy.messaging.get_message_bus", return_value=message_bus): + emit_interject_queue_lifecycle( + PromptRuntimeState(), + "completed", + item=QueuedPrompt(kind="queued", text="/help"), + reason="command_consumed", + level="success", + ) + + emitted = message_bus.emit.call_args[0][0] + assert emitted.text == "[QUEUE] finished: /help" + + # --------------------------------------------------------------------------- # main() tests # --------------------------------------------------------------------------- @@ -518,27 +715,44 @@ async def fake_input(*a, **kw): @pytest.mark.anyio async def test_keyboard_interrupt_stops_wiggum(self): call_count = 0 + wiggum_active = {"value": False} + mock_warning = MagicMock() async def fake_input(*a, **kw): nonlocal call_count call_count += 1 if call_count == 1: + wiggum_active["value"] = True raise KeyboardInterrupt return "/exit" mock_stop = MagicMock() + + def fake_wiggum_active(): + return wiggum_active["value"] + + def fake_stop_wiggum(): + 
wiggum_active["value"] = False + mock_stop() + await _run_interactive( _mock_renderer(), _interactive_patches(), fake_input, extra_patches={ "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( - return_value=True + side_effect=fake_wiggum_active ), - "code_puppy.command_line.wiggum_state.stop_wiggum": mock_stop, + "code_puppy.command_line.wiggum_state.stop_wiggum": MagicMock( + side_effect=fake_stop_wiggum + ), + "code_puppy.messaging.emit_warning": mock_warning, }, ) mock_stop.assert_called() + warning_messages = [call.args[0] for call in mock_warning.call_args_list] + assert "\nšŸ© Wiggum loop stopped!" in warning_messages + assert "\nInput cancelled" not in warning_messages @pytest.mark.anyio async def test_clear_command(self): @@ -646,6 +860,43 @@ async def fake_input(*a, **kw): }, ) + @pytest.mark.anyio + async def test_runtime_cleared_after_unhandled_exception(self): + from code_puppy.cli_runner import interactive_mode + from code_puppy.command_line.interactive_runtime import ( + get_active_interactive_runtime, + ) + + renderer = _mock_renderer() + agent = MagicMock() + agent.get_user_prompt.return_value = "task:" + + with ExitStack() as stack: + _apply_patches(stack, _interactive_patches()) + stack.enter_context( + patch( + "code_puppy.command_line.prompt_toolkit_completion.prompt_for_submission", + side_effect=RuntimeError("boom"), + ) + ) + stack.enter_context( + patch( + "code_puppy.command_line.prompt_toolkit_completion.get_prompt_with_active_model", + return_value="> ", + ) + ) + stack.enter_context( + patch( + "code_puppy.agents.agent_manager.get_current_agent", + return_value=agent, + ) + ) + + with pytest.raises(RuntimeError, match="boom"): + await interactive_mode(renderer) + + assert get_active_interactive_runtime() is None + @pytest.mark.anyio async def test_normal_prompt_execution(self): call_count = 0 @@ -669,116 +920,838 @@ async def fake_input(*a, **kw): "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( return_value=False ), - "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( - return_value=_mock_parse_result("write hello") - ), + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + return_value=_mock_parse_result("write hello") + ), + }, + ) + + @pytest.mark.anyio + async def test_prompt_returns_none_cancelled(self): + call_count = 0 + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + return "write hello" if call_count == 1 else "/exit" + + await _run_interactive( + _mock_renderer(), + _interactive_patches(), + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": AsyncMock( + return_value=(None, MagicMock()) + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False + ), + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + return_value=_mock_parse_result("write hello") + ), + }, + ) + + @pytest.mark.anyio + async def test_prompt_cancelled_wiggum_active(self): + call_count = 0 + run_started = asyncio.Event() + wiggum_active = {"value": False} + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + if call_count == 1: + return "write hello" + await asyncio.wait_for(run_started.wait(), timeout=1) + return "/exit" + + mock_stop = MagicMock() + + def fake_wiggum_active(): + return wiggum_active["value"] + + def fake_stop_wiggum(): + wiggum_active["value"] = False + mock_stop() + + async def fake_run(*args, **kwargs): + wiggum_active["value"] = True + run_started.set() + return (None, 
MagicMock()) + + await _run_interactive( + _mock_renderer(), + _interactive_patches(), + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": AsyncMock( + side_effect=fake_run + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + side_effect=fake_wiggum_active + ), + "code_puppy.command_line.wiggum_state.stop_wiggum": MagicMock( + side_effect=fake_stop_wiggum + ), + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + return_value=_mock_parse_result("write hello") + ), + }, + ) + mock_stop.assert_called() + + @pytest.mark.anyio + async def test_prompt_exception(self): + call_count = 0 + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + return "write hello" if call_count == 1 else "/exit" + + await _run_interactive( + _mock_renderer(), + _interactive_patches(), + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": AsyncMock( + side_effect=RuntimeError("agent error") + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False + ), + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + return_value=_mock_parse_result("write hello") + ), + "code_puppy.messaging.queue_console.get_queue_console": MagicMock( + return_value=MagicMock() + ), + }, + ) + + @pytest.mark.anyio + async def test_empty_input_skipped(self): + call_count = 0 + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + return " " if call_count == 1 else "/exit" + + await _run_interactive( + _mock_renderer(), + _interactive_patches(), + fake_input, + extra_patches={ + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + return_value=_mock_parse_result(" ") + ), + }, + ) + + @pytest.mark.anyio + async def test_successful_interactive_response_autosaves(self): + call_count = 0 + patches = _interactive_patches() + autosave_mock = patches["code_puppy.config.auto_save_session_if_enabled"] + autosave_done = asyncio.Event() + autosave_mock.side_effect = lambda: autosave_done.set() + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + if call_count == 1: + return _submission("write hello") + await autosave_done.wait() + return _submission("/exit") + + async def fake_run(*args, **kwargs): + result = MagicMock(output="done: write hello") + result.all_messages.return_value = [] + return result, MagicMock() + + await _run_interactive( + _mock_renderer(), + patches, + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + return_value=_mock_parse_result("write hello") + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False + ), + }, + ) + + autosave_mock.assert_called_once_with() + + @pytest.mark.anyio + async def test_cancelled_interactive_response_does_not_autosave(self): + call_count = 0 + patches = _interactive_patches() + autosave_mock = patches["code_puppy.config.auto_save_session_if_enabled"] + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + if call_count == 1: + return _submission("write hello") + return _submission("/exit") + + async def fake_run(*args, **kwargs): + return None, MagicMock() + + await _run_interactive( + _mock_renderer(), + patches, + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + 
return_value=_mock_parse_result("write hello") + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False + ), + }, + ) + + autosave_mock.assert_not_called() + + +class TestInteractiveQueueHandoff: + """Test unified queue, interject, and idle-drain behavior.""" + + @pytest.mark.anyio + async def test_queued_prompt_runs_after_current_task_finishes(self): + call_count = 0 + second_prompt_seen = asyncio.Event() + queued_prompt_started = asyncio.Event() + started_prompts = [] + patches = _interactive_patches() + autosave_mock = patches["code_puppy.config.auto_save_session_if_enabled"] + all_autosaves_done = asyncio.Event() + autosave_mock.side_effect = lambda: ( + all_autosaves_done.set() if autosave_mock.call_count >= 2 else None + ) + render_notice = MagicMock() + render_prompt_echo = MagicMock( + side_effect=lambda text: render_order.append(("echo", text)) + ) + render_order = [] + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + if call_count == 1: + return _submission("first task") + if call_count == 2: + second_prompt_seen.set() + return _submission("queued task", action="queue") + await all_autosaves_done.wait() + return _submission("/exit") + + async def fake_run(*args, **kwargs): + prompt = args[1] + started_prompts.append(prompt) + render_order.append(("start", prompt)) + if prompt == "first task": + await second_prompt_seen.wait() + await asyncio.sleep(0.05) + if prompt == "queued task": + assert autosave_mock.call_count == 1 + queued_prompt_started.set() + result = MagicMock(output=f"done: {prompt}") + result.all_messages.return_value = [] + return result, MagicMock() + + await _run_interactive( + _mock_renderer(), + patches, + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + side_effect=lambda text: _mock_parse_result(text) + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False + ), + "code_puppy.command_line.prompt_toolkit_completion.render_transcript_notice": MagicMock( + side_effect=lambda text: render_order.append(("notice", text)) + or render_notice(text) + ), + "code_puppy.command_line.prompt_toolkit_completion.render_submitted_prompt_echo": render_prompt_echo, + }, + ) + + assert started_prompts[:2] == ["first task", "queued task"] + render_notice.assert_any_call("[QUEUE TRIGGERED] queued task") + render_prompt_echo.assert_any_call("queued task") + queued_notice_idx = render_order.index( + ("notice", "[QUEUE TRIGGERED] queued task") + ) + queued_echo_idx = render_order.index(("echo", "queued task")) + queued_start_idx = render_order.index(("start", "queued task")) + assert queued_notice_idx < queued_echo_idx < queued_start_idx + assert autosave_mock.call_count == 2 + + @pytest.mark.anyio + async def test_hidden_direct_submission_echoes_before_agent_starts(self): + call_count = 0 + launched = asyncio.Event() + render_order = [] + render_prompt_echo = MagicMock( + side_effect=lambda text: render_order.append(("echo", text)) + ) + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + if call_count == 1: + return _submission("nice work!", echo_in_transcript=True) + await launched.wait() + return _submission("/exit") + + async def fake_run(*args, **kwargs): + prompt = args[1] + render_order.append(("start", prompt)) + launched.set() + result = MagicMock(output=f"done: {prompt}") + result.all_messages.return_value = [] + return result, 
MagicMock() + + await _run_interactive( + _mock_renderer(), + _interactive_patches(), + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + side_effect=lambda text: _mock_parse_result(text) + ), + "code_puppy.command_line.prompt_toolkit_completion.render_submitted_prompt_echo": render_prompt_echo, + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False + ), + }, + ) + + render_prompt_echo.assert_any_call("nice work!") + assert render_order.index(("echo", "nice work!")) < render_order.index( + ("start", "nice work!") + ) + + @pytest.mark.anyio + async def test_hidden_direct_submission_echoes_before_command_dispatch(self): + call_count = 0 + render_order = [] + render_prompt_echo = MagicMock( + side_effect=lambda text: render_order.append(("echo", text)) + ) + run_prompt = AsyncMock() + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + if call_count == 1: + return _submission("/help", echo_in_transcript=True) + return _submission("/exit") + + def fake_handle_command(command): + render_order.append(("command", command)) + if command == "/help": + return True + return False + + await _run_interactive( + _mock_renderer(), + _interactive_patches(), + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": run_prompt, + "code_puppy.command_line.command_handler.handle_command": MagicMock( + side_effect=fake_handle_command + ), + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + side_effect=lambda text: _mock_parse_result(text) + ), + "code_puppy.command_line.prompt_toolkit_completion.render_submitted_prompt_echo": render_prompt_echo, + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False + ), + }, + ) + + render_prompt_echo.assert_any_call("/help") + assert render_order.index(("echo", "/help")) < render_order.index( + ("command", "/help") + ) + run_prompt.assert_not_called() + + @pytest.mark.anyio + async def test_visible_direct_submission_does_not_duplicate_echo(self): + call_count = 0 + launched = asyncio.Event() + render_prompt_echo = MagicMock() + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + if call_count == 1: + return _submission("already visible") + await launched.wait() + return _submission("/exit") + + async def fake_run(*args, **kwargs): + launched.set() + result = MagicMock(output=f"done: {args[1]}") + result.all_messages.return_value = [] + return result, MagicMock() + + await _run_interactive( + _mock_renderer(), + _interactive_patches(), + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + side_effect=lambda text: _mock_parse_result(text) + ), + "code_puppy.command_line.prompt_toolkit_completion.render_submitted_prompt_echo": render_prompt_echo, + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False + ), + }, + ) + + render_prompt_echo.assert_not_called() + + @pytest.mark.anyio + async def test_queued_prompt_starts_when_run_finishes_during_choice_menu(self): + call_count = 0 + queued_prompt_started = asyncio.Event() + started_prompts = [] + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + if call_count == 1: + return _submission("first task") + if call_count == 2: + await asyncio.sleep(0.05) + return _submission("queued task", action="queue") 
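The queue and interject tests in this class avoid flaky sleeps by gating the fake prompt loop on `asyncio.Event`s: the fake run signals when it has started, and only then does the input side release `/exit`, so the recorded ordering is deterministic. A reduced, runnable sketch of that coordination pattern (all names here are illustrative):

```python
# Event-gated coordination between a fake agent run and a fake input loop:
# the input side holds back "/exit" until the run has signalled it started.
import asyncio


async def main() -> list[str]:
    started = asyncio.Event()
    order: list[str] = []

    async def fake_run(prompt: str) -> None:
        order.append(f"start:{prompt}")
        started.set()  # let the input side proceed

    async def fake_input() -> str:
        await started.wait()  # hold back /exit until the run has begun
        order.append("input:/exit")
        return "/exit"

    await asyncio.gather(fake_run("first task"), fake_input())
    return order


assert asyncio.run(main()) == ["start:first task", "input:/exit"]
```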
+ await queued_prompt_started.wait() + return _submission("/exit") + + async def fake_run(*args, **kwargs): + prompt = args[1] + started_prompts.append(prompt) + if prompt == "first task": + await asyncio.sleep(0.01) + if prompt == "queued task": + queued_prompt_started.set() + result = MagicMock(output=f"done: {prompt}") + result.all_messages.return_value = [] + return result, MagicMock() + + await _run_interactive( + _mock_renderer(), + _interactive_patches(), + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + side_effect=lambda text: _mock_parse_result(text) + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False + ), + }, + ) + + assert started_prompts[:2] == ["first task", "queued task"] + + @pytest.mark.anyio + async def test_queued_command_consumes_and_drain_continues(self): + call_count = 0 + ready_for_completion = asyncio.Event() + followup_started = asyncio.Event() + started_prompts = [] + handled_commands = [] + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + if call_count == 1: + return _submission("first task") + if call_count == 2: + return _submission("/help", action="queue") + if call_count == 3: + ready_for_completion.set() + return _submission("followup task", action="queue") + await followup_started.wait() + return _submission("/exit") + + async def fake_run(*args, **kwargs): + prompt = args[1] + started_prompts.append(prompt) + if prompt == "first task": + await ready_for_completion.wait() + await asyncio.sleep(0.05) + if prompt == "followup task": + followup_started.set() + result = MagicMock(output=f"done: {prompt}") + result.all_messages.return_value = [] + return result, MagicMock() + + def fake_handle_command(command): + handled_commands.append(command) + if command == "/help": + return True + return False + + await _run_interactive( + _mock_renderer(), + _interactive_patches(), + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.command_line.command_handler.handle_command": MagicMock( + side_effect=fake_handle_command + ), + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + side_effect=lambda text: _mock_parse_result(text) + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False + ), + }, + ) + + assert "/help" in handled_commands + assert started_prompts[:2] == ["first task", "followup task"] + + @pytest.mark.anyio + async def test_queued_command_returning_prompt_uses_normal_dispatch(self): + call_count = 0 + second_prompt_seen = asyncio.Event() + transformed_prompt_started = asyncio.Event() + started_prompts = [] + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + if call_count == 1: + return _submission("first task") + if call_count == 2: + second_prompt_seen.set() + return _submission("/custom", action="queue") + await transformed_prompt_started.wait() + return _submission("/exit") + + async def fake_run(*args, **kwargs): + prompt = args[1] + started_prompts.append(prompt) + if prompt == "first task": + await second_prompt_seen.wait() + await asyncio.sleep(0.05) + if prompt == "transformed prompt": + transformed_prompt_started.set() + result = MagicMock(output=f"done: {prompt}") + result.all_messages.return_value = [] + return result, MagicMock() + + await _run_interactive( + _mock_renderer(), + _interactive_patches(), + fake_input, + extra_patches={ 
+ "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.command_line.command_handler.handle_command": MagicMock( + side_effect=lambda command: "transformed prompt" + if command == "/custom" + else False + ), + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + side_effect=lambda text: _mock_parse_result(text) + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False + ), }, ) + assert started_prompts[:2] == ["first task", "transformed prompt"] + @pytest.mark.anyio - async def test_prompt_returns_none_cancelled(self): + async def test_interject_runs_before_queued_prompts(self): call_count = 0 + first_task_started = asyncio.Event() + queued_prompt_started = asyncio.Event() + started_prompts = [] + patches = _interactive_patches() + autosave_mock = patches["code_puppy.config.auto_save_session_if_enabled"] + all_autosaves_done = asyncio.Event() + autosave_mock.side_effect = lambda: ( + all_autosaves_done.set() if autosave_mock.call_count >= 2 else None + ) + render_notice = MagicMock() + render_order = [] + render_prompt_echo = MagicMock( + side_effect=lambda text: render_order.append(("echo", text)) + ) async def fake_input(*a, **kw): nonlocal call_count call_count += 1 - return "write hello" if call_count == 1 else "/exit" + if call_count == 1: + return _submission("first task") + if call_count == 2: + await first_task_started.wait() + return _submission("second queued", action="queue") + if call_count == 3: + return _submission("steer now", action="interject") + await all_autosaves_done.wait() + return _submission("/exit") + + async def fake_run(*args, **kwargs): + prompt = args[1] + started_prompts.append(prompt) + render_order.append(("start", prompt)) + if prompt == "first task": + first_task_started.set() + await asyncio.sleep(10) + if prompt == "second queued": + assert autosave_mock.call_count == 1 + queued_prompt_started.set() + result = MagicMock(output=f"done: {prompt}") + result.all_messages.return_value = [] + return result, MagicMock() await _run_interactive( _mock_renderer(), - _interactive_patches(), + patches, fake_input, extra_patches={ - "code_puppy.cli_runner.run_prompt_with_attachments": AsyncMock( - return_value=(None, MagicMock()) + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + side_effect=lambda text: _mock_parse_result(text) ), + "code_puppy.command_line.prompt_toolkit_completion.render_transcript_notice": MagicMock( + side_effect=lambda text: render_order.append(("notice", text)) + or render_notice(text) + ), + "code_puppy.command_line.prompt_toolkit_completion.render_submitted_prompt_echo": render_prompt_echo, "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( return_value=False ), - "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( - return_value=_mock_parse_result("write hello") + "code_puppy.tools.command_runner.get_running_shell_process_count": MagicMock( + return_value=0 ), + "code_puppy.tools.command_runner.kill_all_running_shell_processes": MagicMock(), }, ) + assert started_prompts[0] == "first task" + assert started_prompts[1].startswith("user interjects - steer now") + assert "continue the interrupted task" in started_prompts[1] + assert started_prompts[2] == "second queued" + render_prompt_echo.assert_any_call("steer now") + render_notice.assert_any_call("[QUEUE TRIGGERED] second queued") + queued_notice_idx = render_order.index( + ("notice", "[QUEUE TRIGGERED] second 
queued") + ) + queued_echo_idx = render_order.index(("echo", "second queued")) + queued_start_idx = render_order.index(("start", "second queued")) + assert queued_notice_idx < queued_echo_idx < queued_start_idx + assert not any( + call.args[0] == "[QUEUE TRIGGERED] steer now" + for call in render_notice.call_args_list + ) + assert autosave_mock.call_count == 2 + @pytest.mark.anyio - async def test_prompt_cancelled_wiggum_active(self): + async def test_full_queue_interject_does_not_cancel_active_run(self): call_count = 0 + first_task_started = asyncio.Event() + queued_prompt_started = asyncio.Event() + first_task_cancelled = asyncio.Event() + started_prompts = [] + warning = MagicMock() async def fake_input(*a, **kw): nonlocal call_count call_count += 1 - return "write hello" if call_count == 1 else "/exit" + if call_count == 1: + return _submission("first task") + if call_count == 2: + await first_task_started.wait() + return _submission("second queued", action="queue") + if call_count == 3: + return _submission("steer now", action="interject") + await queued_prompt_started.wait() + return _submission("/exit") + + async def fake_run(*args, **kwargs): + prompt = args[1] + started_prompts.append(prompt) + if prompt == "first task": + first_task_started.set() + try: + await asyncio.sleep(0.2) + except asyncio.CancelledError: + first_task_cancelled.set() + raise + if prompt == "second queued": + queued_prompt_started.set() + result = MagicMock(output=f"done: {prompt}") + result.all_messages.return_value = [] + return result, MagicMock() - mock_stop = MagicMock() await _run_interactive( _mock_renderer(), _interactive_patches(), fake_input, extra_patches={ - "code_puppy.cli_runner.run_prompt_with_attachments": AsyncMock( - return_value=(None, MagicMock()) + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + side_effect=lambda text: _mock_parse_result(text) + ), + "code_puppy.cli_runner.get_queue_limit": MagicMock(return_value=1), + "code_puppy.command_line.interactive_runtime.get_queue_limit": MagicMock( + return_value=1 ), + "code_puppy.messaging.emit_warning": warning, "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( - return_value=True + return_value=False ), - "code_puppy.command_line.wiggum_state.stop_wiggum": mock_stop, - "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( - return_value=_mock_parse_result("write hello") + "code_puppy.tools.command_runner.get_running_shell_process_count": MagicMock( + return_value=0 ), + "code_puppy.tools.command_runner.kill_all_running_shell_processes": MagicMock(), }, ) - mock_stop.assert_called() + + assert started_prompts == ["first task", "second queued"] + assert not first_task_cancelled.is_set() + assert any( + "Cannot interject right now" in call.args[0] + for call in warning.call_args_list + ) @pytest.mark.anyio - async def test_prompt_exception(self): + async def test_queued_background_command_returning_false_skips_completed_lifecycle( + self, + ): + from code_puppy.command_line.interactive_command import ( + BackgroundInteractiveCommand, + ) + call_count = 0 + first_task_started = asyncio.Event() + background_done = asyncio.Event() + lifecycle = MagicMock() async def fake_input(*a, **kw): nonlocal call_count call_count += 1 - return "write hello" if call_count == 1 else "/exit" + if call_count == 1: + return _submission("first task") + if call_count == 2: + await first_task_started.wait() + return _submission("/claude-code-auth", 
action="queue") + await background_done.wait() + return _submission("/exit") + + async def fake_run(*args, **kwargs): + prompt = args[1] + if prompt == "first task": + first_task_started.set() + await asyncio.sleep(0.05) + result = MagicMock(output=f"done: {prompt}") + result.all_messages.return_value = [] + return result, MagicMock() + + def auth_wait(cancel_event): + background_done.set() + return False + + def fake_handle_command(command): + if command == "/claude-code-auth": + return BackgroundInteractiveCommand(run=auth_wait) + return False await _run_interactive( _mock_renderer(), _interactive_patches(), fake_input, extra_patches={ - "code_puppy.cli_runner.run_prompt_with_attachments": AsyncMock( - side_effect=RuntimeError("agent error") - ), - "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( - return_value=False + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.command_line.command_handler.handle_command": MagicMock( + side_effect=fake_handle_command ), "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( - return_value=_mock_parse_result("write hello") + side_effect=lambda text: _mock_parse_result(text) ), - "code_puppy.messaging.queue_console.get_queue_console": MagicMock( - return_value=MagicMock() + "code_puppy.cli_runner.emit_interject_queue_lifecycle": lifecycle, + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False ), }, ) + assert not any( + len(call.args) > 1 and call.args[1] == "completed" + for call in lifecycle.call_args_list + ) + @pytest.mark.anyio - async def test_empty_input_skipped(self): + async def test_exit_while_running_cancels_runtime_task(self): call_count = 0 + cancelled = asyncio.Event() + first_task_started = asyncio.Event() async def fake_input(*a, **kw): nonlocal call_count call_count += 1 - return " " if call_count == 1 else "/exit" + if call_count == 1: + return "do work" + await first_task_started.wait() + return "/exit" + + async def fake_run(*args, **kwargs): + try: + first_task_started.set() + await asyncio.sleep(10) + except asyncio.CancelledError: + cancelled.set() + raise await _run_interactive( _mock_renderer(), _interactive_patches(), fake_input, extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( - return_value=_mock_parse_result(" ") + side_effect=lambda text: _mock_parse_result(text) + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False ), + "code_puppy.tools.command_runner.get_running_shell_process_count": MagicMock( + return_value=0 + ), + "code_puppy.tools.command_runner.kill_all_running_shell_processes": MagicMock(), }, ) + assert cancelled.is_set() + @pytest.mark.anyio async def test_initial_command_success(self): agent = MagicMock() @@ -1063,6 +2036,203 @@ def fake_wiggum(): }, ) + @pytest.mark.anyio + async def test_wiggum_queued_prompt_waits_until_loop_stops(self): + call_count = 0 + queued_submitted = asyncio.Event() + started_prompts = [] + patches = _interactive_patches() + autosave_mock = patches["code_puppy.config.auto_save_session_if_enabled"] + all_autosaves_done = asyncio.Event() + autosave_mock.side_effect = lambda: ( + all_autosaves_done.set() if autosave_mock.call_count >= 4 else None + ) + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + if call_count == 1: + return _submission("write hello") + if call_count == 2: + queued_submitted.set() + return _submission("queued task", 
action="queue") + await all_autosaves_done.wait() + return _submission("/exit") + + def fake_wiggum_active(): + return len(started_prompts) >= 1 and len(started_prompts) < 3 + + async def fake_run(*a, **kw): + prompt = a[1] + started_prompts.append(prompt) + if prompt == "write hello": + await queued_submitted.wait() + await asyncio.sleep(0.05) + result = MagicMock(output=f"done: {prompt}") + result.all_messages.return_value = [] + return result, MagicMock() + + await _run_interactive( + _mock_renderer(), + patches, + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + side_effect=lambda text: _mock_parse_result(text) + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + side_effect=fake_wiggum_active + ), + "code_puppy.command_line.wiggum_state.get_wiggum_prompt": MagicMock( + return_value="repeat" + ), + "code_puppy.command_line.wiggum_state.increment_wiggum_count": MagicMock( + return_value=1 + ), + "code_puppy.command_line.wiggum_state.stop_wiggum": MagicMock(), + }, + ) + + assert started_prompts[:4] == ["write hello", "repeat", "repeat", "queued task"] + assert autosave_mock.call_count == 4 + + @pytest.mark.anyio + async def test_wiggum_queued_busy_slash_text_stays_literal_after_loop_ends(self): + call_count = 0 + queued_submitted = asyncio.Event() + queued_finished = asyncio.Event() + started_prompts = [] + handle_command = MagicMock(return_value=True) + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + if call_count == 1: + return _submission("write hello") + if call_count == 2: + queued_submitted.set() + return _submission( + "/agent", + action="queue", + allow_command_dispatch=False, + ) + await queued_finished.wait() + return _submission("/exit") + + def fake_wiggum_active(): + return len(started_prompts) >= 1 and len(started_prompts) < 3 + + async def fake_run(*a, **kw): + prompt = a[1] + started_prompts.append(prompt) + if prompt == "write hello": + await queued_submitted.wait() + await asyncio.sleep(0.05) + result = MagicMock(output=f"done: {prompt}") + result.all_messages.return_value = [] + if prompt == "/agent": + queued_finished.set() + return result, MagicMock() + + await _run_interactive( + _mock_renderer(), + _interactive_patches(), + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.command_line.command_handler.handle_command": handle_command, + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + side_effect=lambda text: _mock_parse_result(text) + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + side_effect=fake_wiggum_active + ), + "code_puppy.command_line.wiggum_state.get_wiggum_prompt": MagicMock( + return_value="repeat" + ), + "code_puppy.command_line.wiggum_state.increment_wiggum_count": MagicMock( + return_value=1 + ), + "code_puppy.command_line.wiggum_state.stop_wiggum": MagicMock(), + }, + ) + + assert started_prompts[:4] == ["write hello", "repeat", "repeat", "/agent"] + handle_command.assert_not_called() + + @pytest.mark.anyio + async def test_wiggum_interject_runs_immediately_and_reloop_resumes(self): + call_count = 0 + repeat_started = asyncio.Event() + rerun_finished = asyncio.Event() + started_prompts = [] + repeat_runs = 0 + stop_wiggum = MagicMock() + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + if call_count == 1: + return _submission("write hello") + if call_count == 2: + 
await repeat_started.wait() + return _submission("steer now", action="interject") + await rerun_finished.wait() + return _submission("/exit") + + def fake_wiggum_active(): + return len(started_prompts) >= 1 and len(started_prompts) < 4 + + async def fake_run(*a, **kw): + nonlocal repeat_runs + prompt = a[1] + started_prompts.append(prompt) + if prompt == "repeat": + repeat_runs += 1 + if repeat_runs == 1: + repeat_started.set() + try: + await asyncio.Future() + except asyncio.CancelledError: + return None, MagicMock() + else: + rerun_finished.set() + result = MagicMock(output=f"done: {prompt}") + result.all_messages.return_value = [] + return result, MagicMock() + + await _run_interactive( + _mock_renderer(), + _interactive_patches(), + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + side_effect=lambda text: _mock_parse_result(text) + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + side_effect=fake_wiggum_active + ), + "code_puppy.command_line.wiggum_state.get_wiggum_prompt": MagicMock( + return_value="repeat" + ), + "code_puppy.command_line.wiggum_state.increment_wiggum_count": MagicMock( + return_value=1 + ), + "code_puppy.command_line.wiggum_state.stop_wiggum": stop_wiggum, + "code_puppy.tools.command_runner.get_running_shell_process_count": MagicMock( + return_value=0 + ), + }, + ) + + assert started_prompts[0] == "write hello" + assert started_prompts[1] == "repeat" + assert started_prompts[2].startswith("user interjects - steer now") + assert started_prompts[3] == "repeat" + stop_wiggum.assert_not_called() + @pytest.mark.anyio async def test_wiggum_loop_cancelled(self): call_count = 0 @@ -1127,7 +2297,7 @@ async def fake_input(*a, **kw): def fake_wiggum(): nonlocal wiggum_calls wiggum_calls += 1 - return wiggum_calls <= 1 + return wiggum_calls <= 2 mock_stop = MagicMock() await _run_interactive( @@ -1363,6 +2533,88 @@ async def fake_input(*a, **kw): ) +class TestInteractiveShellSuspension: + @pytest.mark.anyio + async def test_shell_suspension_does_not_poll_old_shell_lock_path(self): + call_count = 0 + shell_count_mock = MagicMock(return_value=0) + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + return "first task" if call_count == 1 else "/exit" + + async def fake_run(*args, **kwargs): + prompt = args[1] + result = MagicMock(output=f"done: {prompt}") + result.all_messages.return_value = [] + return result, MagicMock() + + await _run_interactive( + _mock_renderer(), + _interactive_patches(), + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + side_effect=lambda text: _mock_parse_result(text) + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False + ), + "code_puppy.tools.command_runner.get_running_shell_process_count": shell_count_mock, + }, + ) + + assert call_count == 2 + assert shell_count_mock.call_count <= 1 + + @pytest.mark.anyio + async def test_queue_behavior_still_drains_without_shell_lock_loop(self): + call_count = 0 + queued_prompt_started = asyncio.Event() + started_prompts = [] + shell_count_mock = MagicMock(return_value=0) + + async def fake_input(*a, **kw): + nonlocal call_count + call_count += 1 + if call_count == 1: + return _submission("first task") + if call_count == 2: + return _submission("queued after shell", action="queue") + await 
queued_prompt_started.wait() + return _submission("/exit") + + async def fake_run(*args, **kwargs): + prompt = args[1] + started_prompts.append(prompt) + if prompt == "queued after shell": + queued_prompt_started.set() + result = MagicMock(output=f"done: {prompt}") + result.all_messages.return_value = [] + return result, MagicMock() + + await _run_interactive( + _mock_renderer(), + _interactive_patches(), + fake_input, + extra_patches={ + "code_puppy.cli_runner.run_prompt_with_attachments": fake_run, + "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( + side_effect=lambda text: _mock_parse_result(text) + ), + "code_puppy.command_line.wiggum_state.is_wiggum_active": MagicMock( + return_value=False + ), + "code_puppy.tools.command_runner.get_running_shell_process_count": shell_count_mock, + }, + ) + + assert started_prompts[:2] == ["first task", "queued after shell"] + assert shell_count_mock.call_count <= 1 + + # --------------------------------------------------------------------------- # main_entry() additional tests # --------------------------------------------------------------------------- @@ -1522,6 +2774,7 @@ async def fake_input(*a, **kw): async def test_wiggum_keyboard_interrupt(self): """Lines 874-876: KeyboardInterrupt in wiggum loop.""" call_count = 0 + mock_warning = MagicMock() async def fake_input(*a, **kw): nonlocal call_count @@ -1563,8 +2816,11 @@ def fake_wiggum(): return_value=1 ), "code_puppy.command_line.wiggum_state.stop_wiggum": MagicMock(), + "code_puppy.messaging.emit_warning": mock_warning, }, ) + warning_messages = [call.args[0] for call in mock_warning.call_args_list] + assert "\nInput cancelled" not in warning_messages # --------------------------------------------------------------------------- @@ -1690,58 +2946,12 @@ def fake_import(name, *args, **kwargs): class TestRemainingEdgeCases: """Cover the hardest-to-reach lines.""" - @pytest.mark.anyio - async def test_cancelled_result_wiggum_stop_message(self): - """Lines 750-751: cancelled result emits wiggum stop warning.""" - call_count = 0 - - async def fake_input(*a, **kw): - nonlocal call_count - call_count += 1 - return "write hello" if call_count == 1 else "/exit" - - agent = MagicMock() - agent.get_user_prompt.return_value = "task:" - - # First call to is_wiggum_active: False (in the result==None block) - # But we need the result to be None AND wiggum to be active - # The code path: result is None -> reset terminal -> check wiggum -> stop + emit - wiggum_calls = 0 - - def fake_wiggum(): - nonlocal wiggum_calls - wiggum_calls += 1 - # Called from the result==None block - if wiggum_calls == 1: - return True # in the cancelled block - return False # after the while loop - - mock_stop = MagicMock() - await _run_interactive( - _mock_renderer(), - _interactive_patches(), - fake_input, - agent=agent, - extra_patches={ - "code_puppy.cli_runner.run_prompt_with_attachments": AsyncMock( - return_value=(None, MagicMock()) - ), - "code_puppy.cli_runner.parse_prompt_attachments": MagicMock( - return_value=_mock_parse_result("write hello") - ), - "code_puppy.command_line.wiggum_state.is_wiggum_active": fake_wiggum, - "code_puppy.command_line.wiggum_state.stop_wiggum": mock_stop, - }, - ) - mock_stop.assert_called() - @pytest.mark.anyio async def test_execute_single_prompt_success_path(self): - """Lines 1005-1015: execute_single_prompt success with .output access.""" + """Lines 1005-1015: execute_single_prompt success with tuple unpack.""" from code_puppy.cli_runner import execute_single_prompt mock_renderer = 
_mock_renderer() - # response needs .output attribute (not a tuple) mock_response = MagicMock() mock_response.output = "the response" @@ -1751,7 +2961,7 @@ async def test_execute_single_prompt_success_path(self): patch( "code_puppy.cli_runner.run_prompt_with_attachments", new_callable=AsyncMock, - return_value=mock_response, + return_value=(mock_response, MagicMock()), ) ) stack.enter_context(patch("code_puppy.cli_runner.emit_info")) @@ -1798,9 +3008,9 @@ def fake_import(name, *args, **kwargs): _apply_patches(stack, patches) stack.enter_context( patch( - "code_puppy.command_line.prompt_toolkit_completion.get_input_with_combined_completion", + "code_puppy.command_line.prompt_toolkit_completion.prompt_for_submission", new_callable=AsyncMock, - return_value="/exit", + return_value=_submission("/exit"), ) ) stack.enter_context( @@ -1832,11 +3042,15 @@ def fake_import(name, *args, **kwargs): class TestMainEntryAdditional: - @patch("asyncio.run", side_effect=KeyboardInterrupt) - def test_keyboard_interrupt_stderr_output(self, mock_run): + def test_keyboard_interrupt_stderr_output(self): + def fake_asyncio_run(coro): + coro.close() + raise KeyboardInterrupt + from code_puppy.cli_runner import main_entry with ExitStack() as stack: + stack.enter_context(patch("asyncio.run", side_effect=fake_asyncio_run)) stack.enter_context(patch("code_puppy.cli_runner.reset_unix_terminal")) stack.enter_context( patch("code_puppy.cli_runner.get_use_dbos", return_value=False) diff --git a/tests/test_command_overhaul_targeted.py b/tests/test_command_overhaul_targeted.py new file mode 100644 index 000000000..eefc146ee --- /dev/null +++ b/tests/test_command_overhaul_targeted.py @@ -0,0 +1,664 @@ +import asyncio +import importlib +import threading +from contextlib import ExitStack +from types import ModuleType +from unittest.mock import MagicMock, patch + +import pytest + +from code_puppy.command_line.interactive_command import BackgroundInteractiveCommand +from code_puppy.command_line.interactive_runtime import get_active_interactive_runtime +from code_puppy.command_line.prompt_toolkit_completion import PromptSubmission + + +def _renderer(): + renderer = MagicMock() + renderer.console = MagicMock() + renderer.console.file = MagicMock() + renderer.console.file.flush = MagicMock() + return renderer + + +def _submission( + text: str, *, action: str = "submit", allow_command_dispatch: bool = True +): + return PromptSubmission( + action=action, + text=text, + allow_command_dispatch=allow_command_dispatch, + ) + + +async def _run_interactive( + prompt_side_effect, *, run_prompt_side_effect, handle_command +): + agent = MagicMock() + agent.get_user_prompt.return_value = "task:" + fake_agents_pkg = ModuleType("code_puppy.agents") + fake_agent_manager = ModuleType("code_puppy.agents.agent_manager") + fake_agent_manager.get_current_agent = MagicMock(return_value=agent) + fake_agents_pkg.agent_manager = fake_agent_manager + fake_agents_pkg.get_current_agent = fake_agent_manager.get_current_agent + + with ExitStack() as stack: + stack.enter_context( + patch.dict( + "sys.modules", + { + "code_puppy.agents": fake_agents_pkg, + "code_puppy.agents.agent_manager": fake_agent_manager, + "code_puppy.command_line.command_handler": MagicMock( + handle_command=handle_command + ), + }, + ) + ) + cli_runner_module = importlib.import_module("code_puppy.cli_runner") + stack.enter_context( + patch( + "code_puppy.command_line.prompt_toolkit_completion.prompt_for_submission", + side_effect=prompt_side_effect, + ) + ) + stack.enter_context( + 
patch( + "code_puppy.command_line.prompt_toolkit_completion.get_prompt_with_active_model", + return_value="> ", + ) + ) + stack.enter_context(patch.object(cli_runner_module, "print_truecolor_warning")) + stack.enter_context( + patch.object( + cli_runner_module, + "get_cancel_agent_display_name", + return_value="Ctrl+C", + ) + ) + stack.enter_context( + patch.object(cli_runner_module, "reset_windows_terminal_ansi") + ) + stack.enter_context( + patch.object(cli_runner_module, "reset_windows_terminal_full") + ) + stack.enter_context(patch.object(cli_runner_module, "save_command_to_history")) + stack.enter_context(patch("code_puppy.command_line.motd.print_motd")) + stack.enter_context( + patch( + "code_puppy.command_line.onboarding_wizard.should_show_onboarding", + return_value=False, + ) + ) + stack.enter_context( + patch.object( + cli_runner_module, + "run_prompt_with_attachments", + side_effect=run_prompt_side_effect, + ) + ) + await cli_runner_module.interactive_mode(_renderer()) + + +@pytest.mark.anyio +async def test_busy_slash_text_queues_as_literal_prompt(): + release_first = asyncio.Event() + queued_started = asyncio.Event() + started_prompts: list[str] = [] + handle_command = MagicMock(return_value=True) + + async def prompt_side_effect(*_args, **_kwargs): + prompt_side_effect.calls += 1 + if prompt_side_effect.calls == 1: + return _submission("first task") + if prompt_side_effect.calls == 2: + release_first.set() + return _submission( + "/model", + action="queue", + allow_command_dispatch=False, + ) + await queued_started.wait() + return _submission("/exit") + + prompt_side_effect.calls = 0 + + async def run_prompt_side_effect(_agent, prompt, **_kwargs): + started_prompts.append(prompt) + if prompt == "first task": + await release_first.wait() + if prompt == "/model": + queued_started.set() + result = MagicMock() + result.output = f"response for {prompt}" + result.all_messages.return_value = [] + return result, MagicMock() + + await _run_interactive( + prompt_side_effect, + run_prompt_side_effect=run_prompt_side_effect, + handle_command=handle_command, + ) + + assert started_prompts[:2] == ["first task", "/model"] + handle_command.assert_not_called() + + +@pytest.mark.anyio +async def test_hooks_list_dispatches_as_idle_command(): + handle_command = MagicMock(return_value=True) + + async def prompt_side_effect(*_args, **_kwargs): + prompt_side_effect.calls += 1 + if prompt_side_effect.calls == 1: + return _submission("/hooks list") + return _submission("/exit") + + prompt_side_effect.calls = 0 + + async def run_prompt_side_effect(_agent, prompt, **_kwargs): + raise AssertionError(f"unexpected agent run for {prompt}") + + await _run_interactive( + prompt_side_effect, + run_prompt_side_effect=run_prompt_side_effect, + handle_command=handle_command, + ) + + handle_command.assert_called_once_with("/hooks list") + + +def test_chatgpt_auth_returns_background_interactive_command(): + from code_puppy.plugins.chatgpt_oauth.register_callbacks import ( + _handle_custom_command, + start_chatgpt_oauth_setup, + ) + + result = _handle_custom_command("/chatgpt-auth", "chatgpt-auth") + + assert isinstance(result, BackgroundInteractiveCommand) + assert result.run is start_chatgpt_oauth_setup + + +def test_chatgpt_auth_switches_model_only_on_success(): + from code_puppy.plugins.chatgpt_oauth.register_callbacks import ( + start_chatgpt_oauth_setup, + ) + + cancel_event = threading.Event() + + with ( + patch( + "code_puppy.plugins.chatgpt_oauth.register_callbacks.run_oauth_flow", + return_value=True, + ) 
as mock_flow, + patch( + "code_puppy.plugins.chatgpt_oauth.register_callbacks.set_model_and_reload_agent" + ) as mock_set_model, + ): + assert start_chatgpt_oauth_setup(cancel_event) is True + + mock_flow.assert_called_once_with(cancel_event=cancel_event) + mock_set_model.assert_called_once_with("chatgpt-gpt-5.3-codex") + + +def test_chatgpt_auth_cancel_does_not_switch_model(): + from code_puppy.plugins.chatgpt_oauth.register_callbacks import ( + start_chatgpt_oauth_setup, + ) + + cancel_event = threading.Event() + cancel_event.set() + + with ( + patch( + "code_puppy.plugins.chatgpt_oauth.register_callbacks.run_oauth_flow", + return_value=False, + ), + patch( + "code_puppy.plugins.chatgpt_oauth.register_callbacks.set_model_and_reload_agent" + ) as mock_set_model, + ): + assert start_chatgpt_oauth_setup(cancel_event) is False + + mock_set_model.assert_not_called() + + +def test_claude_auth_returns_background_interactive_command(): + from code_puppy.plugins.claude_code_oauth.register_callbacks import ( + _handle_custom_command, + start_claude_code_oauth_setup, + ) + + result = _handle_custom_command("/claude-code-auth", "claude-code-auth") + + assert isinstance(result, BackgroundInteractiveCommand) + assert result.run is start_claude_code_oauth_setup + + +def test_antigravity_add_returns_background_interactive_command(): + from code_puppy.plugins.antigravity_oauth.register_callbacks import ( + _handle_custom_command, + ) + + with patch( + "code_puppy.plugins.antigravity_oauth.register_callbacks.AccountManager.load_from_disk", + return_value=MagicMock(account_count=1), + ): + result = _handle_custom_command("/antigravity-add", "antigravity-add") + + assert isinstance(result, BackgroundInteractiveCommand) + + +@pytest.mark.anyio +async def test_interject_during_background_command_cancels_cleanly(): + wait_started = asyncio.Event() + interject_started = asyncio.Event() + loop = asyncio.get_running_loop() + started_prompts: list[str] = [] + + def auth_wait(cancel_event: threading.Event) -> None: + loop.call_soon_threadsafe(wait_started.set) + cancel_event.wait(timeout=5) + + handle_command = MagicMock( + side_effect=lambda command: ( + BackgroundInteractiveCommand(run=auth_wait) + if command == "/claude-code-auth" + else True + ) + ) + + async def prompt_side_effect(*_args, **_kwargs): + prompt_side_effect.calls += 1 + if prompt_side_effect.calls == 1: + return _submission("/claude-code-auth") + if prompt_side_effect.calls == 2: + await wait_started.wait() + return _submission("please continue", action="interject") + await interject_started.wait() + return _submission("/exit") + + prompt_side_effect.calls = 0 + + async def run_prompt_side_effect(_agent, prompt, **_kwargs): + started_prompts.append(prompt) + if prompt.startswith("user interjects - please continue"): + interject_started.set() + result = MagicMock() + result.output = f"response for {prompt}" + result.all_messages.return_value = [] + return result, MagicMock() + + await _run_interactive( + prompt_side_effect, + run_prompt_side_effect=run_prompt_side_effect, + handle_command=handle_command, + ) + + assert len(started_prompts) == 1 + assert started_prompts[0].startswith("user interjects - please continue - ") + assert "continue the interrupted task" in started_prompts[0] + handle_command.assert_called_once_with("/claude-code-auth") + + +@pytest.mark.anyio +async def test_queue_during_background_command_drains_after_wait_completes(): + wait_started = asyncio.Event() + queued_started = asyncio.Event() + release_wait = threading.Event() 
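+    # Note (descriptive comment, inferred from this test's own flow): release_wait lets the fake
+    # background wait finish on its own rather than via cancel_event, so the queued prompt should
+    # drain normally once the background command completes.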
+ loop = asyncio.get_running_loop() + started_prompts: list[str] = [] + + def auth_wait(cancel_event: threading.Event) -> None: + loop.call_soon_threadsafe(wait_started.set) + while not cancel_event.is_set(): + if release_wait.wait(timeout=0.05): + return + + handle_command = MagicMock( + side_effect=lambda command: ( + BackgroundInteractiveCommand(run=auth_wait) + if command == "/claude-code-auth" + else True + ) + ) + + async def prompt_side_effect(*_args, **_kwargs): + prompt_side_effect.calls += 1 + if prompt_side_effect.calls == 1: + return _submission("/claude-code-auth") + if prompt_side_effect.calls == 2: + await wait_started.wait() + release_wait.set() + return _submission("report later", action="queue") + await queued_started.wait() + return _submission("/exit") + + prompt_side_effect.calls = 0 + + async def run_prompt_side_effect(_agent, prompt, **_kwargs): + started_prompts.append(prompt) + if prompt == "report later": + queued_started.set() + result = MagicMock() + result.output = f"response for {prompt}" + result.all_messages.return_value = [] + return result, MagicMock() + + await _run_interactive( + prompt_side_effect, + run_prompt_side_effect=run_prompt_side_effect, + handle_command=handle_command, + ) + + assert started_prompts == ["report later"] + handle_command.assert_called_once_with("/claude-code-auth") + + +@pytest.mark.anyio +@pytest.mark.parametrize("cancel_reason", ["ctrl_c", "ctrl+k"]) +async def test_manual_cancel_pauses_queued_prompts_until_user_acts(cancel_reason: str): + first_cancelled = asyncio.Event() + queued_started = asyncio.Event() + started_prompts: list[str] = [] + handle_command = MagicMock(return_value=True) + + async def prompt_side_effect(*_args, **_kwargs): + prompt_side_effect.calls += 1 + if prompt_side_effect.calls == 1: + return _submission("first task") + if prompt_side_effect.calls == 2: + return _submission("queued task", action="queue") + if prompt_side_effect.calls == 3: + runtime = get_active_interactive_runtime() + assert runtime is not None + assert runtime.request_active_cancel(cancel_reason) is True + return _submission("") + + await first_cancelled.wait() + await asyncio.sleep(0.05) + runtime = get_active_interactive_runtime() + assert runtime is not None + assert runtime.is_queue_autodrain_suppressed() is True + assert [item.text for item in runtime.queue] == ["queued task"] + assert queued_started.is_set() is False + return _submission("/exit") + + prompt_side_effect.calls = 0 + + async def run_prompt_side_effect(_agent, prompt, **_kwargs): + started_prompts.append(prompt) + if prompt == "first task": + try: + await asyncio.Future() + except asyncio.CancelledError: + first_cancelled.set() + raise + if prompt == "queued task": + queued_started.set() + result = MagicMock() + result.output = f"response for {prompt}" + result.all_messages.return_value = [] + return result, MagicMock() + + await _run_interactive( + prompt_side_effect, + run_prompt_side_effect=run_prompt_side_effect, + handle_command=handle_command, + ) + + assert started_prompts == ["first task"] + + +@pytest.mark.anyio +async def test_manual_cancel_queue_pause_clears_after_new_submission(): + first_cancelled = asyncio.Event() + queued_started = asyncio.Event() + started_prompts: list[str] = [] + handle_command = MagicMock(return_value=True) + + async def prompt_side_effect(*_args, **_kwargs): + prompt_side_effect.calls += 1 + if prompt_side_effect.calls == 1: + return _submission("first task") + if prompt_side_effect.calls == 2: + return _submission("queued task", 
action="queue") + if prompt_side_effect.calls == 3: + runtime = get_active_interactive_runtime() + assert runtime is not None + assert runtime.request_active_cancel("ctrl_c") is True + return _submission("") + if prompt_side_effect.calls == 4: + await first_cancelled.wait() + await asyncio.sleep(0.05) + runtime = get_active_interactive_runtime() + assert runtime is not None + assert runtime.is_queue_autodrain_suppressed() is True + assert [item.text for item in runtime.queue] == ["queued task"] + return _submission("resume task") + + await queued_started.wait() + return _submission("/exit") + + prompt_side_effect.calls = 0 + + async def run_prompt_side_effect(_agent, prompt, **_kwargs): + started_prompts.append(prompt) + if prompt == "first task": + try: + await asyncio.Future() + except asyncio.CancelledError: + first_cancelled.set() + raise + if prompt == "queued task": + queued_started.set() + result = MagicMock() + result.output = f"response for {prompt}" + result.all_messages.return_value = [] + return result, MagicMock() + + await _run_interactive( + prompt_side_effect, + run_prompt_side_effect=run_prompt_side_effect, + handle_command=handle_command, + ) + + assert started_prompts[:3] == ["first task", "resume task", "queued task"] + + +@pytest.mark.anyio +async def test_wiggum_manual_cancel_does_not_emit_followup_input_cancelled(): + run_started = asyncio.Event() + run_cancelled = asyncio.Event() + handle_command = MagicMock(return_value=True) + warning_messages: list[str] = [] + wiggum_active = {"value": False} + + async def prompt_side_effect(*_args, **_kwargs): + prompt_side_effect.calls += 1 + if prompt_side_effect.calls == 1: + return _submission("first task") + if prompt_side_effect.calls == 2: + await run_started.wait() + runtime = get_active_interactive_runtime() + assert runtime is not None + assert runtime.request_active_cancel("ctrl_c") is True + return _submission("") + if prompt_side_effect.calls == 3: + await run_cancelled.wait() + raise KeyboardInterrupt + return _submission("/exit") + + prompt_side_effect.calls = 0 + + async def run_prompt_side_effect(_agent, prompt, **_kwargs): + wiggum_active["value"] = True + run_started.set() + try: + await asyncio.Future() + except asyncio.CancelledError: + run_cancelled.set() + raise + + def fake_is_wiggum_active(): + return wiggum_active["value"] + + def fake_stop_wiggum(): + wiggum_active["value"] = False + + with ( + patch( + "code_puppy.command_line.wiggum_state.is_wiggum_active", + side_effect=fake_is_wiggum_active, + ), + patch( + "code_puppy.command_line.wiggum_state.stop_wiggum", + side_effect=fake_stop_wiggum, + ), + patch( + "code_puppy.messaging.emit_warning", + side_effect=warning_messages.append, + ), + ): + await _run_interactive( + prompt_side_effect, + run_prompt_side_effect=run_prompt_side_effect, + handle_command=handle_command, + ) + + assert any("šŸ© Wiggum loop stopped" in message for message in warning_messages) + assert "\nInput cancelled" not in warning_messages + + +@pytest.mark.anyio +async def test_wiggum_manual_cancel_keeps_queued_prompts_paused(): + run_started = asyncio.Event() + run_cancelled = asyncio.Event() + queued_started = asyncio.Event() + handle_command = MagicMock(return_value=True) + wiggum_active = {"value": False} + + async def prompt_side_effect(*_args, **_kwargs): + prompt_side_effect.calls += 1 + if prompt_side_effect.calls == 1: + return _submission("first task") + if prompt_side_effect.calls == 2: + return _submission("queued task", action="queue") + if prompt_side_effect.calls 
== 3: + await run_started.wait() + runtime = get_active_interactive_runtime() + assert runtime is not None + assert runtime.request_active_cancel("ctrl_c") is True + return _submission("") + if prompt_side_effect.calls == 4: + await run_cancelled.wait() + raise KeyboardInterrupt + + await asyncio.sleep(0.05) + runtime = get_active_interactive_runtime() + assert runtime is not None + assert runtime.is_queue_autodrain_suppressed() is True + assert [item.text for item in runtime.queue] == ["queued task"] + assert queued_started.is_set() is False + return _submission("/exit") + + prompt_side_effect.calls = 0 + + async def run_prompt_side_effect(_agent, prompt, **_kwargs): + if prompt == "first task": + wiggum_active["value"] = True + run_started.set() + try: + await asyncio.Future() + except asyncio.CancelledError: + run_cancelled.set() + raise + if prompt == "queued task": + queued_started.set() + result = MagicMock() + result.output = f"response for {prompt}" + result.all_messages.return_value = [] + return result, MagicMock() + + def fake_is_wiggum_active(): + return wiggum_active["value"] + + def fake_stop_wiggum(): + wiggum_active["value"] = False + + with ( + patch( + "code_puppy.command_line.wiggum_state.is_wiggum_active", + side_effect=fake_is_wiggum_active, + ), + patch( + "code_puppy.command_line.wiggum_state.stop_wiggum", + side_effect=fake_stop_wiggum, + ), + ): + await _run_interactive( + prompt_side_effect, + run_prompt_side_effect=run_prompt_side_effect, + handle_command=handle_command, + ) + + +@pytest.mark.anyio +async def test_background_command_wait_does_not_autosave(): + wait_started = asyncio.Event() + cancel_seen = threading.Event() + loop = asyncio.get_running_loop() + handle_command = MagicMock() + + def auth_wait(cancel_event: threading.Event) -> None: + loop.call_soon_threadsafe(wait_started.set) + cancel_event.wait(timeout=5) + if cancel_event.is_set(): + cancel_seen.set() + + handle_command.side_effect = lambda command: ( + BackgroundInteractiveCommand(run=auth_wait) + if command == "/claude-code-auth" + else True + ) + + async def prompt_side_effect(*_args, **_kwargs): + prompt_side_effect.calls += 1 + if prompt_side_effect.calls == 1: + return _submission("/claude-code-auth") + await wait_started.wait() + return _submission("/exit") + + prompt_side_effect.calls = 0 + + async def run_prompt_side_effect(_agent, prompt, **_kwargs): + raise AssertionError(f"unexpected agent run for {prompt}") + + with patch("code_puppy.config.auto_save_session_if_enabled") as mock_autosave: + await _run_interactive( + prompt_side_effect, + run_prompt_side_effect=run_prompt_side_effect, + handle_command=handle_command, + ) + + assert cancel_seen.is_set() + mock_autosave.assert_not_called() + handle_command.assert_called_once_with("/claude-code-auth") + + +@pytest.mark.anyio +async def test_mark_idle_if_task_is_idempotent_for_finished_background_work(): + from code_puppy.command_line.interactive_runtime import PromptRuntimeState + + runtime = PromptRuntimeState() + + async def noop() -> None: + return + + task = asyncio.create_task(noop()) + runtime.mark_running(task, kind="interactive_command") + await task + + assert runtime.mark_idle_if_task(task) is True + assert runtime.mark_idle_if_task(task) is False + assert runtime.bg_task is None diff --git a/tests/test_config.py b/tests/test_config.py index 58c97fcc3..60d12b055 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -327,6 +327,7 @@ def test_get_config_keys_with_existing_keys( "openai_reasoning_summary", 
"openai_verbosity", "protected_token_count", + "queue_limit", "resume_message_count", "temperature", "yolo_mode", @@ -385,6 +386,7 @@ def test_get_config_keys_empty_config( "openai_reasoning_summary", "openai_verbosity", "protected_token_count", + "queue_limit", "resume_message_count", "temperature", "yolo_mode", diff --git a/tests/test_config_and_storage_edge_cases.py b/tests/test_config_and_storage_edge_cases.py index 674da22e4..e276c6aad 100644 --- a/tests/test_config_and_storage_edge_cases.py +++ b/tests/test_config_and_storage_edge_cases.py @@ -270,6 +270,7 @@ def test_get_config_keys_includes_defaults(self, mock_config_paths): "auto_save_session", "enable_dbos", "cancel_agent_key", + "queue_limit", ] for key in expected_keys: assert key in result, f"Expected key '{key}' not in config keys" diff --git a/tests/test_config_full_coverage.py b/tests/test_config_full_coverage.py index 1da133abe..d652e9eec 100644 --- a/tests/test_config_full_coverage.py +++ b/tests/test_config_full_coverage.py @@ -230,6 +230,22 @@ def test_get_message_limit_custom_default(self): cp_config.reset_value("message_limit") assert cp_config.get_message_limit(default=50) == 50 + def test_get_queue_limit_default(self): + cp_config.reset_value("queue_limit") + assert cp_config.get_queue_limit() == 25 + + def test_get_queue_limit_custom(self): + cp_config.set_config_value("queue_limit", "7") + assert cp_config.get_queue_limit() == 7 + + def test_get_queue_limit_invalid(self): + cp_config.set_config_value("queue_limit", "bad") + assert cp_config.get_queue_limit() == 25 + + def test_get_queue_limit_clamped_low(self): + cp_config.set_config_value("queue_limit", "0") + assert cp_config.get_queue_limit() == 1 + def test_get_diff_context_lines_default(self): cp_config.reset_value("diff_context_lines") assert cp_config.get_diff_context_lines() == 6 @@ -730,6 +746,7 @@ def test_get_config_keys_returns_sorted_list(self): assert "enable_dbos" in keys assert "enable_streaming" in keys assert "cancel_agent_key" in keys + assert "queue_limit" in keys assert "resume_message_count" in keys diff --git a/tests/test_console_spinner_coverage.py b/tests/test_console_spinner_coverage.py index 5c470675e..e8533734f 100644 --- a/tests/test_console_spinner_coverage.py +++ b/tests/test_console_spinner_coverage.py @@ -186,6 +186,24 @@ def test_start_does_not_create_thread_if_already_running(self): # Should not create a new thread mock_thread_class.assert_not_called() + def test_start_skips_live_display_when_terminal_updates_are_unsafe(self): + from code_puppy.messaging.spinner.console_spinner import ConsoleSpinner + + mock_console = MagicMock(spec=Console) + spinner = ConsoleSpinner(console=mock_console) + + with ( + patch( + "code_puppy.messaging.spinner.console_spinner.supports_live_terminal_updates", + return_value=False, + ), + patch("code_puppy.messaging.spinner.console_spinner.Live") as mock_live_cls, + ): + spinner.start() + + mock_live_cls.assert_not_called() + mock_console.print.assert_not_called() + class TestConsoleSpinnerStop: """Tests for ConsoleSpinner.stop() method.""" @@ -612,13 +630,12 @@ def test_pause_clears_line(self): mock_live = MagicMock() spinner._live = mock_live - mock_stdout = MagicMock() - - with patch.object(sys, "stdout", mock_stdout): + with patch( + "code_puppy.messaging.spinner.console_spinner.clear_live_terminal_line" + ) as mock_clear_line: spinner.pause() - # Should write cursor/line clear codes - mock_stdout.write.assert_called() + mock_clear_line.assert_called_once() def 
test_pause_does_nothing_when_not_spinning(self): """Test that pause does nothing when not spinning.""" diff --git a/tests/test_prompt_toolkit_completion.py b/tests/test_prompt_toolkit_completion.py index c9a1553d1..ef7eb194d 100644 --- a/tests/test_prompt_toolkit_completion.py +++ b/tests/test_prompt_toolkit_completion.py @@ -1,587 +1,914 @@ +import asyncio +import contextlib import os -import sys +import threading from pathlib import Path from unittest.mock import AsyncMock, MagicMock, patch import pytest from prompt_toolkit.buffer import Buffer +from prompt_toolkit.completion import ConditionalCompleter from prompt_toolkit.document import Document -from prompt_toolkit.formatted_text import FormattedText from prompt_toolkit.keys import Keys from prompt_toolkit.layout.controls import BufferControl from prompt_toolkit.layout.processors import TransformationInput +from code_puppy.command_line.interactive_runtime import ( + PromptRuntimeState, + clear_active_interactive_runtime, + register_active_interactive_runtime, +) from code_puppy.command_line.prompt_toolkit_completion import ( AttachmentPlaceholderProcessor, - CDCompleter, - FilePathCompleter, - SetCompleter, + PromptSubmission, + clear_active_prompt_surface, + get_active_prompt_surface_kind, get_input_with_combined_completion, + get_prompt_with_active_model, + has_active_prompt_surface, + is_shell_prompt_suspended, + prompt_for_submission, + register_active_prompt_surface, + render_submitted_prompt_echo, + render_transcript_notice, + set_shell_prompt_suspended, ) -# Skip some path-format sensitive tests on Windows where backslashes are expected -IS_WINDOWS = os.name == "nt" or sys.platform.startswith("win") +@pytest.fixture +def active_runtime(): + runtime = PromptRuntimeState() + register_active_interactive_runtime(runtime) + yield runtime + clear_active_interactive_runtime(runtime) -def setup_files(tmp_path): - d = tmp_path / "dir" - d.mkdir() - (d / "file1.txt").write_text("content1") - (d / "file2.py").write_text("content2") - (tmp_path / "file3.txt").write_text("hi") - (tmp_path / ".hiddenfile").write_text("sneaky") - return d +@patch("code_puppy.command_line.prompt_toolkit_completion.print_formatted_text") +@patch("prompt_toolkit.output.defaults.create_output") +def test_render_submitted_prompt_echo(mock_create_output, mock_print_formatted_text): + mock_output = MagicMock() + mock_create_output.return_value = mock_output -def test_no_symbol(tmp_path): - completer = FilePathCompleter(symbol="@") - doc = Document(text="no_completion_here", cursor_position=7) - completions = list(completer.get_completions(doc, None)) - assert completions == [] + render_submitted_prompt_echo("queued task") + mock_create_output.assert_called_once() + mock_print_formatted_text.assert_called_once() + rendered = mock_print_formatted_text.call_args.args[0] + assert any("queued task" in text for _style, text in rendered) -def test_completion_basic(tmp_path, monkeypatch): - setup_files(tmp_path) - cwd = os.getcwd() - os.chdir(tmp_path) - try: - completer = FilePathCompleter(symbol="@") - doc = Document(text="run @fi", cursor_position=7) - completions = list(completer.get_completions(doc, None)) - # Should see file3.txt from the base dir, but NOT .hiddenfile - values = {c.text for c in completions} - assert any("file3.txt" in v for v in values) - assert not any(".hiddenfile" in v for v in values) - finally: - os.chdir(cwd) +@patch("code_puppy.command_line.prompt_toolkit_completion.print_formatted_text") +@patch("prompt_toolkit.output.defaults.create_output") 
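+# Descriptive note (restates the assertions below): with a prompt surface registered, the echo
+# should route through runtime.run_above_prompt and must not build a standalone output or print
+# directly via print_formatted_text.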
+def test_render_submitted_prompt_echo_uses_prompt_app_when_available( + mock_create_output, mock_print_formatted_text, active_runtime +): + session = MagicMock() + session.app = MagicMock() + active_runtime.register_prompt_surface(session) + active_runtime.run_above_prompt = MagicMock(return_value=True) -def test_completion_directory_listing(tmp_path): - d = setup_files(tmp_path) - completer = FilePathCompleter(symbol="@") - # Set cwd so dir lookup matches. Fix cursor position off by one. - cwd = os.getcwd() - os.chdir(tmp_path) - try: - text = f"test @{d.name}/" - doc = Document(text=text, cursor_position=len(text)) - completions = list(completer.get_completions(doc, None)) - # In modern prompt_toolkit, display is a FormattedText: a list of (style, text) tuples - filenames = { - c.display[0][1] if hasattr(c.display, "__getitem__") else str(c.display) - for c in completions - } - assert "file1.txt" in filenames - assert "file2.py" in filenames - finally: - os.chdir(cwd) + render_submitted_prompt_echo("queued task") + active_runtime.run_above_prompt.assert_called_once() + session.app.print_text.assert_not_called() + mock_create_output.assert_not_called() + mock_print_formatted_text.assert_not_called() -def test_completion_symbol_in_middle(tmp_path): - setup_files(tmp_path) - completer = FilePathCompleter(symbol="@") - cwd = os.getcwd() - os.chdir(tmp_path) - try: - doc = Document(text="echo @fi then something", cursor_position=7) - completions = list(completer.get_completions(doc, None)) - assert any("file3.txt" in c.text for c in completions) - finally: - os.chdir(cwd) +def test_runtime_request_queue_respects_configured_queue_limit(active_runtime): + with patch( + "code_puppy.command_line.interactive_runtime.get_queue_limit", return_value=2 + ): + ok, position, item = active_runtime.request_queue("first") + assert ok is True + assert position == 1 + assert item is not None -def test_completion_with_hidden_file(tmp_path): - # Should show hidden files if user types starting with . - setup_files(tmp_path) - completer = FilePathCompleter(symbol="@") - cwd = os.getcwd() - os.chdir(tmp_path) - try: - doc = Document(text="@.", cursor_position=2) - completions = list(completer.get_completions(doc, None)) - assert any(".hiddenfile" in c.text for c in completions) - finally: - os.chdir(cwd) + ok, position, item = active_runtime.request_queue("second") + assert ok is True + assert position == 2 + assert item is not None + ok, position, item = active_runtime.request_queue("third") + assert ok is False + assert position == 2 + assert item is None -def test_completion_handles_permissionerror(monkeypatch): - # Patch os.listdir to explode! 
- completer = FilePathCompleter(symbol="@") - def explode(path): - raise PermissionError +def test_runtime_request_interject_respects_configured_queue_limit(active_runtime): + with patch( + "code_puppy.command_line.interactive_runtime.get_queue_limit", return_value=1 + ): + ok, position, item = active_runtime.request_interject("now") + assert ok is True + assert position == 1 + assert item is not None - monkeypatch.setattr(os, "listdir", explode) - doc = Document(text="@", cursor_position=1) - # Should not raise: - list(completer.get_completions(doc, None)) + ok, position, item = active_runtime.request_interject("later") + assert ok is False + assert position == 1 + assert item is None -def test_set_completer_on_non_trigger(): - completer = SetCompleter() - doc = Document(text="not_a_set_command") - assert list(completer.get_completions(doc, None)) == [] +@patch("code_puppy.command_line.prompt_toolkit_completion.print_formatted_text") +@patch("prompt_toolkit.output.defaults.create_output") +def test_render_transcript_notice(mock_create_output, mock_print_formatted_text): + mock_output = MagicMock() + mock_create_output.return_value = mock_output + render_transcript_notice("[QUEUE TRIGGERED] queued task") -def test_set_completer_exact_trigger(monkeypatch): - completer = SetCompleter() - doc = Document(text="/set", cursor_position=len("/set")) - completions = list(completer.get_completions(doc, None)) - assert len(completions) == 1 - assert completions[0].text == "/set " # Check the actual text to be inserted - # display_meta can be FormattedText, so access its content - assert completions[0].display_meta[0][1] == "set config key" + mock_create_output.assert_called_once() + mock_print_formatted_text.assert_called_once() + rendered = mock_print_formatted_text.call_args.args[0] + assert any("[QUEUE TRIGGERED] queued task" in text for _style, text in rendered) -def test_set_completer_on_set_trigger(monkeypatch): - # Simulate config keys - monkeypatch.setattr( - "code_puppy.command_line.prompt_toolkit_completion.get_config_keys", - lambda: ["foo", "bar"], - ) - monkeypatch.setattr( - "code_puppy.command_line.prompt_toolkit_completion.get_value", - lambda key: "woo" if key == "foo" else None, - ) - completer = SetCompleter() - doc = Document(text="/set ", cursor_position=len("/set ")) - completions = list(completer.get_completions(doc, None)) - completion_texts = sorted([c.text for c in completions]) - completion_metas = sorted( - [c.display_meta for c in completions] - ) # Corrected display_meta access - - # The completer now provides 'key = value' as text, not '/set key = value' - assert completion_texts == sorted(["bar = ", "foo = woo"]) - # Display meta should be empty now - assert len(completion_metas) == 2 - for meta in completion_metas: - assert isinstance(meta, FormattedText) - assert len(meta) == 1 - assert meta[0][1] == "" - - -def test_set_completer_partial_key(monkeypatch): - monkeypatch.setattr( - "code_puppy.command_line.prompt_toolkit_completion.get_config_keys", - lambda: ["long_key_name", "other_key", "model"], - ) - monkeypatch.setattr( - "code_puppy.command_line.prompt_toolkit_completion.get_value", - lambda key: "value_for_" + key if key == "long_key_name" else None, - ) - completer = SetCompleter() - - doc = Document(text="/set long_k", cursor_position=len("/set long_k")) - completions = list(completer.get_completions(doc, None)) - assert len(completions) == 1 - # `text` for partial key completion should be the key itself and its value part - assert completions[0].text == 
"long_key_name = value_for_long_key_name" - # Display meta should be empty now - assert isinstance(completions[0].display_meta, FormattedText) - assert len(completions[0].display_meta) == 1 - assert completions[0].display_meta[0][1] == "" - - doc = Document(text="/set oth", cursor_position=len("/set oth")) - completions = list(completer.get_completions(doc, None)) - assert len(completions) == 1 - assert completions[0].text == "other_key = " - # Display meta should be empty now - assert isinstance(completions[0].display_meta, FormattedText) - assert len(completions[0].display_meta) == 1 - assert completions[0].display_meta[0][1] == "" - - -def test_set_completer_excludes_model_key(monkeypatch): - # Ensure 'model' is a config key but SetCompleter doesn't offer it - monkeypatch.setattr( - "code_puppy.command_line.prompt_toolkit_completion.get_config_keys", - lambda: ["api_key", "model", "temperature"], - ) - monkeypatch.setattr( - "code_puppy.command_line.prompt_toolkit_completion.get_value", - lambda key: "test_value", - ) - completer = SetCompleter() +@patch("code_puppy.command_line.prompt_toolkit_completion.print_formatted_text") +@patch("prompt_toolkit.output.defaults.create_output") +def test_render_transcript_notice_uses_prompt_app_when_available( + mock_create_output, mock_print_formatted_text, active_runtime +): + session = MagicMock() + session.app = MagicMock() + active_runtime.register_prompt_surface(session) + active_runtime.run_above_prompt = MagicMock(return_value=True) - # Test with full "model" typed - doc = Document(text="/set model", cursor_position=len("/set model")) - completions = list(completer.get_completions(doc, None)) - assert completions == [], ( - "SetCompleter should not complete for 'model' key directly" - ) + render_transcript_notice("[QUEUE TRIGGERED] queued task") - # Test with partial "mo" that would match "model" - doc = Document(text="/set mo", cursor_position=len("/set mo")) - completions = list(completer.get_completions(doc, None)) - assert completions == [], ( - "SetCompleter should not complete for 'model' key even partially" - ) + active_runtime.run_above_prompt.assert_called_once() + session.app.print_text.assert_not_called() + mock_create_output.assert_not_called() + mock_print_formatted_text.assert_not_called() + + +@pytest.mark.asyncio +@patch("code_puppy.command_line.prompt_toolkit_completion.patch_stdout") +@patch("code_puppy.command_line.prompt_toolkit_completion.PromptSession") +@patch("code_puppy.command_line.prompt_toolkit_completion.merge_completers") +async def test_prompt_for_submission_allows_at_completion_while_busy_but_blocks_it_in_chooser( + mock_merge_completers, + mock_prompt_session_cls, + mock_patch_stdout, + active_runtime, +): + active_runtime.running = True + mock_session_instance = MagicMock() + mock_session_instance.prompt_async = AsyncMock(return_value="test input") + mock_prompt_session_cls.return_value = mock_session_instance + mock_merge_completers.return_value = MagicMock() + mock_patch_stdout.return_value.__enter__ = MagicMock() + mock_patch_stdout.return_value.__exit__ = MagicMock(return_value=False) - # Ensure other keys are still completed - doc = Document(text="/set api", cursor_position=len("/set api")) - completions = list(completer.get_completions(doc, None)) - assert len(completions) == 1 - assert completions[0].text == "api_key = test_value" + await prompt_for_submission() + attachment_completer = mock_merge_completers.call_args.args[0][0] + assert isinstance(attachment_completer, ConditionalCompleter) + assert 
attachment_completer.filter() is True -def test_set_completer_excludes_puppy_token(monkeypatch): - # Ensure 'puppy_token' is a config key but SetCompleter doesn't offer it - monkeypatch.setattr( - "code_puppy.command_line.prompt_toolkit_completion.get_config_keys", - lambda: ["puppy_token", "user_name", "temp_dir"], - ) - monkeypatch.setattr( - "code_puppy.command_line.prompt_toolkit_completion.get_value", - lambda key: "sensitive_token_value" if key == "puppy_token" else "normal_value", - ) - completer = SetCompleter() + active_runtime.set_pending_submission("queued task") + assert attachment_completer.filter() is False - # Test with full "puppy_token" typed - doc = Document(text="/set puppy_token", cursor_position=len("/set puppy_token")) - completions = list(completer.get_completions(doc, None)) - assert completions == [], ( - "SetCompleter should not complete for 'puppy_token' key directly" - ) - # Test with partial "puppy" that would match "puppy_token" - doc = Document(text="/set puppy", cursor_position=len("/set puppy")) - completions = list(completer.get_completions(doc, None)) - assert completions == [], ( - "SetCompleter should not complete for 'puppy_token' key even partially" +@pytest.mark.asyncio +@patch("code_puppy.command_line.prompt_toolkit_completion.PromptSession") +@patch("code_puppy.command_line.prompt_toolkit_completion._interrupt_shell_from_prompt") +async def test_get_input_key_binding_ctrl_c_shell_interrupt_suppresses_queue_autodrain( + mock_interrupt_shell, mock_prompt_session_cls, active_runtime +): + mock_session_instance = MagicMock() + mock_session_instance.prompt_async = AsyncMock(return_value="test") + mock_prompt_session_cls.return_value = mock_session_instance + + await get_input_with_combined_completion() + + bindings = mock_prompt_session_cls.call_args[1]["key_bindings"] + ctrl_c_binding = next( + binding_obj for binding_obj in bindings.bindings if binding_obj.keys == ("c-c",) ) - # Ensure other keys are still completed - doc = Document(text="/set user", cursor_position=len("/set user")) - completions = list(completer.get_completions(doc, None)) - assert len(completions) == 1 - assert completions[0].text == "user_name = normal_value" + active_runtime.notify_shell_started() + active_runtime.request_queue("queued task") + active_runtime.set_pending_submission("draft") + buffer = Buffer(document=Document(text="chooser text", cursor_position=11)) + mock_event = MagicMock() + mock_event.app = MagicMock() + mock_event.app.current_buffer = buffer -def test_set_completer_no_match(monkeypatch): - monkeypatch.setattr("code_puppy.config.get_config_keys", lambda: ["actual_key"]) - completer = SetCompleter() - doc = Document(text="/set non_existent", cursor_position=len("/set non_existent")) - completions = list(completer.get_completions(doc, None)) - assert completions == [] + ctrl_c_binding.handler(mock_event) + mock_interrupt_shell.assert_called_once_with("Ctrl-C") + assert active_runtime.is_queue_autodrain_suppressed() is True + assert active_runtime.has_pending_submission() is False + assert buffer.text == "" + mock_event.app.exit.assert_not_called() -def test_cd_completer_on_non_trigger(): - completer = CDCompleter() - doc = Document(text="something_else") - assert list(completer.get_completions(doc, None)) == [] +@pytest.mark.asyncio +@patch("code_puppy.command_line.prompt_toolkit_completion.PromptSession") +@patch("code_puppy.command_line.prompt_toolkit_completion._interrupt_shell_from_prompt") +async def 
test_get_input_key_binding_configured_cancel_shell_interrupt_suppresses_queue_autodrain( + mock_interrupt_shell, mock_prompt_session_cls, active_runtime +): + mock_session_instance = MagicMock() + mock_session_instance.prompt_async = AsyncMock(return_value="test") + mock_prompt_session_cls.return_value = mock_session_instance -@pytest.fixture -def setup_cd_test_dirs(tmp_path): - # Current working directory structure - (tmp_path / "dir1").mkdir() - (tmp_path / "dir2_long_name").mkdir() - (tmp_path / "another_dir").mkdir() - (tmp_path / "file_not_dir.txt").write_text("hello") - - # Home directory structure for testing '~' expansion - mock_home_path = tmp_path / "mock_home" / "user" - mock_home_path.mkdir(parents=True, exist_ok=True) - (mock_home_path / "Documents").mkdir() - (mock_home_path / "Downloads").mkdir() - (mock_home_path / "Desktop").mkdir() - return tmp_path, mock_home_path - - -@pytest.mark.skipif(IS_WINDOWS, reason="Path separator expectations differ on Windows") -def test_cd_completer_initial_trigger(setup_cd_test_dirs, monkeypatch): - tmp_path, _ = setup_cd_test_dirs - monkeypatch.chdir(tmp_path) - completer = CDCompleter() - doc = Document(text="/cd ", cursor_position=len("/cd ")) - completions = list(completer.get_completions(doc, None)) - texts = sorted([c.text for c in completions]) - displays = sorted( - [ - "".join(item[1] for item in c.display) - if isinstance(c.display, list) - else str(c.display) - for c in completions - ] - ) - - # mock_home is also created at the root of tmp_path by the fixture - assert texts == sorted(["another_dir/", "dir1/", "dir2_long_name/", "mock_home/"]) - assert displays == sorted( - ["another_dir/", "dir1/", "dir2_long_name/", "mock_home/"] - ) - assert not any("file_not_dir.txt" in t for t in texts) - - -@pytest.mark.skipif(IS_WINDOWS, reason="Path separator expectations differ on Windows") -def test_cd_completer_partial_name(setup_cd_test_dirs, monkeypatch): - tmp_path, _ = setup_cd_test_dirs - monkeypatch.chdir(tmp_path) - completer = CDCompleter() - doc = Document(text="/cd di", cursor_position=len("/cd di")) - completions = list(completer.get_completions(doc, None)) - texts = sorted([c.text for c in completions]) - assert texts == sorted(["dir1/", "dir2_long_name/"]) - assert "another_dir/" not in texts - - -@pytest.mark.skipif(IS_WINDOWS, reason="Path separator expectations differ on Windows") -def test_cd_completer_sub_directory(setup_cd_test_dirs, monkeypatch): - tmp_path, _ = setup_cd_test_dirs - # Create a subdirectory with content - sub_dir = tmp_path / "dir1" / "sub1" - sub_dir.mkdir(parents=True) - (tmp_path / "dir1" / "sub2_another").mkdir() - - monkeypatch.chdir(tmp_path) - completer = CDCompleter() - doc = Document(text="/cd dir1/", cursor_position=len("/cd dir1/")) - completions = list(completer.get_completions(doc, None)) - texts = sorted([c.text for c in completions]) - # Completions should be relative to the 'base' typed in the command, which is 'dir1/' - # So, the 'text' part of completion should be 'dir1/sub1/' and 'dir1/sub2_another/' - assert texts == sorted(["dir1/sub1/", "dir1/sub2_another/"]) - displays = sorted(["".join(item[1] for item in c.display) for c in completions]) - assert displays == sorted(["sub1/", "sub2_another/"]) - - -@pytest.mark.skipif(IS_WINDOWS, reason="Path separator expectations differ on Windows") -def test_cd_completer_partial_sub_directory(setup_cd_test_dirs, monkeypatch): - tmp_path, _ = setup_cd_test_dirs - sub_dir = tmp_path / "dir1" / "sub_alpha" - sub_dir.mkdir(parents=True) - 
(tmp_path / "dir1" / "sub_beta").mkdir() - - monkeypatch.chdir(tmp_path) - completer = CDCompleter() - doc = Document(text="/cd dir1/sub_a", cursor_position=len("/cd dir1/sub_a")) - completions = list(completer.get_completions(doc, None)) - texts = sorted([c.text for c in completions]) - assert texts == ["dir1/sub_alpha/"] - displays = sorted(["".join(item[1] for item in c.display) for c in completions]) - assert displays == ["sub_alpha/"] - - -@pytest.mark.skipif(IS_WINDOWS, reason="Path separator expectations differ on Windows") -def test_cd_completer_home_directory_expansion(setup_cd_test_dirs, monkeypatch): - _, mock_home_path = setup_cd_test_dirs - monkeypatch.setattr( - os.path, "expanduser", lambda p: p.replace("~", str(mock_home_path)) + with patch( + "code_puppy.command_line.prompt_toolkit_completion.get_value", + side_effect=lambda key, default=None: "ctrl+k" + if key == "cancel_agent_key" + else default, + ): + await get_input_with_combined_completion() + + bindings = mock_prompt_session_cls.call_args[1]["key_bindings"] + ctrl_k_binding = next( + binding_obj for binding_obj in bindings.bindings if binding_obj.keys == ("c-k",) ) - # We don't chdir here, as ~ expansion should work irrespective of cwd - completer = CDCompleter() - doc = Document(text="/cd ~/", cursor_position=len("/cd ~/")) - completions = list(completer.get_completions(doc, None)) - texts = sorted([c.text for c in completions]) - displays = sorted(["".join(item[1] for item in c.display) for c in completions]) + active_runtime.notify_shell_started() + active_runtime.request_queue("queued task") + active_runtime.set_pending_submission("draft") - # The 'text' should include the '~/' prefix as that's what the user typed as base - assert texts == sorted(["~/Desktop/", "~/Documents/", "~/Downloads/"]) - assert displays == sorted(["Desktop/", "Documents/", "Downloads/"]) + buffer = Buffer(document=Document(text="chooser text", cursor_position=11)) + mock_event = MagicMock() + mock_event.app = MagicMock() + mock_event.app.current_buffer = buffer + ctrl_k_binding.handler(mock_event) -@pytest.mark.skipif(IS_WINDOWS, reason="Path separator expectations differ on Windows") -def test_cd_completer_home_directory_expansion_partial(setup_cd_test_dirs, monkeypatch): - _, mock_home_path = setup_cd_test_dirs - monkeypatch.setattr( - os.path, "expanduser", lambda p: p.replace("~", str(mock_home_path)) - ) + mock_interrupt_shell.assert_called_once_with("CTRL+K") + assert active_runtime.is_queue_autodrain_suppressed() is True + assert active_runtime.has_pending_submission() is False + assert buffer.text == "" + mock_event.app.exit.assert_not_called() - completer = CDCompleter() - doc = Document(text="/cd ~/Do", cursor_position=len("/cd ~/Do")) - completions = list(completer.get_completions(doc, None)) - texts = sorted([c.text for c in completions]) - displays = sorted(["".join(item[1] for item in c.display) for c in completions]) - assert texts == sorted(["~/Documents/", "~/Downloads/"]) - assert displays == sorted(["Documents/", "Downloads/"]) - assert "~/Desktop/" not in texts +@pytest.mark.asyncio +@patch("code_puppy.command_line.prompt_toolkit_completion.PromptSession") +async def test_get_input_key_binding_escape_drops_pending_submission( + mock_prompt_session_cls, active_runtime +): + mock_session_instance = MagicMock() + mock_session_instance.prompt_async = AsyncMock(return_value="test") + mock_prompt_session_cls.return_value = mock_session_instance + await get_input_with_combined_completion() -def 
test_cd_completer_non_existent_base(setup_cd_test_dirs, monkeypatch): - tmp_path, _ = setup_cd_test_dirs - monkeypatch.chdir(tmp_path) - completer = CDCompleter() - doc = Document( - text="/cd non_existent_dir/", cursor_position=len("/cd non_existent_dir/") + bindings = mock_prompt_session_cls.call_args[1]["key_bindings"] + escape_binding = next( + binding_obj + for binding_obj in bindings.bindings + if binding_obj.keys == (Keys.Escape,) ) - completions = list(completer.get_completions(doc, None)) - assert completions == [] + active_runtime.set_pending_submission("queued task") -def test_cd_completer_permission_error_silently_handled(monkeypatch): - completer = CDCompleter() - # Patch the utility function used by CDCompleter - with patch( - "code_puppy.command_line.prompt_toolkit_completion.list_directory", - side_effect=PermissionError, - ) as mock_list_dir: - doc = Document(text="/cd somedir/", cursor_position=len("/cd somedir/")) - completions = list(completer.get_completions(doc, None)) - assert completions == [] - mock_list_dir.assert_called_once() + buffer = Buffer(document=Document(text="stray chooser text", cursor_position=18)) + mock_event = MagicMock() + mock_event.app = MagicMock() + mock_event.app.current_buffer = buffer + + escape_binding.handler(mock_event) + + assert active_runtime.has_pending_submission() is False + assert buffer.text == "" + mock_event.app.exit.assert_not_called() @pytest.mark.asyncio @patch("code_puppy.command_line.prompt_toolkit_completion.PromptSession") -@patch("code_puppy.command_line.prompt_toolkit_completion.FileHistory") -@patch("code_puppy.command_line.prompt_toolkit_completion.merge_completers") -async def test_get_input_with_combined_completion_defaults( - mock_merge_completers, mock_file_history, mock_prompt_session_cls +async def test_get_input_key_binding_up_restores_pending_submission( + mock_prompt_session_cls, active_runtime ): mock_session_instance = MagicMock() - mock_session_instance.prompt_async = AsyncMock(return_value="test input") + mock_session_instance.prompt_async = AsyncMock(return_value="test") mock_prompt_session_cls.return_value = mock_session_instance - mock_merge_completers.return_value = MagicMock() # Mocked merged completer - result = await get_input_with_combined_completion() + await get_input_with_combined_completion() - mock_prompt_session_cls.assert_called_once() - assert ( - mock_prompt_session_cls.call_args[1]["completer"] - == mock_merge_completers.return_value - ) - assert mock_prompt_session_cls.call_args[1]["history"] is None - assert mock_prompt_session_cls.call_args[1]["complete_while_typing"] is True - assert "key_bindings" in mock_prompt_session_cls.call_args[1] - assert "input_processors" in mock_prompt_session_cls.call_args[1] - assert isinstance( - mock_prompt_session_cls.call_args[1]["input_processors"][0], - AttachmentPlaceholderProcessor, + bindings = mock_prompt_session_cls.call_args[1]["key_bindings"] + up_binding = next( + binding_obj + for binding_obj in bindings.bindings + if binding_obj.keys == (Keys.Up,) ) - mock_session_instance.prompt_async.assert_called_once() - # Check default prompt string was converted to FormattedText - assert isinstance(mock_session_instance.prompt_async.call_args[0][0], FormattedText) - assert mock_session_instance.prompt_async.call_args[0][0] == FormattedText( - [(None, ">>> ")] - ) - assert "style" in mock_session_instance.prompt_async.call_args[1] + assert up_binding.filter() is False - # NOTE: update_model_in_input is no longer called from the prompt layer. 
- # Instead, /model commands are handled by the command handler. - # The prompt layer now just returns the input as-is. - assert result == "test input" - mock_file_history.assert_not_called() + active_runtime.set_pending_submission("queued task") + assert up_binding.filter() is True + + buffer = Buffer(document=Document(text="stray chooser text", cursor_position=18)) + mock_event = MagicMock() + mock_event.app = MagicMock() + mock_event.app.current_buffer = buffer + + up_binding.handler(mock_event) + + assert active_runtime.has_pending_submission() is False + assert buffer.text == "queued task" + assert buffer.cursor_position == len("queued task") + mock_event.app.exit.assert_not_called() @pytest.mark.asyncio @patch("code_puppy.command_line.prompt_toolkit_completion.PromptSession") -@patch("code_puppy.command_line.prompt_toolkit_completion.SafeFileHistory") -async def test_get_input_with_combined_completion_with_history( - mock_safe_file_history, mock_prompt_session_cls +async def test_get_input_key_binding_edit_restores_pending_submission( + mock_prompt_session_cls, active_runtime ): mock_session_instance = MagicMock() - mock_session_instance.prompt_async = AsyncMock(return_value="input with history") + mock_session_instance.prompt_async = AsyncMock(return_value="test") mock_prompt_session_cls.return_value = mock_session_instance - mock_history_instance = MagicMock() - mock_safe_file_history.return_value = mock_history_instance - history_path = "~/.my_test_history" - result = await get_input_with_combined_completion(history_file=history_path) + await get_input_with_combined_completion() + + bindings = mock_prompt_session_cls.call_args[1]["key_bindings"] + edit_binding = next( + binding_obj for binding_obj in bindings.bindings if binding_obj.keys == ("e",) + ) + + assert edit_binding.filter() is False + + active_runtime.set_pending_submission("queued task") + assert edit_binding.filter() is True + + buffer = Buffer(document=Document(text="stray chooser text", cursor_position=18)) + mock_event = MagicMock() + mock_event.app = MagicMock() + mock_event.app.current_buffer = buffer - mock_safe_file_history.assert_called_once_with(history_path) - assert mock_prompt_session_cls.call_args[1]["history"] == mock_history_instance - # NOTE: update_model_in_input is no longer called from the prompt layer. 
- assert result == "input with history" + edit_binding.handler(mock_event) + + assert active_runtime.has_pending_submission() is False + assert buffer.text == "queued task" + assert buffer.cursor_position == len("queued task") + mock_event.app.exit.assert_not_called() @pytest.mark.asyncio @patch("code_puppy.command_line.prompt_toolkit_completion.PromptSession") -async def test_get_input_with_combined_completion_custom_prompt( - mock_prompt_session_cls, +async def test_empty_enter_recalls_next_paused_queue_prompt( + mock_prompt_session_cls, active_runtime ): mock_session_instance = MagicMock() - mock_session_instance.prompt_async = AsyncMock(return_value="custom prompt input") + mock_session_instance.prompt_async = AsyncMock(return_value="test") mock_prompt_session_cls.return_value = mock_session_instance - # Test with string prompt - custom_prompt_str = "Custom> " - await get_input_with_combined_completion(prompt_str=custom_prompt_str) - assert mock_session_instance.prompt_async.call_args[0][0] == FormattedText( - [(None, custom_prompt_str)] + await get_input_with_combined_completion() + + bindings = mock_prompt_session_cls.call_args[1]["key_bindings"] + enter_binding = next( + binding_obj + for binding_obj in bindings.bindings + if binding_obj.keys == (Keys.ControlM,) ) - # Test with FormattedText prompt - custom_prompt_ft = FormattedText([("class:test", "Formatted>")]) - await get_input_with_combined_completion(prompt_str=custom_prompt_ft) - assert mock_session_instance.prompt_async.call_args[0][0] == custom_prompt_ft + active_runtime.request_queue("queued task", allow_command_dispatch=False) + active_runtime.suppress_queue_autodrain() + + buffer = Buffer(document=Document(text="", cursor_position=0)) + mock_event = MagicMock() + mock_event.app = MagicMock() + mock_event.app.current_buffer = buffer + + enter_binding.handler(mock_event) + + assert buffer.text == "queued task" + assert buffer.cursor_position == len("queued task") + assert len(active_runtime.queue) == 1 + assert active_runtime.queue[0].text == "queued task" + assert active_runtime.queue[0].allow_command_dispatch is False + mock_event.app.exit.assert_not_called() @pytest.mark.asyncio +@patch("code_puppy.command_line.prompt_toolkit_completion.patch_stdout") @patch("code_puppy.command_line.prompt_toolkit_completion.PromptSession") -async def test_get_input_with_combined_completion_no_model_update( - mock_prompt_session_cls, +async def test_prompt_for_submission_recalled_queue_preserves_policy_and_dequeues_on_submit( + mock_prompt_session_cls, mock_patch_stdout, active_runtime ): - raw_input = "raw user input" mock_session_instance = MagicMock() - mock_session_instance.prompt_async = AsyncMock(return_value=raw_input) + mock_session_instance.default_buffer = Buffer( + document=Document(text="", cursor_position=0) + ) mock_prompt_session_cls.return_value = mock_session_instance - - result = await get_input_with_combined_completion() - # NOTE: update_model_in_input is no longer called from the prompt layer. - # The prompt layer now just returns the input as-is. 
- assert result == raw_input - - -# To test key bindings, we need to inspect the KeyBindings object passed to PromptSession -# We can get it from the mock_prompt_session_cls.call_args + mock_patch_stdout.return_value.__enter__ = MagicMock() + mock_patch_stdout.return_value.__exit__ = MagicMock(return_value=False) + + active_runtime.request_queue("/agent", allow_command_dispatch=False) + active_runtime.suppress_queue_autodrain() + + async def fake_prompt_async(*args, **kwargs): + bindings = mock_prompt_session_cls.call_args[1]["key_bindings"] + enter_binding = next( + binding_obj + for binding_obj in bindings.bindings + if binding_obj.keys == (Keys.ControlM,) + ) + mock_event = MagicMock() + mock_event.app = MagicMock() + mock_event.app.current_buffer = mock_session_instance.default_buffer + enter_binding.handler(mock_event) + return mock_session_instance.default_buffer.text + + mock_session_instance.prompt_async = AsyncMock(side_effect=fake_prompt_async) + + result = await prompt_for_submission() + + assert result == PromptSubmission( + action="submit", + text="/agent", + echo_in_transcript=False, + allow_command_dispatch=False, + ) + assert active_runtime.queue == [] -@pytest.mark.xfail( - reason="Alt+M binding representation varies across prompt_toolkit versions; current implementation may not expose Keys.Escape + 'm' tuple.", - strict=False, -) @pytest.mark.asyncio @patch("code_puppy.command_line.prompt_toolkit_completion.PromptSession") -async def test_get_input_key_binding_alt_m(mock_prompt_session_cls): - # We don't need the function to run fully, just to set up PromptSession +async def test_get_input_chooser_makes_buffer_read_only( + mock_prompt_session_cls, active_runtime +): mock_session_instance = MagicMock() mock_session_instance.prompt_async = AsyncMock(return_value="test") + mock_session_instance.default_buffer = MagicMock() mock_prompt_session_cls.return_value = mock_session_instance await get_input_with_combined_completion() - bindings = mock_prompt_session_cls.call_args[1]["key_bindings"] - # Find the Alt+M binding (Escape, 'm') - alt_m_handler = None - for binding in bindings.bindings: - if ( - len(binding.keys) == 2 - and binding.keys[0] == Keys.Escape - and binding.keys[1] == "m" - ): - alt_m_handler = binding.handler + read_only_filter = mock_session_instance.default_buffer.read_only + assert read_only_filter() is False + + active_runtime.set_pending_submission("queued task") + assert read_only_filter() is True + + active_runtime.set_pending_submission(None) + assert read_only_filter() is False + + +def test_prompt_runtime_registry_round_trip(active_runtime): + session = MagicMock() + session.app = MagicMock() + + clear_active_prompt_surface() + register_active_prompt_surface("main", session) + + assert has_active_prompt_surface() is True + assert get_active_prompt_surface_kind() == "main" + assert is_shell_prompt_suspended() is False + + session.app.invalidate.reset_mock() + set_shell_prompt_suspended(True) + assert is_shell_prompt_suspended() is True + session.app.invalidate.assert_called_once() + + set_shell_prompt_suspended(False) + clear_active_prompt_surface(session) + assert has_active_prompt_surface() is False + assert get_active_prompt_surface_kind() is None + assert is_shell_prompt_suspended() is False + + +def test_spinner_invalidation_yields_to_recent_prompt_redraw( + monkeypatch, active_runtime +): + session = MagicMock() + session.app = MagicMock() + active_runtime.register_prompt_surface(session) + session.app.invalidate.reset_mock() + + samples = 
iter([10.0, 10.02, 10.12]) + monkeypatch.setattr( + "code_puppy.command_line.interactive_runtime.time.monotonic", + lambda: next(samples), + ) + + active_runtime.invalidate_prompt() + session.app.invalidate.assert_called_once() + + session.app.invalidate.reset_mock() + active_runtime.invalidate_prompt_for_spinner() + session.app.invalidate.assert_not_called() + + active_runtime.invalidate_prompt_for_spinner() + session.app.invalidate.assert_called_once() + + +@pytest.mark.asyncio +async def test_run_above_prompt_async_serializes_callbacks(active_runtime, monkeypatch): + session = MagicMock() + session.app = MagicMock() + session.app.loop = asyncio.get_running_loop() + active_runtime.register_prompt_surface(session) + + active_count = 0 + max_active = 0 + seen: list[str] = [] + + async def fake_run_in_terminal(func): + nonlocal active_count, max_active + active_count += 1 + max_active = max(max_active, active_count) + await asyncio.sleep(0.01) + func() + await asyncio.sleep(0.01) + active_count -= 1 + + monkeypatch.setattr( + "prompt_toolkit.application.run_in_terminal", + fake_run_in_terminal, + ) + + first = asyncio.create_task( + active_runtime.run_above_prompt_async(lambda: seen.append("first")) + ) + await asyncio.sleep(0) + second = asyncio.create_task( + active_runtime.run_above_prompt_async(lambda: seen.append("second")) + ) + + assert await first is True + assert await second is True + assert seen == ["first", "second"] + assert max_active == 1 + + +@pytest.mark.asyncio +async def test_run_above_prompt_sync_and_async_share_serialization( + active_runtime, monkeypatch +): + session = MagicMock() + session.app = MagicMock() + session.app.loop = asyncio.get_running_loop() + active_runtime.register_prompt_surface(session) + + active_count = 0 + max_active = 0 + seen: list[str] = [] + sync_result: dict[str, bool] = {} + + async def fake_run_in_terminal(func): + nonlocal active_count, max_active + active_count += 1 + max_active = max(max_active, active_count) + await asyncio.sleep(0.01) + func() + await asyncio.sleep(0.01) + active_count -= 1 + + monkeypatch.setattr( + "prompt_toolkit.application.run_in_terminal", + fake_run_in_terminal, + ) + + async_task = asyncio.create_task( + active_runtime.run_above_prompt_async(lambda: seen.append("async")) + ) + await asyncio.sleep(0.005) + + def call_sync() -> None: + sync_result["ok"] = active_runtime.run_above_prompt( + lambda: seen.append("sync"), + timeout=1.0, + ) + + thread = threading.Thread(target=call_sync) + thread.start() + + assert await async_task is True + for _ in range(50): + if "ok" in sync_result: break - assert alt_m_handler is not None, "Alt+M keybinding not found" + await asyncio.sleep(0.01) + thread.join() + + assert sync_result == {"ok": True} + assert seen == ["async", "sync"] + assert max_active == 1 + + +def test_get_prompt_with_active_model_omits_shell_status(monkeypatch, active_runtime): + clear_active_prompt_surface() + session = MagicMock() + session.app = MagicMock() + register_active_prompt_surface("main", session) + set_shell_prompt_suspended(True) + + monkeypatch.setattr( + "code_puppy.command_line.prompt_toolkit_completion.get_puppy_name", + lambda: "Buddy", + ) + monkeypatch.setattr( + "code_puppy.command_line.prompt_toolkit_completion.get_active_model", + lambda: "gpt-test", + ) + monkeypatch.setattr( + "code_puppy.command_line.prompt_toolkit_completion.os.getcwd", + lambda: "/tmp/demo", + ) + + agent = MagicMock() + agent.display_name = "code-puppy" + agent.get_model_name.return_value = "gpt-test" + + with 
patch(
+        "code_puppy.command_line.prompt_toolkit_completion._get_current_agent_for_prompt",
+        return_value=agent,
+    ):
+        with patch("shutil.get_terminal_size", return_value=os.terminal_size((80, 24))):
+            rendered = "".join(text for _style, text in get_prompt_with_active_model())
+
+    assert "shell running" not in rendered
+    clear_active_prompt_surface()
+
+
+def test_get_prompt_with_active_model_shows_thinking_status(
+    monkeypatch, active_runtime
+):
+    clear_active_prompt_surface()
+    session = MagicMock()
+    session.app = MagicMock()
+    register_active_prompt_surface("main", session)
+    active_runtime.running = True
+    active_runtime.prompt_status_started_at = 0.0
+
+    monkeypatch.setattr(
+        "code_puppy.command_line.prompt_toolkit_completion.get_puppy_name",
+        lambda: "Buddy",
+    )
+    monkeypatch.setattr(
+        "code_puppy.command_line.prompt_toolkit_completion.get_active_model",
+        lambda: "gpt-test",
+    )
+    monkeypatch.setattr(
+        "code_puppy.command_line.prompt_toolkit_completion.os.getcwd",
+        lambda: "/tmp/demo",
+    )
+    monkeypatch.setattr(
+        "code_puppy.command_line.interactive_runtime.time.monotonic",
+        lambda: 0.18,
+    )
+
+    agent = MagicMock()
+    agent.display_name = "code-puppy"
+    agent.get_model_name.return_value = "gpt-test"
+
+    with (
+        patch(
+            "code_puppy.command_line.prompt_toolkit_completion._get_current_agent_for_prompt",
+            return_value=agent,
+        ),
+        patch(
+            "code_puppy.command_line.prompt_toolkit_completion.SpinnerBase.get_context_info",
+            return_value="Tokens: 1,650/272,000 (0.6% used)",
+        ),
+        patch("shutil.get_terminal_size", return_value=os.terminal_size((80, 24))),
+    ):
+        rendered = "".join(text for _style, text in get_prompt_with_active_model())
+
+    assert "Buddy is thinking..." in rendered
+    assert "( 🐶 ) " in rendered
+    assert "Tokens: 1,650/272,000 (0.6% used)" in rendered
+    assert rendered.index("Buddy is thinking...") < rendered.index("─" * 80)
+    clear_active_prompt_surface()
+
+
+def test_get_prompt_with_active_model_shows_pending_hint_copy(
+    monkeypatch, active_runtime
+):
+    clear_active_prompt_surface()
+    session = MagicMock()
+    session.app = MagicMock()
+    register_active_prompt_surface("main", session)
+    active_runtime.set_pending_submission("queued task")
+
+    monkeypatch.setattr(
+        "code_puppy.command_line.prompt_toolkit_completion.get_puppy_name",
+        lambda: "Buddy",
+    )
+    monkeypatch.setattr(
+        "code_puppy.command_line.prompt_toolkit_completion.get_active_model",
+        lambda: "gpt-test",
+    )
+    monkeypatch.setattr(
+        "code_puppy.command_line.prompt_toolkit_completion.os.getcwd",
+        lambda: "/tmp/demo",
+    )
+
+    agent = MagicMock()
+    agent.display_name = "code-puppy"
+    agent.get_model_name.return_value = "gpt-test"
+
+    with (
+        patch(
+            "code_puppy.command_line.prompt_toolkit_completion._get_current_agent_for_prompt",
+            return_value=agent,
+        ),
+        patch("shutil.get_terminal_size", return_value=os.terminal_size((80, 24))),
+    ):
+        rendered = "".join(text for _style, text in get_prompt_with_active_model())
+
+    assert "[i]nterject [q]ueue [e]dit [esc]ape" in rendered
+    clear_active_prompt_surface()
+
+
+def test_get_prompt_with_active_model_shows_ephemeral_status(monkeypatch, active_runtime):
+    clear_active_prompt_surface()
+    session = MagicMock()
+    session.app = MagicMock()
+    register_active_prompt_surface("main", session)
+    active_runtime.running = True
+    active_runtime.prompt_status_started_at = 0.0
+    active_runtime.set_prompt_ephemeral_status("🔧 Calling list_files... 11 token(s)")
+    active_runtime.set_prompt_ephemeral_preview(
+        "\n".join(
+            [
+                "line 1",
+                "line 2",
+                "line 3",
+                "line 4",
+                "line 5",
+                "line 6",
+                "line 7",
+                "line 8",
+            ]
+        )
+    )
+
+    monkeypatch.setattr(
+        "code_puppy.command_line.prompt_toolkit_completion.get_puppy_name",
+        lambda: "Buddy",
+    )
+    monkeypatch.setattr(
+        "code_puppy.command_line.prompt_toolkit_completion.get_active_model",
+        lambda: "gpt-test",
+    )
+    monkeypatch.setattr(
+        "code_puppy.command_line.prompt_toolkit_completion.os.getcwd",
+        lambda: "/tmp/demo",
+    )
+    monkeypatch.setattr(
+        "code_puppy.command_line.interactive_runtime.time.monotonic",
+        lambda: 0.18,
+    )
+
+    agent = MagicMock()
+    agent.display_name = "code-puppy"
+    agent.get_model_name.return_value = "gpt-test"
+
+    with (
+        patch(
+            "code_puppy.command_line.prompt_toolkit_completion._get_current_agent_for_prompt",
+            return_value=agent,
+        ),
+        patch(
+            "code_puppy.command_line.prompt_toolkit_completion.SpinnerBase.get_context_info",
+            return_value="",
+        ),
+        patch("shutil.get_terminal_size", return_value=os.terminal_size((80, 24))),
+    ):
+        rendered = "".join(text for _style, text in get_prompt_with_active_model())
+
+    assert "🔧 Calling list_files... 11 token(s)" in rendered
+    assert "line 1" not in rendered
+    assert "line 2" not in rendered
+    assert "line 3" in rendered
+    assert "line 8" in rendered
+    assert rendered.index("🔧 Calling list_files... 11 token(s)") < rendered.index(
+        "line 3"
+    )
+    active_runtime.clear_prompt_ephemeral_status()
+    active_runtime.clear_prompt_ephemeral_preview()
+    with (
+        patch(
+            "code_puppy.command_line.prompt_toolkit_completion._get_current_agent_for_prompt",
+            return_value=agent,
+        ),
+        patch(
+            "code_puppy.command_line.prompt_toolkit_completion.SpinnerBase.get_context_info",
+            return_value="",
+        ),
+        patch("shutil.get_terminal_size", return_value=os.terminal_size((80, 24))),
+    ):
+        cleared = "".join(text for _style, text in get_prompt_with_active_model())
+
+    assert "🔧 Calling list_files... 11 token(s)" not in cleared
+    assert "line 8" not in cleared
+    clear_active_prompt_surface()


 @pytest.mark.asyncio
 @patch("code_puppy.command_line.prompt_toolkit_completion.PromptSession")
-async def test_get_input_key_binding_escape(mock_prompt_session_cls):
-    mock_session_instance = MagicMock()
-    mock_session_instance.prompt_async = AsyncMock(return_value="test")
-    mock_prompt_session_cls.return_value = mock_session_instance
+async def test_get_input_registers_active_prompt_surface(
+    mock_prompt_session_cls, active_runtime
+):
+    session = MagicMock()
+    session.app = MagicMock()
+    session.default_buffer = MagicMock()
+
+    async def fake_prompt_async(*args, **kwargs):
+        assert has_active_prompt_surface() is True
+        assert get_active_prompt_surface_kind() == "main"
+        set_shell_prompt_suspended(True)
+        assert is_shell_prompt_suspended() is True
+        set_shell_prompt_suspended(False)
+        return "test input"
+
+    session.prompt_async = AsyncMock(side_effect=fake_prompt_async)
+    mock_prompt_session_cls.return_value = session
+
+    result = await get_input_with_combined_completion()
+
+    assert result == "test input"
+    assert has_active_prompt_surface() is False
+    assert is_shell_prompt_suspended() is False
+
+
+@pytest.mark.asyncio
+async def test_prompt_runtime_refreshes_spinner_while_running(active_runtime):
+    session = MagicMock()
+    session.app = MagicMock()
+    active_runtime.register_prompt_surface(session)
+
+    worker = asyncio.create_task(asyncio.sleep(1))
+    active_runtime.mark_running(worker)
+    session.app.invalidate.reset_mock()
+
+    try:
+        await asyncio.sleep(0.12)
+        assert session.app.invalidate.called
+        assert active_runtime.prompt_status_task is not None
+    finally:
+        active_runtime.mark_idle()
+        worker.cancel()
+        with contextlib.suppress(asyncio.CancelledError):
+            await worker
+
+    await asyncio.sleep(0)
+    assert active_runtime.prompt_status_task is None
+
+
+@pytest.mark.asyncio
+@patch("code_puppy.command_line.prompt_toolkit_completion.PromptSession")
+async def test_ctrl_x_interrupts_shell_when_prompt_is_suspended(
+    mock_prompt_session_cls, active_runtime
+):
+    session = MagicMock()
+    session.app = MagicMock()
+    session.default_buffer = MagicMock()
+    session.prompt_async = AsyncMock(return_value="done")
+    mock_prompt_session_cls.return_value = session

     await get_input_with_combined_completion()

     bindings = mock_prompt_session_cls.call_args[1]["key_bindings"]
-    found_escape_handler = None
-    for binding_obj in bindings.bindings:
-        if binding_obj.keys == (Keys.Escape,):
-            found_escape_handler = binding_obj.handler
-            break
+    ctrl_x_handler = next(
+        binding.handler
+        for binding in bindings.bindings
+        if binding.keys == (Keys.ControlX,)
+    )

-    assert found_escape_handler is not None, "Standalone Escape keybinding not found"
+    register_active_prompt_surface("main", session)
+    set_shell_prompt_suspended(True)

     mock_event = MagicMock()
     mock_event.app = MagicMock()
-    mock_event.app.exit.side_effect = KeyboardInterrupt
-    with pytest.raises(KeyboardInterrupt):
-        found_escape_handler(mock_event)
-    mock_event.app.exit.assert_called_once_with(exception=KeyboardInterrupt)
+
+    with patch(
+        "code_puppy.tools.command_runner.kill_all_running_shell_processes",
+        return_value=1,
+    ) as mock_kill:
+        with patch("code_puppy.messaging.emit_warning"):
+            ctrl_x_handler(mock_event)
+
+    mock_kill.assert_called_once()
+    mock_event.app.exit.assert_not_called()
+    clear_active_prompt_surface()
+
+
test_prompt_for_submission_returns_inline_queue_action( + mock_prompt_session_cls, active_runtime +): + session = MagicMock() + session.app = MagicMock() + session.default_buffer = MagicMock() + session.prompt_async = AsyncMock( + return_value=PromptSubmission(action="queue", text="queued task") + ) + mock_prompt_session_cls.return_value = session + + result = await prompt_for_submission() + + assert result == PromptSubmission( + action="queue", + text="queued task", + echo_in_transcript=False, + ) @pytest.mark.asyncio @@ -611,3 +938,34 @@ async def test_attachment_placeholder_processor_renders_images(tmp_path: Path) - assert "[png image]" in rendered_text assert "fluffy pupper" not in rendered_text + + +def test_attachment_placeholder_processor_skips_replacement_while_chooser_visible( + tmp_path: Path, active_runtime +) -> None: + image_path = tmp_path / "chooser.png" + image_path.write_bytes(b"png") + active_runtime.set_pending_submission("queued task") + + processor = AttachmentPlaceholderProcessor() + document_text = f"describe {image_path} now" + document = Document(text=document_text, cursor_position=len(document_text)) + + fragments = [("", document_text)] + buffer = Buffer(document=document) + control = BufferControl(buffer=buffer) + transformation_input = TransformationInput( + buffer_control=control, + document=document, + lineno=0, + source_to_display=lambda i: i, + fragments=fragments, + width=len(document_text), + height=1, + ) + + transformed = processor.apply_transformation(transformation_input) + rendered_text = "".join(text for _style, text in transformed.fragments) + + assert str(image_path) in rendered_text + assert "[png image]" not in rendered_text diff --git a/tests/test_terminal_utils.py b/tests/test_terminal_utils.py index e8612459f..82c6955a7 100644 --- a/tests/test_terminal_utils.py +++ b/tests/test_terminal_utils.py @@ -360,6 +360,24 @@ def test_windows_terminal(self, monkeypatch): monkeypatch.setenv("WT_SESSION", "abc") assert terminal_utils.detect_truecolor_support() is True + def test_ghostty_term_is_detected(self, monkeypatch): + monkeypatch.delenv("COLORTERM", raising=False) + monkeypatch.setenv("TERM", "xterm-ghostty") + assert terminal_utils.detect_truecolor_support() is True + + def test_terminal_app_profile_is_degraded_not_unknown(self, monkeypatch): + monkeypatch.delenv("COLORTERM", raising=False) + monkeypatch.delenv("TERM", raising=False) + monkeypatch.setenv("TERM_PROGRAM", "Apple_Terminal") + profile = terminal_utils.get_terminal_profile() + assert profile.terminal_family == "terminal_app" + assert profile.supports_truecolor is False + + def test_windows_terminal_profile_is_detected(self, monkeypatch): + monkeypatch.setenv("WT_SESSION", "abc") + profile = terminal_utils.get_terminal_profile() + assert profile.terminal_family == "windows_terminal" + def test_rich_fallback_truecolor(self, monkeypatch): monkeypatch.delenv("COLORTERM", raising=False) monkeypatch.setenv("TERM", "dumb") @@ -367,6 +385,7 @@ def test_rich_fallback_truecolor(self, monkeypatch): monkeypatch.delenv("KITTY_WINDOW_ID", raising=False) monkeypatch.delenv("ALACRITTY_SOCKET", raising=False) monkeypatch.delenv("WT_SESSION", raising=False) + monkeypatch.setattr(terminal_utils, "_stream_is_tty", lambda _stream: True) mock_console_cls = MagicMock() mock_console_cls.return_value.color_system = "truecolor" monkeypatch.setattr( @@ -430,7 +449,7 @@ def test_creates_console_when_none(self, monkeypatch): mock_console.color_system = "standard" import rich.console - 
monkeypatch.setattr(rich.console, "Console", lambda: mock_console)
+        monkeypatch.setattr(rich.console, "Console", lambda *args, **kwargs: mock_console)
         terminal_utils.print_truecolor_warning(console=None)
         assert mock_console.print.call_count > 10
@@ -460,3 +479,15 @@ def test_console_color_system_none(self, monkeypatch):
         # Should use "unknown" for color_system
         calls = [str(c) for c in mock_console.print.call_args_list]
         assert any("unknown" in c for c in calls)
+
+class TestLiveTerminalUpdates:
+    def test_windows_terminal_allows_live_updates(self, monkeypatch):
+        monkeypatch.setattr(terminal_utils.platform, "system", lambda: "Windows")
+        monkeypatch.setenv("WT_SESSION", "abc")
+        monkeypatch.setattr(terminal_utils, "_stream_is_tty", lambda _stream: True)
+        assert terminal_utils.supports_live_terminal_updates() is True
+
+    def test_ci_disables_live_updates(self, monkeypatch):
+        monkeypatch.setattr(terminal_utils, "_stream_is_tty", lambda _stream: True)
+        monkeypatch.setenv("CI", "1")
+        assert terminal_utils.supports_live_terminal_updates() is False
diff --git a/tests/test_terminal_utils_comprehensive.py b/tests/test_terminal_utils_comprehensive.py
index fde6db23d..4027ef71b 100644
--- a/tests/test_terminal_utils_comprehensive.py
+++ b/tests/test_terminal_utils_comprehensive.py
@@ -730,7 +730,10 @@ def test_no_truecolor_support(self):
     def test_rich_fallback_truecolor(self):
         """Test Rich fallback detects truecolor."""
         with patch.dict(os.environ, {}, clear=True):
-            with patch("rich.console.Console") as mock_console_class:
+            with (
+                patch("code_puppy.terminal_utils._stream_is_tty", return_value=True),
+                patch("rich.console.Console") as mock_console_class,
+            ):
                 mock_console = MagicMock()
                 mock_console.color_system = "truecolor"
                 mock_console_class.return_value = mock_console
diff --git a/tests/tools/browser/test_remaining_coverage.py b/tests/tools/browser/test_remaining_coverage.py
index 911e00bdd..60575ed19 100644
--- a/tests/tools/browser/test_remaining_coverage.py
+++ b/tests/tools/browser/test_remaining_coverage.py
@@ -150,13 +150,21 @@ async def test_list_workflows_file_error(self, tmp_path):
         wf_dir.mkdir()
         bad_file = wf_dir / "bad.md"
         bad_file.write_text("test")
+        original_stat = type(bad_file).stat
+
+        def fake_stat(path_obj):
+            if path_obj == bad_file:
+                raise OSError("fail")
+            return original_stat(path_obj)
+        # autospec=True below keeps stat bound so fake_stat receives the Path instance.

         with (
             patch(f"{MOD_WF}.get_workflows_directory", return_value=wf_dir),
+            patch.object(type(wf_dir), "glob", return_value=[bad_file]),
             patch(f"{MOD_WF}.emit_info"),
             patch(f"{MOD_WF}.emit_warning") as mock_warn,
             patch(f"{MOD_WF}.emit_success"),
-            patch.object(type(bad_file), "stat", side_effect=OSError("fail")),
+            patch.object(type(bad_file), "stat", autospec=True, side_effect=fake_stat),
         ):
             r = await list_workflows()
             assert r["success"] is True
diff --git a/tests/tools/test_command_runner_full_coverage.py b/tests/tools/test_command_runner_full_coverage.py
index 05b5a0e3d..14654514f 100644
--- a/tests/tools/test_command_runner_full_coverage.py
+++ b/tests/tools/test_command_runner_full_coverage.py
@@ -9,7 +9,7 @@
 import subprocess
 import sys
 import threading
-from unittest.mock import AsyncMock, MagicMock, patch
+from unittest.mock import ANY, AsyncMock, MagicMock, patch

 import pytest
 from pydantic_ai import RunContext
@@ -238,13 +238,25 @@ def test_set_and_check(self):
         )

         with patch("code_puppy.tools.command_runner.pause_all_spinners", create=True):
-            pass
+            set_awaiting_user_input(True)
+            assert is_awaiting_user_input() is True
+            set_awaiting_user_input(False)
+            assert is_awaiting_user_input() is
False - set_awaiting_user_input(True) - assert is_awaiting_user_input() is True - set_awaiting_user_input(False) - assert is_awaiting_user_input() is False +# --------------------------------------------------------------------------- +# Shell lock helpers +# --------------------------------------------------------------------------- + + +class TestShellLockHelpers: + def test_normalize_shell_cwd(self): + from code_puppy.tools.command_runner import _normalize_shell_cwd + + assert _normalize_shell_cwd(None) is None + assert _normalize_shell_cwd("") is None + assert _normalize_shell_cwd(" ") is None + assert _normalize_shell_cwd(" /tmp/work ") == " /tmp/work " # --------------------------------------------------------------------------- @@ -569,6 +581,71 @@ async def test_yolo_mode_executes(self): result = await run_shell_command(ctx, "echo hi", timeout=10) assert result.success is True + @pytest.mark.asyncio + async def test_omitted_cwd_normalizes_to_none(self): + from code_puppy.tools.command_runner import run_shell_command + + ctx = MagicMock(spec=RunContext) + mock_output = MagicMock(success=True) + callback_mock = AsyncMock(return_value=[]) + + with patch("code_puppy.callbacks.on_run_shell_command", callback_mock): + with patch("code_puppy.config.get_yolo_mode", return_value=True): + with patch( + "code_puppy.tools.command_runner.is_subagent", return_value=False + ): + with patch( + "code_puppy.tools.command_runner._execute_shell_command", + new_callable=AsyncMock, + return_value=mock_output, + ) as mock_execute: + result = await run_shell_command(ctx, "echo hi", timeout=10) + + assert result.success is True + callback_mock.assert_awaited_once_with(ctx, "echo hi", None, 10) + mock_execute.assert_awaited_once_with( + command="echo hi", + cwd=None, + timeout=10, + group_id=ANY, + silent=False, + ) + + @pytest.mark.asyncio + async def test_blank_cwd_normalizes_to_none(self): + from code_puppy.tools.command_runner import run_shell_command + + ctx = MagicMock(spec=RunContext) + mock_output = MagicMock(success=True) + callback_mock = AsyncMock(return_value=[]) + + with patch("code_puppy.callbacks.on_run_shell_command", callback_mock): + with patch("code_puppy.config.get_yolo_mode", return_value=True): + with patch( + "code_puppy.tools.command_runner.is_subagent", return_value=False + ): + with patch( + "code_puppy.tools.command_runner._execute_shell_command", + new_callable=AsyncMock, + return_value=mock_output, + ) as mock_execute: + result = await run_shell_command( + ctx, + "echo hi", + cwd=" ", + timeout=10, + ) + + assert result.success is True + callback_mock.assert_awaited_once_with(ctx, "echo hi", None, 10) + mock_execute.assert_awaited_once_with( + command="echo hi", + cwd=None, + timeout=10, + group_id=ANY, + silent=False, + ) + @pytest.mark.asyncio async def test_subagent_runs_silently(self): from code_puppy.tools.command_runner import run_shell_command @@ -790,6 +867,186 @@ async def test_executes(self): assert result.success is True + @pytest.mark.asyncio + async def test_marks_active_runtime_shell_state_and_skips_keyboard_listener(self): + from code_puppy.tools.command_runner import ( + ShellCommandOutput, + _execute_shell_command, + ) + + mock_result = ShellCommandOutput( + success=True, + command="echo hi", + stdout="hi", + stderr="", + exit_code=0, + execution_time=0.1, + ) + + runtime = MagicMock() + with patch("code_puppy.tools.command_runner.get_message_bus") as mock_bus: + mock_bus.return_value = MagicMock() + with patch("code_puppy.messaging.spinner.pause_all_spinners"): 
+ with patch("code_puppy.messaging.spinner.resume_all_spinners"): + with patch( + "code_puppy.command_line.interactive_runtime.get_active_interactive_runtime", + return_value=runtime, + ): + with patch( + "code_puppy.tools.command_runner._acquire_keyboard_context" + ) as mock_acquire: + with patch( + "code_puppy.tools.command_runner._release_keyboard_context" + ) as mock_release: + with patch( + "code_puppy.tools.command_runner._run_command_inner", + new_callable=AsyncMock, + return_value=mock_result, + ): + result = await _execute_shell_command( + "echo hi", None, 10, "grp" + ) + + assert result.success is True + runtime.notify_shell_started.assert_called_once() + runtime.notify_shell_finished.assert_called_once() + mock_acquire.assert_not_called() + mock_release.assert_not_called() + + @pytest.mark.asyncio + async def test_uses_keyboard_listener_when_no_active_prompt(self): + from code_puppy.tools.command_runner import ( + ShellCommandOutput, + _execute_shell_command, + ) + + mock_result = ShellCommandOutput( + success=True, + command="echo hi", + stdout="hi", + stderr="", + exit_code=0, + execution_time=0.1, + ) + + with patch("code_puppy.tools.command_runner.get_message_bus") as mock_bus: + mock_bus.return_value = MagicMock() + with patch("code_puppy.messaging.spinner.pause_all_spinners"): + with patch("code_puppy.messaging.spinner.resume_all_spinners"): + with patch( + "code_puppy.command_line.interactive_runtime.get_active_interactive_runtime", + return_value=None, + ): + with patch( + "code_puppy.tools.command_runner._acquire_keyboard_context" + ) as mock_acquire: + with patch( + "code_puppy.tools.command_runner._release_keyboard_context" + ) as mock_release: + with patch( + "code_puppy.tools.command_runner._run_command_inner", + new_callable=AsyncMock, + return_value=mock_result, + ): + result = await _execute_shell_command( + "echo hi", None, 10, "grp" + ) + + assert result.success is True + mock_acquire.assert_called_once() + mock_release.assert_called_once() + + @pytest.mark.asyncio + async def test_runtime_start_notification_failure_falls_back_to_keyboard_listener( + self, + ): + from code_puppy.tools.command_runner import ( + ShellCommandOutput, + _execute_shell_command, + ) + + mock_result = ShellCommandOutput( + success=True, + command="echo hi", + stdout="hi", + stderr="", + exit_code=0, + execution_time=0.1, + ) + + runtime = MagicMock() + runtime.notify_shell_started.side_effect = RuntimeError("boom") + with patch("code_puppy.tools.command_runner.get_message_bus") as mock_bus: + mock_bus.return_value = MagicMock() + with patch("code_puppy.messaging.spinner.pause_all_spinners"): + with patch("code_puppy.messaging.spinner.resume_all_spinners"): + with patch( + "code_puppy.command_line.interactive_runtime.get_active_interactive_runtime", + return_value=runtime, + ): + with patch( + "code_puppy.tools.command_runner._acquire_keyboard_context" + ) as mock_acquire: + with patch( + "code_puppy.tools.command_runner._release_keyboard_context" + ) as mock_release: + with patch( + "code_puppy.tools.command_runner._run_command_inner", + new_callable=AsyncMock, + return_value=mock_result, + ): + result = await _execute_shell_command( + "echo hi", None, 10, "grp" + ) + + assert result.success is True + runtime.notify_shell_started.assert_called_once() + runtime.notify_shell_finished.assert_not_called() + mock_acquire.assert_called_once() + mock_release.assert_called_once() + + @pytest.mark.asyncio + async def test_runtime_finish_notification_failure_does_not_mask_result(self): + from 
code_puppy.tools.command_runner import ( + ShellCommandOutput, + _execute_shell_command, + ) + + mock_result = ShellCommandOutput( + success=True, + command="echo hi", + stdout="hi", + stderr="", + exit_code=0, + execution_time=0.1, + ) + + runtime = MagicMock() + runtime.notify_shell_finished.side_effect = RuntimeError("boom") + with patch("code_puppy.tools.command_runner.get_message_bus") as mock_bus: + mock_bus.return_value = MagicMock() + with patch("code_puppy.messaging.spinner.pause_all_spinners"): + with patch( + "code_puppy.messaging.spinner.resume_all_spinners" + ) as mock_resume: + with patch( + "code_puppy.command_line.interactive_runtime.get_active_interactive_runtime", + return_value=runtime, + ): + with patch( + "code_puppy.tools.command_runner._run_command_inner", + new_callable=AsyncMock, + return_value=mock_result, + ): + result = await _execute_shell_command( + "echo hi", None, 10, "grp" + ) + + assert result.success is True + runtime.notify_shell_started.assert_called_once() + runtime.notify_shell_finished.assert_called_once() + mock_resume.assert_called_once() + # --------------------------------------------------------------------------- # _run_command_inner exception handling diff --git a/tests/tools/test_file_modifications_extended.py b/tests/tools/test_file_modifications_extended.py index 52da75c3f..4e064aabd 100644 --- a/tests/tools/test_file_modifications_extended.py +++ b/tests/tools/test_file_modifications_extended.py @@ -11,6 +11,7 @@ ReplacementsPayload, _delete_file, _edit_file, + register_replace_in_file, ) @@ -221,6 +222,27 @@ def func3(): assert "def func1():" in content # Should remain assert "def func3():" in content # Should remain + def test_register_replace_in_file_rejects_missing_old_str(self, tmp_path): + registered = {} + + class Agent: + def tool(self, fn): + registered[fn.__name__] = fn + return fn + + register_replace_in_file(Agent()) + fn = registered["replace_in_file"] + + result = fn( + Mock(), + file_path=str(tmp_path / "test.py"), + replacements=[{"new_str": "updated"}], + ) + + assert result["success"] is False + assert result["changed"] is False + assert "old_str" in result["message"] + def test_error_recovery_file_permissions(self, tmp_path): """Test error recovery when file permissions prevent modification.""" test_file = tmp_path / "readonly.py"