+from typing import Any, ClassVar, Literal
+
+from pydantic import BaseModel, ConfigDict, Field
+
+from dynamiq.connections.managers import ConnectionManager
+from dynamiq.nodes import ErrorHandling, Node, NodeGroup
+from dynamiq.nodes.llms import BaseLLM
+from dynamiq.nodes.node import NodeDependency, ensure_config
+from dynamiq.prompts import Message, Prompt
+from dynamiq.runnables import RunnableConfig, RunnableStatus
+from dynamiq.utils.logger import logger
+
+CONTEXT_MANAGER_PROMPT_TEMPLATE = """
+You are a context compression assistant for an AI agent.
+
+IMPORTANT: The agent will delete previous message history after this step. You MUST preserve all
+essential information needed to continue the task successfully.
+
+Task:
+- Produce a detailed summary that replaces the prior message history.
+- Keep only what is necessary to proceed: reasoning overview, current subtasks, saved information and files,
+  next steps, additional notes.
+- Omit chit-chat and non-essential details. Use clear, structured formatting.
+
+History to compress:
+{history}
+
+Output strictly in this structure:
+
+## Reasoning Overview
+- [brief overview of the reasoning flow so far]
+
+## Current Subtasks
+- [ordered bullets: subtask -> status]
+
+## Saved Information and Files
+- [filesystem state and saved files, if available]
+
+## Next Steps
+- [ordered bullets: next step -> status]
+
+## Additional Notes
+- [any other important information that must not be lost]
+
+"""
+
+
+class ContextManagerInputSchema(BaseModel):
+    """Input for ContextManagerTool.
+
+    - history: The recent conversation messages to compress (a list of Message objects, or None).
+    - is_history_preserved: Whether to preserve the history as a summary. If False, the history is dropped
+      and only the notes are kept.
+    - notes: Verbatim content that must be preserved as-is (not processed by the LLM) and prepended to the result.
+    """
+
+    history: list[Message] | None = Field(
+        ..., description="Conversation history to be summarized and used to replace prior messages"
+    )
+
+    is_history_preserved: bool = Field(
+        default=True,
+        description="Whether to preserve the history as a summary. If False, the history is dropped"
+        " and only the notes are kept.",
+    )
+
+    notes: str | None = Field(
+        default=None,
+        description=(
+            "Verbatim content to preserve as-is (e.g., IDs, filenames, critical details). "
+            "This will be prepended unchanged to the output and NOT sent to the LLM."
+        ),
+    )
+
+
+class ContextManagerTool(Node):
+    """
+    A tool to prune previous message history and replace it with a concise summary.
+
+    IMPORTANT: Before calling this tool, ensure any necessary details are explicitly saved
+    (e.g., files, pinned notes, or artifacts). This tool is intended to remove previous messages
+    and keep only a structured summary to tighten context and focus on the active subtask.
+
+    Attributes:
+        group (Literal[NodeGroup.TOOLS]): The group this node belongs to.
+        name (str): The name of the tool.
+        description (str): Tool description with usage warning.
+        llm (BaseLLM): The LLM used to produce the compressed summary.
+        error_handling (ErrorHandling): Configuration for error handling.
+        prompt_template (str): Prompt template guiding the summarization.
+    """
+
+    group: Literal[NodeGroup.TOOLS] = NodeGroup.TOOLS
+    name: str = "Context Manager Tool"
+    description: str = (
+        "Cleans prior message history and replaces it with a concise, self-contained summary.\n\n"
+        "WARNING: Before calling this tool, the agent must save any necessary information (e.g., in FileStore),\n"
+        "because previous messages will be removed and replaced by the summary. "
+        "You can also provide notes to the tool to preserve important information verbatim, without LLM processing. "
+        "Make sure to provide all necessary information for the agent to stay on track and"
+        " not lose any important details. "
+        "You can also disable history preservation, in which case only the notes will be preserved. "
+        "Disable history preservation when the history itself is not needed and only the notes matter."
+    )
+
+    llm: BaseLLM = Field(..., description="LLM used to produce the compressed context summary")
+    error_handling: ErrorHandling = Field(default_factory=lambda: ErrorHandling(timeout_seconds=600))
+    prompt_template: str = Field(
+        default=CONTEXT_MANAGER_PROMPT_TEMPLATE, description="Prompt template for context compression"
+    )
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+    input_schema: ClassVar[type[ContextManagerInputSchema]] = ContextManagerInputSchema
+
+    def init_components(self, connection_manager: ConnectionManager | None = None) -> None:
+        """Initialize components for the tool."""
+        connection_manager = connection_manager or ConnectionManager()
+        super().init_components(connection_manager)
+        if self.llm.is_postponed_component_init:
+            self.llm.init_components(connection_manager)
+
+    def reset_run_state(self):
+        """Reset the intermediate steps (run_depends) of the node."""
+        self._run_depends = []
+
+    @property
+    def to_dict_exclude_params(self) -> dict:
+        """Exclude LLM object during serialization."""
+        return super().to_dict_exclude_params | {"llm": True}
+
+    def to_dict(self, **kwargs) -> dict:
+        data = super().to_dict(**kwargs)
+        data["llm"] = self.llm.to_dict(**kwargs)
+        return data
+
+    def _build_prompt(self, history: list[Message]) -> str:
+        formatted_history = "\n\n---\n\n".join([f"{m.role}: {str(m.content)}" for m in history])
+        return self.prompt_template.format(history=formatted_history)
+
+    def _summarize_history(self, history: list[Message], config: RunnableConfig, **kwargs) -> str:
+        prompt_content = self._build_prompt(history)
+
+        result = self.llm.run(
+            input_data={},
+            prompt=Prompt(messages=[Message(role="user", content=prompt_content, static=True)]),
+            config=config,
+            **(kwargs | {"parent_run_id": kwargs.get("run_id"), "run_depends": []}),
+        )
+
+        self._run_depends = [NodeDependency(node=self.llm).to_dict(for_tracing=True)]
+
+        if result.status != RunnableStatus.SUCCESS:
+            raise ValueError("LLM execution failed during context compression")
+
+        return result.output.get("content", "").strip()
+
+    def execute(
+        self, input_data: ContextManagerInputSchema, config: RunnableConfig | None = None, **kwargs
+    ) -> dict[str, Any]:
+        """
+        Summarize the provided history and return the content that should replace prior messages.
+
+        Returns:
+            dict[str, Any]:
+                - content: the preserved notes (if provided) followed by the compressed summary
+        """
+        config = ensure_config(config)
+        self.reset_run_state()
+        self.run_on_node_execute_run(config.callbacks, **kwargs)
+
+        summary = ""
+
+        if input_data.is_history_preserved and input_data.history:
+            summary = self._summarize_history(input_data.history, config, **kwargs)
+            summary = f"\nContext compressed; Summary:\n {summary}"
+
+        if input_data.notes:
+            summary = f"Notes: {input_data.notes}\n\n{summary}"
+
+        logger.debug(f"Tool {self.name} - {self.id}: context compression completed, summary length: {len(summary)}")
+
+        return {"content": summary}
+
+
+def _apply_context_manager_tool_effect(prompt: Prompt, tool_result: Any, history_offset: int) -> None:
+    """Apply the context-cleaning effect after a ContextManagerTool call.
+
+    Keeps the default prefix (up to history_offset), removes the intermediate history, and re-appends
+    a copy of the prompt's final message (typically the observation carrying the tool_result summary).
+    """
+
+    try:
+        new_messages = prompt.messages[:history_offset]
+        if new_messages:
+            new_messages.append(prompt.messages[-1].copy())
+        prompt.messages = new_messages
+
+    except Exception as e:
+        logger.error(f"Error applying context manager tool effect: {e}")
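
For reference, a minimal usage sketch of the tool added in this diff. The OpenAI node and connection names, their constructor arguments, and the import path for `ContextManagerTool` are assumptions for illustration only; any configured `BaseLLM` implementation from dynamiq can stand in.

```python
# Hypothetical usage sketch (not part of the diff above). Assumes an OpenAI LLM node and
# connection exist under these names; substitute whatever BaseLLM subclass your setup provides.
from dynamiq.connections import OpenAI as OpenAIConnection  # assumed connection class
from dynamiq.nodes.llms import OpenAI  # assumed BaseLLM implementation
from dynamiq.prompts import Message

# ContextManagerTool is the class added in this diff; import it from wherever the module lives.

llm = OpenAI(model="gpt-4o-mini", connection=OpenAIConnection())  # credentials via env/config

tool = ContextManagerTool(llm=llm)

history = [
    Message(role="user", content="Find the latest quarterly report and summarize revenue."),
    Message(role="assistant", content="Saved the report to reports/q3.pdf; revenue section extracted."),
]

# Compress the history and pin verbatim notes that must survive the compression unchanged.
result = tool.run(
    input_data={
        "history": history,
        "is_history_preserved": True,
        "notes": "Key file: reports/q3.pdf",
    }
)
print(result.output["content"])  # notes first, then the structured summary
```

If the run succeeds, the returned content is the text the agent keeps; the pruning helper below can then drop everything else from the prior history.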
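
And a small sketch of what the pruning helper does to a prompt, under the assumption that the final message is the observation carrying the tool's summary:

```python
# Illustration of _apply_context_manager_tool_effect with history_offset=2:
# the first two messages are kept, the middle of the conversation is dropped,
# and a copy of the final message (the summary observation) becomes the new tail.
from dynamiq.prompts import Message, Prompt

prompt = Prompt(
    messages=[
        Message(role="system", content="You are a research agent."),  # kept (prefix)
        Message(role="user", content="Compile a market overview."),  # kept (prefix)
        Message(role="assistant", content="Searching sources..."),  # dropped
        Message(role="user", content="Focus on Q3 only."),  # dropped
        Message(role="assistant", content="Context compressed; Summary: ..."),  # re-appended
    ]
)

_apply_context_manager_tool_effect(prompt, tool_result=None, history_offset=2)
assert len(prompt.messages) == 3  # two prefix messages + the summary observation
```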