From 408b52abec2ed9cad31b1918b35a6770614936c0 Mon Sep 17 00:00:00 2001 From: Merve Noyan Date: Fri, 24 Jan 2025 17:01:35 +0100 Subject: [PATCH] Add VLM support (#220) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * vlm initial commit * transformers integration for vlms * Add webbrowser example and make it work 🥳🥳 * Refactor image support * Allow modifying agent attributes in callback * Improve vlm browser example * time.sleep(0.5) before screenshot to let js animations happen * test to validate internal workflow for passing images * Update test_agents.py * Improve error logging * Switch to OpenAIServerModel * Improve the example * Format * add docs about steps, callbacks & co * Add precisions in doc * Improve browser * Tiny prompting update * Fix style * fix/add test * refactor * Fix write_inner_memory_from_logs for OpenAI format * Add back summary mode * Make it work with TransformersModel * Fix test * Fix loop * Fix quality * Fix mutable default argument * Rename tool_response_message to error_message and append it * Working browser with firefox * Use flatten_messages_as_text passed to TransformersModel * Fix quality * Document flatten_messages_as_text in docstring * Working ctrl + f in browser * Make style * Fix summary_mode type hint and add to docstring * Move image functions to tools * Update docstrings * Fix type hint * Fix typo * Fix type hints * Make callback call compatible with old single-argument functions * Revert update_metrics to have a single arg * Pass keyword args instead of args to callback * Update webbrowser * fix for single message case where final message list is empty * forgot debugger lol * accommodate VLM-like chat template and fix tests * Improve example wording * Style fixes * clarify naming and fix tests * test fix * Fix style * Add bm25 to fix one of the doc tests * fix mocking in VL test * fix bug in fallback * add transformers model * remove chrome dir from helium * Update Transformers example with flatten_messages_as_text * Add doc for flatten_messages_as_text * Fix merge error --------- Co-authored-by: Merve Noyan Co-authored-by: Aymeric Co-authored-by: Albert Villanova del Moral <8515462+albertvillanova@users.noreply.github.com> --- docs/source/en/conceptual_guides/react.md | 40 +++- examples/vlm_web_browser.py | 222 ++++++++++++++++++++++ pyproject.toml | 3 +- src/smolagents/agents.py | 220 ++++++++++++++------- src/smolagents/local_python_executor.py | 20 +- src/smolagents/models.py | 114 ++++++++--- src/smolagents/monitoring.py | 5 + src/smolagents/tools.py | 1 - src/smolagents/utils.py | 14 +- tests/test_agents.py | 68 ++++++- tests/test_models.py | 27 ++- 11 files changed, 613 insertions(+), 121 deletions(-) create mode 100644 examples/vlm_web_browser.py diff --git a/docs/source/en/conceptual_guides/react.md b/docs/source/en/conceptual_guides/react.md index d85c9cad3..417fb8590 100644 --- a/docs/source/en/conceptual_guides/react.md +++ b/docs/source/en/conceptual_guides/react.md @@ -19,10 +19,33 @@ The ReAct framework ([Yao et al., 2022](https://huggingface.co/papers/2210.03629 The name is based on the concatenation of two words, "Reason" and "Act." Indeed, agents following this architecture will solve their task in as many steps as needed, each step consisting of a Reasoning step, then an Action step where it formulates tool calls that will bring it closer to solving the task at hand. -React process involves keeping a memory of past steps. 
+All agents in `smolagents` are based on the singular `MultiStepAgent` class, which is an abstraction of the ReAct framework.
 
-> [!TIP]
-> Read [Open-source LLMs as LangChain Agents](https://huggingface.co/blog/open-source-llms-as-agents) blog post to learn more about multi-step agents.
+At a basic level, this class performs actions in a cycle of the following steps, where existing variables and knowledge are incorporated into the agent's logs as described below:
+
+Initialization: the system prompt is stored in a `SystemPromptStep`, and the user query is logged in a `TaskStep`.
+
+While loop (ReAct loop):
+
+- Use `agent.write_inner_memory_from_logs()` to write the agent logs into a list of LLM-readable [chat messages](https://huggingface.co/docs/transformers/en/chat_templating).
+- Send these messages to a `Model` object to get its completion. Parse the completion to get the action (a JSON blob for `ToolCallingAgent`, a code snippet for `CodeAgent`).
+- Execute the action and log its result into memory (an `ActionStep`).
+- At the end of each step, we run all callback functions defined in `agent.step_callbacks` (a minimal callback example is sketched below).
+
+Optionally, when planning is activated, a plan can be periodically revised and stored in a `PlanningStep`. This includes feeding facts about the task at hand into memory.
+
+For a `CodeAgent`, it looks like the figure below.
+
+
+ + +
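+As a minimal sketch (the model and empty tool list here are just placeholders), a step callback is any callable that receives the finished `ActionStep` — and, optionally, the agent itself — so it can read or modify the agent's state:
+
+```py
+from smolagents import CodeAgent, HfApiModel
+from smolagents.agents import ActionStep
+
+def log_step_duration(step_log: ActionStep, agent: CodeAgent) -> None:
+    # Runs at the end of each step: inspect the step log or modify the agent's logs/state here
+    print(f"Step {step_log.step_number} took {step_log.duration:.2f}s")
+
+agent = CodeAgent(tools=[], model=HfApiModel(), step_callbacks=[log_step_duration])
+```
+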
 Here is a video overview of how that works:

@@ -39,9 +62,12 @@ Here is a video overview of how that works:

 ![Framework of a React Agent](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/open-source-llms-as-agents/ReAct.png)

-We implement two versions of ToolCallingAgent:
-- [`ToolCallingAgent`] generates tool calls as a JSON in its output.
-- [`CodeAgent`] is a new type of ToolCallingAgent that generates its tool calls as blobs of code, which works really well for LLMs that have strong coding performance.
+We implement two versions of agents:
+- [`CodeAgent`] is the preferred type of agent: it generates its tool calls as blobs of code.
+- [`ToolCallingAgent`] generates tool calls as JSON in its output, as is commonly done in agentic frameworks. We keep this option because it can be useful in narrow cases where a single tool call per step is enough: for instance, in web browsing, you need to wait after each action on the page to monitor how the page changes.
+
+> [!TIP]
+> We also provide an option to run agents in one-shot: just pass `single_step=True` when launching the agent, like `agent.run(your_task, single_step=True)`

 > [!TIP]
-> We also provide an option to run agents in one-shot: just pass `single_step=True` when launching the agent, like `agent.run(your_task, single_step=True)`
\ No newline at end of file
+> Read the [Open-source LLMs as LangChain Agents](https://huggingface.co/blog/open-source-llms-as-agents) blog post to learn more about multi-step agents.
\ No newline at end of file
diff --git a/examples/vlm_web_browser.py b/examples/vlm_web_browser.py
new file mode 100644
index 000000000..01d50a517
--- /dev/null
+++ b/examples/vlm_web_browser.py
@@ -0,0 +1,222 @@
+from io import BytesIO
+from time import sleep
+
+import helium
+from dotenv import load_dotenv
+from PIL import Image
+from selenium import webdriver
+from selenium.common.exceptions import ElementNotInteractableException, TimeoutException
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+
+from smolagents import CodeAgent, LiteLLMModel, OpenAIServerModel, TransformersModel, tool  # noqa: F401
+from smolagents.agents import ActionStep
+
+
+load_dotenv()
+import os
+
+
+# Let's use Qwen2-VL-72B via an inference provider like Fireworks AI
+
+model = OpenAIServerModel(
+    api_key=os.getenv("FIREWORKS_API_KEY"),
+    api_base="https://api.fireworks.ai/inference/v1",
+    model_id="accounts/fireworks/models/qwen2-vl-72b-instruct",
+)
+
+# You can also use a closed-source model
+
+# model = LiteLLMModel(
+#     model_id="gpt-4o",
+#     api_key=os.getenv("OPENAI_API_KEY"),
+# )
+
+# Locally, a good candidate is Qwen2-VL-7B-Instruct
+# model = TransformersModel(
+#     model_id="Qwen/Qwen2-VL-7B-Instruct",
+#     device_map="auto",
+#     flatten_messages_as_text=False
+# )
+
+
+# Prepare callback
+def save_screenshot(step_log: ActionStep, agent: CodeAgent) -> None:
+    sleep(1.0)  # Let JavaScript animations happen before taking the screenshot
+    driver = helium.get_driver()
+    current_step = step_log.step_number
+    if driver is not None:
+        for step_logs in agent.logs:  # Remove previous screenshots from logs for lean processing
+            if isinstance(step_logs, ActionStep) and step_logs.step_number <= current_step - 2:
+                step_logs.observations_images = None
+        png_bytes = driver.get_screenshot_as_png()
+        image = Image.open(BytesIO(png_bytes))
+        print(f"Captured a browser screenshot: {image.size} 
pixels") + step_log.observations_images = [image.copy()] # Create a copy to ensure it persists, important! + + # Update observations with current URL + url_info = f"Current url: {driver.current_url}" + step_log.observations = url_info if step_logs.observations is None else step_log.observations + "\n" + url_info + return + + +# Initialize driver and agent +chrome_options = webdriver.ChromeOptions() +chrome_options.add_argument("--force-device-scale-factor=1") +chrome_options.add_argument("--window-size=1000,1300") +chrome_options.add_argument("--disable-pdf-viewer") + +driver = helium.start_chrome(headless=False, options=chrome_options) + +# Initialize tools + + +@tool +def search_item_ctrl_f(text: str, nth_result: int = 1) -> str: + """ + Searches for text on the current page via Ctrl + F and jumps to the nth occurrence. + Args: + text: The text to search for + nth_result: Which occurrence to jump to (default: 1) + """ + elements = driver.find_elements(By.XPATH, f"//*[contains(text(), '{text}')]") + if nth_result > len(elements): + raise Exception(f"Match n°{nth_result} not found (only {len(elements)} matches found)") + result = f"Found {len(elements)} matches for '{text}'." + elem = elements[nth_result - 1] + driver.execute_script("arguments[0].scrollIntoView(true);", elem) + result += f"Focused on element {nth_result} of {len(elements)}" + return result + + +@tool +def go_back() -> None: + """Goes back to previous page.""" + driver.back() + + +@tool +def close_popups() -> str: + """ + Closes any visible modal or pop-up on the page. Use this to dismiss pop-up windows! This does not work on cookie consent banners. + """ + # Common selectors for modal close buttons and overlay elements + modal_selectors = [ + "button[class*='close']", + "[class*='modal']", + "[class*='modal'] button", + "[class*='CloseButton']", + "[aria-label*='close']", + ".modal-close", + ".close-modal", + ".modal .close", + ".modal-backdrop", + ".modal-overlay", + "[class*='overlay']", + ] + + wait = WebDriverWait(driver, timeout=0.5) + + for selector in modal_selectors: + try: + elements = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, selector))) + + for element in elements: + if element.is_displayed(): + try: + # Try clicking with JavaScript as it's more reliable + driver.execute_script("arguments[0].click();", element) + except ElementNotInteractableException: + # If JavaScript click fails, try regular click + element.click() + + except TimeoutException: + continue + except Exception as e: + print(f"Error handling selector {selector}: {str(e)}") + continue + return "Modals closed" + + +agent = CodeAgent( + tools=[go_back, close_popups, search_item_ctrl_f], + model=model, + additional_authorized_imports=["helium"], + step_callbacks=[save_screenshot], + max_steps=20, + verbosity_level=2, +) + +helium_instructions = """ +You can use helium to access websites. Don't bother about the helium driver, it's already managed. +First you need to import everything from helium, then you can do other actions! +Code: +```py +from helium import * +go_to('github.com/trending') +``` + +You can directly click clickable elements by inputting the text that appears on them. +Code: +```py +click("Top products") +``` + +If it's a link: +Code: +```py +click(Link("Top products")) +``` + +If you try to interact with an element and it's not found, you'll get a LookupError. +In general stop your action after each button click to see what happens on your screenshot. +Never try to login in a page. 
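+You can use your tool search_item_ctrl_f to find text on the current page and scroll to it.
+Code:
+```py
+# Illustrative call: jump to the first occurrence of "Contributors" on the page
+search_item_ctrl_f("Contributors", nth_result=1)
+```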
+ +To scroll up or down, use scroll_down or scroll_up with as an argument the number of pixels to scroll from. +Code: +```py +scroll_down(num_pixels=1200) # This will scroll one viewport down +``` + +When you have pop-ups with a cross icon to close, don't try to click the close icon by finding its element or targeting an 'X' element (this most often fails). +Just use your built-in tool `close_popups` to close them: +Code: +```py +close_popups() +``` + +You can use .exists() to check for the existence of an element. For example: +Code: +```py +if Text('Accept cookies?').exists(): + click('I accept') +``` + +Proceed in several steps rather than trying to solve the task in one shot. +And at the end, only when you have your answer, return your final answer. +Code: +```py +final_answer("YOUR_ANSWER_HERE") +``` + +If pages seem stuck on loading, you might have to wait, for instance `import time` and run `time.sleep(5.0)`. But don't overuse this! +To list elements on page, DO NOT try code-based element searches like 'contributors = find_all(S("ol > li"))': just look at the latest screenshot you have and read it visually, or use your tool search_item_ctrl_f. +Of course, you can act on buttons like a user would do when navigating. +After each code blob you write, you will be automatically provided with an updated screenshot of the browser and the current browser url. +But beware that the screenshot will only be taken at the end of the whole action, it won't see intermediate states. +Don't kill the browser. +""" + +# Run the agent! + +github_request = """ +I'm trying to find how hard I have to work to get a repo in github.com/trending. +Can you navigate to the profile for the top author of the top trending repo, and give me their total number of commits over the last year? +""" # The agent is able to achieve this request only when powered by GPT-4o or Claude-3.5-sonnet. + +search_request = """ +Please navigate to https://en.wikipedia.org/wiki/Chicago and give me a sentence containing the word "1992" that mentions a construction accident. +""" + +agent.run(search_request + helium_instructions) diff --git a/pyproject.toml b/pyproject.toml index 76762bf97..860eb6c55 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -62,8 +62,9 @@ all = [ test = [ "ipython>=8.31.0", # for interactive environment tests "pytest>=8.1.0", - "python-dotenv>=1.0.1", # For test_all_docs + "python-dotenv>=1.0.1", # For test_all_docs "smolagents[all]", + "rank-bm25", # For test_all_docs ] dev = [ "smolagents[quality,test]", diff --git a/src/smolagents/agents.py b/src/smolagents/agents.py index d864c36b3..b7111e824 100644 --- a/src/smolagents/agents.py +++ b/src/smolagents/agents.py @@ -14,6 +14,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import inspect import time from collections import deque from dataclasses import dataclass @@ -88,11 +89,12 @@ class ActionStep(AgentStepLog): tool_calls: List[ToolCall] | None = None start_time: float | None = None end_time: float | None = None - step: int | None = None + step_number: int | None = None error: AgentError | None = None duration: float | None = None llm_output: str | None = None observations: str | None = None + observations_images: List[str] | None = None action_output: Any = None @@ -105,6 +107,7 @@ class PlanningStep(AgentStepLog): @dataclass class TaskStep(AgentStepLog): task: str + task_images: List[str] | None = None @dataclass @@ -213,6 +216,8 @@ def __init__( if managed_agents is not None: self.managed_agents = {agent.name: agent for agent in managed_agents} + for tool in tools: + assert isinstance(tool, Tool), f"This element is not of class Tool: {str(tool)}" self.tools = {tool.name: tool for tool in tools} if add_base_tools: for tool_name, tool_class in TOOL_MAPPING.items(): @@ -239,10 +244,13 @@ def initialize_system_prompt(self): return self.system_prompt - def write_inner_memory_from_logs(self, summary_mode: Optional[bool] = False) -> List[Dict[str, str]]: + def write_inner_memory_from_logs(self, summary_mode: bool = False) -> List[Dict[str, str]]: """ Reads past llm_outputs, actions, and observations or errors from the logs into a series of messages that can be used as input to the LLM. + + Args: + summary_mode (`bool`): Whether to write a summary of the logs or the full logs. """ memory = [] for i, step_log in enumerate(self.logs): @@ -250,7 +258,7 @@ def write_inner_memory_from_logs(self, summary_mode: Optional[bool] = False) -> if not summary_mode: thought_message = { "role": MessageRole.SYSTEM, - "content": step_log.system_prompt.strip(), + "content": [{"type": "text", "text": step_log.system_prompt.strip()}], } memory.append(thought_message) @@ -271,65 +279,82 @@ def write_inner_memory_from_logs(self, summary_mode: Optional[bool] = False) -> elif isinstance(step_log, TaskStep): task_message = { "role": MessageRole.USER, - "content": "New task:\n" + step_log.task, + "content": [{"type": "text", "text": f"New task:\n{step_log.task}"}], } + if step_log.task_images: + for image in step_log.task_images: + task_message["content"].append({"type": "image", "image": image}) memory.append(task_message) elif isinstance(step_log, ActionStep): if step_log.llm_output is not None and not summary_mode: thought_message = { "role": MessageRole.ASSISTANT, - "content": step_log.llm_output.strip(), + "content": [{"type": "text", "text": step_log.llm_output.strip()}], } memory.append(thought_message) - if step_log.tool_calls is not None: tool_call_message = { "role": MessageRole.ASSISTANT, - "content": str( - [ - { - "id": tool_call.id, - "type": "function", - "function": { - "name": tool_call.name, - "arguments": tool_call.arguments, - }, - } - for tool_call in step_log.tool_calls - ] - ), + "content": [ + { + "type": "text", + "text": str( + [ + { + "id": tool_call.id, + "type": "function", + "function": { + "name": tool_call.name, + "arguments": tool_call.arguments, + }, + } + for tool_call in step_log.tool_calls + ] + ), + } + ], } memory.append(tool_call_message) - - if step_log.tool_calls is None and step_log.error is not None: - message_content = ( - "Error:\n" - + str(step_log.error) - + "\nNow let's retry: take care not to repeat previous errors! 
If you have retried several times, try a completely different approach.\n" - ) + if step_log.error is not None: error_message = { "role": MessageRole.ASSISTANT, - "content": message_content, + "content": [ + { + "type": "text", + "text": ( + "Error:\n" + + str(step_log.error) + + "\nNow let's retry: take care not to repeat previous errors! If you have retried several times, try a completely different approach.\n" + ), + } + ], } memory.append(error_message) - if step_log.tool_calls is not None and ( - step_log.error is not None or step_log.observations is not None - ): - if step_log.error is not None: - message_content = ( - "Error:\n" - + str(step_log.error) - + "\nNow let's retry: take care not to repeat previous errors! If you have retried several times, try a completely different approach.\n" - ) - elif step_log.observations is not None: - message_content = f"Observation:\n{step_log.observations}" + if step_log.observations is not None: + if step_log.tool_calls: + tool_call_reference = f"Call id: {(step_log.tool_calls[0].id if getattr(step_log.tool_calls[0], 'id') else 'call_0')}\n" + else: + tool_call_reference = "" + text_observations = f"Observation:\n{step_log.observations}" tool_response_message = { "role": MessageRole.TOOL_RESPONSE, - "content": f"Call id: {(step_log.tool_calls[0].id if getattr(step_log.tool_calls[0], 'id') else 'call_0')}\n" - + message_content, + "content": [{"type": "text", "text": tool_call_reference + text_observations}], } memory.append(tool_response_message) + if step_log.observations_images: + thought_message_image = { + "role": MessageRole.USER, + "content": [{"type": "text", "text": "Here are the observed images:"}] + + [ + { + "type": "image", + "image": image, + } + for image in step_log.observations_images + ], + } + memory.append(thought_message_image) return memory @@ -357,23 +382,56 @@ def extract_action(self, llm_output: str, split_token: str) -> Tuple[str, str]: ) return rationale.strip(), action.strip() - def provide_final_answer(self, task) -> str: + def provide_final_answer(self, task: str, images: Optional[list[str]]) -> str: """ - This method provides a final answer to the task, based on the logs of the agent's interactions. + Provide the final answer to the task, based on the logs of the agent's interactions. + + Args: + task (`str`): Task to perform. + images (`list[str]`, *optional*): Paths to image(s). + + Returns: + `str`: Final answer to the task. """ - self.input_messages = [ - { - "role": MessageRole.SYSTEM, - "content": "An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:", - } - ] - self.input_messages += self.write_inner_memory_from_logs()[1:] - self.input_messages += [ - { - "role": MessageRole.USER, - "content": f"Based on the above, please provide an answer to the following user request:\n{task}", - } - ] + if images: + self.input_messages[0]["content"] = [ + { + "type": "text", + "text": "An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. 
Here is the agent's memory:", + } + ] + self.input_messages[0]["content"].append({"type": "image"}) + self.input_messages += self.write_inner_memory_from_logs()[1:] + self.input_messages += [ + { + "role": MessageRole.USER, + "content": [ + { + "type": "text", + "text": f"Based on the above, please provide an answer to the following user request:\n{task}", + } + ], + } + ] + else: + self.input_messages[0]["content"] = [ + { + "type": "text", + "text": "An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:", + } + ] + self.input_messages += self.write_inner_memory_from_logs()[1:] + self.input_messages += [ + { + "role": MessageRole.USER, + "content": [ + { + "type": "text", + "text": f"Based on the above, please provide an answer to the following user request:\n{task}", + } + ], + } + ] try: return self.model(self.input_messages).content except Exception as e: @@ -436,16 +494,18 @@ def run( stream: bool = False, reset: bool = True, single_step: bool = False, + images: Optional[List[str]] = None, additional_args: Optional[Dict] = None, ): """ - Runs the agent for the given task. + Run the agent for the given task. Args: - task (`str`): The task to perform. + task (`str`): Task to perform. stream (`bool`): Whether to run in a streaming way. reset (`bool`): Whether to reset the conversation or keep it going from previous run. single_step (`bool`): Whether to run the agent in one-shot fashion. + images (`list[str]`, *optional*): Paths to image(s). additional_args (`dict`): Any other variables that you want to pass to the agent run, for instance images or dataframes. Give them clear names! Example: @@ -455,6 +515,7 @@ def run( agent.run("What is the result of 2 power 3.7384?") ``` """ + self.task = task if additional_args is not None: self.state.update(additional_args) @@ -486,11 +547,10 @@ def run( level=LogLevel.INFO, ) - self.logs.append(TaskStep(task=self.task)) - + self.logs.append(TaskStep(task=self.task, task_images=images)) if single_step: step_start_time = time.time() - step_log = ActionStep(start_time=step_start_time) + step_log = ActionStep(start_time=step_start_time, observations_images=images) step_log.end_time = time.time() step_log.duration = step_log.end_time - step_start_time @@ -500,22 +560,27 @@ def run( if stream: # The steps are returned as they are executed through a generator to iterate on. - return self._run(task=self.task) + return self._run(task=self.task, images=images) # Outputs are returned only at the end as a string. We only look at the last step - return deque(self._run(task=self.task), maxlen=1)[0] + return deque(self._run(task=self.task, images=images), maxlen=1)[0] - def _run(self, task: str) -> Generator[str, None, None]: + def _run(self, task: str, images: List[str] | None = None) -> Generator[str, None, None]: """ - Runs the agent in streaming mode and returns a generator of all the steps. + Run the agent in streaming mode and returns a generator of all the steps. Args: - task (`str`): The task to perform. + task (`str`): Task to perform. + images (`list[str]`): Paths to image(s). 
""" final_answer = None self.step_number = 0 while final_answer is None and self.step_number < self.max_steps: step_start_time = time.time() - step_log = ActionStep(step=self.step_number, start_time=step_start_time) + step_log = ActionStep( + step_number=self.step_number, + start_time=step_start_time, + observations_images=images, + ) try: if self.planning_interval is not None and self.step_number % self.planning_interval == 0: self.planning_step( @@ -541,21 +606,31 @@ def _run(self, task: str) -> Generator[str, None, None]: step_log.duration = step_log.end_time - step_start_time self.logs.append(step_log) for callback in self.step_callbacks: - callback(step_log) + # For compatibility with old callbacks that don't take the agent as an argument + if len(inspect.signature(callback).parameters) == 1: + callback(step_log) + else: + callback(step_log=step_log, agent=self) self.step_number += 1 yield step_log if final_answer is None and self.step_number == self.max_steps: error_message = "Reached max steps." - final_step_log = ActionStep(error=AgentMaxStepsError(error_message, self.logger)) + final_step_log = ActionStep( + step_number=self.step_number, error=AgentMaxStepsError(error_message, self.logger) + ) self.logs.append(final_step_log) - final_answer = self.provide_final_answer(task) + final_answer = self.provide_final_answer(task, images) self.logger.log(Text(f"Final answer: {final_answer}"), level=LogLevel.INFO) final_step_log.action_output = final_answer final_step_log.end_time = time.time() final_step_log.duration = step_log.end_time - step_start_time for callback in self.step_callbacks: - callback(final_step_log) + # For compatibility with old callbacks that don't take the agent as an argument + if len(inspect.signature(callback).parameters) == 1: + callback(final_step_log) + else: + callback(step_log=final_step_log, agent=self) yield final_step_log yield handle_agent_output_types(final_answer) @@ -565,7 +640,7 @@ def planning_step(self, task, is_first_step: bool, step: int) -> None: Used periodically by the agent to plan the next steps to reach the objective. Args: - task (`str`): The task to perform + task (`str`): Task to perform. is_first_step (`bool`): If this step is not the first one, the plan should be an update over a previous plan. step (`int`): The number of the current step, used as an indication for the LLM. 
""" @@ -873,7 +948,6 @@ def step(self, log_entry: ActionStep) -> Union[None, Any]: # Add new step in logs log_entry.agent_memory = agent_memory.copy() - try: additional_args = {"grammar": self.grammar} if self.grammar is not None else {} llm_output = self.model( @@ -883,7 +957,7 @@ def step(self, log_entry: ActionStep) -> Union[None, Any]: ).content log_entry.llm_output = llm_output except Exception as e: - raise AgentGenerationError(f"Error in generating model output:\n{e}", self.logger) + raise AgentGenerationError(f"Error in generating model output:\n{e}", self.logger) from e self.logger.log( Group( diff --git a/src/smolagents/local_python_executor.py b/src/smolagents/local_python_executor.py index b719ac838..a4f046dca 100644 --- a/src/smolagents/local_python_executor.py +++ b/src/smolagents/local_python_executor.py @@ -1013,11 +1013,21 @@ def check_module_authorized(module_name): return None elif isinstance(expression, ast.ImportFrom): if check_module_authorized(expression.module): - raw_module = __import__(expression.module, fromlist=[alias.name for alias in expression.names]) - for alias in expression.names: - state[alias.asname or alias.name] = get_safe_module( - getattr(raw_module, alias.name), dangerous_patterns - ) + module = __import__(expression.module, fromlist=[alias.name for alias in expression.names]) + if expression.names[0].name == "*": # Handle "from module import *" + if hasattr(module, "__all__"): # If module has __all__, import only those names + for name in module.__all__: + state[name] = getattr(module, name) + else: # If no __all__, import all public names (those not starting with '_') + for name in dir(module): + if not name.startswith("_"): + state[name] = getattr(module, name) + else: # regular from imports + for alias in expression.names: + if hasattr(module, alias.name): + state[alias.asname or alias.name] = getattr(module, alias.name) + else: + raise InterpreterError(f"Module {expression.module} has no attribute {alias.name}") else: raise InterpreterError(f"Import from {expression.module} is not allowed.") return None diff --git a/src/smolagents/models.py b/src/smolagents/models.py index 9eebf75af..eb613dffc 100644 --- a/src/smolagents/models.py +++ b/src/smolagents/models.py @@ -24,10 +24,16 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union from huggingface_hub import InferenceClient -from huggingface_hub.utils import is_torch_available +from PIL import Image +from transformers import ( + AutoModelForImageTextToText, + AutoProcessor, + StoppingCriteriaList, + is_torch_available, +) from .tools import Tool -from .utils import _is_package_available +from .utils import _is_package_available, encode_image_base64, make_image_url if TYPE_CHECKING: @@ -180,31 +186,54 @@ def remove_stop_sequences(content: str, stop_sequences: List[str]) -> str: def get_clean_message_list( message_list: List[Dict[str, str]], role_conversions: Dict[MessageRole, MessageRole] = {}, + convert_images_to_image_urls: bool = False, + flatten_messages_as_text: bool = False, ) -> List[Dict[str, str]]: """ Subsequent messages with the same role will be concatenated to a single message. + output_message_list is a list of messages that will be used to generate the final message that is chat template compatible with transformers LLM chat template. Args: - message_list (`List[Dict[str, str]]`): List of chat messages. + message_list (`list[dict[str, str]]`): List of chat messages. + role_conversions (`dict[MessageRole, MessageRole]`, *optional* ): Mapping to convert roles. 
+ convert_images_to_image_urls (`bool`, default `False`): Whether to convert images to image URLs. + flatten_messages_as_text (`bool`, default `False`): Whether to flatten messages as text. """ - final_message_list = [] + output_message_list = [] message_list = deepcopy(message_list) # Avoid modifying the original list for message in message_list: - # if not set(message.keys()) == {"role", "content"}: - # raise ValueError("Message should contain only 'role' and 'content' keys!") - role = message["role"] if role not in MessageRole.roles(): raise ValueError(f"Incorrect role {role}, only {MessageRole.roles()} are supported for now.") if role in role_conversions: message["role"] = role_conversions[role] - - if len(final_message_list) > 0 and message["role"] == final_message_list[-1]["role"]: - final_message_list[-1]["content"] += "\n=======\n" + message["content"] + # encode images if needed + if isinstance(message["content"], list): + for i, element in enumerate(message["content"]): + if element["type"] == "image": + assert not flatten_messages_as_text, f"Cannot use images with {flatten_messages_as_text=}" + if convert_images_to_image_urls: + message["content"][i] = { + "type": "image_url", + "image_url": {"url": make_image_url(encode_image_base64(element["image"]))}, + } + else: + message["content"][i]["image"] = encode_image_base64(element["image"]) + + if len(output_message_list) > 0 and message["role"] == output_message_list[-1]["role"]: + assert isinstance(message["content"], list), "Error: wrong content:" + str(message["content"]) + if flatten_messages_as_text: + output_message_list[-1]["content"] += message["content"][0]["text"] + else: + output_message_list[-1]["content"] += message["content"] else: - final_message_list.append(message) - return final_message_list + if flatten_messages_as_text: + content = message["content"][0]["text"] + else: + content = message["content"] + output_message_list.append({"role": message["role"], "content": content}) + return output_message_list class Model: @@ -222,6 +251,8 @@ def _prepare_completion_kwargs( grammar: Optional[str] = None, tools_to_call_from: Optional[List[Tool]] = None, custom_role_conversions: Optional[Dict[str, str]] = None, + convert_images_to_image_urls: bool = False, + flatten_messages_as_text: bool = False, **kwargs, ) -> Dict: """ @@ -233,7 +264,12 @@ def _prepare_completion_kwargs( 3. Default values in self.kwargs """ # Clean and standardize the message list - messages = get_clean_message_list(messages, role_conversions=custom_role_conversions or tool_role_conversions) + messages = get_clean_message_list( + messages, + role_conversions=custom_role_conversions or tool_role_conversions, + convert_images_to_image_urls=convert_images_to_image_urls, + flatten_messages_as_text=flatten_messages_as_text, + ) # Use self.kwargs as the base configuration completion_kwargs = { @@ -356,6 +392,7 @@ def __call__( stop_sequences=stop_sequences, grammar=grammar, tools_to_call_from=tools_to_call_from, + convert_images_to_image_urls=True, **kwargs, ) @@ -386,6 +423,11 @@ class TransformersModel(Model): The torch_dtype to initialize your model with. trust_remote_code (bool, default `False`): Some models on the Hub require running remote code: for this model, you would have to set this flag to True. + flatten_messages_as_text (`bool`, default `True`): + Whether to flatten messages as text: this must be sent to False to use VLMs (as opposed to LLMs for which this flag can be ignored). 
+ Caution: this parameter is experimental and will be removed in an upcoming PR as we auto-detect VLMs. + kwargs (dict, *optional*): + Any additional keyword arguments that you want to use in model.generate(), for instance `max_new_tokens` or `device`. **kwargs: Additional keyword arguments to pass to `model.generate()`, for instance `max_new_tokens` or `device`. Raises: @@ -412,6 +454,7 @@ def __init__( device_map: Optional[str] = None, torch_dtype: Optional[str] = None, trust_remote_code: bool = False, + flatten_messages_as_text: bool = True, **kwargs, ): super().__init__(**kwargs) @@ -432,13 +475,19 @@ def __init__( device_map = "cuda" if torch.cuda.is_available() else "cpu" logger.info(f"Using device: {device_map}") try: - self.tokenizer = AutoTokenizer.from_pretrained(model_id) self.model = AutoModelForCausalLM.from_pretrained( model_id, device_map=device_map, torch_dtype=torch_dtype, trust_remote_code=trust_remote_code, ) + self.tokenizer = AutoTokenizer.from_pretrained(model_id) + except ValueError as e: + if "Unrecognized configuration class" in str(e): + self.model = AutoModelForImageTextToText.from_pretrained(model_id, device_map=device_map) + self.processor = AutoProcessor.from_pretrained(model_id) + else: + raise e except Exception as e: logger.warning( f"Failed to load tokenizer and model for {model_id=}: {e}. Loading default tokenizer and model instead from {default_model_id=}." @@ -446,8 +495,9 @@ def __init__( self.model_id = default_model_id self.tokenizer = AutoTokenizer.from_pretrained(default_model_id) self.model = AutoModelForCausalLM.from_pretrained(model_id, device_map=device_map, torch_dtype=torch_dtype) + self.flatten_messages_as_text = flatten_messages_as_text - def make_stopping_criteria(self, stop_sequences: List[str]) -> "StoppingCriteriaList": + def make_stopping_criteria(self, stop_sequences: List[str], tokenizer) -> "StoppingCriteriaList": from transformers import StoppingCriteria, StoppingCriteriaList class StopOnStrings(StoppingCriteria): @@ -466,7 +516,7 @@ def __call__(self, input_ids, scores, **kwargs): return True return False - return StoppingCriteriaList([StopOnStrings(stop_sequences, self.tokenizer)]) + return StoppingCriteriaList([StopOnStrings(stop_sequences, tokenizer)]) def __call__( self, @@ -474,12 +524,15 @@ def __call__( stop_sequences: Optional[List[str]] = None, grammar: Optional[str] = None, tools_to_call_from: Optional[List[Tool]] = None, + images: Optional[List[Image.Image]] = None, **kwargs, ) -> ChatMessage: completion_kwargs = self._prepare_completion_kwargs( messages=messages, stop_sequences=stop_sequences, grammar=grammar, + tools_to_call_from=tools_to_call_from, + flatten_messages_as_text=self.flatten_messages_as_text, **kwargs, ) @@ -496,31 +549,46 @@ def __call__( if max_new_tokens: completion_kwargs["max_new_tokens"] = max_new_tokens - if tools_to_call_from is not None: - prompt_tensor = self.tokenizer.apply_chat_template( + if hasattr(self, "processor"): + images = [Image.open(image) for image in images] if images else None + prompt_tensor = self.processor.apply_chat_template( messages, - tools=[get_tool_json_schema(tool) for tool in tools_to_call_from], + tools=[get_tool_json_schema(tool) for tool in tools_to_call_from] if tools_to_call_from else None, return_tensors="pt", + tokenize=True, return_dict=True, - add_generation_prompt=True, + images=images, + add_generation_prompt=True if tools_to_call_from else False, ) else: prompt_tensor = self.tokenizer.apply_chat_template( messages, + tools=[get_tool_json_schema(tool) for 
tool in tools_to_call_from] if tools_to_call_from else None, return_tensors="pt", return_dict=True, + add_generation_prompt=True if tools_to_call_from else False, ) prompt_tensor = prompt_tensor.to(self.model.device) count_prompt_tokens = prompt_tensor["input_ids"].shape[1] + if stop_sequences: + stopping_criteria = self.make_stopping_criteria( + stop_sequences, tokenizer=self.processor if hasattr(self, "processor") else self.tokenizer + ) + else: + stopping_criteria = None + out = self.model.generate( **prompt_tensor, - stopping_criteria=(self.make_stopping_criteria(stop_sequences) if stop_sequences else None), + stopping_criteria=stopping_criteria, **completion_kwargs, ) generated_tokens = out[0, count_prompt_tokens:] - output = self.tokenizer.decode(generated_tokens, skip_special_tokens=True) + if hasattr(self, "processor"): + output = self.processor.decode(generated_tokens, skip_special_tokens=True) + else: + output = self.tokenizer.decode(generated_tokens, skip_special_tokens=True) self.last_input_token_count = count_prompt_tokens self.last_output_token_count = len(generated_tokens) @@ -601,6 +669,7 @@ def __call__( model=self.model_id, api_base=self.api_base, api_key=self.api_key, + convert_images_to_image_urls=True, **kwargs, ) @@ -673,6 +742,7 @@ def __call__( tools_to_call_from=tools_to_call_from, model=self.model_id, custom_role_conversions=self.custom_role_conversions, + convert_images_to_image_urls=True, **kwargs, ) diff --git a/src/smolagents/monitoring.py b/src/smolagents/monitoring.py index 722f25e26..59f43f443 100644 --- a/src/smolagents/monitoring.py +++ b/src/smolagents/monitoring.py @@ -38,6 +38,11 @@ def reset(self): self.total_output_token_count = 0 def update_metrics(self, step_log): + """Update the metrics of the monitor. + + Args: + step_log ([`AgentStepLog`]): Step log to update the monitor with. + """ step_duration = step_log.duration self.step_durations.append(step_duration) console_outputs = f"[Step {len(self.step_durations) - 1}: Duration {step_duration:.2f} seconds" diff --git a/src/smolagents/tools.py b/src/smolagents/tools.py index b73bc6f77..10b22ea03 100644 --- a/src/smolagents/tools.py +++ b/src/smolagents/tools.py @@ -348,7 +348,6 @@ def push_to_hub( with tempfile.TemporaryDirectory() as work_dir: # Save all files. self.save(work_dir) - print(work_dir) with open(work_dir + "/tool.py", "r") as f: print("\n".join(f.readlines())) logger.info(f"Uploading the following files to {repo_id}: {','.join(os.listdir(work_dir))}") diff --git a/src/smolagents/utils.py b/src/smolagents/utils.py index f8e34a03f..8aa631f1a 100644 --- a/src/smolagents/utils.py +++ b/src/smolagents/utils.py @@ -15,6 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import ast +import base64 import importlib.metadata import importlib.util import inspect @@ -24,11 +25,15 @@ import types from enum import IntEnum from functools import lru_cache +from io import BytesIO from typing import Dict, Tuple, Union from rich.console import Console +__all__ = ["AgentError"] + + @lru_cache def _is_package_available(package_name: str) -> bool: try: @@ -383,4 +388,11 @@ def get_source(obj) -> str: raise e from inspect_error -__all__ = ["AgentError"] +def encode_image_base64(image): + buffered = BytesIO() + image.save(buffered, format="PNG") + return base64.b64encode(buffered.getvalue()).decode("utf-8") + + +def make_image_url(base64_image): + return f"data:image/png;base64,{base64_image}" diff --git a/tests/test_agents.py b/tests/test_agents.py index e76d35c79..1dcb5e933 100644 --- a/tests/test_agents.py +++ b/tests/test_agents.py @@ -104,6 +104,40 @@ def __call__(self, messages, tools_to_call_from=None, stop_sequences=None, gramm ) +class FakeToolCallModelVL: + def __call__(self, messages, tools_to_call_from=None, stop_sequences=None, grammar=None): + if len(messages) < 3: + return ChatMessage( + role="assistant", + content="", + tool_calls=[ + ChatMessageToolCall( + id="call_0", + type="function", + function=ChatMessageToolCallDefinition( + name="fake_image_understanding_tool", + arguments={ + "prompt": "What is in this image?", + "image": "image.png", + }, + ), + ) + ], + ) + else: + return ChatMessage( + role="assistant", + content="", + tool_calls=[ + ChatMessageToolCall( + id="call_1", + type="function", + function=ChatMessageToolCallDefinition(name="final_answer", arguments="The image is a cat."), + ) + ], + ) + + def fake_code_model(messages, stop_sequences=None, grammar=None) -> str: prompt = str(messages) if "special_marker" not in prompt: @@ -139,10 +173,10 @@ def fake_code_model_error(messages, stop_sequences=None) -> str: Thought: I should multiply 2 by 3.6452. special_marker Code: ```py -a = 2 -b = a * 2 -print = 2 -print("Ok, calculation done!") +def error_function(): + raise ValueError("error") + +error_function() ``` """, ) @@ -150,7 +184,7 @@ def fake_code_model_error(messages, stop_sequences=None) -> str: return ChatMessage( role="assistant", content=""" -Thought: I can now answer the initial question +Thought: I faced an error in the previous step. Code: ```py final_answer("got an error") @@ -294,6 +328,25 @@ def fake_image_generation_tool(prompt: str) -> Image.Image: assert isinstance(output, AgentImage) assert isinstance(agent.state["image.png"], Image.Image) + def test_toolcalling_agent_handles_image_inputs(self): + from PIL import Image + + image = Image.open(Path(get_tests_dir("fixtures")) / "000000039769.png") # dummy input + + @tool + def fake_image_understanding_tool(prompt: str, image: Image.Image) -> str: + """Tool that creates a caption for an image. + + Args: + prompt: The prompt + image: The image + """ + return "The image is a cat." + + agent = ToolCallingAgent(tools=[fake_image_understanding_tool], model=FakeToolCallModelVL()) + output = agent.run("Caption this image.", images=[image]) + assert output == "The image is a cat." 
+ def test_fake_code_agent(self): agent = CodeAgent(tools=[PythonInterpreterTool()], model=fake_code_model) output = agent.run("What is 2 multiplied by 3.6452?") @@ -327,12 +380,13 @@ def test_reset_conversations(self): assert output == 7.2904 assert len(agent.logs) == 4 - def test_code_agent_code_errors_show_offending_lines(self): + def test_code_agent_code_errors_show_offending_line_and_error(self): agent = CodeAgent(tools=[PythonInterpreterTool()], model=fake_code_model_error) output = agent.run("What is 2 multiplied by 3.6452?") assert isinstance(output, AgentText) assert output == "got an error" - assert "Code execution failed at line 'print = 2' due to: InterpreterError" in str(agent.logs) + assert "Code execution failed at line 'error_function()'" in str(agent.logs[2].error) + assert "ValueError" in str(agent.logs) def test_code_agent_syntax_error_show_offending_lines(self): agent = CodeAgent(tools=[PythonInterpreterTool()], model=fake_code_model_syntax_error) diff --git a/tests/test_models.py b/tests/test_models.py index bad87fdfc..cd3c96f24 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -14,8 +14,11 @@ # limitations under the License. import json import unittest +from pathlib import Path from typing import Optional +from transformers.testing_utils import get_tests_dir + from smolagents import ChatMessage, HfApiModel, TransformersModel, models, tool from smolagents.models import parse_json_if_needed @@ -39,13 +42,13 @@ def get_weather(location: str, celsius: Optional[bool] = False) -> str: ) def test_chatmessage_has_model_dumps_json(self): - message = ChatMessage("user", "Hello!") + message = ChatMessage("user", [{"type": "text", "text": "Hello!"}]) data = json.loads(message.model_dump_json()) - assert data["content"] == "Hello!" + assert data["content"] == [{"type": "text", "text": "Hello!"}] def test_get_hfapi_message_no_tool(self): model = HfApiModel(max_tokens=10) - messages = [{"role": "user", "content": "Hello!"}] + messages = [{"role": "user", "content": [{"type": "text", "text": "Hello!"}]}] model(messages, stop_sequences=["great"]) def test_transformers_message_no_tool(self): @@ -54,11 +57,27 @@ def test_transformers_message_no_tool(self): max_new_tokens=5, device_map="auto", do_sample=False, + flatten_messages_as_text=True, ) - messages = [{"role": "user", "content": "Hello!"}] + messages = [{"role": "user", "content": [{"type": "text", "text": "Hello!"}]}] output = model(messages, stop_sequences=["great"]).content assert output == "assistant\nHello" + def test_transformers_message_vl_no_tool(self): + from PIL import Image + + img = Image.open(Path(get_tests_dir("fixtures")) / "000000039769.png") + model = TransformersModel( + model_id="llava-hf/llava-interleave-qwen-0.5b-hf", + max_new_tokens=5, + device_map="auto", + do_sample=False, + flatten_messages_as_text=False, + ) + messages = [{"role": "user", "content": [{"type": "text", "text": "Hello!"}, {"type": "image", "image": img}]}] + output = model(messages, stop_sequences=["great"]).content + assert output == "Hello! How can" + def test_parse_json_if_needed(self): args = "abc" parsed_args = parse_json_if_needed(args)