
Commit 2a5e19e

sternakt and davorrunje authored
Make it easy to use single agent (#699)
* WIP
* WIP
* WIP
* Add test for standalone AssistantAgent
* Cleanup notebook
* Cleanup notebook
* Add browser use to standalone assistant agent notebook
* Polish notebook demo
* Add BrowserUseTool to assistant_agent standalone example
* Add BrowserUseTool to assistant_agent standalone example
* Add async test
* Remove unnecessary code
* polishing
* polishing
* polishing
* polishing

Co-authored-by: Davor Runje <[email protected]>
1 parent 3fc4960 commit 2a5e19e

File tree: 5 files changed, +351 −2 lines changed

autogen/agentchat/conversable_agent.py (+74)

@@ -13,9 +13,12 @@
 import re
 import warnings
 from collections import defaultdict
+from contextlib import contextmanager
 from typing import (
     Any,
     Callable,
+    Generator,
+    Iterable,
     Literal,
     Optional,
     TypeVar,
@@ -2907,6 +2910,77 @@ def get_total_usage(self) -> Union[None, dict[str, int]]:
         else:
             return self.client.total_usage_summary
 
+    @contextmanager
+    def _create_executor(
+        self, executor_kwargs: Optional[dict[str, Any]] = None, tools: Optional[Union[Tool, Iterable[Tool]]] = None
+    ) -> Generator["ConversableAgent", None, None]:
+        if executor_kwargs is None:
+            executor_kwargs = {}
+        if "is_termination_msg" not in executor_kwargs:
+            executor_kwargs["is_termination_msg"] = lambda x: (x["content"] is not None) and x["content"].endswith(
+                "TERMINATE"
+            )
+
+        executor = ConversableAgent(
+            name="executor",
+            human_input_mode="NEVER",
+            code_execution_config={
+                "work_dir": "coding",
+                "use_docker": True,
+            },
+            **executor_kwargs,
+        )
+
+        try:
+            tools = [] if tools is None else tools
+            tools = [tools] if isinstance(tools, Tool) else tools
+            for tool in tools:
+                tool.register_for_execution(executor)
+                tool.register_for_llm(self)
+            yield executor
+        finally:
+            if tools is not None:
+                for tool in tools:
+                    self.update_tool_signature(tool_sig=tool.tool_schema["function"]["name"], is_remove=True)
+
+    def run(
+        self,
+        message: str,
+        *,
+        clear_history: bool = False,
+        executor_kwargs: Optional[dict[str, Any]] = None,
+        tools: Optional[Union[Tool, Iterable[Tool]]] = None,
+    ) -> ChatResult:
+        """Run the agent with the given message.
+
+        Args:
+            message: the message to be processed.
+            clear_history: whether to clear the chat history.
+            executor_kwargs: the keyword arguments for the executor.
+            tools: the tools to be used by the agent.
+        """
+        with self._create_executor(executor_kwargs=executor_kwargs, tools=tools) as executor:
+            return executor.initiate_chat(self, message=message, clear_history=clear_history).summary
+
+    async def a_run(
+        self,
+        message: str,
+        *,
+        clear_history=False,
+        tools: Optional[Union[Tool, Iterable[Tool]]] = None,
+        executor_kwargs: Optional[dict[str, Any]] = None,
+    ) -> ChatResult:
+        """Run the agent with the given message.
+
+        Args:
+            message: the message to be processed.
+            clear_history: whether to clear the chat history.
+            executor_kwargs: the keyword arguments for the executor.
+            tools: the tools to be used by the agent.
+        """
+        with self._create_executor(executor_kwargs=executor_kwargs, tools=tools) as executor:
+            return (await executor.a_initiate_chat(self, message=message, clear_history=clear_history)).summary
+
 
 @export_module("autogen")
 def register_function(
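A minimal usage sketch of the run()/executor_kwargs API added above. The OAI_CONFIG_LIST file, the gpt-4o-mini tag, the task string, and the max_consecutive_auto_reply value are illustrative assumptions, and a working Docker setup is assumed because the internal executor defaults to use_docker=True.

import autogen
from autogen import AssistantAgent

# Illustrative LLM configuration; any valid config_list works here.
config_list = autogen.config_list_from_json("OAI_CONFIG_LIST", filter_dict={"tags": ["gpt-4o-mini"]})
assistant = AssistantAgent(name="assistant", llm_config={"config_list": config_list})

# run() creates a temporary executor agent, has it chat with this agent, and returns the chat summary.
# executor_kwargs is forwarded to the executor's ConversableAgent constructor.
summary = assistant.run(
    "Write and run a Python script that prints the first 10 Fibonacci numbers.",
    executor_kwargs={"max_consecutive_auto_reply": 5},
)
print(summary)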

autogen/tools/__init__.py (+2, -1)

@@ -4,7 +4,7 @@
 
 from .dependency_injection import BaseContext, ChatContext, Depends
 from .function_utils import get_function_schema, load_basemodels_if_needed, serialize_to_str
-from .tool import Tool
+from .tool import Tool, tool
 
 __all__ = [
     "BaseContext",
@@ -14,4 +14,5 @@
     "get_function_schema",
     "load_basemodels_if_needed",
     "serialize_to_str",
+    "tool",
 ]

autogen/tools/tool.py (+19, -1)

@@ -12,7 +12,7 @@
 if TYPE_CHECKING:
     from ..agentchat.conversable_agent import ConversableAgent
 
-__all__ = ["Tool"]
+__all__ = ["Tool", "tool"]
 
 
 @export_module("autogen.tools")
@@ -148,3 +148,21 @@ def realtime_tool_schema(self) -> dict[str, Any]:
         schema = {"type": schema["type"], **schema["function"]}
 
         return schema
+
+
+@export_module("autogen.tools")
+def tool(name: Optional[str] = None, description: Optional[str] = None) -> Callable[[Callable[..., Any]], Tool]:
+    """Decorator to create a Tool from a function.
+
+    Args:
+        name (str): The name of the tool.
+        description (str): The description of the tool.
+
+    Returns:
+        Callable[[Callable[..., Any]], Tool]: A decorator that creates a Tool from a function.
+    """
+
+    def decorator(func: Callable[..., Any]) -> Tool:
+        return Tool(name=name, description=description, func_or_tool=func)
+
+    return decorator
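A minimal sketch combining the new tool decorator with the standalone run() API; the add_numbers function, the model tag, and the task string are illustrative and not part of this commit.

import autogen
from autogen import AssistantAgent
from autogen.tools import tool

@tool(name="add_numbers", description="Add two integers and return the sum.")
def add_numbers(a: int, b: int) -> int:
    return a + b

# Illustrative LLM configuration.
config_list = autogen.config_list_from_json("OAI_CONFIG_LIST", filter_dict={"tags": ["gpt-4o-mini"]})
assistant = AssistantAgent(name="assistant", llm_config={"config_list": config_list})

# The decorator returns a Tool; run() registers it on this agent for LLM calls and on the
# internal executor for execution, then removes the tool signature when the chat ends.
answer = assistant.run("Use the add_numbers tool to compute 40 + 2.", tools=add_numbers)
print(answer)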
(new notebook, +216)

@@ -0,0 +1,216 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Standalone AssistantAgent\n",
    "\n",
    "AG2 supports running `AssistantAgent` as a standalone agent with the ability to execute simple tasks without the need for interacting with other agents.\n",
    "\n",
    "To enable our assistant agent to surf the web, we will use the `BrowserUseTool`, for which we need to install the browser-use optional dependency and [playwright](https://playwright.dev/python/docs/intro).\n",
    "\n",
    "````{=mdx}\n",
    ":::info Requirements\n",
    "Install `ag2`:\n",
    "```bash\n",
    "pip install ag2[browser-use]\n",
    "playwright install\n",
    "```\n",
    "````\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import autogen\n",
    "from autogen import AssistantAgent\n",
    "from autogen.tools.experimental.browser_use.browser_use import BrowserUseTool"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import nest_asyncio\n",
    "\n",
    "nest_asyncio.apply()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Set your API Endpoint\n",
    "\n",
    "The [`config_list_from_json`](https://docs.ag2.ai/reference/autogen/config_list_from_json#config-list-from-json) function loads a list of configurations from an environment variable or a json file."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "config_list = autogen.config_list_from_json(\n",
    "    \"OAI_CONFIG_LIST\",\n",
    "    filter_dict={\n",
    "        \"tags\": [\"gpt-4o-mini\"],\n",
    "    },\n",
    ")\n",
    "\n",
    "llm_config = {\n",
    "    \"timeout\": 600,\n",
    "    \"config_list\": config_list,\n",
    "    \"temperature\": 0.8,\n",
    "}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Configure your assistant agents\n",
    "\n",
    "Here we will configure two assistant agents:\n",
    "1. x_assistant, tasked with exploring the trending topics on X (formerly Twitter)\n",
    "2. arxiv_researcher, tasked with discovering papers that align with the hot topic of the day"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "x_assistant = AssistantAgent(name=\"x_assistant\", llm_config=llm_config)\n",
    "\n",
    "arxiv_researcher = AssistantAgent(name=\"arxiv\", llm_config=llm_config)\n",
    "\n",
    "browser_use_tool = BrowserUseTool(\n",
    "    llm_config=llm_config,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Running the assistant agents"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's run our x_assistant to discover the hot topic of the day.\n",
    "To do this, let's give our assistant the capability to browse the web using the `BrowserUseTool`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "hot_topic_res = x_assistant.run(\n",
    "    \"Find out today's hot topic and an influencer who is talking about it on X using a web search\",\n",
    "    tools=browser_use_tool,\n",
    ")\n",
    "\n",
    "print(hot_topic_res)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "After discovering the hot topic, let's find papers that align with it."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "paper_abstract = arxiv_researcher.run(\n",
    "    \"Get the abstract of a relevant paper based on \" + hot_topic_res,\n",
    ")\n",
    "\n",
    "print(paper_abstract)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now, let's create an X post using our x_assistant."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scenario 1. This task requires x_assistant's past state\n",
    "post = x_assistant.run(\n",
    "    \"Create an X post based on the hot topic and this \" + paper_abstract + \" and mention the influencer\",\n",
    ")\n",
    "\n",
    "print(post)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Finally, let's ask our x_assistant who we should follow on Twitter."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scenario 2. Doing another task that does not require history or past state\n",
    "\n",
    "influencer_choice = x_assistant.run(\n",
    "    \"Find an influencer I should follow on Twitter by searching the web\",\n",
    "    clear_history=True,\n",
    "    tools=browser_use_tool,\n",
    ")\n",
    "\n",
    "print(influencer_choice)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
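A minimal async counterpart to the notebook above using the new a_run() method added in this commit; the task string and the gpt-4o-mini tag are illustrative assumptions.

import asyncio

import autogen
from autogen import AssistantAgent
from autogen.tools.experimental.browser_use.browser_use import BrowserUseTool


async def main() -> None:
    # Illustrative LLM configuration, matching the notebook's filter.
    config_list = autogen.config_list_from_json("OAI_CONFIG_LIST", filter_dict={"tags": ["gpt-4o-mini"]})
    llm_config = {"config_list": config_list}

    x_assistant = AssistantAgent(name="x_assistant", llm_config=llm_config)
    browser_use_tool = BrowserUseTool(llm_config=llm_config)

    # a_run() mirrors run() but awaits a_initiate_chat on the temporary executor.
    summary = await x_assistant.a_run(
        "Find out today's hot topic on X using a web search",
        tools=browser_use_tool,
    )
    print(summary)


asyncio.run(main())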
