From 8d443647b73db8ca7ebb4b01e29801a7b08fd3b6 Mon Sep 17 00:00:00 2001
From: Davor Runje
Date: Sat, 25 Jan 2025 20:52:56 +0100
Subject: [PATCH 01/13] Add safe fixes

---
 .../contrib/capabilities/tools_capability.py | 4 +-
 autogen/agentchat/contrib/rag/document_utils.py | 11 +-
 autogen/agentchat/contrib/rag/parser_utils.py | 5 +-
 autogen/agentchat/conversable_agent.py | 105 +++++++++---------
 autogen/coding/jupyter/import_utils.py | 1 -
 autogen/exception_utils.py | 4 +-
 autogen/import_utils.py | 1 -
 autogen/oai/anthropic.py | 4 +-
 autogen/oai/client.py | 3 +-
 autogen/oai/gemini.py | 4 +-
 autogen/oai/ollama.py | 4 +-
 autogen/tools/dependency_injection.py | 1 -
 autogen/tools/tool.py | 3 +-
 ...chat_realtime_gemini_swarm_websocket.ipynb | 4 +-
 .../contrib/rag/test_parser_utils.py | 9 +-
 test/conftest.py | 9 +-
 test/oai/test_client.py | 6 +-
 test/website/test_process_notebooks.py | 1 -
 website/process_api_reference.py | 2 +-
 website/process_notebooks.py | 1 -
 20 files changed, 77 insertions(+), 105 deletions(-)

diff --git a/autogen/agentchat/contrib/capabilities/tools_capability.py b/autogen/agentchat/contrib/capabilities/tools_capability.py
index 068afff8b2..78e82788b4 100644
--- a/autogen/agentchat/contrib/capabilities/tools_capability.py
+++ b/autogen/agentchat/contrib/capabilities/tools_capability.py
@@ -17,8 +17,6 @@ def __init__(self, tool_list: list[Tool]):
         self.tools = [tool for tool in tool_list]
 
     def add_to_agent(self, agent: ConversableAgent):
-        """
-        Add tools to the given agent.
-        """
+        """Add tools to the given agent."""
         for tool in self.tools:
             tool.register_tool(agent=agent)

diff --git a/autogen/agentchat/contrib/rag/document_utils.py b/autogen/agentchat/contrib/rag/document_utils.py
index 7a8e5b5f48..b9b65c75b6 100644
--- a/autogen/agentchat/contrib/rag/document_utils.py
+++ b/autogen/agentchat/contrib/rag/document_utils.py
@@ -18,8 +18,7 @@
 
 
 def is_url(url: str) -> bool:
-    """
-    Check if the string is a valid URL.
+    """Check if the string is a valid URL.
 
     It checks whether the URL has a valid scheme and network location.
     """
@@ -37,8 +36,7 @@ def is_url(url: str) -> bool:
 
 @require_optional_import(["selenium", "webdriver_manager"], "rag")
 def _download_rendered_html(url: str) -> str:
-    """
-    Downloads a rendered HTML page of a given URL using headless ChromeDriver.
+    """Downloads a rendered HTML page of a given URL using headless ChromeDriver.
 
     Args:
         url (str): URL of the page to download.
@@ -82,7 +80,7 @@ def download_url(url: Any, output_dir: Optional[Union[str, Path]] = None) -> Pat
     filename = url_path.name or "downloaded_content.html"
     if len(filename) < 5 or filename[-5:] != ".html":
         filename += ".html"
-    output_dir = Path(output_dir) if output_dir else Path(".")
+    output_dir = Path(output_dir) if output_dir else Path()
     filepath = output_dir / filename
     with filepath.open("w", encoding="utf-8") as f:
         f.write(rendered_html)
@@ -91,8 +89,7 @@ def download_url(url: Any, output_dir: Optional[Union[str, Path]] = None) -> Pat
 
 
 def list_files(directory: Union[Path, str]) -> list[Path]:
-    """
-    Recursively list all files in a directory.
+    """Recursively list all files in a directory.
 
     This function will raise an exception if the directory does not exist.
     """

diff --git a/autogen/agentchat/contrib/rag/parser_utils.py b/autogen/agentchat/contrib/rag/parser_utils.py
index 71fa2aefde..779ccd7dc8 100644
--- a/autogen/agentchat/contrib/rag/parser_utils.py
+++ b/autogen/agentchat/contrib/rag/parser_utils.py
@@ -27,8 +27,7 @@ def docling_parse_docs(  # type: ignore[no-any-unimported]
     input_file_path: Union[Path, str],
     output_dir_path: Union[Path, str],
 ) -> list["ConversionResult"]:
-    """
-    Convert documents into a Deep Search document format using EasyOCR
+    """Convert documents into a Deep Search document format using EasyOCR
     with CPU only, and export the document and its tables to the specified
     output directory.
 
@@ -84,7 +83,7 @@ def docling_parse_docs(  # type: ignore[no-any-unimported]
     for res in conv_results:
         out_path = Path(output_dir_path)
         doc_filename = res.input.file.stem
-        _log.info(f"Document {res.input.file.name} converted.\nSaved markdown output to: {str(out_path)}")
+        _log.info(f"Document {res.input.file.name} converted.\nSaved markdown output to: {out_path!s}")
         _log.debug(res.document._export_to_indented_text(max_text_len=16))
         # Export Docling document format to markdowndoc:
         with (out_path / f"{doc_filename}.md").open("w") as fp:

diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py
index 2c99f9a1a7..0a6c33adae 100644
--- a/autogen/agentchat/conversable_agent.py
+++ b/autogen/agentchat/conversable_agent.py
@@ -104,59 +104,58 @@ def __init__(
         silent: Optional[bool] = None,
         context_variables: Optional[dict[str, Any]] = None,
     ):
-        """
-        Args:
-            name (str): name of the agent.
-            system_message (str or list): system message for the ChatCompletion inference.
-            is_termination_msg (function): a function that takes a message in the form of a dictionary
-                and returns a boolean value indicating if this received message is a termination message.
-                The dict can contain the following keys: "content", "role", "name", "function_call".
-            max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
-                default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
-                When set to 0, no auto reply will be generated.
-            human_input_mode (str): whether to ask for human inputs every time a message is received.
-                Possible values are "ALWAYS", "TERMINATE", "NEVER".
-                (1) When "ALWAYS", the agent prompts for human input every time a message is received.
-                    Under this mode, the conversation stops when the human input is "exit",
-                    or when is_termination_msg is True and there is no human input.
-                (2) When "TERMINATE", the agent only prompts for human input only when a termination message is received or
-                    the number of auto reply reaches the max_consecutive_auto_reply.
-                (3) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
-                    when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
-            function_map (dict[str, callable]): Mapping function names (passed to openai) to callable functions, also used for tool calls.
-            code_execution_config (dict or False): config for the code execution.
-                To disable code execution, set to False. Otherwise, set to a dictionary with the following keys:
-                - work_dir (Optional, str): The working directory for the code execution.
-                    If None, a default working directory will be used.
-                    The default working directory is the "extensions" directory under
-                    "path_to_autogen".
-                - use_docker (Optional, list, str or bool): The docker image to use for code execution.
-                    Default is True, which means the code will be executed in a docker container. A default list of images will be used.
-                    If a list or a str of image name(s) is provided, the code will be executed in a docker container
-                    with the first image successfully pulled.
-                    If False, the code will be executed in the current environment.
-                    We strongly recommend using docker for code execution.
-                - timeout (Optional, int): The maximum execution time in seconds.
-                - last_n_messages (Experimental, int or str): The number of messages to look back for code execution.
-                    If set to 'auto', it will scan backwards through all messages arriving since the agent last spoke, which is typically the last time execution was attempted. (Default: auto)
-            llm_config (dict or False or None): llm inference configuration.
-                Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
-                for available options.
-                When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `llm_config` or in each config of 'config_list' in `llm_config`.
-                To disable llm-based auto reply, set to False.
-                When set to None, will use self.DEFAULT_CONFIG, which defaults to False.
-            default_auto_reply (str or dict): default auto reply when no code execution or llm-based reply is generated.
-            description (str): a short description of the agent. This description is used by other agents
-                (e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
-            chat_messages (dict or None): the previous chat messages that this agent had in the past with other agents.
-                Can be used to give the agent a memory by providing the chat history. This will allow the agent to
-                resume previous had conversations. Defaults to an empty chat history.
-            silent (bool or None): (Experimental) whether to print the message sent. If None, will use the value of
-                silent in each function.
-            context_variables (dict or None): Context variables that provide a persistent context for the agent.
-                Note: Will maintain a reference to the passed in context variables (enabling a shared context)
-                Only used in Swarms at this stage:
-                https://docs.ag2.ai/docs/reference/agentchat/contrib/swarm_agent
+        """Args:
+            name (str): name of the agent.
+            system_message (str or list): system message for the ChatCompletion inference.
+            is_termination_msg (function): a function that takes a message in the form of a dictionary
+                and returns a boolean value indicating if this received message is a termination message.
+                The dict can contain the following keys: "content", "role", "name", "function_call".
+            max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
+                default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
+                When set to 0, no auto reply will be generated.
+            human_input_mode (str): whether to ask for human inputs every time a message is received.
+                Possible values are "ALWAYS", "TERMINATE", "NEVER".
+                (1) When "ALWAYS", the agent prompts for human input every time a message is received.
+                    Under this mode, the conversation stops when the human input is "exit",
+                    or when is_termination_msg is True and there is no human input.
+                (2) When "TERMINATE", the agent only prompts for human input when a termination message is received or
+                    the number of auto reply reaches the max_consecutive_auto_reply.
+                (3) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
+                    when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
+            function_map (dict[str, callable]): Mapping function names (passed to openai) to callable functions, also used for tool calls.
+            code_execution_config (dict or False): config for the code execution.
+                To disable code execution, set to False. Otherwise, set to a dictionary with the following keys:
+                - work_dir (Optional, str): The working directory for the code execution.
+                    If None, a default working directory will be used.
+                    The default working directory is the "extensions" directory under
+                    "path_to_autogen".
+                - use_docker (Optional, list, str or bool): The docker image to use for code execution.
+                    Default is True, which means the code will be executed in a docker container. A default list of images will be used.
+                    If a list or a str of image name(s) is provided, the code will be executed in a docker container
+                    with the first image successfully pulled.
+                    If False, the code will be executed in the current environment.
+                    We strongly recommend using docker for code execution.
+                - timeout (Optional, int): The maximum execution time in seconds.
+                - last_n_messages (Experimental, int or str): The number of messages to look back for code execution.
+                    If set to 'auto', it will scan backwards through all messages arriving since the agent last spoke, which is typically the last time execution was attempted. (Default: auto)
+            llm_config (dict or False or None): llm inference configuration.
+                Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
+                for available options.
+                When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `llm_config` or in each config of 'config_list' in `llm_config`.
+                To disable llm-based auto reply, set to False.
+                When set to None, will use self.DEFAULT_CONFIG, which defaults to False.
+            default_auto_reply (str or dict): default auto reply when no code execution or llm-based reply is generated.
+            description (str): a short description of the agent. This description is used by other agents
+                (e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
+            chat_messages (dict or None): the previous chat messages that this agent had in the past with other agents.
+                Can be used to give the agent a memory by providing the chat history. This will allow the agent to
+                resume previously held conversations. Defaults to an empty chat history.
+            silent (bool or None): (Experimental) whether to print the message sent. If None, will use the value of
+                silent in each function.
+            context_variables (dict or None): Context variables that provide a persistent context for the agent.
+                Note: Will maintain a reference to the passed in context variables (enabling a shared context)
+                Only used in Swarms at this stage:
+                https://docs.ag2.ai/docs/reference/agentchat/contrib/swarm_agent
         """
         # we change code_execution_config below and we have to make sure we don't change the input
         # in case of UserProxyAgent, without this we could even change the default value {}

diff --git a/autogen/coding/jupyter/import_utils.py b/autogen/coding/jupyter/import_utils.py
index fe0c835262..56997e4460 100644
--- a/autogen/coding/jupyter/import_utils.py
+++ b/autogen/coding/jupyter/import_utils.py
@@ -61,7 +61,6 @@ def skip_on_missing_jupyter_kernel_gateway() -> Callable[[T], T]:
         module: Module name
         dep_target: Target name for pip installation (e.g. 'test' in pip install ag2[test])
     """
-
     if is_jupyter_kernel_gateway_installed():
 
         def decorator(o: T) -> T:

diff --git a/autogen/exception_utils.py b/autogen/exception_utils.py
index d490339d64..e75cfaf4b4 100644
--- a/autogen/exception_utils.py
+++ b/autogen/exception_utils.py
@@ -56,9 +56,7 @@ def __init__(self, message: str = "The provided agents list does not overlap wit
 
 
 class ModelToolNotSupportedError(Exception):
-    """
-    Exception raised when attempting to use tools with models that do not support them.
-    """
+    """Exception raised when attempting to use tools with models that do not support them."""
 
     def __init__(
         self,

diff --git a/autogen/import_utils.py b/autogen/import_utils.py
index d6b7746add..a6133cf632 100644
--- a/autogen/import_utils.py
+++ b/autogen/import_utils.py
@@ -272,7 +272,6 @@ def skip_on_missing_imports(modules: Union[str, Iterable[str]], dep_target: Opti
         module: Module name
         dep_target: Target name for pip installation (e.g. 'test' in pip install ag2[test])
     """
-
     missing_modules = get_missing_imports(modules)
     if not missing_modules:

diff --git a/autogen/oai/anthropic.py b/autogen/oai/anthropic.py
index ec755c74d0..29ad705a8d 100644
--- a/autogen/oai/anthropic.py
+++ b/autogen/oai/anthropic.py
@@ -429,9 +429,7 @@ def _extract_json_response(self, response: "Message") -> Any:
             json_data = json.loads(json_str)
             return self._response_format.model_validate(json_data)
         except Exception as e:
-            raise ValueError(
-                f"Failed to parse response as valid JSON matching the schema for Structured Output: {str(e)}"
-            )
+            raise ValueError(f"Failed to parse response as valid JSON matching the schema for Structured Output: {e!s}")
 
 
 def _format_json_response(response: Any) -> str:

diff --git a/autogen/oai/client.py b/autogen/oai/client.py
index a814c1c9c8..8c7bba1817 100644
--- a/autogen/oai/client.py
+++ b/autogen/oai/client.py
@@ -524,8 +524,7 @@ def _create_or_parse(*args, **kwargs):
         return response
 
     def _process_reasoning_model_params(self, params) -> None:
-        """
-        Cater for the reasoning model (o1, o3..) parameters
+        """Cater for the reasoning model (o1, o3..) parameters
         please refer: https://platform.openai.com/docs/guides/reasoning#limitations
         """
         print(f"{params=}")

diff --git a/autogen/oai/gemini.py b/autogen/oai/gemini.py
index 9f03f0e65e..a2abc1aaa9 100644
--- a/autogen/oai/gemini.py
+++ b/autogen/oai/gemini.py
@@ -563,9 +563,7 @@ def _convert_json_response(self, response: str) -> Any:
             json_data = json.loads(response)
             return self._response_format.model_validate(json_data)
         except Exception as e:
-            raise ValueError(
-                f"Failed to parse response as valid JSON matching the schema for Structured Output: {str(e)}"
-            )
+            raise ValueError(f"Failed to parse response as valid JSON matching the schema for Structured Output: {e!s}")
 
 
 def _tools_to_gemini_tools(self, tools: list[dict[str, Any]]) -> list["Tool"]:
     """Create Gemini tools (as typically requires Callables)"""

diff --git a/autogen/oai/ollama.py b/autogen/oai/ollama.py
index b4aa3dc8bc..5149c1c4d6 100644
--- a/autogen/oai/ollama.py
+++ b/autogen/oai/ollama.py
@@ -491,9 +491,7 @@ def _convert_json_response(self, response: str) -> Any:
             # Parse JSON and validate against the Pydantic model
             return self._response_format.model_validate_json(response)
         except Exception as e:
-            raise ValueError(
-                f"Failed to parse response as valid JSON matching the schema for Structured Output: {str(e)}"
-            )
+            raise ValueError(f"Failed to parse response as valid JSON matching the schema for Structured Output: {e!s}")
 
 
 def _format_json_response(response: Any, original_answer: str) -> str:

diff --git a/autogen/tools/dependency_injection.py b/autogen/tools/dependency_injection.py
index a9aec1bcb0..0f785cff96 100644
--- a/autogen/tools/dependency_injection.py
+++ b/autogen/tools/dependency_injection.py
@@ -96,7 +96,6 @@ def get_context_params(func: Callable[..., Any], subclass: Union[type[BaseContex
     Returns:
         A list of parameter names that are instances of the specified subclass.
     """
-
    sig = inspect.signature(func)
    return [p.name for p in sig.parameters.values() if _is_context_param(p, subclass=subclass)]

diff --git a/autogen/tools/tool.py b/autogen/tools/tool.py
index 245b9ce76a..e72f675c0e 100644
--- a/autogen/tools/tool.py
+++ b/autogen/tools/tool.py
@@ -90,8 +90,7 @@ def register_for_execution(self, agent: "ConversableAgent") -> None:
         agent.register_for_execution()(self)
 
     def register_tool(self, agent: "ConversableAgent") -> None:
-        """
-        Register a tool to be both proposed and executed by an agent.
+        """Register a tool to be both proposed and executed by an agent.
 
         Equivalent to calling both `register_for_llm` and `register_for_execution` with the same agent.
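
Note on the changes above: the `str(x)` -> `{x!s}`, `Path(".")` -> `Path()`, and one-line-docstring edits are all behavior-preserving style fixes. A minimal standalone sketch of the idioms (the function and message here are hypothetical, not taken from the codebase):

    import json
    from pathlib import Path

    def parse_config(text: str) -> dict:
        """Parse a JSON config string, re-raising failures as ValueError."""
        try:
            return json.loads(text)
        except Exception as e:
            # f"...{e!s}" formats exactly like f"...{str(e)}", just shorter
            raise ValueError(f"invalid config: {e!s}")

    output_dir = Path()  # equivalent to Path("."), i.e. the current directory
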
diff --git a/notebook/agentchat_realtime_gemini_swarm_websocket.ipynb b/notebook/agentchat_realtime_gemini_swarm_websocket.ipynb
index 2659389883..84e2acce7f 100644
--- a/notebook/agentchat_realtime_gemini_swarm_websocket.ipynb
+++ b/notebook/agentchat_realtime_gemini_swarm_websocket.ipynb
@@ -258,8 +258,8 @@
     "\n",
     "\n",
     "def triage_instructions(context_variables: dict[str, str]) -> str:\n",
-    "    customer_context = context_variables.get(\"customer_context\", None)\n",
-    "    flight_context = context_variables.get(\"flight_context\", None)\n",
+    "    customer_context = context_variables.get(\"customer_context\")\n",
+    "    flight_context = context_variables.get(\"flight_context\")\n",
     "    return f\"\"\"You are to triage a users request, and call a tool to transfer to the right intent.\n",
     "    Once you are ready to transfer to the right intent, call the tool to transfer to the right intent.\n",
     "    You dont need to know specifics, just the topic of the request.\n",

diff --git a/test/agentchat/contrib/rag/test_parser_utils.py b/test/agentchat/contrib/rag/test_parser_utils.py
index 425a4a6057..2dc0aa14b6 100644
--- a/test/agentchat/contrib/rag/test_parser_utils.py
+++ b/test/agentchat/contrib/rag/test_parser_utils.py
@@ -56,8 +56,7 @@ def test_returns_iterator_of_conversion_results(self, tmp_path: Path, mock_conve
         assert isinstance(results[0], ConversionResult)
 
     def test_exports_converted_documents(self, tmp_path: Path, mock_conversion_result: MagicMock) -> None:
-        """
-        Test that the function exports converted documents to the specified output directory.
+        """Test that the function exports converted documents to the specified output directory.
 
         This test ensures that the function saves the converted documents in markdown and json formats
         to the specified output directory.
@@ -93,8 +92,7 @@ def test_exports_converted_documents(self, tmp_path: Path, mock_conversion_resul
     def test_logs_conversion_time_and_document_conversion_info(
         self, tmp_path: Path, caplog: LogCaptureFixture, mock_conversion_result: MagicMock
     ) -> None:
-        """
-        Test that the function logs conversion time and document conversion info.
+        """Test that the function logs conversion time and document conversion info.
 
         This test ensures that the function logs the conversion time and the document conversion
         information at the INFO level.
@@ -117,8 +115,7 @@
     )
     def test_handles_invalid_input_file_paths_and_output_directory_paths(self, tmp_path: Path) -> None:
-        """
-        Test that the function handles invalid input file paths and output directory paths.
+        """Test that the function handles invalid input file paths and output directory paths.
 
         This test ensures that the function raises a ValueError when the input file path is invalid
         and a FileNotFoundError when the output directory path is invalid.

diff --git a/test/conftest.py b/test/conftest.py
index 150f3b1ebe..fa38491a26 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -40,8 +40,7 @@ def remove_secret(secret: str) -> None:
 
     @staticmethod
     def sanitize_secrets(data: str, x: int = 5) -> str:
-        """
-        Censors substrings of length `x` or greater derived from any secret in the list.
+        """Censors substrings of length `x` or greater derived from any secret in the list.
 
         Args:
             data (str): The string to be censored.
@@ -67,8 +66,7 @@ def mask_match(match: re.Match[str]) -> str:
 
     @staticmethod
     def needs_sanitizing(data: str, x: int = 5) -> bool:
-        """
-        Checks if the string contains any substrings of length `x` or greater derived from any secret in the list.
+        """Checks if the string contains any substrings of length `x` or greater derived from any secret in the list.
 
         Args:
             data (str): The string to be checked.
@@ -133,8 +131,7 @@ def __init__(self, exception: BaseException):
 
 
 def pytest_runtest_makereport(item: Item, call: CallInfo[Any]) -> None:
-    """
-    Hook to customize the exception output.
+    """Hook to customize the exception output.
     This is called after each test call.
     """
     if call.excinfo is not None:  # This means the test failed

diff --git a/test/oai/test_client.py b/test/oai/test_client.py
index 5a0def3845..d9a778fff1 100755
--- a/test/oai/test_client.py
+++ b/test/oai/test_client.py
@@ -26,7 +26,7 @@
 
 with optional_import_block() as result:
     import openai
-    from openai import OpenAI  # noqa: F401
+    from openai import OpenAI
 
 if openai.__version__ >= "1.1.0":
     TOOL_ENABLED = True
@@ -441,12 +441,12 @@ def mock_oai_client(self, mock_credentials: Credentials) -> OpenAIClient:
 
     @pytest.fixture
     def o1_mini_client(self, credentials_o1_mini: Credentials) -> Generator[OpenAIWrapper, None, None]:
         config_list = credentials_o1_mini.config_list
-        yield OpenAIWrapper(config_list=config_list, cache_seed=42)
+        return OpenAIWrapper(config_list=config_list, cache_seed=42)
 
     @pytest.fixture
     def o1_client(self, credentials_o1: Credentials) -> Generator[OpenAIWrapper, None, None]:
         config_list = credentials_o1.config_list
-        yield OpenAIWrapper(config_list=config_list, cache_seed=42)
+        return OpenAIWrapper(config_list=config_list, cache_seed=42)
 
     def test_reasoning_remove_unsupported_params(self, mock_oai_client: OpenAIClient) -> None:
         """Test that unsupported parameters are removed with appropriate warnings"""

diff --git a/test/website/test_process_notebooks.py b/test/website/test_process_notebooks.py
index 5cd6342085..501e3ccce3 100644
--- a/test/website/test_process_notebooks.py
+++ b/test/website/test_process_notebooks.py
@@ -300,7 +300,6 @@ def test_add_blogs_to_navigation(self) -> None:
 class TestUpdateNavigation:
     def setup(self, temp_dir: Path) -> None:
         """Set up test files in the temporary directory."""
-
         # Create directories
         snippets_dir = temp_dir / "snippets" / "data"
         snippets_dir.mkdir(parents=True, exist_ok=True)

diff --git a/website/process_api_reference.py b/website/process_api_reference.py
index bf7919b274..35e8a90742 100755
--- a/website/process_api_reference.py
+++ b/website/process_api_reference.py
@@ -38,7 +38,7 @@ def move_files_excluding_index(api_dir: Path) -> None:
 def run_pdoc3(api_dir: Path) -> None:
     """Run pydoc3 to generate the API documentation."""
     try:
-        print(f"Generating API documentation and saving to {str(api_dir)}...")
+        print(f"Generating API documentation and saving to {api_dir!s}...")
         subprocess.run(
             ["pdoc", "--output-dir", str(api_dir), "--template-dir", "mako_templates", "--force", "autogen"],
             check=True,

diff --git a/website/process_notebooks.py b/website/process_notebooks.py
index c9cc0afe84..c52276fad7 100755
--- a/website/process_notebooks.py
+++ b/website/process_notebooks.py
@@ -646,7 +646,6 @@ def generate_nav_group(input_dir: Path, group_header: str, prefix: str) -> Dict[
         input_dir (Path): Directory to process
         group_header (str): Group header
     """
-
     sorted_dir_files = get_sorted_files(input_dir, prefix)
 
     return {"group": group_header, "pages": sorted_dir_files}

From 75f0c2c00a406a36e8104f659e775b049d86c1cc Mon Sep 17 00:00:00 2001
From: Davor Runje
Date: Sat, 25 Jan 2025 21:09:57 +0100
Subject: [PATCH 02/13] Add lint fixes

---
 .../captainagent/tools/math/modular_inverse_sum.py | 7 ++++---
 autogen/agentchat/contrib/society_of_mind_agent.py | 5 ++---
 autogen/agentchat/contrib/swarm_agent.py | 2 +-
 autogen/browser_utils.py | 5 ++---
 autogen/import_utils.py | 6 ++----
 autogen/oai/client.py | 4 ++--
 autogen/token_count_utils.py | 2 +-
 notebook/agenteval_cq_math.ipynb | 11 +++++------
 pyproject.toml | 2 +-
 test/agentchat/contrib/vectordb/test_mongodb.py | 6 ++----
 test/test_retrieve_utils.py | 5 ++---
 website/process_notebooks.py | 9 +++------
 12 files changed, 27 insertions(+), 37 deletions(-)

diff --git a/autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py b/autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py
index 605627baed..b2da73a60d 100644
--- a/autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py
+++ b/autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py
@@ -1,6 +1,9 @@
 # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
 #
 # SPDX-License-Identifier: Apache-2.0
+from contextlib import suppress
+
+
 def modular_inverse_sum(expressions, modulus):
     """Calculates the sum of modular inverses of the given expressions modulo the specified modulus.
 
@@ -15,8 +18,6 @@ def modular_inverse_sum(expressions, modulus):
     mod_sum = 0
     for number in expressions:
-        try:
+        with suppress(ValueError):
             mod_sum += mod_inverse(number, modulus)
-        except ValueError:
-            pass  # If modular inverse does not exist, skip the term
     return mod_sum % modulus

diff --git a/autogen/agentchat/contrib/society_of_mind_agent.py b/autogen/agentchat/contrib/society_of_mind_agent.py
index 0f409ceff9..aeb51ecf4a 100644
--- a/autogen/agentchat/contrib/society_of_mind_agent.py
+++ b/autogen/agentchat/contrib/society_of_mind_agent.py
@@ -7,6 +7,7 @@
 # ruff: noqa: E722
 import copy
 import traceback
+from contextlib import suppress
 from typing import Callable, Literal, Optional, Union
 
 from ... import Agent, ConversableAgent, GroupChat, GroupChatManager, OpenAIWrapper
@@ -112,12 +113,10 @@ def _llm_response_preparer(self, prompt, messages):
                 del message["tool_responses"]
             if "function_call" in message:
                 if message["content"] == "":
-                    try:
+                    with suppress(KeyError):
                         message["content"] = (
                             message["function_call"]["name"] + "(" + message["function_call"]["arguments"] + ")"
                         )
-                    except KeyError:
-                        pass
                 del message["function_call"]
 
             # Add the modified message to the transcript

diff --git a/autogen/agentchat/contrib/swarm_agent.py b/autogen/agentchat/contrib/swarm_agent.py
index 7139367bc0..7d671fc310 100644
--- a/autogen/agentchat/contrib/swarm_agent.py
+++ b/autogen/agentchat/contrib/swarm_agent.py
@@ -72,7 +72,7 @@ class ON_CONDITION:  # noqa: N801
     def __post_init__(self):
         # Ensure valid types
         if self.target is not None:
-            assert isinstance(self.target, SwarmAgent) or isinstance(self.target, dict), (
+            assert isinstance(self.target, (SwarmAgent, dict)), (
                 "'target' must be a SwarmAgent or a Dict"
             )

diff --git a/autogen/browser_utils.py b/autogen/browser_utils.py
index 6e0648e2f1..224133eb90 100644
--- a/autogen/browser_utils.py
+++ b/autogen/browser_utils.py
@@ -9,6 +9,7 @@
 import os
 import re
 import uuid
+from contextlib import suppress
 from typing import Any, Optional, Union
 from urllib.parse import urljoin, urlparse
 
@@ -278,10 +279,8 @@ def _fetch_page(self, url: str) -> None:
             elif self.downloads_folder is not None:
                 # Try producing a safe filename
                 fname = None
-                try:
+                with suppress(NameError):
                     fname = pathvalidate.sanitize_filename(os.path.basename(urlparse(url).path)).strip()
-                except NameError:
-                    pass
 
                 # No suitable name, so make one
                 if fname is None:

diff --git a/autogen/import_utils.py b/autogen/import_utils.py
index a6133cf632..83844d8255 100644
--- a/autogen/import_utils.py
+++ b/autogen/import_utils.py
@@ -5,7 +5,7 @@
 import inspect
 import sys
 from abc import ABC, abstractmethod
-from contextlib import contextmanager
+from contextlib import contextmanager, suppress
 from functools import wraps
 from logging import getLogger
 from typing import Any, Callable, Generator, Generic, Iterable, Optional, Type, TypeVar, Union
@@ -228,10 +228,8 @@ def patch(self) -> Type[Any]:
             patched = patch_object(
                 member, missing_modules=self.missing_modules, dep_target=self.dep_target, fail_if_not_patchable=False
             )
-            try:
+            with suppress(AttributeError):
                 setattr(self.o, name, patched)
-            except AttributeError:
-                pass
 
         return self.o

diff --git a/autogen/oai/client.py b/autogen/oai/client.py
index 8c7bba1817..7063ead302 100644
--- a/autogen/oai/client.py
+++ b/autogen/oai/client.py
@@ -943,7 +943,7 @@ def yes_or_no_filter(context, response):
             price = extra_kwargs.get("price", None)
             if isinstance(price, list):
                 price = tuple(price)
-            elif isinstance(price, float) or isinstance(price, int):
+            elif isinstance(price, (float, int)):
                 logger.warning(
                     "Input price is a float/int. Using the same price for prompt and completion tokens. Use a list/tuple if prompt and completion token prices are different."
                 )
@@ -1131,7 +1131,7 @@ def _update_dict_from_chunk(chunk: BaseModel, d: dict[str, Any], field: str) ->
     assert isinstance(d, dict), d
     if hasattr(chunk, field) and getattr(chunk, field) is not None:
         new_value = getattr(chunk, field)
-        if isinstance(new_value, list) or isinstance(new_value, dict):
+        if isinstance(new_value, (list, dict)):
             raise NotImplementedError(
                 f"Field {field} is a list or dict, which is currently not supported. "
                 "Only string and numbers are supported."
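
The try/except-pass and isinstance rewrites in this commit rest on two mechanical equivalences. A minimal standalone illustration, independent of the codebase:

    from contextlib import suppress

    lookup: dict[str, int] = {}

    # suppress(KeyError) behaves exactly like try: ... except KeyError: pass
    with suppress(KeyError):
        value = lookup["missing"]  # silently skipped when the key is absent

    # one isinstance call with a tuple of types replaces an `or` chain
    def is_number(x) -> bool:
        # same truth table as isinstance(x, int) or isinstance(x, float)
        return isinstance(x, (int, float))
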
diff --git a/autogen/token_count_utils.py b/autogen/token_count_utils.py
index 8ba5534d9e..55724ac20e 100644
--- a/autogen/token_count_utils.py
+++ b/autogen/token_count_utils.py
@@ -95,7 +95,7 @@ def count_token(input: Union[str, list, dict], model: str = "gpt-3.5-turbo-0613"
     """
     if isinstance(input, str):
         return _num_token_from_text(input, model=model)
-    elif isinstance(input, list) or isinstance(input, dict):
+    elif isinstance(input, (list, dict)):
         return _num_token_from_messages(input, model=model)
     else:
         raise ValueError(f"input must be str, list or dict, but we got {type(input)}")

diff --git a/notebook/agenteval_cq_math.ipynb b/notebook/agenteval_cq_math.ipynb
index b715fd1d70..869f7e7a44 100644
--- a/notebook/agenteval_cq_math.ipynb
+++ b/notebook/agenteval_cq_math.ipynb
@@ -132,6 +132,7 @@
    "source": [
     "import json\n",
     "import os\n",
+    "from contextlib import suppress\n",
     "from pathlib import Path\n",
     "\n",
     "import matplotlib.pyplot as plt\n",
@@ -2665,10 +2666,10 @@
    ],
    "source": [
     "# computing average and 95% interval for failed and successful cases on all criteria\n",
-    "try:\n",
+    "\n",
+    "\n",
+    "with suppress(Exception):\n",
     "    criteria = Criterion.parse_json_str(open(criteria_file, \"r\").read())\n",
-    "except:  # noqa: E722\n",
-    "    pass\n",
     "\n",
     "\n",
     "nl2int = {}\n",
@@ -2689,14 +2690,12 @@
     "    task = {\"s\": [], \"f\": []}\n",
     "\n",
     "    for game in outcome:\n",
-    "        try:\n",
+    "        with suppress(Exception):\n",
     "            tmp_dic = eval(outcome[game][\"estimated_performance\"])\n",
     "            if outcome[game][\"actual_success\"] == \"false\":\n",
     "                task[\"f\"].append(nl2int[tmp_dic[criterion.name]])\n",
     "            else:\n",
     "                task[\"s\"].append(nl2int[tmp_dic[criterion.name]])\n",
-    "        except:  # noqa: E722\n",
-    "            pass\n",
     "\n",
     "    average_f[criterion.name] = np.mean(task[\"f\"])\n",
     "    average_s[criterion.name] = np.mean(task[\"s\"])\n",

diff --git a/pyproject.toml b/pyproject.toml
index fafab7bb25..e8c8dfc9ea 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -309,7 +309,7 @@ select = [
 #    "B",      # flake8-bugbear https://docs.astral.sh/ruff/rules/#flake8-bugbear-b
     "Q",       # flake8-quotes https://docs.astral.sh/ruff/rules/#flake8-quotes-q
 #    "T20",    # flake8-print https://docs.astral.sh/ruff/rules/#flake8-print-t20
-#    "SIM",    # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim
+    "SIM102",  # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim
 #    "PT",     # flake8-pytest-style https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt
 #    "PTH",    # flake8-use-pathlib https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth
 #    "TCH",    # flake8-type-checking https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch

diff --git a/test/agentchat/contrib/vectordb/test_mongodb.py b/test/agentchat/contrib/vectordb/test_mongodb.py
index 9464392a19..1cebb0cbf8 100644
--- a/test/agentchat/contrib/vectordb/test_mongodb.py
+++ b/test/agentchat/contrib/vectordb/test_mongodb.py
@@ -7,6 +7,7 @@
 import logging
 import os
 import random
+from contextlib import suppress
 from time import monotonic, sleep
 
 import pytest
@@ -58,11 +59,8 @@ def _delete_search_indexes(collection: "Collection", wait=True):
         collection (pymongo.Collection): MongoDB Collection Abstraction
     """
     for index in collection.list_search_indexes():
-        try:
+        with suppress(OperationFailure):
             collection.drop_search_index(index["name"])
-        except OperationFailure:
-            # Delete already issued
-            pass
     if wait:
         _wait_for_predicate(lambda: not list(collection.list_search_indexes()), "Not all collections deleted")

diff --git a/test/test_retrieve_utils.py b/test/test_retrieve_utils.py
index 1f82538809..7b3da072b3 100755
--- a/test/test_retrieve_utils.py
+++ b/test/test_retrieve_utils.py
@@ -9,6 +9,7 @@
 """Unit test for retrieve_utils.py"""
 
 import os
+from contextlib import suppress
 
 import pytest
@@ -146,10 +147,8 @@ def create_lancedb():
         {"vector": [2.1, 1.3], "id": 5, "documents": "This is a fifth test document spark"},
         {"vector": [5.1, 8.3], "id": 6, "documents": "This is a sixth test document"},
     ]
-    try:
+    with suppress(OSError):
         db.create_table("my_table", data)
-    except OSError:
-        pass
 
 
 class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent):
     def query_vector_db(

diff --git a/website/process_notebooks.py b/website/process_notebooks.py
index c52276fad7..f6004ebfbf 100755
--- a/website/process_notebooks.py
+++ b/website/process_notebooks.py
@@ -130,8 +130,7 @@ def skip_reason_or_none_if_ok(notebook: Path) -> Union[str, None, dict[str, Any]
         return "description is not in front matter"
 
     # Make sure tags is a list of strings
-    if front_matter["tags"] is not None:
-        if not all([isinstance(tag, str) for tag in front_matter["tags"]]):
+    if front_matter["tags"] is not None and not all([isinstance(tag, str) for tag in front_matter["tags"]]):
             return "tags must be a list of strings"
 
     # Make sure description is a string
@@ -185,8 +184,7 @@ def process_notebook(src_notebook: Path, website_dir: Path, notebook_dir: Path,
     intermediate_notebook = dest_dir / relative_notebook
 
     # If the intermediate_notebook already exists, check if it is newer than the source file
-    if target_file.exists():
-        if target_file.stat().st_mtime > src_notebook.stat().st_mtime:
+    if target_file.exists() and target_file.stat().st_mtime > src_notebook.stat().st_mtime:
             return fmt_skip(src_notebook, f"target file ({target_file.name}) is newer ☑️")
 
     if dry_run:
         return colored(f"Would process {src_notebook.name}", "green")
@@ -215,8 +213,7 @@ def process_notebook(src_notebook: Path, website_dir: Path, notebook_dir: Path,
     target_file = src_notebook.with_suffix(".mdx")
 
     # If the intermediate_notebook already exists, check if it is newer than the source file
-    if target_file.exists():
-        if target_file.stat().st_mtime > src_notebook.stat().st_mtime:
+    if target_file.exists() and target_file.stat().st_mtime > src_notebook.stat().st_mtime:
            return fmt_skip(src_notebook, f"target file ({target_file.name}) is newer ☑️")
 
     if dry_run:

From 340b9c04e3608448ef1694b94c012dd4aed12d31 Mon Sep 17 00:00:00 2001
From: Davor Runje
Date: Sat, 25 Jan 2025 21:14:49 +0100
Subject: [PATCH 03/13] wip

---
 autogen/agentchat/assistant_agent.py | 5 ++--
 .../graph_rag/neo4j_graph_rag_capability.py | 5 ++--
 .../contrib/llamaindex_conversable_agent.py | 5 ++--
 .../contrib/society_of_mind_agent.py | 13 +++++----
 autogen/agentchat/contrib/swarm_agent.py | 5 ++--
 .../agentchat/contrib/vectordb/chromadb.py | 5 ++--
 autogen/agentchat/groupchat.py | 23 +++++++---------
 .../jupyter/embedded_ipython_code_executor.py | 5 ++--
 autogen/coding/utils.py | 5 ++--
 autogen/math_utils.py | 5 ++--
 autogen/oai/client_utils.py | 5 ++--
 autogen/oai/ollama.py | 27 +++++++++----------
 notebook/agentchat_transform_messages.ipynb | 5 ++--
 13 files changed, 49 insertions(+), 64 deletions(-)

diff --git a/autogen/agentchat/assistant_agent.py b/autogen/agentchat/assistant_agent.py
index dade51c76d..9aa89b955b 100644
--- a/autogen/agentchat/assistant_agent.py
+++ b/autogen/agentchat/assistant_agent.py
@@ -78,6 +78,5 @@ def __init__(
 
         # Update the provided description if None, and we are using the default system_message,
        # then use the default description.
-        if description is None:
-            if system_message == self.DEFAULT_SYSTEM_MESSAGE:
-                self.description = self.DEFAULT_DESCRIPTION
+        if description is None and system_message == self.DEFAULT_SYSTEM_MESSAGE:
+            self.description = self.DEFAULT_DESCRIPTION

diff --git a/autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py b/autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py
index 50a379b17f..1b7f8312bc 100644
--- a/autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py
+++ b/autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py
@@ -70,7 +70,6 @@ def _get_last_question(self, message: Union[dict, str]):
         """Retrieves the last message from the conversation history."""
         if isinstance(message, str):
             return message
-        if isinstance(message, dict):
-            if "content" in message:
-                return message["content"]
+        if isinstance(message, dict) and "content" in message:
+            return message["content"]
         return None

diff --git a/autogen/agentchat/contrib/llamaindex_conversable_agent.py b/autogen/agentchat/contrib/llamaindex_conversable_agent.py
index b6751e07ac..ff5eee23db 100644
--- a/autogen/agentchat/contrib/llamaindex_conversable_agent.py
+++ b/autogen/agentchat/contrib/llamaindex_conversable_agent.py
@@ -124,7 +124,6 @@ def _extract_message_and_history(
     for history_message in history:
         content = history_message.get("content", "")
         role = history_message.get("role", "user")
-        if role:
-            if role == "user" or role == "assistant":
-                history_messages.append(ChatMessage(content=content, role=role, additional_kwargs={}))
+        if role and (role == "user" or role == "assistant"):
+            history_messages.append(ChatMessage(content=content, role=role, additional_kwargs={}))
     return message, history_messages

diff --git a/autogen/agentchat/contrib/society_of_mind_agent.py b/autogen/agentchat/contrib/society_of_mind_agent.py
index aeb51ecf4a..2d33acb96c 100644
--- a/autogen/agentchat/contrib/society_of_mind_agent.py
+++ b/autogen/agentchat/contrib/society_of_mind_agent.py
@@ -111,13 +111,12 @@ def _llm_response_preparer(self, prompt, messages):
                 del message["tool_calls"]
             if "tool_responses" in message:
                 del message["tool_responses"]
-            if "function_call" in message:
-                if message["content"] == "":
-                    with suppress(KeyError):
-                        message["content"] = (
-                            message["function_call"]["name"] + "(" + message["function_call"]["arguments"] + ")"
-                        )
-                del message["function_call"]
+            if "function_call" in message and message["content"] == "":
+                with suppress(KeyError):
+                    message["content"] = (
+                        message["function_call"]["name"] + "(" + message["function_call"]["arguments"] + ")"
+                    )
+                del message["function_call"]
 
             # Add the modified message to the transcript

diff --git a/autogen/agentchat/contrib/swarm_agent.py b/autogen/agentchat/contrib/swarm_agent.py
index 7d671fc310..fd261b503c 100644
--- a/autogen/agentchat/contrib/swarm_agent.py
+++ b/autogen/agentchat/contrib/swarm_agent.py
@@ -134,9 +134,8 @@ def _prepare_swarm_agents(
 
     # Ensure all agents in hand-off after-works are in the passed in agents list
     for agent in agents:
-        if agent.after_work is not None:
-            if isinstance(agent.after_work.agent, SwarmAgent):
-                assert agent.after_work.agent in agents, "Agent in hand-off must be in the agents list"
+        if agent.after_work is not None and isinstance(agent.after_work.agent, SwarmAgent):
+            assert agent.after_work.agent in agents, "Agent in hand-off must be in the agents list"
 
     tool_execution = SwarmAgent(
         name=__TOOL_EXECUTOR_NAME__,

diff --git a/autogen/agentchat/contrib/vectordb/chromadb.py b/autogen/agentchat/contrib/vectordb/chromadb.py
index 03e32764d4..2879a8fc43 100644
--- a/autogen/agentchat/contrib/vectordb/chromadb.py
+++ b/autogen/agentchat/contrib/vectordb/chromadb.py
@@ -17,9 +17,8 @@
     import chromadb.utils.embedding_functions as ef
     from chromadb.api.models.Collection import Collection
 
-if result.is_successful:
-    if chromadb.__version__ < "0.4.15":
-        raise ImportError("Please upgrade chromadb to version 0.4.15 or later.")
+if result.is_successful and chromadb.__version__ < "0.4.15":
+    raise ImportError("Please upgrade chromadb to version 0.4.15 or later.")
 
 CHROMADB_MAX_BATCH_SIZE = os.environ.get("CHROMADB_MAX_BATCH_SIZE", 40000)

diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py
index c1b2dbb7fe..4f29f30123 100644
--- a/autogen/agentchat/groupchat.py
+++ b/autogen/agentchat/groupchat.py
@@ -1500,13 +1500,12 @@ def _valid_resume_messages(self, messages: list[dict]):
 
         # Check that all agents in the chat messages exist in the group chat
         for message in messages:
-            if message.get("name"):
-                if (
-                    not self._groupchat.agent_by_name(message["name"])
-                    and not message["name"] == self._groupchat.admin_name  # ignore group chat's name
-                    and not message["name"] == self.name  # ignore group chat manager's name
-                ):
-                    raise Exception(f"Agent name in message doesn't exist as agent in group chat: {message['name']}")
+            if message.get("name") and (
+                not self._groupchat.agent_by_name(message["name"])
+                and not message["name"] == self._groupchat.admin_name  # ignore group chat's name
+                and not message["name"] == self.name  # ignore group chat manager's name
+            ):
+                raise Exception(f"Agent name in message doesn't exist as agent in group chat: {message['name']}")
 
     def _process_resume_termination(
         self, remove_termination_string: Union[str, Callable[[str], str]], messages: list[dict]
@@ -1532,14 +1531,12 @@ def _remove_termination_string(content: str) -> str:
         else:
             _remove_termination_string = remove_termination_string
 
-        if _remove_termination_string:
-            if messages[-1].get("content"):
-                messages[-1]["content"] = _remove_termination_string(messages[-1]["content"])
+        if _remove_termination_string and messages[-1].get("content"):
+            messages[-1]["content"] = _remove_termination_string(messages[-1]["content"])
 
         # Check if the last message meets termination (if it has one)
-        if self._is_termination_msg:
-            if self._is_termination_msg(last_message):
-                logger.warning("WARNING: Last message meets termination criteria and this may terminate the chat.")
+        if self._is_termination_msg and self._is_termination_msg(last_message):
+            logger.warning("WARNING: Last message meets termination criteria and this may terminate the chat.")
 
     def messages_from_string(self, message_string: str) -> list[dict]:
         """Reads the saved state of messages in Json format for resume and returns as a messages list

diff --git a/autogen/coding/jupyter/embedded_ipython_code_executor.py b/autogen/coding/jupyter/embedded_ipython_code_executor.py
index 8ef9ddf7dc..a9154ab793 100644
--- a/autogen/coding/jupyter/embedded_ipython_code_executor.py
+++ b/autogen/coding/jupyter/embedded_ipython_code_executor.py
@@ -181,7 +181,6 @@ def _process_code(self, code: str) -> str:
         for i, line in enumerate(lines):
             # use regex to find lines that start with `! pip install` or `!pip install`.
             match = re.search(r"^! ?pip install", line)
-            if match is not None:
-                if "-qqq" not in line:
-                    lines[i] = line.replace(match.group(0), match.group(0) + " -qqq")
+            if match is not None and "-qqq" not in line:
+                lines[i] = line.replace(match.group(0), match.group(0) + " -qqq")
         return "\n".join(lines)

diff --git a/autogen/coding/utils.py b/autogen/coding/utils.py
index 122c41861f..40369f384c 100644
--- a/autogen/coding/utils.py
+++ b/autogen/coding/utils.py
@@ -51,7 +51,6 @@ def silence_pip(code: str, lang: str) -> str:
     for i, line in enumerate(lines):
         # use regex to find lines that start with pip install.
         match = re.search(regex, line)
-        if match is not None:
-            if "-qqq" not in line:
-                lines[i] = line.replace(match.group(0), match.group(0) + " -qqq")
+        if match is not None and "-qqq" not in line:
+            lines[i] = line.replace(match.group(0), match.group(0) + " -qqq")
     return "\n".join(lines)

diff --git a/autogen/math_utils.py b/autogen/math_utils.py
index 197dfd38df..4ff384eec7 100644
--- a/autogen/math_utils.py
+++ b/autogen/math_utils.py
@@ -242,9 +242,8 @@ def _strip_string(string: str) -> str:
         string = "0" + string
 
     # to consider: get rid of e.g. "k = " or "q = " at beginning
-    if len(string.split("=")) == 2:
-        if len(string.split("=")[0]) <= 2:
-            string = string.split("=")[1]
+    if len(string.split("=")) == 2 and len(string.split("=")[0]) <= 2:
+        string = string.split("=")[1]
 
     # fix sqrt3 --> sqrt{3}
     string = _fix_sqrt(string)

diff --git a/autogen/oai/client_utils.py b/autogen/oai/client_utils.py
index b9f53efab5..7273b5a15a 100644
--- a/autogen/oai/client_utils.py
+++ b/autogen/oai/client_utils.py
@@ -96,9 +96,8 @@ def validate_parameter(
 
     elif allowed_values:
         # Check if the value matches any allowed values
-        if not (allow_None and param_value is None):
-            if param_value not in allowed_values:
-                warning = f"must be one of these values [{allowed_values}]{', or can be None' if allow_None else ''}"
+        if not (allow_None and param_value is None) and param_value not in allowed_values:
+            warning = f"must be one of these values [{allowed_values}]{', or can be None' if allow_None else ''}"
 
     # If we failed any checks, warn and set to default value
     if warning:

diff --git a/autogen/oai/ollama.py b/autogen/oai/ollama.py
index 5149c1c4d6..daa704b2a5 100644
--- a/autogen/oai/ollama.py
+++ b/autogen/oai/ollama.py
@@ -424,20 +424,19 @@ def oai_messages_to_ollama_messages(self, messages: list[dict[str, Any]], tools:
             ollama_messages[0]["content"] = ollama_messages[0]["content"] + manual_instruction.rstrip()
 
         # If we are still in the function calling or evaluating process, append the steps instruction
-        if not have_tool_calls or tool_result_is_last_msg:
-            if ollama_messages[0]["role"] == "system":
-                # NOTE: we require a system message to exist for the manual steps texts
-                # Append the manual step instructions
-                content_to_append = (
-                    self._manual_tool_call_step1 if not have_tool_results else self._manual_tool_call_step2
-                )
-
-                if content_to_append != "":
-                    # Append the relevant tool call instruction to the latest user message
-                    if ollama_messages[-1]["role"] == "user":
-                        ollama_messages[-1]["content"] = ollama_messages[-1]["content"] + content_to_append
-                    else:
-                        ollama_messages.append({"role": "user", "content": content_to_append})
+        if (not have_tool_calls or tool_result_is_last_msg) and ollama_messages[0]["role"] == "system":
+            # NOTE: we require a system message to exist for the manual steps texts
+            # Append the manual step instructions
+            content_to_append = (
+                self._manual_tool_call_step1 if not have_tool_results else self._manual_tool_call_step2
+            )
+
+            if content_to_append != "":
+                # Append the relevant tool call instruction to the latest user message
+                if ollama_messages[-1]["role"] == "user":
+                    ollama_messages[-1]["content"] = ollama_messages[-1]["content"] + content_to_append
+                else:
+                    ollama_messages.append({"role": "user", "content": content_to_append})
 
         # Convert tool call and tool result messages to normal text messages for Ollama
         for i, message in enumerate(ollama_messages):

diff --git a/notebook/agentchat_transform_messages.ipynb b/notebook/agentchat_transform_messages.ipynb
index 5e0147785a..45f3989b78 100644
--- a/notebook/agentchat_transform_messages.ipynb
+++ b/notebook/agentchat_transform_messages.ipynb
@@ -486,9 +486,8 @@
     "                count += 1\n",
     "        elif isinstance(message[\"content\"], list):\n",
     "            for item in message[\"content\"]:\n",
-    "                if isinstance(item, dict) and \"text\" in item:\n",
-    "                    if \"REDACTED\" in item[\"text\"]:\n",
-    "                        count += 1\n",
+    "                if isinstance(item, dict) and \"text\" in item and \"REDACTED\" in item[\"text\"]:\n",
+    "                    count += 1\n",
     "    return count"
    ]
   },

From 40c91a11a321c9d8405c72c21a4649d31d280b4d Mon Sep 17 00:00:00 2001
From: Davor Runje
Date: Sat, 25 Jan 2025 21:29:21 +0100
Subject: [PATCH 04/13] Add lint fixes

---
 .../contrib/capabilities/teachability.py | 2 +-
 .../contrib/capabilities/transforms.py | 17 +++++++--------
 autogen/agentchat/contrib/swarm_agent.py | 4 +---
 autogen/oai/client.py | 21 +++++++++++--------
 autogen/oai/client_utils.py | 2 +-
 autogen/oai/completion.py | 5 ++---
 autogen/oai/ollama.py | 2 +-
 website/process_notebooks.py | 6 +++---
 8 files changed, 29 insertions(+), 30 deletions(-)

diff --git a/autogen/agentchat/contrib/capabilities/teachability.py b/autogen/agentchat/contrib/capabilities/teachability.py
index acb79779d8..5e7c252d30 100644
--- a/autogen/agentchat/contrib/capabilities/teachability.py
+++ b/autogen/agentchat/contrib/capabilities/teachability.py
@@ -203,7 +203,7 @@ def _retrieve_relevant_memos(self, input_text: str) -> list:
             input_text, n_results=self.max_num_retrievals, threshold=self.recall_threshold
         )
 
-        if self.verbosity >= 1:
+        if self.verbosity >= 1:  # noqa: SIM102
             # Was anything retrieved?
             if len(memo_list) == 0:
                 # No. Look at the closest memo.

diff --git a/autogen/agentchat/contrib/capabilities/transforms.py b/autogen/agentchat/contrib/capabilities/transforms.py
index 88ad4d7018..f8f01867a2 100644
--- a/autogen/agentchat/contrib/capabilities/transforms.py
+++ b/autogen/agentchat/contrib/capabilities/transforms.py
@@ -96,7 +96,7 @@ def apply_transform(self, messages: list[dict]) -> list[dict]:
         for i in range(len(messages) - 1, 0, -1):
             if remaining_count > 1:
                 truncated_messages.insert(1 if self._keep_first_message else 0, messages[i])
-            if remaining_count == 1:
+            if remaining_count == 1:  # noqa: SIM102
                 # If there's only 1 slot left and it's a 'tools' message, ignore it.
                 if messages[i].get("role") != "tool":
                     truncated_messages.insert(1, messages[i])
@@ -287,15 +287,14 @@ def _validate_max_tokens(self, max_tokens: Optional[int] = None) -> Optional[int
             print(colored(f"Model {self._model} not found in token_count_utils.", "yellow"))
             allowed_tokens = None
 
-        if max_tokens is not None and allowed_tokens is not None:
-            if max_tokens > allowed_tokens:
-                print(
-                    colored(
-                        f"Max token was set to {max_tokens}, but {self._model} can only accept {allowed_tokens} tokens. Capping it to {allowed_tokens}.",
-                        "yellow",
-                    )
+        if max_tokens is not None and allowed_tokens is not None and max_tokens > allowed_tokens:
+            print(
+                colored(
+                    f"Max token was set to {max_tokens}, but {self._model} can only accept {allowed_tokens} tokens. Capping it to {allowed_tokens}.",
+                    "yellow",
                 )
-            return allowed_tokens
+            )
+            return allowed_tokens
 
         return max_tokens if max_tokens is not None else sys.maxsize

diff --git a/autogen/agentchat/contrib/swarm_agent.py b/autogen/agentchat/contrib/swarm_agent.py
index fd261b503c..83f02b7e82 100644
--- a/autogen/agentchat/contrib/swarm_agent.py
+++ b/autogen/agentchat/contrib/swarm_agent.py
@@ -72,9 +72,7 @@ class ON_CONDITION:  # noqa: N801
     def __post_init__(self):
         # Ensure valid types
         if self.target is not None:
-            assert isinstance(self.target, (SwarmAgent, dict)), (
-                "'target' must be a SwarmAgent or a Dict"
-            )
+            assert isinstance(self.target, (SwarmAgent, dict)), "'target' must be a SwarmAgent or a Dict"
 
         # Ensure they have a condition
         assert isinstance(self.condition, str) and self.condition.strip(), "'condition' must be a non-empty string"

diff --git a/autogen/oai/client.py b/autogen/oai/client.py
index 7063ead302..8877c2cce4 100644
--- a/autogen/oai/client.py
+++ b/autogen/oai/client.py
@@ -351,15 +351,18 @@ def wrapper(*args: Any, **kwargs: Any):
             except openai.BadRequestError as e:
                 response_json = e.response.json()
                 # Check if the error message is related to the agent name. If so, raise a ValueError with a more informative message.
-                if "error" in response_json and "message" in response_json["error"]:
-                    if OpenAIClient._is_agent_name_error_message(response_json["error"]["message"]):
-                        error_message = (
-                            f"This error typically occurs when the agent name contains invalid characters, such as spaces or special symbols.\n"
-                            "Please ensure that your agent name follows the correct format and doesn't include any unsupported characters.\n"
-                            "Check the agent name and try again.\n"
-                            f"Here is the full BadRequestError from openai:\n{e.message}."
-                        )
-                        raise ValueError(error_message)
+                if (
+                    "error" in response_json
+                    and "message" in response_json["error"]
+                    and OpenAIClient._is_agent_name_error_message(response_json["error"]["message"])
+                ):
+                    error_message = (
+                        f"This error typically occurs when the agent name contains invalid characters, such as spaces or special symbols.\n"
+                        "Please ensure that your agent name follows the correct format and doesn't include any unsupported characters.\n"
+                        "Check the agent name and try again.\n"
+                        f"Here is the full BadRequestError from openai:\n{e.message}."
+                    )
+                    raise ValueError(error_message)
 
                 raise e

diff --git a/autogen/oai/client_utils.py b/autogen/oai/client_utils.py
index 7273b5a15a..fb9b389f51 100644
--- a/autogen/oai/client_utils.py
+++ b/autogen/oai/client_utils.py
@@ -94,7 +94,7 @@ def validate_parameter(
         if allow_None:
             warning += ", or can be None"
 
-    elif allowed_values:
+    elif allowed_values:  # noqa: SIM102
         # Check if the value matches any allowed values
         if not (allow_None and param_value is None) and param_value not in allowed_values:
             warning = f"must be one of these values [{allowed_values}]{', or can be None' if allow_None else ''}"

diff --git a/autogen/oai/completion.py b/autogen/oai/completion.py
index 9b55a6567f..fe0b5694fd 100644
--- a/autogen/oai/completion.py
+++ b/autogen/oai/completion.py
@@ -889,9 +889,8 @@ def _construct_params(cls, context, config, prompt=None, messages=None, allow_fo
         messages = config.get("messages") if messages is None else messages
         # either "prompt" should be in config (for being compatible with non-chat models)
         # or "messages" should be in config (for tuning chat models only)
-        if prompt is None and (model in cls.chat_models or issubclass(cls, ChatCompletion)):
-            if messages is None:
-                raise ValueError("Either prompt or messages should be in config for chat models.")
+        if prompt is None and (model in cls.chat_models or issubclass(cls, ChatCompletion)) and messages is None:
+            raise ValueError("Either prompt or messages should be in config for chat models.")
         if prompt is None:
             params["messages"] = (
                 [

diff --git a/autogen/oai/ollama.py b/autogen/oai/ollama.py
index daa704b2a5..4ab5b8216a 100644
--- a/autogen/oai/ollama.py
+++ b/autogen/oai/ollama.py
@@ -335,7 +335,7 @@ def create(self, params: dict) -> ChatCompletion:
                 # Blank the message content
                 response_content = ""
 
-        if ollama_finish == "stop":
+        if ollama_finish == "stop":  # noqa: SIM102
             # Not a tool call, so let's check if we need to process structured output
             if self._response_format and response_content:
                 try:

diff --git a/website/process_notebooks.py b/website/process_notebooks.py
index f6004ebfbf..ac2a847d1d 100755
--- a/website/process_notebooks.py
+++ b/website/process_notebooks.py
@@ -131,7 +131,7 @@ def skip_reason_or_none_if_ok(notebook: Path) -> Union[str, None, dict[str, Any]
 
     # Make sure tags is a list of strings
     if front_matter["tags"] is not None and not all([isinstance(tag, str) for tag in front_matter["tags"]]):
-            return "tags must be a list of strings"
+        return "tags must be a list of strings"
 
     # Make sure description is a string
@@ -185,7 +185,7 @@ def process_notebook(src_notebook: Path, website_dir: Path, notebook_dir: Path,
 
     # If the intermediate_notebook already exists, check if it is newer than the source file
     if target_file.exists() and target_file.stat().st_mtime > src_notebook.stat().st_mtime:
-            return fmt_skip(src_notebook, f"target file ({target_file.name}) is newer ☑️")
+        return fmt_skip(src_notebook, f"target file ({target_file.name}) is newer ☑️")
 
@@ -214,7 +214,7 @@ def process_notebook(src_notebook: Path, website_dir: Path, notebook_dir: Path,
 
     # If the intermediate_notebook already exists, check if it is newer than the source file
     if target_file.exists() and target_file.stat().st_mtime > src_notebook.stat().st_mtime:
-            return fmt_skip(src_notebook, f"target file ({target_file.name}) is newer ☑️")
+        return fmt_skip(src_notebook, f"target file ({target_file.name}) is newer ☑️")
 
     if dry_run:
return colored(f"Would process {src_notebook.name}", "green") From 3a0f9179d43ceb27d0ed3d15a47fccb3ceb3ca07 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Sat, 25 Jan 2025 21:34:04 +0100 Subject: [PATCH 05/13] Add lint fixes --- autogen/oai/ollama.py | 5 +-- ...tchat_groupchat_finite_state_machine.ipynb | 4 +- notebook/agentchat_guidance.ipynb | 19 ++++------ pyproject.toml | 2 +- test/agentchat/test_nested.py | 38 +++++++------------ 5 files changed, 25 insertions(+), 43 deletions(-) diff --git a/autogen/oai/ollama.py b/autogen/oai/ollama.py index 4ab5b8216a..a1380cfc1a 100644 --- a/autogen/oai/ollama.py +++ b/autogen/oai/ollama.py @@ -608,7 +608,4 @@ def is_valid_tool_call_item(call_item: dict) -> bool: if "name" not in call_item or not isinstance(call_item["name"], str): return False - if set(call_item.keys()) - {"name", "arguments"}: - return False - - return True + return set(call_item.keys()) - {"name", "arguments"} diff --git a/notebook/agentchat_groupchat_finite_state_machine.ipynb b/notebook/agentchat_groupchat_finite_state_machine.ipynb index 1c70fd01a5..5bfe90c811 100644 --- a/notebook/agentchat_groupchat_finite_state_machine.ipynb +++ b/notebook/agentchat_groupchat_finite_state_machine.ipynb @@ -391,9 +391,7 @@ "\n", "def is_termination_msg(content) -> bool:\n", " have_content = content.get(\"content\", None) is not None\n", - " if have_content and \"TERMINATE\" in content[\"content\"]:\n", - " return True\n", - " return False\n", + " return have_content and \"TERMINATE\" in content[\"content\"]\n", "\n", "\n", "# Terminates the conversation when TERMINATE is detected.\n", diff --git a/notebook/agentchat_guidance.ipynb b/notebook/agentchat_guidance.ipynb index 930f903fa8..10b5581194 100644 --- a/notebook/agentchat_guidance.ipynb +++ b/notebook/agentchat_guidance.ipynb @@ -122,13 +122,10 @@ } ], "source": [ - "def is_valid_code_block(code):\n", + "def is_valid_code_block(code) -> bool:\n", " pattern = r\"```[\\w\\s]*\\n([\\s\\S]*?)\\n```\"\n", " match = re.search(pattern, code)\n", - " if match:\n", - " return True\n", - " else:\n", - " return False\n", + " return match\n", "\n", "\n", "def generate_structured_response(recipient, messages, sender, config):\n", @@ -304,12 +301,12 @@ ], "metadata": { "front_matter": { - "description": "Constrained responses via guidance.", - "tags": [ - "guidance", - "integration", - "JSON" - ] + "description": "Constrained responses via guidance.", + "tags": [ + "guidance", + "integration", + "JSON" + ] }, "kernelspec": { "display_name": "Python 3", diff --git a/pyproject.toml b/pyproject.toml index e8c8dfc9ea..57f24be91b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -309,7 +309,7 @@ select = [ # "B", # flake8-bugbear https://docs.astral.sh/ruff/rules/#flake8-bugbear-b "Q", # flake8-quotes https://docs.astral.sh/ruff/rules/#flake8-quotes-q # "T20", # flake8-print https://docs.astral.sh/ruff/rules/#flake8-print-t20 - "SIM102", # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim + "SIM103", # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim # "PT", # flake8-pytest-style https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt # "PTH", # flake8-use-pathlib https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth # "TCH", # flake8-type-checking https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch diff --git a/test/agentchat/test_nested.py b/test/agentchat/test_nested.py index 5e08b747d0..7455d7cb6d 100755 --- a/test/agentchat/test_nested.py +++ b/test/agentchat/test_nested.py @@ -157,11 
From 3a0f9179d43ceb27d0ed3d15a47fccb3ceb3ca07 Mon Sep 17 00:00:00 2001
From: Davor Runje
Date: Sat, 25 Jan 2025 21:34:04 +0100
Subject: [PATCH 05/13] Add lint fixes

---
 autogen/oai/ollama.py                          |  5 +--
 ...tchat_groupchat_finite_state_machine.ipynb  |  4 +-
 notebook/agentchat_guidance.ipynb              | 19 ++++------
 pyproject.toml                                 |  2 +-
 test/agentchat/test_nested.py                  | 38 +++++++-------
 5 files changed, 25 insertions(+), 43 deletions(-)

diff --git a/autogen/oai/ollama.py b/autogen/oai/ollama.py
index 4ab5b8216a..a1380cfc1a 100644
--- a/autogen/oai/ollama.py
+++ b/autogen/oai/ollama.py
@@ -608,7 +608,4 @@ def is_valid_tool_call_item(call_item: dict) -> bool:
     if "name" not in call_item or not isinstance(call_item["name"], str):
         return False
 
-    if set(call_item.keys()) - {"name", "arguments"}:
-        return False
-
-    return True
+    return not (set(call_item.keys()) - {"name", "arguments"})

diff --git a/notebook/agentchat_groupchat_finite_state_machine.ipynb b/notebook/agentchat_groupchat_finite_state_machine.ipynb
index 1c70fd01a5..5bfe90c811 100644
--- a/notebook/agentchat_groupchat_finite_state_machine.ipynb
+++ b/notebook/agentchat_groupchat_finite_state_machine.ipynb
@@ -391,9 +391,7 @@
    "\n",
    "def is_termination_msg(content) -> bool:\n",
    "    have_content = content.get(\"content\", None) is not None\n",
-   "    if have_content and \"TERMINATE\" in content[\"content\"]:\n",
-   "        return True\n",
-   "    return False\n",
+   "    return have_content and \"TERMINATE\" in content[\"content\"]\n",
    "\n",
    "\n",
    "# Terminates the conversation when TERMINATE is detected.\n",

diff --git a/notebook/agentchat_guidance.ipynb b/notebook/agentchat_guidance.ipynb
index 930f903fa8..10b5581194 100644
--- a/notebook/agentchat_guidance.ipynb
+++ b/notebook/agentchat_guidance.ipynb
@@ -122,13 +122,10 @@
    }
   ],
   "source": [
-   "def is_valid_code_block(code):\n",
+   "def is_valid_code_block(code) -> bool:\n",
    "    pattern = r\"```[\\w\\s]*\\n([\\s\\S]*?)\\n```\"\n",
    "    match = re.search(pattern, code)\n",
-   "    if match:\n",
-   "        return True\n",
-   "    else:\n",
-   "        return False\n",
+   "    return match is not None\n",
    "\n",
    "\n",
    "def generate_structured_response(recipient, messages, sender, config):\n",
@@ -304,12 +301,12 @@
  ],
  "metadata": {
   "front_matter": {
-     "description": "Constrained responses via guidance.",
-     "tags": [
-        "guidance",
-        "integration",
-        "JSON"
-     ]
+   "description": "Constrained responses via guidance.",
+   "tags": [
+    "guidance",
+    "integration",
+    "JSON"
+   ]
   },
   "kernelspec": {
    "display_name": "Python 3",

diff --git a/pyproject.toml b/pyproject.toml
index e8c8dfc9ea..57f24be91b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -309,7 +309,7 @@ select = [
 #    "B",       # flake8-bugbear https://docs.astral.sh/ruff/rules/#flake8-bugbear-b
     "Q",       # flake8-quotes https://docs.astral.sh/ruff/rules/#flake8-quotes-q
 #    "T20",     # flake8-print https://docs.astral.sh/ruff/rules/#flake8-print-t20
-    "SIM102",  # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim
+    "SIM103",  # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim
 #    "PT",      # flake8-pytest-style https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt
 #    "PTH",     # flake8-use-pathlib https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth
 #    "TCH",     # flake8-type-checking https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch

diff --git a/test/agentchat/test_nested.py b/test/agentchat/test_nested.py
index 5e08b747d0..7455d7cb6d 100755
--- a/test/agentchat/test_nested.py
+++ b/test/agentchat/test_nested.py
@@ -157,11 +157,9 @@ def writing_message(recipient, messages, sender, config):
 def test_sync_nested_chat():
     def is_termination(msg):
-        if (isinstance(msg, str) and msg == "FINAL_RESULT") or (
+        return (isinstance(msg, str) and msg == "FINAL_RESULT") or (
             isinstance(msg, dict) and msg.get("content") == "FINAL_RESULT"
-        ):
-            return True
-        return False
+        )
 
     inner_assistant = autogen.AssistantAgent(
         "Inner-assistant",
@@ -195,12 +193,10 @@ def is_termination(msg):
 
 @pytest.mark.asyncio
 async def test_async_nested_chat():
-    def is_termination(msg):
-        if (isinstance(msg, str) and msg == "FINAL_RESULT") or (
+    def is_termination(msg) -> bool:
+        return (isinstance(msg, str) and msg == "FINAL_RESULT") or (
             isinstance(msg, dict) and msg.get("content") == "FINAL_RESULT"
-        ):
-            return True
-        return False
+        )
 
     inner_assistant = autogen.AssistantAgent(
         "Inner-assistant",
@@ -236,12 +232,10 @@ def is_termination(msg):
 
 @pytest.mark.asyncio
 async def test_async_nested_chat_chat_id_validation():
-    def is_termination(msg):
-        if (isinstance(msg, str) and msg == "FINAL_RESULT") or (
+    def is_termination(msg) -> bool:
+        return (isinstance(msg, str) and msg == "FINAL_RESULT") or (
             isinstance(msg, dict) and msg.get("content") == "FINAL_RESULT"
-        ):
-            return True
-        return False
+        )
 
     inner_assistant = autogen.AssistantAgent(
         "Inner-assistant",
@@ -273,12 +267,10 @@ def is_termination(msg):
 
 def test_sync_nested_chat_in_group():
-    def is_termination(msg):
-        if (isinstance(msg, str) and msg == "FINAL_RESULT") or (
+    def is_termination(msg) -> bool:
+        return (isinstance(msg, str) and msg == "FINAL_RESULT") or (
             isinstance(msg, dict) and msg.get("content") == "FINAL_RESULT"
-        ):
-            return True
-        return False
+        )
 
     inner_assistant = autogen.AssistantAgent(
         "Inner-assistant",
@@ -320,12 +312,10 @@ def is_termination(msg):
 
 @pytest.mark.asyncio
 async def test_async_nested_chat_in_group():
-    def is_termination(msg):
-        if (isinstance(msg, str) and msg == "FINAL_RESULT") or (
+    def is_termination(msg) -> bool:
+        return (isinstance(msg, str) and msg == "FINAL_RESULT") or (
             isinstance(msg, dict) and msg.get("content") == "FINAL_RESULT"
-        ):
-            return True
-        return False
+        )
 
     inner_assistant = autogen.AssistantAgent(
         "Inner-assistant",
From 8f24b9cc1e9eaa7292b1a51ae8c89247d5683ab2 Mon Sep 17 00:00:00 2001
From: Davor Runje
Date: Sat, 25 Jan 2025 21:58:43 +0100
Subject: [PATCH 06/13] Add lint fixes

---
 autogen/agentchat/contrib/gpt_assistant_agent.py   | 10 ++--------
 autogen/agentchat/contrib/vectordb/chromadb.py     |  5 +----
 autogen/agentchat/contrib/vectordb/pgvectordb.py   |  5 +----
 autogen/agentchat/conversable_agent.py             |  5 +----
 autogen/coding/local_commandline_code_executor.py  |  5 +----
 autogen/interop/pydantic_ai/pydantic_ai.py         |  9 +++++----
 autogen/logger/file_logger.py                      |  6 +-----
 autogen/logger/sqlite_logger.py                    |  6 +-----
 autogen/math_utils.py                              |  5 +----
 autogen/oai/bedrock.py                             |  6 +-----
 autogen/oai/cohere.py                              |  5 +----
 autogen/oai/gemini.py                              |  5 +----
 autogen/oai/openai_utils.py                        |  5 +----
 autogen/retrieve_utils.py                          |  5 +----
 autogen/runtime_logging.py                         |  5 +----
 notebook/agentchat_nestedchat_optiguide.ipynb      |  7 +++----
 pyproject.toml                                     |  2 +-
 test/agentchat/test_groupchat.py                   |  6 ++----
 test/coding/test_embedded_ipython_code_executor.py |  6 ++----
 test/test_code_utils.py                            |  5 +----
 20 files changed, 29 insertions(+), 84 deletions(-)

diff --git a/autogen/agentchat/contrib/gpt_assistant_agent.py b/autogen/agentchat/contrib/gpt_assistant_agent.py
index fcd6e55e10..180439ff08 100644
--- a/autogen/agentchat/contrib/gpt_assistant_agent.py
+++ b/autogen/agentchat/contrib/gpt_assistant_agent.py
@@ -510,15 +510,9 @@ def _process_assistant_config(self, llm_config, assistant_config):
         if llm_config is False:
             raise ValueError("llm_config=False is not supported for GPTAssistantAgent.")
 
-        if llm_config is None:
-            openai_client_cfg = {}
-        else:
-            openai_client_cfg = copy.deepcopy(llm_config)
+        openai_client_cfg = {} if llm_config is None else copy.deepcopy(llm_config)
 
-        if assistant_config is None:
-            openai_assistant_cfg = {}
-        else:
-            openai_assistant_cfg = copy.deepcopy(assistant_config)
+        openai_assistant_cfg = {} if assistant_config is None else copy.deepcopy(assistant_config)
 
         # Move the assistant related configurations to assistant_config
         # It's important to keep forward compatibility

diff --git a/autogen/agentchat/contrib/vectordb/chromadb.py b/autogen/agentchat/contrib/vectordb/chromadb.py
index 2879a8fc43..bca7a3ca7d 100644
--- a/autogen/agentchat/contrib/vectordb/chromadb.py
+++ b/autogen/agentchat/contrib/vectordb/chromadb.py
@@ -192,10 +192,7 @@ def insert_docs(self, docs: list[Document], collection_name: str = None, upsert:
             embeddings = None
         else:
             embeddings = [doc.get("embedding") for doc in docs]
-        if docs[0].get("metadata") is None:
-            metadatas = None
-        else:
-            metadatas = [doc.get("metadata") for doc in docs]
+        metadatas = None if docs[0].get("metadata") is None else [doc.get("metadata") for doc in docs]
         self._batch_insert(collection, embeddings, ids, metadatas, documents, upsert)
 
     def update_docs(self, docs: list[Document], collection_name: str = None) -> None:

diff --git a/autogen/agentchat/contrib/vectordb/pgvectordb.py b/autogen/agentchat/contrib/vectordb/pgvectordb.py
index fdcd1e918f..720f600201 100644
--- a/autogen/agentchat/contrib/vectordb/pgvectordb.py
+++ b/autogen/agentchat/contrib/vectordb/pgvectordb.py
@@ -833,10 +833,7 @@ def insert_docs(self, docs: list[Document], collection_name: str = None, upsert:
             embeddings = None
         else:
             embeddings = [doc.get("embedding") for doc in docs]
-        if docs[0].get("metadata") is None:
-            metadatas = None
-        else:
-            metadatas = [doc.get("metadata") for doc in docs]
+        metadatas = None if docs[0].get("metadata") is None else [doc.get("metadata") for doc in docs]
 
         self._batch_insert(collection, embeddings, ids, metadatas, documents, upsert)

diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py
index 0a6c33adae..dd2af91af1 100644
--- a/autogen/agentchat/conversable_agent.py
+++ b/autogen/agentchat/conversable_agent.py
@@ -2242,10 +2242,7 @@ def execute_code_blocks(self, code_blocks):
             if lang in ["bash", "shell", "sh"]:
                 exitcode, logs, image = self.run_code(code, lang=lang, **self._code_execution_config)
             elif lang in PYTHON_VARIANTS:
-                if code.startswith("# filename: "):
-                    filename = code[11 : code.find("\n")].strip()
-                else:
-                    filename = None
+                filename = code[11 : code.find("\n")].strip() if code.startswith("# filename: ") else None
                 exitcode, logs, image = self.run_code(
                     code,
                     lang="python",

diff --git a/autogen/coding/local_commandline_code_executor.py b/autogen/coding/local_commandline_code_executor.py
index f2a288fdde..8786d94f1c 100644
--- a/autogen/coding/local_commandline_code_executor.py
+++ b/autogen/coding/local_commandline_code_executor.py
@@ -217,10 +217,7 @@ def _setup_functions(self) -> None:
         required_packages = list(set(flattened_packages))
         if len(required_packages) > 0:
             logging.info("Ensuring packages are installed in executor.")
-            if self._virtual_env_context:
-                py_executable = self._virtual_env_context.env_exe
-            else:
-                py_executable = sys.executable
+            py_executable = self._virtual_env_context.env_exe if self._virtual_env_context else sys.executable
             cmd = [py_executable, "-m", "pip", "install"] + required_packages
             try:
                 result = subprocess.run(

diff --git a/autogen/interop/pydantic_ai/pydantic_ai.py b/autogen/interop/pydantic_ai/pydantic_ai.py
index c1591a3be1..25324a5bac 100644
--- a/autogen/interop/pydantic_ai/pydantic_ai.py
+++ b/autogen/interop/pydantic_ai/pydantic_ai.py
@@ -123,8 +123,8 @@ def convert_tool(cls, tool: Any, deps: Any = None, **kwargs: Any) -> AG2Pydantic
                 UserWarning,
             )
 
-        if tool.takes_ctx:
-            ctx = RunContext(
+        ctx = (
+            RunContext(
                 deps=deps,
                 retry=0,
                 # All messages send to or returned by a model.
@@ -132,8 +132,9 @@ def convert_tool(cls, tool: Any, deps: Any = None, **kwargs: Any) -> AG2Pydantic
                 messages=[],  # TODO: check in the future if this is needed on Tool level
                 tool_name=pydantic_ai_tool.name,
             )
-        else:
-            ctx = None
+            if tool.takes_ctx
+            else None
+        )
 
         func = PydanticAIInteroperability.inject_params(
             ctx=ctx,

diff --git a/autogen/logger/file_logger.py b/autogen/logger/file_logger.py
index f30a7a5202..fa7fb9cb1f 100644
--- a/autogen/logger/file_logger.py
+++ b/autogen/logger/file_logger.py
@@ -92,11 +92,7 @@ def log_chat_completion(
     ) -> None:
         """Log a chat completion."""
         thread_id = threading.get_ident()
-        source_name = None
-        if isinstance(source, str):
-            source_name = source
-        else:
-            source_name = source.name
+        source_name = source if isinstance(source, str) else source.name
         try:
             log_data = json.dumps(
                 {

diff --git a/autogen/logger/sqlite_logger.py b/autogen/logger/sqlite_logger.py
index 2771f2f3c8..2e6fa06944 100644
--- a/autogen/logger/sqlite_logger.py
+++ b/autogen/logger/sqlite_logger.py
@@ -273,11 +273,7 @@ def log_chat_completion(
         else:
             response_messages = json.dumps(to_dict(response), indent=4)
 
-        source_name = None
-        if isinstance(source, str):
-            source_name = source
-        else:
-            source_name = source.name
+        source_name = source if isinstance(source, str) else source.name
 
         query = """
             INSERT INTO chat_completions (

diff --git a/autogen/math_utils.py b/autogen/math_utils.py
index 4ff384eec7..d2440ccc0c 100644
--- a/autogen/math_utils.py
+++ b/autogen/math_utils.py
@@ -75,10 +75,7 @@ def last_boxed_only_string(string: str) -> Optional[str]:
             break
         i += 1
 
-    if right_brace_idx is None:
-        retval = None
-    else:
-        retval = string[idx : right_brace_idx + 1]
+    retval = None if right_brace_idx is None else string[idx : right_brace_idx + 1]
 
     return retval

diff --git a/autogen/oai/bedrock.py b/autogen/oai/bedrock.py
index d4d73f6090..8c953732bf 100644
--- a/autogen/oai/bedrock.py
+++ b/autogen/oai/bedrock.py
@@ -232,11 +232,7 @@ def create(self, params) -> ChatCompletion:
         finish_reason = convert_stop_reason_to_finish_reason(response["stopReason"])
         response_message = response["output"]["message"]
 
-        if finish_reason == "tool_calls":
-            tool_calls = format_tool_calls(response_message["content"])
-            # text = ""
-        else:
-            tool_calls = None
+        tool_calls = format_tool_calls(response_message["content"]) if finish_reason == "tool_calls" else None
 
         text = ""
         for content in response_message["content"]:

diff --git a/autogen/oai/cohere.py b/autogen/oai/cohere.py
index 34cebd0637..c730806cfd 100644
--- a/autogen/oai/cohere.py
+++ b/autogen/oai/cohere.py
@@ -354,10 +354,7 @@ def oai_messages_to_cohere_messages(
     for index, message in enumerate(messages):
         if "role" in message and message["role"] == "system":
             # System message
-            if preamble == "":
-                preamble = message["content"]
-            else:
-                preamble = preamble + "\n" + message["content"]
+            preamble = (preamble if preamble == "" else f"{preamble}\n") + message["content"]
         elif "tool_calls" in message:
             # Suggested tool calls, build up the list before we put it into the tool_results
             for tool_call in message["tool_calls"]:

diff --git a/autogen/oai/gemini.py b/autogen/oai/gemini.py
index a2abc1aaa9..278fab8073 100644
--- a/autogen/oai/gemini.py
+++ b/autogen/oai/gemini.py
@@ -207,10 +207,7 @@ def create(self, params: dict) -> ChatCompletion:
         n_response = params.get("n", 1)
         system_instruction = params.get("system_instruction")
         response_validation = params.get("response_validation", True)
-        if "tools" in params:
-            tools = self._tools_to_gemini_tools(params["tools"])
-        else:
-            tools = None
+        tools = self._tools_to_gemini_tools(params["tools"]) if "tools" in params else None
 
         generation_config = {
             gemini_term: params[autogen_term]

diff --git a/autogen/oai/openai_utils.py b/autogen/oai/openai_utils.py
index b99de99455..3a10091eb2 100644
--- a/autogen/oai/openai_utils.py
+++ b/autogen/oai/openai_utils.py
@@ -541,10 +541,7 @@ def config_list_from_json(
     else:
         # The environment variable does not exist.
         # So, `env_or_file` is a filename. We should use the file location.
-        if file_location is not None:
-            config_list_path = os.path.join(file_location, env_or_file)
-        else:
-            config_list_path = env_or_file
+        config_list_path = os.path.join(file_location, env_or_file) if file_location is not None else env_or_file
 
     with open(config_list_path) as json_file:
         config_list = json.load(json_file)

diff --git a/autogen/retrieve_utils.py b/autogen/retrieve_utils.py
index 65c79e186e..ccd643dd8c 100644
--- a/autogen/retrieve_utils.py
+++ b/autogen/retrieve_utils.py
@@ -98,10 +98,7 @@ def split_text_to_chunks(
     lines_tokens = [count_token(line) for line in lines]
     sum_tokens = sum(lines_tokens)
     while sum_tokens > max_tokens:
-        if chunk_mode == "one_line":
-            estimated_line_cut = 2
-        else:
-            estimated_line_cut = max(int(max_tokens / sum_tokens * len(lines)), 2)
+        estimated_line_cut = 2 if sum_tokens > max_tokens else max(int(max_tokens / sum_tokens * len(lines)), 2)
         cnt = 0
         prev = ""
         for cnt in reversed(range(estimated_line_cut)):

diff --git a/autogen/runtime_logging.py b/autogen/runtime_logging.py
index f87e33526a..f87abedf5c 100644
--- a/autogen/runtime_logging.py
+++ b/autogen/runtime_logging.py
@@ -54,10 +54,7 @@ def start(
     global autogen_logger
     global is_logging
 
-    if logger:
-        autogen_logger = logger
-    else:
-        autogen_logger = LoggerFactory.get_logger(logger_type=logger_type, config=config)
+    autogen_logger = logger or LoggerFactory.get_logger(logger_type=logger_type, config=config)
 
    try:
        session_id = autogen_logger.start()

diff --git a/notebook/agentchat_nestedchat_optiguide.ipynb b/notebook/agentchat_nestedchat_optiguide.ipynb
index 6ee5656d54..e6793a425d 100644
--- a/notebook/agentchat_nestedchat_optiguide.ipynb
+++ b/notebook/agentchat_nestedchat_optiguide.ipynb
@@ -441,10 +441,9 @@
    "    sender_history = recipient.chat_messages[sender]\n",
    "    user_chat_history = f\"\\nHere are the history of discussions:\\n{sender_history}\"\n",
    "\n",
-   "    if sender.name == \"user\":\n",
-   "        execution_result = msg_content  # TODO: get the execution result of the original source code\n",
-   "    else:\n",
-   "        execution_result = \"\"\n",
+   "    # TODO: get the execution result of the original source code\n",
+   "    execution_result = msg_content if sender.name == \"user\" else \"\"\n",
+   "\n",
    "    writer_sys_msg = (\n",
    "        WRITER_SYSTEM_MSG.format(\n",
    "            source_code=recipient.source_code,\n",

diff --git a/pyproject.toml b/pyproject.toml
index 57f24be91b..0a9694e7bf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -309,7 +309,7 @@ select = [
 #    "B",       # flake8-bugbear https://docs.astral.sh/ruff/rules/#flake8-bugbear-b
     "Q",       # flake8-quotes https://docs.astral.sh/ruff/rules/#flake8-quotes-q
 #    "T20",     # flake8-print https://docs.astral.sh/ruff/rules/#flake8-print-t20
-    "SIM103",  # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim
+    "SIM10",   # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim
 #    "PT",      # flake8-pytest-style https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt
 #    "PTH",     # flake8-use-pathlib https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth
 #    "TCH",     # flake8-type-checking https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch

diff --git a/test/agentchat/test_groupchat.py b/test/agentchat/test_groupchat.py
index 23850bcd4d..49a51243c1 100755
--- a/test/agentchat/test_groupchat.py
+++ b/test/agentchat/test_groupchat.py
@@ -1911,10 +1911,8 @@ def test_manager_resume_functions():
 
     # Tests termination message replacement with function
     def termination_func(x: str) -> str:
-        if "APPROVED" in x:
-            x = x.replace("APPROVED", "")
-        else:
-            x = x.replace("TERMINATE", "")
+        old = "APPROVED" if "APPROVED" in x else "TERMINATE"
+        x = x.replace(old, "")
         return x
 
     final_msg1 = "Product_Manager has created 3 new product ideas. APPROVED"

diff --git a/test/coding/test_embedded_ipython_code_executor.py b/test/coding/test_embedded_ipython_code_executor.py
index 08602883a3..b0f151c805 100644
--- a/test/coding/test_embedded_ipython_code_executor.py
+++ b/test/coding/test_embedded_ipython_code_executor.py
@@ -57,10 +57,8 @@ def __init__(self, **kwargs):
 else:
     classes_to_test = [EmbeddedIPythonCodeExecutor, LocalJupyterCodeExecutor]
 
-if not is_docker_running() or not decide_use_docker(use_docker=None):
-    skip_docker_test = True
-else:
-    skip_docker_test = False
+skip_docker_test = not (is_docker_running() and decide_use_docker(use_docker=None))
+
 if not skip_docker_test:
     classes_to_test.append(DockerJupyterExecutor)

diff --git a/test/test_code_utils.py b/test/test_code_utils.py
index 15b70fb9f2..bc18cc40db 100755
--- a/test/test_code_utils.py
+++ b/test/test_code_utils.py
@@ -36,10 +36,7 @@
 here = os.path.abspath(os.path.dirname(__file__))
 
-if not is_docker_running() or not decide_use_docker(use_docker=None):
-    skip_docker_test = True
-else:
-    skip_docker_test = False
+skip_docker_test = not (is_docker_running() and decide_use_docker(use_docker=None))
 
 
 def test_infer_lang():
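Note: most hunks in this commit apply ruff's SIM108 rule, which replaces a four-line `if`/`else` assignment with a conditional expression. A minimal sketch of the pattern, using a hypothetical config lookup rather than code from this repository:

    config = {"workdir": None}

    # Form flagged by SIM108: four lines to assign one variable.
    if config.get("workdir") is None:
        workdir = "."
    else:
        workdir = config["workdir"]

    # Equivalent ternary that satisfies the rule.
    workdir = "." if config.get("workdir") is None else config["workdir"]
    assert workdir == "."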
From c3e57a89a15434934ad57b6320108b6f1d0c09e9 Mon Sep 17 00:00:00 2001
From: Davor Runje
Date: Sat, 25 Jan 2025 22:08:41 +0100
Subject: [PATCH 07/13] Add lint fixes

---
 autogen/agentchat/contrib/agent_optimizer.py        |  6 +++---
 .../agentchat/contrib/captainagent/agent_builder.py |  2 +-
 .../agentchat/contrib/captainagent/captainagent.py  |  2 +-
 autogen/agentchat/contrib/vectordb/chromadb.py      |  2 +-
 autogen/agentchat/contrib/vectordb/mongodb.py       |  2 +-
 autogen/agentchat/conversable_agent.py              | 10 +++++-----
 autogen/agentchat/groupchat.py                      |  2 +-
 autogen/agentchat/realtime_agent/realtime_agent.py  |  2 +-
 autogen/cache/cache.py                              |  2 +-
 autogen/oai/completion.py                           |  4 ++--
 notebook/agentchat_MathChat.ipynb                   | 10 +++++-----
 notebook/agentchat_captainagent.ipynb               |  2 +-
 notebook/agentchat_human_feedback.ipynb             |  2 +-
 pyproject.toml                                      |  2 +-
 scripts/build-setup-files.py                        |  2 +-
 test/agentchat/contrib/rag/test_parser_utils.py     | 10 ++++------
 test/agentchat/contrib/test_gpt_assistant.py        |  4 ++--
 test/agentchat/test_dependancy_injection.py         |  2 +-
 test/io/test_websockets.py                          |  2 +-
 test/oai/test_gemini.py                             |  2 +-
 test/oai/test_utils.py                              | 12 ++++++------
 21 files changed, 41 insertions(+), 43 deletions(-)

diff --git a/autogen/agentchat/contrib/agent_optimizer.py b/autogen/agentchat/contrib/agent_optimizer.py
index d297f075a6..7950125902 100644
--- a/autogen/agentchat/contrib/agent_optimizer.py
+++ b/autogen/agentchat/contrib/agent_optimizer.py
@@ -284,8 +284,8 @@ def step(self):
             incumbent_functions = self._update_function_call(incumbent_functions, actions)
 
         remove_functions = list(
-            {key for dictionary in self._trial_functions for key in dictionary.keys()}
-            - {key for dictionary in incumbent_functions for key in dictionary.keys()}
+            {key for dictionary in self._trial_functions for key in dictionary}
+            - {key for dictionary in incumbent_functions for key in dictionary}
         )
 
         register_for_llm = []
@@ -408,7 +408,7 @@ def _validate_actions(self, actions, incumbent_functions):
             function_args = action.function.arguments
             try:
                 function_args = json.loads(function_args.strip('"'))
-                if "arguments" in function_args.keys():
+                if "arguments" in function_args:
                     json.loads(function_args.get("arguments").strip('"'))
             except Exception as e:
                 print("JSON is invalid:", e)

diff --git a/autogen/agentchat/contrib/captainagent/agent_builder.py b/autogen/agentchat/contrib/captainagent/agent_builder.py
index 26ac247462..e94038e409 100644
--- a/autogen/agentchat/contrib/captainagent/agent_builder.py
+++ b/autogen/agentchat/contrib/captainagent/agent_builder.py
@@ -355,7 +355,7 @@ def clear_agent(self, agent_name: str, recycle_endpoint: Optional[bool] = True):
 
     def clear_all_agents(self, recycle_endpoint: Optional[bool] = True):
         """Clear all cached agents."""
-        for agent_name in [agent_name for agent_name in self.agent_procs_assign.keys()]:
+        for agent_name in [agent_name for agent_name in self.agent_procs_assign]:
             self.clear_agent(agent_name, recycle_endpoint)
         print(colored("All agents have been cleared.", "yellow"), flush=True)

diff --git a/autogen/agentchat/contrib/captainagent/captainagent.py b/autogen/agentchat/contrib/captainagent/captainagent.py
index f496333f6b..39eb10ca32 100644
--- a/autogen/agentchat/contrib/captainagent/captainagent.py
+++ b/autogen/agentchat/contrib/captainagent/captainagent.py
@@ -374,7 +374,7 @@ def _run_autobuild(self, group_name: str, execution_task: str, building_task: st
         builder = AgentBuilder(**self._nested_config["autobuild_init_config"])
 
         # if the group is already built, load from history
-        if group_name in self.build_history.keys():
+        if group_name in self.build_history:
             agent_list, agent_configs = builder.load(config_json=json.dumps(self.build_history[group_name]))
             if self._nested_config.get("autobuild_tool_config", None) and agent_configs["coding"] is True:
                 # tool library is enabled, reload tools and bind them to the agents

diff --git a/autogen/agentchat/contrib/vectordb/chromadb.py b/autogen/agentchat/contrib/vectordb/chromadb.py
index bca7a3ca7d..4aceca6a4e 100644
--- a/autogen/agentchat/contrib/vectordb/chromadb.py
+++ b/autogen/agentchat/contrib/vectordb/chromadb.py
@@ -287,7 +287,7 @@ def _chroma_get_results_to_list_documents(data_dict) -> list[Document]:
 
     for i in range(len(data_dict[keys[0]])):
         sub_dict = {}
-        for key in data_dict.keys():
+        for key in data_dict:
             if data_dict[key] is not None and len(data_dict[key]) > i:
                 sub_dict[key[:-1]] = data_dict[key][i]
         results.append(sub_dict)

diff --git a/autogen/agentchat/contrib/vectordb/mongodb.py b/autogen/agentchat/contrib/vectordb/mongodb.py
index 9e1d1ec3fc..7fb9de7f2f 100644
--- a/autogen/agentchat/contrib/vectordb/mongodb.py
+++ b/autogen/agentchat/contrib/vectordb/mongodb.py
@@ -321,7 +321,7 @@ def insert_docs(
                 text_batch = []
                 metadata_batch = []
                 size = 0
-            i += 1
+            i += 1  # noqa: SIM113
         if text_batch:
             result_ids.update(self._insert_batch(collection, text_batch, metadata_batch, id_batch))  # type: ignore
             input_ids.update(id_batch)

diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py
index dd2af91af1..7fe77b6d02 100644
--- a/autogen/agentchat/conversable_agent.py
+++ b/autogen/agentchat/conversable_agent.py
@@ -658,7 +658,7 @@ def last_message(self, agent: Optional[Agent] = None) -> Optional[dict]:
             for conversation in self._oai_messages.values():
                 return conversation[-1]
             raise ValueError("More than one conversation is found. Please specify the sender to get the last message.")
-        if agent not in self._oai_messages.keys():
+        if agent not in self._oai_messages:
             raise KeyError(
                 f"The agent '{agent.name}' is not present in any conversation. No history available for this agent."
             )
@@ -2507,7 +2507,7 @@ def register_function(self, function_map: dict[str, Union[Callable, None]]):
         """
         for name, func in function_map.items():
             self._assert_valid_name(name)
-            if func is None and name not in self._function_map.keys():
+            if func is None and name not in self._function_map:
                 warnings.warn(f"The function {name} to remove doesn't exist", name)
             if name in self._function_map:
                 warnings.warn(f"Function '{name}' is being overridden.", UserWarning)
@@ -2530,7 +2530,7 @@ def update_function_signature(self, func_sig: Union[str, dict], is_remove: None)
             raise AssertionError(error_msg)
 
         if is_remove:
-            if "functions" not in self.llm_config.keys():
+            if "functions" not in self.llm_config:
                 error_msg = f"The agent config doesn't have function {func_sig}."
                 logger.error(error_msg)
                 raise AssertionError(error_msg)
@@ -2546,7 +2546,7 @@ def update_function_signature(self, func_sig: Union[str, dict], is_remove: None)
             if "name" not in func_sig:
                 raise ValueError(f"The function signature must have a 'name' key. Received: {func_sig}")
             self._assert_valid_name(func_sig["name"]), func_sig
-            if "functions" in self.llm_config.keys():
+            if "functions" in self.llm_config:
                 if any(func["name"] == func_sig["name"] for func in self.llm_config["functions"]):
                     warnings.warn(f"Function '{func_sig['name']}' is being overridden.", UserWarning)
@@ -2574,7 +2574,7 @@ def update_tool_signature(self, tool_sig: Union[str, dict], is_remove: bool):
             raise AssertionError(error_msg)
 
         if is_remove:
-            if "tools" not in self.llm_config.keys():
+            if "tools" not in self.llm_config:
                 error_msg = f"The agent config doesn't have tool {tool_sig}."
                 logger.error(error_msg)
                 raise AssertionError(error_msg)

diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py
index 4f29f30123..896eba3798 100644
--- a/autogen/agentchat/groupchat.py
+++ b/autogen/agentchat/groupchat.py
@@ -635,7 +635,7 @@ def _register_custom_model_clients(self, agent: ConversableAgent):
         if not self.select_speaker_auto_llm_config:
             return
 
-        config_format_is_list = "config_list" in self.select_speaker_auto_llm_config.keys()
+        config_format_is_list = "config_list" in self.select_speaker_auto_llm_config
         if config_format_is_list:
             for config in self.select_speaker_auto_llm_config["config_list"]:
                 self._register_client_from_config(agent, config)

diff --git a/autogen/agentchat/realtime_agent/realtime_agent.py b/autogen/agentchat/realtime_agent/realtime_agent.py
index bb0a809106..ded56cd45a 100644
--- a/autogen/agentchat/realtime_agent/realtime_agent.py
+++ b/autogen/agentchat/realtime_agent/realtime_agent.py
@@ -107,7 +107,7 @@ async def start_observers(self) -> None:
     async def run(self) -> None:
         """Run the agent."""
         # everything is run in the same task group to enable easy cancellation using self._tg.cancel_scope.cancel()
-        async with create_task_group() as self._tg:
+        async with create_task_group() as self._tg:  # noqa: SIM117
             # connect with the client first (establishes a connection and initializes a session)
             async with self._realtime_client.connect():
                 # start the observers and wait for them to be ready

diff --git a/autogen/cache/cache.py b/autogen/cache/cache.py
index 63e2c7e4d0..5198ff8ee9 100644
--- a/autogen/cache/cache.py
+++ b/autogen/cache/cache.py
@@ -100,7 +100,7 @@ def __init__(self, config: dict[str, Any]):
         self.config["cache_seed"] = str(self.config.get("cache_seed", 42))
 
         # validate config
-        for key in self.config.keys():
+        for key in self.config:
             if key not in self.ALLOWED_CONFIG_KEYS:
                 raise ValueError(f"Invalid config key: {key}")
         # create cache instance

diff --git a/autogen/oai/completion.py b/autogen/oai/completion.py
index fe0b5694fd..5b07e05a78 100644
--- a/autogen/oai/completion.py
+++ b/autogen/oai/completion.py
@@ -510,7 +510,7 @@ def _eval(cls, config: dict, prune=True, eval_only=False):
                     result[key] += value
         else:
             result = metrics
-        for key in result.keys():
+        for key in result:
             if isinstance(result[key], (float, int)):
                 result[key] /= data_limit
         result["total_cost"] = cls._total_cost
@@ -1009,7 +1009,7 @@ def eval_func(responses, **data):
             return
         if not metric_keys:
             metric_keys = []
-            for k in metrics.keys():
+            for k in metrics:
                 try:
                     _ = float(metrics[k])
                     metric_keys.append(k)

diff --git a/notebook/agentchat_MathChat.ipynb b/notebook/agentchat_MathChat.ipynb
index f8cdcc9f99..a1e557a151 100644
--- a/notebook/agentchat_MathChat.ipynb
+++ b/notebook/agentchat_MathChat.ipynb
@@ -982,7 +982,7 @@
    "source": [
     "# The wolfram alpha app id is required for this example (the assistant may choose to query Wolfram Alpha).\n",
     "if \"WOLFRAM_ALPHA_APPID\" not in os.environ:\n",
-    "    os.environ[\"WOLFRAM_ALPHA_APPID\"] = open(\"wolfram.txt\").read().strip()\n",
+    "    os.environ[\"WOLFRAM_ALPHA_APPID\"] = open(\"wolfram.txt\").read().strip()  # noqa: SIM115\n",
     "\n",
     "# we set the prompt_type to \"two_tools\", which allows the assistant to select wolfram alpha when necessary.\n",
     "math_problem = \"Find all numbers $a$ for which the graph of $y=x^2+a$ and the graph of $y=ax$ intersect. Express your answer in interval notation.\"\n",
@@ -994,10 +994,10 @@
  ],
  "metadata": {
   "front_matter": {
-     "description": "Using MathChat to Solve Math Problems",
-     "tags": [
-        "math"
-     ]
+   "description": "Using MathChat to Solve Math Problems",
+   "tags": [
+    "math"
+   ]
   },
   "kernelspec": {
    "display_name": "flaml_dev",

diff --git a/notebook/agentchat_captainagent.ipynb b/notebook/agentchat_captainagent.ipynb
index 252de3741e..4909e9d00f 100644
--- a/notebook/agentchat_captainagent.ipynb
+++ b/notebook/agentchat_captainagent.ipynb
@@ -1863,7 +1863,7 @@
    "from autogen import UserProxyAgent\n",
    "from autogen.agentchat.contrib.captainagent import CaptainAgent\n",
    "\n",
-   "os.environ[\"BING_API_key\"] = \"\"  # set your bing api key here, if you donot need search engine, you can skip this step\n",
+   "os.environ[\"BING_API_KEY\"] = \"\"  # set your bing api key here; if you do not need a search engine, you can skip this step\n",
    "os.environ[\"RAPID_API_KEY\"] = (\n",
    "    \"\"  # set your rapid api key here, in order for this example to work, you need to subscribe to the youtube transcription api(https://rapidapi.com/solid-api-solid-api-default/api/youtube-transcript3)\n",
    ")\n",

diff --git a/notebook/agentchat_human_feedback.ipynb b/notebook/agentchat_human_feedback.ipynb
index a28e63229f..8753861c93 100644
--- a/notebook/agentchat_human_feedback.ipynb
+++ b/notebook/agentchat_human_feedback.ipynb
@@ -364,7 +364,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
-  "json.dump(user_proxy.chat_messages[assistant], open(\"conversations.json\", \"w\"), indent=2)"
+  "json.dump(user_proxy.chat_messages[assistant], open(\"conversations.json\", \"w\"), indent=2)  # noqa: SIM115"
 ]
 }
],

diff --git a/pyproject.toml b/pyproject.toml
index 0a9694e7bf..e5fd36db7b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -309,7 +309,7 @@ select = [
 #    "B",       # flake8-bugbear https://docs.astral.sh/ruff/rules/#flake8-bugbear-b
     "Q",       # flake8-quotes https://docs.astral.sh/ruff/rules/#flake8-quotes-q
 #    "T20",     # flake8-print https://docs.astral.sh/ruff/rules/#flake8-print-t20
-    "SIM10",   # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim
+#    "SIM1",    # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim
 #    "PT",      # flake8-pytest-style https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt
 #    "PTH",     # flake8-use-pathlib https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth
 #    "TCH",     # flake8-type-checking https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch

diff --git a/scripts/build-setup-files.py b/scripts/build-setup-files.py
index f153681530..08b80cbefd 100755
--- a/scripts/build-setup-files.py
+++ b/scripts/build-setup-files.py
@@ -22,7 +22,7 @@ def get_optional_dependencies(pyproject_path: str) -> dict:
 # Example usage
 pyproject_path = Path(__file__).parent.joinpath("../pyproject.toml")
 optional_dependencies = get_optional_dependencies(pyproject_path)
-optional_groups = [group for group in optional_dependencies.keys()]
+optional_groups = [group for group in optional_dependencies]
 
 # for group, dependencies in optional_dependencies.items():
 #     print(f"Group: {group}")

diff --git a/test/agentchat/contrib/rag/test_parser_utils.py b/test/agentchat/contrib/rag/test_parser_utils.py
index 2dc0aa14b6..308f279ca1 100644
--- a/test/agentchat/contrib/rag/test_parser_utils.py
+++ b/test/agentchat/contrib/rag/test_parser_utils.py
@@ -106,13 +106,11 @@ def test_logs_conversion_time_and_document_conversion_info(
                 "autogen.agentchat.contrib.rag.parser_utils.DocumentConverter.convert_all",
                 return_value=[mock_conversion_result],
             ),
+            caplog.at_level(logging.INFO),
         ):
-            with caplog.at_level(logging.INFO):
-                docling_parse_docs(input_file_path, output_dir_path)
-                assert "Document converted in" in caplog.text
-                assert (
-                    f"Document input_file_path converted.\nSaved markdown output to: {output_dir_path}" in caplog.text
-                )
+            docling_parse_docs(input_file_path, output_dir_path)
+            assert "Document converted in" in caplog.text
+            assert f"Document input_file_path converted.\nSaved markdown output to: {output_dir_path}" in caplog.text
 
     def test_handles_invalid_input_file_paths_and_output_directory_paths(self, tmp_path: Path) -> None:
         """Test that the function handles invalid input file paths and output directory paths.

diff --git a/test/agentchat/contrib/test_gpt_assistant.py b/test/agentchat/contrib/test_gpt_assistant.py
index 2a698d4ab7..c6e9ab00d9 100755
--- a/test/agentchat/contrib/test_gpt_assistant.py
+++ b/test/agentchat/contrib/test_gpt_assistant.py
@@ -356,8 +356,8 @@ def test_assistant_mismatch_retrieval(credentials_gpt_4o_mini: Credentials) -> N
     openai_client = OpenAIWrapper(config_list=credentials_gpt_4o_mini.config_list)._clients[0]._oai_client
     current_file_path = os.path.abspath(__file__)
 
-    file_1 = openai_client.files.create(file=open(current_file_path, "rb"), purpose="assistants")
-    file_2 = openai_client.files.create(file=open(current_file_path, "rb"), purpose="assistants")
+    file_1 = openai_client.files.create(file=open(current_file_path, "rb"), purpose="assistants")  # noqa: SIM115
+    file_2 = openai_client.files.create(file=open(current_file_path, "rb"), purpose="assistants")  # noqa: SIM115
 
     try:
         # keep it to test older version of assistant config

diff --git a/test/agentchat/test_dependancy_injection.py b/test/agentchat/test_dependancy_injection.py
index 00fdc7084b..7cab4a4396 100644
--- a/test/agentchat/test_dependancy_injection.py
+++ b/test/agentchat/test_dependancy_injection.py
@@ -193,7 +193,7 @@ async def test_register_tools(
         expected_tools[0]["function"]["name"] = func_name
 
         assert agent.llm_config["tools"] == expected_tools
-        assert func_name in agent.function_map.keys()
+        assert func_name in agent.function_map
 
         retval = agent.function_map[func_name](1)
         actual = await retval if is_async else retval

diff --git a/test/io/test_websockets.py b/test/io/test_websockets.py
index 1fc46cafc9..bbc4510055 100644
--- a/test/io/test_websockets.py
+++ b/test/io/test_websockets.py
@@ -140,7 +140,7 @@ def on_connect(iostream: IOWebsockets) -> None:
             )
 
             # we will use a temporary directory as the cache path root to ensure fresh completion each time
-            with TemporaryDirectory() as cache_path_root:
+            with TemporaryDirectory() as cache_path_root:  # noqa: SIM117
                 with Cache.disk(cache_path_root=cache_path_root) as cache:
                     print(
                         f" - on_connect(): Initiating chat with agent {agent} using message '{initial_msg}'",

diff --git a/test/oai/test_gemini.py b/test/oai/test_gemini.py
index be193af771..d5361279dd 100644
--- a/test/oai/test_gemini.py
+++ b/test/oai/test_gemini.py
@@ -220,7 +220,7 @@ def test_vertexai_default_safety_settings_dict(gemini_client):
     expected_safety_settings = {category: VertexAIHarmBlockThreshold.BLOCK_ONLY_HIGH for category in safety_settings}
 
     def compare_safety_settings(converted_safety_settings, expected_safety_settings):
-        for expected_setting_key in expected_safety_settings.keys():
+        for expected_setting_key in expected_safety_settings:
             expected_setting = expected_safety_settings[expected_setting_key]
             converted_setting = converted_safety_settings[expected_setting_key]
             yield expected_setting == converted_setting

diff --git a/test/oai/test_utils.py b/test/oai/test_utils.py
index 0ab0e138e1..be3a76d4fd 100755
--- a/test/oai/test_utils.py
+++ b/test/oai/test_utils.py
@@ -134,9 +134,9 @@ def test_config_list_from_json():
         for key in config:
             assert key in json_data[i]
             assert config[key] == json_data[i][key]
-        i += 1
+        i += 1  # noqa: SIM113
 
-    os.environ["config_list_test"] = JSON_SAMPLE
+    os.environ["CONFIG_LIST_TEST"] = JSON_SAMPLE
     config_list_2 = autogen.config_list_from_json("config_list_test")
     assert config_list == config_list_2
 
@@ -146,7 +146,7 @@ def test_config_list_from_json():
     )
     assert all(config.get("model") in ["gpt-4", "gpt"] for config in config_list_3)
 
-    del os.environ["config_list_test"]
+    del os.environ["CONFIG_LIST_TEST"]
 
     # Test: using the `file_location` parameter.
     config_list_4 = autogen.config_list_from_json(
@@ -160,11 +160,11 @@ def test_config_list_from_json():
     # Test: the env variable is set to a file path.
     fd, temp_name = tempfile.mkstemp()
     json.dump(config_list, os.fdopen(fd, "w+"), indent=4)
-    os.environ["config_list_test"] = temp_name
+    os.environ["CONFIG_LIST_TEST"] = temp_name
     config_list_5 = autogen.config_list_from_json("config_list_test")
     assert config_list_5 == config_list_2
 
-    del os.environ["config_list_test"]
+    del os.environ["CONFIG_LIST_TEST"]
 
     # Test that an error is thrown when the config list is missing
     with pytest.raises(FileNotFoundError):
@@ -312,7 +312,7 @@ def test_config_list_from_dotenv(mock_os_environ, caplog):
     invalid_model_api_key_map = {
         "gpt-4": "INVALID_API_KEY",  # Simulate an environment var name that doesn't exist
     }
-    with caplog.at_level(logging.ERROR):
+    with caplog.at_level(logging.ERROR):  # noqa: SIM117
         # Mocking `config_list_from_json` to return an empty list and raise an exception when called with
        with mock.patch("autogen.config_list_from_json", return_value=[], side_effect=Exception("Mock called")):
            # Call the function with the invalid map
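Note: the recurring `.keys()` removals in this commit follow ruff's SIM118 rule: membership tests and iteration should go through the mapping itself, which reads shorter and avoids creating a key view. A small standalone sketch:

    function_map = {"echo": print, "add": sum}

    # Flagged by SIM118:
    assert "echo" in function_map.keys()

    # Preferred: the dict itself supports `in` and iteration directly.
    assert "echo" in function_map
    names = [name for name in function_map]
    assert names == list(function_map)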
From 7684a2a09aa0db9be76e365c254e7ff6416da809 Mon Sep 17 00:00:00 2001
From: Davor Runje
Date: Sat, 25 Jan 2025 22:12:52 +0100
Subject: [PATCH 08/13] fix test

---
 test/oai/test_utils.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/oai/test_utils.py b/test/oai/test_utils.py
index be3a76d4fd..53fd9071e1 100755
--- a/test/oai/test_utils.py
+++ b/test/oai/test_utils.py
@@ -137,7 +137,7 @@ def test_config_list_from_json():
         i += 1  # noqa: SIM113
 
     os.environ["CONFIG_LIST_TEST"] = JSON_SAMPLE
-    config_list_2 = autogen.config_list_from_json("config_list_test")
+    config_list_2 = autogen.config_list_from_json("CONFIG_LIST_TEST")
     assert config_list == config_list_2
 
     # Test: the env variable is set to a file path with folder name inside.
@@ -161,7 +161,7 @@ def test_config_list_from_json():
     fd, temp_name = tempfile.mkstemp()
     json.dump(config_list, os.fdopen(fd, "w+"), indent=4)
     os.environ["CONFIG_LIST_TEST"] = temp_name
-    config_list_5 = autogen.config_list_from_json("config_list_test")
+    config_list_5 = autogen.config_list_from_json("CONFIG_LIST_TEST")
     assert config_list_5 == config_list_2
 
     del os.environ["CONFIG_LIST_TEST"]
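Note: this fix pairs the environment-variable rename from the previous commit with the matching lookup key. `config_list_from_json` resolves its first argument against `os.environ` by exact name, and on POSIX systems that lookup is case-sensitive, so renaming only the assignment left the test reading an unset variable. A standard-library sketch of the failure mode, using a hypothetical `MY_CONFIG` variable:

    import os

    os.environ["MY_CONFIG"] = "[]"

    # On POSIX the stale lower-case key misses the renamed variable,
    # while the matching upper-case lookup finds it.
    assert os.environ.get("my_config") is None
    assert os.environ["MY_CONFIG"] == "[]"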
"quantifier_output = quantify_criteria(\n", " llm_config={\"config_list\": config_list},\n", @@ -2587,14 +2587,14 @@ ], "source": [ "criteria_file = \"../test/test_files/agenteval-in-out/samples/sample_math_criteria.json\"\n", - "criteria = Criterion.parse_json_str(open(criteria_file, \"r\").read())\n", + "criteria = Criterion.parse_json_str(open(criteria_file, \"r\").read()) # noqa: SIM115\n", "outcome = {}\n", "\n", "for prefix in os.listdir(log_path):\n", " for file_name in os.listdir(log_path + \"/\" + prefix):\n", " gameid = prefix + \"_\" + file_name\n", " if file_name.split(\".\")[-1] == \"json\":\n", - " test_case, ground_truth = remove_ground_truth(open(log_path + \"/\" + prefix + \"/\" + file_name, \"r\").read())\n", + " test_case, ground_truth = remove_ground_truth(open(log_path + \"/\" + prefix + \"/\" + file_name, \"r\").read()) # noqa: SIM115\n", " quantifier_output = quantify_criteria(\n", " llm_config={\"config_list\": config_list},\n", " criteria=criteria,\n", @@ -2669,15 +2669,13 @@ "\n", "\n", "with suppress(Exception):\n", - " criteria = Criterion.parse_json_str(open(criteria_file, \"r\").read())\n", + " criteria = Criterion.parse_json_str(open(criteria_file, \"r\").read()) # noqa: SIM115\n", "\n", "\n", "nl2int = {}\n", "for criterion in criteria:\n", - " score = 0\n", - " for v in criterion.accepted_values:\n", + " for score, v in enumerate(criterion.accepted_values):\n", " nl2int[v] = score\n", - " score += 1\n", "print(nl2int)\n", "\n", "average_s = {}\n", diff --git a/notebook/autobuild_agent_library.ipynb b/notebook/autobuild_agent_library.ipynb index 2c2010c3f4..2f971925f6 100644 --- a/notebook/autobuild_agent_library.ipynb +++ b/notebook/autobuild_agent_library.ipynb @@ -320,7 +320,7 @@ }, "outputs": [], "source": [ - "json.dump(sys_msg_list, open(\"./agent_library_example.json\", \"w\"), indent=4)" + "json.dump(sys_msg_list, open(\"./agent_library_example.json\", \"w\"), indent=4) # noqa: SIM115" ] }, { diff --git a/pyproject.toml b/pyproject.toml index e5fd36db7b..77d82cd0b8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -309,7 +309,7 @@ select = [ # "B", # flake8-bugbear https://docs.astral.sh/ruff/rules/#flake8-bugbear-b "Q", # flake8-quotes https://docs.astral.sh/ruff/rules/#flake8-quotes-q # "T20", # flake8-print https://docs.astral.sh/ruff/rules/#flake8-print-t20 -# "SIM1", # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim + "SIM1", # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim # "PT", # flake8-pytest-style https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt # "PTH", # flake8-use-pathlib https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth # "TCH", # flake8-type-checking https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch diff --git a/test/agentchat/contrib/agent_eval/test_agent_eval.py b/test/agentchat/contrib/agent_eval/test_agent_eval.py index 37ee052eee..c3dfd4422c 100644 --- a/test/agentchat/contrib/agent_eval/test_agent_eval.py +++ b/test/agentchat/contrib/agent_eval/test_agent_eval.py @@ -28,9 +28,9 @@ def remove_ground_truth(test_case: str): @pytest.fixture def task() -> Task: - success_str = open("test/test_files/agenteval-in-out/samples/sample_math_response_successful.txt").read() + success_str = open("test/test_files/agenteval-in-out/samples/sample_math_response_successful.txt").read() # noqa: SIM115 response_successful = remove_ground_truth(success_str)[0] - failed_str = open("test/test_files/agenteval-in-out/samples/sample_math_response_failed.txt").read() + failed_str 
= open("test/test_files/agenteval-in-out/samples/sample_math_response_failed.txt").read() # noqa: SIM115 response_failed = remove_ground_truth(failed_str)[0] task = Task( **{ @@ -56,10 +56,10 @@ def test_generate_criteria(credentials_azure: Credentials, task: Task): @pytest.mark.openai def test_quantify_criteria(credentials_azure: Credentials, task: Task): criteria_file = "test/test_files/agenteval-in-out/samples/sample_math_criteria.json" - criteria = open(criteria_file).read() + criteria = open(criteria_file).read() # noqa: SIM115 criteria = Criterion.parse_json_str(criteria) - test_case = open("test/test_files/agenteval-in-out/samples/sample_test_case.json").read() + test_case = open("test/test_files/agenteval-in-out/samples/sample_test_case.json").read() # noqa: SIM115 test_case, ground_truth = remove_ground_truth(test_case) quantified = quantify_criteria( diff --git a/test/agentchat/contrib/agent_eval/test_criterion.py b/test/agentchat/contrib/agent_eval/test_criterion.py index af0600cd3c..8374eccd5e 100644 --- a/test/agentchat/contrib/agent_eval/test_criterion.py +++ b/test/agentchat/contrib/agent_eval/test_criterion.py @@ -11,7 +11,7 @@ def test_parse_json_str(): criteria_file = "test/test_files/agenteval-in-out/samples/sample_math_criteria.json" - criteria = open(criteria_file).read() + criteria = open(criteria_file).read() # noqa: SIM115 criteria = Criterion.parse_json_str(criteria) assert criteria assert len(criteria) == 6 diff --git a/test/agentchat/contrib/rag/test_parser_utils.py b/test/agentchat/contrib/rag/test_parser_utils.py index 308f279ca1..defd48334b 100644 --- a/test/agentchat/contrib/rag/test_parser_utils.py +++ b/test/agentchat/contrib/rag/test_parser_utils.py @@ -36,7 +36,7 @@ def mock_conversion_result(self, mock_document_input: MagicMock) -> MagicMock: def test_no_documents_found(self) -> None: """Test that ValueError is raised when no documents are found.""" - with patch("autogen.agentchat.contrib.rag.parser_utils.handle_input", return_value=[]): + with patch("autogen.agentchat.contrib.rag.parser_utils.handle_input", return_value=[]): # noqa: SIM117 with raises(ValueError, match="No documents found."): list(docling_parse_docs("input_file_path", "output_dir_path")) diff --git a/test/agentchat/contrib/test_agent_builder.py b/test/agentchat/contrib/test_agent_builder.py index 5d4860b5cb..3e07a64079 100755 --- a/test/agentchat/contrib/test_agent_builder.py +++ b/test/agentchat/contrib/test_agent_builder.py @@ -138,7 +138,7 @@ def test_save(builder: AgentBuilder): # check config file path assert os.path.isfile(saved_files) - saved_configs = json.load(open(saved_files)) + saved_configs = json.load(open(saved_files)) # noqa: SIM115 _config_check(saved_configs) @@ -146,7 +146,7 @@ def test_save(builder: AgentBuilder): @pytest.mark.openai def test_load(builder: AgentBuilder): config_save_path = f"{here}/example_test_agent_builder_config.json" - json.load(open(config_save_path)) + json.load(open(config_save_path)) # noqa: SIM115 _, loaded_agent_configs = builder.load( config_save_path, diff --git a/test/agentchat/contrib/test_gpt_assistant.py b/test/agentchat/contrib/test_gpt_assistant.py index c6e9ab00d9..c5227b4eb7 100755 --- a/test/agentchat/contrib/test_gpt_assistant.py +++ b/test/agentchat/contrib/test_gpt_assistant.py @@ -228,7 +228,7 @@ def test_get_assistant_files(credentials_gpt_4o_mini: Credentials) -> None: """ current_file_path = os.path.abspath(__file__) openai_client = OpenAIWrapper(config_list=credentials_gpt_4o_mini.config_list)._clients[0]._oai_client - 
From d1a516573df926e44b975a5782e291c595326038 Mon Sep 17 00:00:00 2001
From: Davor Runje
Date: Sat, 25 Jan 2025 22:33:12 +0100
Subject: [PATCH 10/13] fix test

---
 autogen/retrieve_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/autogen/retrieve_utils.py b/autogen/retrieve_utils.py
index ccd643dd8c..01dd3b7f81 100644
--- a/autogen/retrieve_utils.py
+++ b/autogen/retrieve_utils.py
@@ -98,7 +98,7 @@ def split_text_to_chunks(
     lines_tokens = [count_token(line) for line in lines]
     sum_tokens = sum(lines_tokens)
     while sum_tokens > max_tokens:
-        estimated_line_cut = 2 if sum_tokens > max_tokens else max(int(max_tokens / sum_tokens * len(lines)), 2)
+        estimated_line_cut = 2 if chunk_mode == "one_line" else max(int(max_tokens / sum_tokens * len(lines)), 2)
         cnt = 0
         prev = ""
         for cnt in reversed(range(estimated_line_cut)):

From e50636ac947b0fc5cf899fb35daca236205638fb Mon Sep 17 00:00:00 2001
From: Davor Runje
Date: Sat, 25 Jan 2025 22:43:12 +0100
Subject: [PATCH 11/13] Add lint fixes

---
 autogen/agentchat/contrib/agent_optimizer.py        | 2 +-
 .../agentchat/contrib/captainagent/agent_builder.py | 4 ++--
 .../agentchat/contrib/retrieve_user_proxy_agent.py  | 2 +-
 autogen/oai/client.py                               | 2 +-
 autogen/oai/cohere.py                               | 2 +-
 notebook/agentchat_teachability.ipynb               | 2 +-
 notebook/agentchat_teaching.ipynb                   | 8 ++++----
 pyproject.toml                                      | 2 +-
 test/agentchat/test_function_call.py                | 2 +-
 test/agentchat/test_groupchat.py                    | 2 +-
 test/agentchat/test_tool_calls.py                   | 6 +++---
 11 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/autogen/agentchat/contrib/agent_optimizer.py b/autogen/agentchat/contrib/agent_optimizer.py
index 7950125902..8380dfd039 100644
--- a/autogen/agentchat/contrib/agent_optimizer.py
+++ b/autogen/agentchat/contrib/agent_optimizer.py
@@ -227,7 +227,7 @@ def record_one_conversation(self, conversation_history: list[dict], is_satisfied
             "0",
             "1",
         ], "The input is invalid. Please input 1 or 0. 1 represents satisfied. 0 represents not satisfied."
-        is_satisfied = True if reply == "1" else False
+        is_satisfied = reply == "1"
         self._trial_conversations_history.append(
             {f"Conversation {len(self._trial_conversations_history)}": conversation_history}
         )

diff --git a/autogen/agentchat/contrib/captainagent/agent_builder.py b/autogen/agentchat/contrib/captainagent/agent_builder.py
index e94038e409..4a912c7889 100644
--- a/autogen/agentchat/contrib/captainagent/agent_builder.py
+++ b/autogen/agentchat/contrib/captainagent/agent_builder.py
@@ -473,7 +473,7 @@ def build(
             .choices[0]
             .message.content
         )
-        coding = True if resp == "YES" else False
+        coding = resp == "YES"
 
         self.cached_configs.update(
             {
@@ -640,7 +640,7 @@ def build_from_library(
                 .choices[0]
                 .message.content
             )
-            coding = True if resp == "YES" else False
+            coding = resp == "YES"
 
             self.cached_configs.update(
                 {

diff --git a/autogen/agentchat/contrib/retrieve_user_proxy_agent.py b/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
index 73eb885629..66792456b5 100644
--- a/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
+++ b/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
@@ -285,7 +285,7 @@ def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str =
         self._custom_text_types = self._retrieve_config.get("custom_text_types", TEXT_FORMATS)
         self._recursive = self._retrieve_config.get("recursive", True)
         self._context_max_tokens = self._retrieve_config.get("context_max_tokens", self._max_tokens * 0.8)
-        self._collection = True if self._docs_path is None else False  # whether the collection is created
+        self._collection = self._docs_path is None  # whether the collection is created
         self._ipython = get_ipython()
         self._doc_idx = -1  # the index of the current used doc
         self._results = []  # the results of the current query

diff --git a/autogen/oai/client.py b/autogen/oai/client.py
index 8877c2cce4..b583e2fb39 100644
--- a/autogen/oai/client.py
+++ b/autogen/oai/client.py
@@ -293,7 +293,7 @@ def _format_content(content: str) -> str:
     @staticmethod
     def _is_agent_name_error_message(message: str) -> bool:
         pattern = re.compile(r"Invalid 'messages\[\d+\]\.name': string does not match pattern.")
-        return True if pattern.match(message) else False
+        return pattern.match(message)
 
     @staticmethod
     def _move_system_message_to_beginning(messages: list[dict[str, Any]]) -> None:

diff --git a/autogen/oai/cohere.py b/autogen/oai/cohere.py
index c730806cfd..0d05677ef4 100644
--- a/autogen/oai/cohere.py
+++ b/autogen/oai/cohere.py
@@ -178,7 +178,7 @@ def create(self, params: dict) -> ChatCompletion:
         total_tokens = 0
 
         # Stream if in parameters
-        streaming = True if params.get("stream") else False
+        streaming = params.get("stream")
         cohere_finish = "stop"
         tool_calls = None
         ans = None

diff --git a/notebook/agentchat_teachability.ipynb b/notebook/agentchat_teachability.ipynb
index 2038d632e5..d35c06ae3d 100644
--- a/notebook/agentchat_teachability.ipynb
+++ b/notebook/agentchat_teachability.ipynb
@@ -126,7 +126,7 @@
    "user = UserProxyAgent(\n",
    "    name=\"user\",\n",
    "    human_input_mode=\"NEVER\",\n",
-   "    is_termination_msg=lambda x: True if \"TERMINATE\" in x.get(\"content\") else False,\n",
+   "    is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n",
    "    max_consecutive_auto_reply=0,\n",
    "    code_execution_config={\n",
    "        \"use_docker\": False\n",

diff --git a/notebook/agentchat_teaching.ipynb b/notebook/agentchat_teaching.ipynb
index 13589e9319..b3d9d02e6b 100644
--- a/notebook/agentchat_teaching.ipynb
+++ b/notebook/agentchat_teaching.ipynb
@@ -83,13 +83,13 @@
    "assistant = autogen.AssistantAgent(\n",
    "    name=\"assistant\",\n",
    "    llm_config=llm_config,\n",
-   "    is_termination_msg=lambda x: True if \"TERMINATE\" in x.get(\"content\") else False,\n",
+   "    is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n",
    ")\n",
    "# create a UserProxyAgent instance named \"user_proxy\"\n",
    "user_proxy = autogen.UserProxyAgent(\n",
    "    name=\"user_proxy\",\n",
    "    human_input_mode=\"NEVER\",\n",
-   "    is_termination_msg=lambda x: True if \"TERMINATE\" in x.get(\"content\") else False,\n",
+   "    is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n",
    "    max_consecutive_auto_reply=10,\n",
    "    code_execution_config={\n",
    "        \"work_dir\": \"work_dir\",\n",
@@ -809,13 +809,13 @@
    "assistant = autogen.AssistantAgent(\n",
    "    name=\"assistant\",\n",
    "    llm_config=llm_config,\n",
-   "    is_termination_msg=lambda x: True if \"TERMINATE\" in x.get(\"content\") else False,\n",
+   "    is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n",
    ")\n",
    "# create a UserProxyAgent instance named \"user_proxy\"\n",
    "user_proxy = autogen.UserProxyAgent(\n",
    "    name=\"user_proxy\",\n",
    "    human_input_mode=\"NEVER\",\n",
-   "    is_termination_msg=lambda x: True if \"TERMINATE\" in x.get(\"content\") else False,\n",
+   "    is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n",
    "    max_consecutive_auto_reply=10,\n",
    "    code_execution_config={\n",
    "        \"work_dir\": \"work_dir\",\n",

diff --git a/pyproject.toml b/pyproject.toml
index 77d82cd0b8..2217ef51ff 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -309,7 +309,7 @@ select = [
 #    "B",       # flake8-bugbear https://docs.astral.sh/ruff/rules/#flake8-bugbear-b
     "Q",       # flake8-quotes https://docs.astral.sh/ruff/rules/#flake8-quotes-q
 #    "T20",     # flake8-print https://docs.astral.sh/ruff/rules/#flake8-print-t20
-    "SIM1",    # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim
+    "SIM2",    # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim
 #    "PT",      # flake8-pytest-style https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt
 #    "PTH",     # flake8-use-pathlib https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth
 #    "TCH",     # flake8-type-checking https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch

diff --git a/test/agentchat/test_function_call.py b/test/agentchat/test_function_call.py
index 7c3b704000..1ac484dd50 100755
--- a/test/agentchat/test_function_call.py
+++ b/test/agentchat/test_function_call.py
@@ -229,7 +229,7 @@ def test_update_function(credentials_gpt_4o_mini: Credentials):
     user_proxy = autogen.UserProxyAgent(
         name="user_proxy",
         human_input_mode="NEVER",
-        is_termination_msg=lambda x: True if "TERMINATE" in x.get("content") else False,
+        is_termination_msg=lambda x: "TERMINATE" in x.get("content"),
     )
     assistant = autogen.AssistantAgent(name="test", llm_config=llm_config)

diff --git a/test/agentchat/test_groupchat.py b/test/agentchat/test_groupchat.py
index 49a51243c1..fcca024390 100755
--- a/test/agentchat/test_groupchat.py
+++ b/test/agentchat/test_groupchat.py
@@ -133,7 +133,7 @@ def _test_selection_method(method: str):
         messages=[],
         max_round=6,
         speaker_selection_method=method,
-        allow_repeat_speaker=False if method == "manual" else True,
+        allow_repeat_speaker=method != "manual",
     )
     group_chat_manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=False)

diff --git a/test/agentchat/test_tool_calls.py b/test/agentchat/test_tool_calls.py
index f769927aae..e71bc2e61e 100755
--- a/test/agentchat/test_tool_calls.py
+++ b/test/agentchat/test_tool_calls.py
@@ -142,7 +142,7 @@ def test_update_tool(credentials_gpt_4o: Credentials):
     user_proxy = autogen.UserProxyAgent(
         name="user_proxy",
         human_input_mode="NEVER",
-        is_termination_msg=lambda x: True if "TERMINATE" in x.get("content") else False,
+        is_termination_msg=lambda x: "TERMINATE" in x.get("content"),
     )
     assistant = autogen.AssistantAgent(name="test", llm_config=llm_config)
@@ -216,7 +216,7 @@ def receive(
     user_proxy = autogen.UserProxyAgent(
         name="user_proxy",
         human_input_mode="NEVER",
-        is_termination_msg=lambda x: True if "TERMINATE" in x.get("content") else False,
+        is_termination_msg=lambda x: "TERMINATE" in x.get("content"),
     )
     user_proxy.register_function({"echo": lambda str: str})
@@ -313,7 +313,7 @@ async def a_receive(
     user_proxy = autogen.UserProxyAgent(
         name="user_proxy",
         human_input_mode="NEVER",
-        is_termination_msg=lambda x: True if "TERMINATE" in x.get("content") else False,
+        is_termination_msg=lambda x: "TERMINATE" in x.get("content"),
     )
 
     def echo(str):
test_update_tool(credentials_gpt_4o: Credentials): user_proxy = autogen.UserProxyAgent( name="user_proxy", human_input_mode="NEVER", - is_termination_msg=lambda x: True if "TERMINATE" in x.get("content") else False, + is_termination_msg=lambda x: "TERMINATE" in x.get("content"), ) assistant = autogen.AssistantAgent(name="test", llm_config=llm_config) @@ -216,7 +216,7 @@ def receive( user_proxy = autogen.UserProxyAgent( name="user_proxy", human_input_mode="NEVER", - is_termination_msg=lambda x: True if "TERMINATE" in x.get("content") else False, + is_termination_msg=lambda x: "TERMINATE" in x.get("content"), ) user_proxy.register_function({"echo": lambda str: str}) @@ -313,7 +313,7 @@ async def a_receive( user_proxy = autogen.UserProxyAgent( name="user_proxy", human_input_mode="NEVER", - is_termination_msg=lambda x: True if "TERMINATE" in x.get("content") else False, + is_termination_msg=lambda x: "TERMINATE" in x.get("content"), ) def echo(str): From d34f6873f809cd64498cdd2b9b4f861bda570462 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Sat, 25 Jan 2025 23:04:51 +0100 Subject: [PATCH 12/13] Add lint fixes --- autogen/browser_utils.py | 2 +- autogen/oai/bedrock.py | 5 +---- autogen/oai/client.py | 2 +- autogen/oai/ollama.py | 8 ++++---- pyproject.toml | 2 +- 5 files changed, 8 insertions(+), 11 deletions(-) diff --git a/autogen/browser_utils.py b/autogen/browser_utils.py index 224133eb90..e12e9ec7ed 100644 --- a/autogen/browser_utils.py +++ b/autogen/browser_utils.py @@ -191,7 +191,7 @@ def _bing_search(self, query: str) -> None: for dl in page["deepLinks"]: idx += 1 web_snippets.append( - f"{idx}. [{dl['name']}]({dl['url']})\n{dl['snippet'] if 'snippet' in dl else ''}" # type: ignore[index] + f"{idx}. [{dl['name']}]({dl['url']})\n{dl.get('snippet', '')}" # type: ignore[index] ) news_snippets = list() diff --git a/autogen/oai/bedrock.py b/autogen/oai/bedrock.py index 8c953732bf..d6bffa8fda 100644 --- a/autogen/oai/bedrock.py +++ b/autogen/oai/bedrock.py @@ -179,10 +179,7 @@ def parse_params(self, params: dict[str, Any]) -> tuple[dict[str, Any], dict[str ) # Streaming - if "stream" in params: - self._streaming = params["stream"] - else: - self._streaming = False + self._streaming = params.get("stream", False) # For this release we will not support streaming as many models do not support streaming with tool use if self._streaming: diff --git a/autogen/oai/client.py b/autogen/oai/client.py index b583e2fb39..6c82668a66 100644 --- a/autogen/oai/client.py +++ b/autogen/oai/client.py @@ -293,7 +293,7 @@ def _format_content(content: str) -> str: @staticmethod def _is_agent_name_error_message(message: str) -> bool: pattern = re.compile(r"Invalid 'messages\[\d+\]\.name': string does not match pattern.") - return pattern.match(message) + return bool(pattern.match(message)) @staticmethod def _move_system_message_to_beginning(messages: list[dict[str, Any]]) -> None: diff --git a/autogen/oai/ollama.py b/autogen/oai/ollama.py index a1380cfc1a..bc06db150a 100644 --- a/autogen/oai/ollama.py +++ b/autogen/oai/ollama.py @@ -255,15 +255,15 @@ def create(self, params: dict) -> ChatCompletion: ans = ans + (chunk["message"]["content"] or "") if "done_reason" in chunk: - prompt_tokens = chunk["prompt_eval_count"] if "prompt_eval_count" in chunk else 0 - completion_tokens = chunk["eval_count"] if "eval_count" in chunk else 0 + prompt_tokens = chunk.get("prompt_eval_count", 0) + completion_tokens = chunk.get("eval_count", 0) total_tokens = prompt_tokens + completion_tokens else: # Non-streaming finished ans: str = 
response["message"]["content"] - prompt_tokens = response["prompt_eval_count"] if "prompt_eval_count" in response else 0 - completion_tokens = response["eval_count"] if "eval_count" in response else 0 + prompt_tokens = response.get("prompt_eval_count", 0) + completion_tokens = response.get("eval_count", 0) total_tokens = prompt_tokens + completion_tokens if response is not None: diff --git a/pyproject.toml b/pyproject.toml index 2217ef51ff..bf4d818972 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -309,7 +309,7 @@ select = [ # "B", # flake8-bugbear https://docs.astral.sh/ruff/rules/#flake8-bugbear-b "Q", # flake8-quotes https://docs.astral.sh/ruff/rules/#flake8-quotes-q # "T20", # flake8-print https://docs.astral.sh/ruff/rules/#flake8-print-t20 - "SIM2", # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim + "SIM", # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim # "PT", # flake8-pytest-style https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt # "PTH", # flake8-use-pathlib https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth # "TCH", # flake8-type-checking https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch From ca0bfd2a87219a68a48c9ad15966154e3ad98bae Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Sat, 25 Jan 2025 23:26:30 +0100 Subject: [PATCH 13/13] Fix test --- autogen/oai/ollama.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/autogen/oai/ollama.py b/autogen/oai/ollama.py index bc06db150a..bc982e2f8d 100644 --- a/autogen/oai/ollama.py +++ b/autogen/oai/ollama.py @@ -608,4 +608,7 @@ def is_valid_tool_call_item(call_item: dict) -> bool: if "name" not in call_item or not isinstance(call_item["name"], str): return False - return set(call_item.keys()) - {"name", "arguments"} + if set(call_item.keys()) - {"name", "arguments"}: # noqa: SIM103 + return False + + return True