From e50636ac947b0fc5cf899fb35daca236205638fb Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Sat, 25 Jan 2025 22:43:12 +0100 Subject: [PATCH] Add lint fixes --- autogen/agentchat/contrib/agent_optimizer.py | 2 +- autogen/agentchat/contrib/captainagent/agent_builder.py | 4 ++-- autogen/agentchat/contrib/retrieve_user_proxy_agent.py | 2 +- autogen/oai/client.py | 2 +- autogen/oai/cohere.py | 2 +- notebook/agentchat_teachability.ipynb | 2 +- notebook/agentchat_teaching.ipynb | 8 ++++---- pyproject.toml | 2 +- test/agentchat/test_function_call.py | 2 +- test/agentchat/test_groupchat.py | 2 +- test/agentchat/test_tool_calls.py | 6 +++--- 11 files changed, 17 insertions(+), 17 deletions(-) diff --git a/autogen/agentchat/contrib/agent_optimizer.py b/autogen/agentchat/contrib/agent_optimizer.py index 7950125902..8380dfd039 100644 --- a/autogen/agentchat/contrib/agent_optimizer.py +++ b/autogen/agentchat/contrib/agent_optimizer.py @@ -227,7 +227,7 @@ def record_one_conversation(self, conversation_history: list[dict], is_satisfied "0", "1", ], "The input is invalid. Please input 1 or 0. 1 represents satisfied. 0 represents not satisfied." 
- is_satisfied = True if reply == "1" else False + is_satisfied = reply == "1" self._trial_conversations_history.append( {f"Conversation {len(self._trial_conversations_history)}": conversation_history} ) diff --git a/autogen/agentchat/contrib/captainagent/agent_builder.py b/autogen/agentchat/contrib/captainagent/agent_builder.py index e94038e409..4a912c7889 100644 --- a/autogen/agentchat/contrib/captainagent/agent_builder.py +++ b/autogen/agentchat/contrib/captainagent/agent_builder.py @@ -473,7 +473,7 @@ def build( .choices[0] .message.content ) - coding = True if resp == "YES" else False + coding = resp == "YES" self.cached_configs.update( { @@ -640,7 +640,7 @@ def build_from_library( .choices[0] .message.content ) - coding = True if resp == "YES" else False + coding = resp == "YES" self.cached_configs.update( { diff --git a/autogen/agentchat/contrib/retrieve_user_proxy_agent.py b/autogen/agentchat/contrib/retrieve_user_proxy_agent.py index 73eb885629..66792456b5 100644 --- a/autogen/agentchat/contrib/retrieve_user_proxy_agent.py +++ b/autogen/agentchat/contrib/retrieve_user_proxy_agent.py @@ -285,7 +285,7 @@ def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = self._custom_text_types = self._retrieve_config.get("custom_text_types", TEXT_FORMATS) self._recursive = self._retrieve_config.get("recursive", True) self._context_max_tokens = self._retrieve_config.get("context_max_tokens", self._max_tokens * 0.8) - self._collection = True if self._docs_path is None else False # whether the collection is created + self._collection = self._docs_path is None # whether the collection is created self._ipython = get_ipython() self._doc_idx = -1 # the index of the current used doc self._results = [] # the results of the current query diff --git a/autogen/oai/client.py b/autogen/oai/client.py index 8877c2cce4..b583e2fb39 100644 --- a/autogen/oai/client.py +++ b/autogen/oai/client.py @@ -293,7 +293,7 @@ def _format_content(content: str) -> str: 
@staticmethod def _is_agent_name_error_message(message: str) -> bool: pattern = re.compile(r"Invalid 'messages\[\d+\]\.name': string does not match pattern.") - return True if pattern.match(message) else False + return bool(pattern.match(message)) @staticmethod def _move_system_message_to_beginning(messages: list[dict[str, Any]]) -> None: diff --git a/autogen/oai/cohere.py b/autogen/oai/cohere.py index c730806cfd..0d05677ef4 100644 --- a/autogen/oai/cohere.py +++ b/autogen/oai/cohere.py @@ -178,7 +178,7 @@ def create(self, params: dict) -> ChatCompletion: total_tokens = 0 # Stream if in parameters - streaming = True if params.get("stream") else False + streaming = bool(params.get("stream")) cohere_finish = "stop" tool_calls = None ans = None diff --git a/notebook/agentchat_teachability.ipynb b/notebook/agentchat_teachability.ipynb index 2038d632e5..d35c06ae3d 100644 --- a/notebook/agentchat_teachability.ipynb +++ b/notebook/agentchat_teachability.ipynb @@ -126,7 +126,7 @@ "user = UserProxyAgent(\n", " name=\"user\",\n", " human_input_mode=\"NEVER\",\n", - " is_termination_msg=lambda x: True if \"TERMINATE\" in x.get(\"content\") else False,\n", + " is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n", " max_consecutive_auto_reply=0,\n", " code_execution_config={\n", " \"use_docker\": False\n", diff --git a/notebook/agentchat_teaching.ipynb b/notebook/agentchat_teaching.ipynb index 13589e9319..b3d9d02e6b 100644 --- a/notebook/agentchat_teaching.ipynb +++ b/notebook/agentchat_teaching.ipynb @@ -83,13 +83,13 @@ "assistant = autogen.AssistantAgent(\n", " name=\"assistant\",\n", " llm_config=llm_config,\n", - " is_termination_msg=lambda x: True if \"TERMINATE\" in x.get(\"content\") else False,\n", + " is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n", ")\n", "# create a UserProxyAgent instance named \"user_proxy\"\n", "user_proxy = autogen.UserProxyAgent(\n", " name=\"user_proxy\",\n", " human_input_mode=\"NEVER\",\n", - " 
is_termination_msg=lambda x: True if \"TERMINATE\" in x.get(\"content\") else False,\n", + " is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n", " max_consecutive_auto_reply=10,\n", " code_execution_config={\n", " \"work_dir\": \"work_dir\",\n", @@ -809,13 +809,13 @@ "assistant = autogen.AssistantAgent(\n", " name=\"assistant\",\n", " llm_config=llm_config,\n", - " is_termination_msg=lambda x: True if \"TERMINATE\" in x.get(\"content\") else False,\n", + " is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n", ")\n", "# create a UserProxyAgent instance named \"user_proxy\"\n", "user_proxy = autogen.UserProxyAgent(\n", " name=\"user_proxy\",\n", " human_input_mode=\"NEVER\",\n", - " is_termination_msg=lambda x: True if \"TERMINATE\" in x.get(\"content\") else False,\n", + " is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n", " max_consecutive_auto_reply=10,\n", " code_execution_config={\n", " \"work_dir\": \"work_dir\",\n", diff --git a/pyproject.toml b/pyproject.toml index 77d82cd0b8..2217ef51ff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -309,7 +309,7 @@ select = [ # "B", # flake8-bugbear https://docs.astral.sh/ruff/rules/#flake8-bugbear-b "Q", # flake8-quotes https://docs.astral.sh/ruff/rules/#flake8-quotes-q # "T20", # flake8-print https://docs.astral.sh/ruff/rules/#flake8-print-t20 - "SIM1", # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim + "SIM2", # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim # "PT", # flake8-pytest-style https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt # "PTH", # flake8-use-pathlib https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth # "TCH", # flake8-type-checking https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch diff --git a/test/agentchat/test_function_call.py b/test/agentchat/test_function_call.py index 7c3b704000..1ac484dd50 100755 --- a/test/agentchat/test_function_call.py +++ 
b/test/agentchat/test_function_call.py @@ -229,7 +229,7 @@ def test_update_function(credentials_gpt_4o_mini: Credentials): user_proxy = autogen.UserProxyAgent( name="user_proxy", human_input_mode="NEVER", - is_termination_msg=lambda x: True if "TERMINATE" in x.get("content") else False, + is_termination_msg=lambda x: "TERMINATE" in x.get("content"), ) assistant = autogen.AssistantAgent(name="test", llm_config=llm_config) diff --git a/test/agentchat/test_groupchat.py b/test/agentchat/test_groupchat.py index 49a51243c1..fcca024390 100755 --- a/test/agentchat/test_groupchat.py +++ b/test/agentchat/test_groupchat.py @@ -133,7 +133,7 @@ def _test_selection_method(method: str): messages=[], max_round=6, speaker_selection_method=method, - allow_repeat_speaker=False if method == "manual" else True, + allow_repeat_speaker=method != "manual", ) group_chat_manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=False) diff --git a/test/agentchat/test_tool_calls.py b/test/agentchat/test_tool_calls.py index f769927aae..e71bc2e61e 100755 --- a/test/agentchat/test_tool_calls.py +++ b/test/agentchat/test_tool_calls.py @@ -142,7 +142,7 @@ def test_update_tool(credentials_gpt_4o: Credentials): user_proxy = autogen.UserProxyAgent( name="user_proxy", human_input_mode="NEVER", - is_termination_msg=lambda x: True if "TERMINATE" in x.get("content") else False, + is_termination_msg=lambda x: "TERMINATE" in x.get("content"), ) assistant = autogen.AssistantAgent(name="test", llm_config=llm_config) @@ -216,7 +216,7 @@ def receive( user_proxy = autogen.UserProxyAgent( name="user_proxy", human_input_mode="NEVER", - is_termination_msg=lambda x: True if "TERMINATE" in x.get("content") else False, + is_termination_msg=lambda x: "TERMINATE" in x.get("content"), ) user_proxy.register_function({"echo": lambda str: str}) @@ -313,7 +313,7 @@ async def a_receive( user_proxy = autogen.UserProxyAgent( name="user_proxy", human_input_mode="NEVER", - is_termination_msg=lambda x: True if 
"TERMINATE" in x.get("content") else False, + is_termination_msg=lambda x: "TERMINATE" in x.get("content"), ) def echo(str):