
Commit

Merge pull request #647 from ag2ai/add-linting-rules
Add linting rules
davorrunje authored Jan 25, 2025
2 parents cfbba1b + ca0bfd2 commit 5d90873
Showing 85 changed files with 318 additions and 449 deletions.
5 changes: 2 additions & 3 deletions autogen/agentchat/assistant_agent.py
@@ -78,6 +78,5 @@ def __init__(

        # Update the provided description if None, and we are using the default system_message,
        # then use the default description.
-        if description is None:
-            if system_message == self.DEFAULT_SYSTEM_MESSAGE:
-                self.description = self.DEFAULT_DESCRIPTION
+        if description is None and system_message == self.DEFAULT_SYSTEM_MESSAGE:
+            self.description = self.DEFAULT_DESCRIPTION
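
The collapse above is ruff's SIM102 (collapsible-if) pattern: when an outer if does nothing except wrap another if, the two conditions can be joined with "and" without changing behavior, because "and" short-circuits. A generic sketch of the rule (illustrative names, not code from this commit):

# Flagged by SIM102: the outer "if" only guards another "if".
if user is not None:
    if user.is_active:
        send_welcome(user)

# Equivalent single condition: user.is_active is still only evaluated
# when user is not None, since "and" evaluates left to right.
if user is not None and user.is_active:
    send_welcome(user)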
8 changes: 4 additions & 4 deletions autogen/agentchat/contrib/agent_optimizer.py
@@ -227,7 +227,7 @@ def record_one_conversation(self, conversation_history: list[dict], is_satisfied
            "0",
            "1",
        ], "The input is invalid. Please input 1 or 0. 1 represents satisfied. 0 represents not satisfied."
-        is_satisfied = True if reply == "1" else False
+        is_satisfied = reply == "1"
        self._trial_conversations_history.append(
            {f"Conversation {len(self._trial_conversations_history)}": conversation_history}
        )
@@ -284,8 +284,8 @@ def step(self):
        incumbent_functions = self._update_function_call(incumbent_functions, actions)

        remove_functions = list(
-            {key for dictionary in self._trial_functions for key in dictionary.keys()}
-            - {key for dictionary in incumbent_functions for key in dictionary.keys()}
+            {key for dictionary in self._trial_functions for key in dictionary}
+            - {key for dictionary in incumbent_functions for key in dictionary}
        )

        register_for_llm = []
@@ -408,7 +408,7 @@ def _validate_actions(self, actions, incumbent_functions):
            function_args = action.function.arguments
            try:
                function_args = json.loads(function_args.strip('"'))
-                if "arguments" in function_args.keys():
+                if "arguments" in function_args:
                    json.loads(function_args.get("arguments").strip('"'))
            except Exception as e:
                print("JSON is invalid:", e)
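Two recurring simplifications show up in this file and several others below. "True if <comparison> else False" is redundant because the comparison already yields a bool (ruff's SIM210 pattern), and iterating over or testing membership in a dict does not need .keys() (SIM118). A small illustrative sketch, not taken from the repository:

settings = {"model": "gpt-4", "timeout": 30}

# Redundant ternary: the comparison is already a bool.
is_slow = True if settings["timeout"] > 10 else False
# Simplified form, identical result.
is_slow = settings["timeout"] > 10

# Membership tests and iteration use the dict's keys by default.
has_model = "model" in settings          # instead of settings.keys()
key_set = {key for key in settings}      # instead of settings.keys()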
2 changes: 1 addition & 1 deletion autogen/agentchat/contrib/capabilities/teachability.py
@@ -203,7 +203,7 @@ def _retrieve_relevant_memos(self, input_text: str) -> list:
            input_text, n_results=self.max_num_retrievals, threshold=self.recall_threshold
        )

-        if self.verbosity >= 1:
+        if self.verbosity >= 1:  # noqa: SIM102
            # Was anything retrieved?
            if len(memo_list) == 0:
                # No. Look at the closest memo.
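Here the nested if is kept on purpose so the explanatory comment stays attached to the inner check, and the rule is silenced for that one line with an inline "# noqa: SIM102" comment; the rule stays active everywhere else. A minimal sketch of the mechanism (illustrative values only):

score = 0.42
if score >= 0.3:  # noqa: SIM102 -- nesting kept so the comment below stays with the inner check
    # Did we clear the stricter bar too?
    if score >= 0.4:
        print("strong match")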
4 changes: 1 addition & 3 deletions autogen/agentchat/contrib/capabilities/tools_capability.py
@@ -17,8 +17,6 @@ def __init__(self, tool_list: list[Tool]):
        self.tools = [tool for tool in tool_list]

    def add_to_agent(self, agent: ConversableAgent):
-        """
-        Add tools to the given agent.
-        """
+        """Add tools to the given agent."""
        for tool in self.tools:
            tool.register_tool(agent=agent)
17 changes: 8 additions & 9 deletions autogen/agentchat/contrib/capabilities/transforms.py
@@ -96,7 +96,7 @@ def apply_transform(self, messages: list[dict]) -> list[dict]:
        for i in range(len(messages) - 1, 0, -1):
            if remaining_count > 1:
                truncated_messages.insert(1 if self._keep_first_message else 0, messages[i])
-            if remaining_count == 1:
+            if remaining_count == 1:  # noqa: SIM102
                # If there's only 1 slot left and it's a 'tools' message, ignore it.
                if messages[i].get("role") != "tool":
                    truncated_messages.insert(1, messages[i])
@@ -287,15 +287,14 @@ def _validate_max_tokens(self, max_tokens: Optional[int] = None) -> Optional[int
            print(colored(f"Model {self._model} not found in token_count_utils.", "yellow"))
            allowed_tokens = None

-        if max_tokens is not None and allowed_tokens is not None:
-            if max_tokens > allowed_tokens:
-                print(
-                    colored(
-                        f"Max token was set to {max_tokens}, but {self._model} can only accept {allowed_tokens} tokens. Capping it to {allowed_tokens}.",
-                        "yellow",
-                    )
+        if max_tokens is not None and allowed_tokens is not None and max_tokens > allowed_tokens:
+            print(
+                colored(
+                    f"Max token was set to {max_tokens}, but {self._model} can only accept {allowed_tokens} tokens. Capping it to {allowed_tokens}.",
+                    "yellow",
                )
-                return allowed_tokens
+            )
+            return allowed_tokens

        return max_tokens if max_tokens is not None else sys.maxsize

6 changes: 3 additions & 3 deletions autogen/agentchat/contrib/captainagent/agent_builder.py
@@ -355,7 +355,7 @@ def clear_agent(self, agent_name: str, recycle_endpoint: Optional[bool] = True):

    def clear_all_agents(self, recycle_endpoint: Optional[bool] = True):
        """Clear all cached agents."""
-        for agent_name in [agent_name for agent_name in self.agent_procs_assign.keys()]:
+        for agent_name in [agent_name for agent_name in self.agent_procs_assign]:
            self.clear_agent(agent_name, recycle_endpoint)
        print(colored("All agents have been cleared.", "yellow"), flush=True)

@@ -473,7 +473,7 @@ def build(
                .choices[0]
                .message.content
            )
-            coding = True if resp == "YES" else False
+            coding = resp == "YES"

            self.cached_configs.update(
                {
@@ -640,7 +640,7 @@ def build_from_library(
                .choices[0]
                .message.content
            )
-            coding = True if resp == "YES" else False
+            coding = resp == "YES"

            self.cached_configs.update(
                {
2 changes: 1 addition & 1 deletion autogen/agentchat/contrib/captainagent/captainagent.py
@@ -374,7 +374,7 @@ def _run_autobuild(self, group_name: str, execution_task: str, building_task: st

        builder = AgentBuilder(**self._nested_config["autobuild_init_config"])
        # if the group is already built, load from history
-        if group_name in self.build_history.keys():
+        if group_name in self.build_history:
            agent_list, agent_configs = builder.load(config_json=json.dumps(self.build_history[group_name]))
            if self._nested_config.get("autobuild_tool_config", None) and agent_configs["coding"] is True:
                # tool library is enabled, reload tools and bind them to the agents
@@ -1,6 +1,9 @@
# Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
#
# SPDX-License-Identifier: Apache-2.0
+from contextlib import suppress


def modular_inverse_sum(expressions, modulus):
    """Calculates the sum of modular inverses of the given expressions modulo the specified modulus.
@@ -15,8 +18,6 @@ def modular_inverse_sum(expressions, modulus):

    mod_sum = 0
    for number in expressions:
-        try:
+        with suppress(ValueError):
            mod_sum += mod_inverse(number, modulus)
-        except ValueError:
-            pass  # If modular inverse does not exist, skip the term
    return mod_sum % modulus
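
The try/except/pass idiom replaced here is what contextlib.suppress is for: it ignores the named exception for the duration of the with block, and ruff flags the longer form as SIM105. A self-contained sketch of the same pattern (illustrative numbers; the original function relies on sympy's mod_inverse, which raises ValueError when no inverse exists):

from contextlib import suppress

from sympy import mod_inverse

mod_sum = 0
for number in (5, 4, 7):
    # If number has no inverse modulo 12, the ValueError is swallowed
    # and the term is skipped, exactly like the old try/except/pass.
    with suppress(ValueError):
        mod_sum += mod_inverse(number, 12)
print(mod_sum % 12)  # 5 and 7 are self-inverse mod 12; 4 has no inverse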
10 changes: 2 additions & 8 deletions autogen/agentchat/contrib/gpt_assistant_agent.py
@@ -510,15 +510,9 @@ def _process_assistant_config(self, llm_config, assistant_config):
        if llm_config is False:
            raise ValueError("llm_config=False is not supported for GPTAssistantAgent.")

-        if llm_config is None:
-            openai_client_cfg = {}
-        else:
-            openai_client_cfg = copy.deepcopy(llm_config)
+        openai_client_cfg = {} if llm_config is None else copy.deepcopy(llm_config)

-        if assistant_config is None:
-            openai_assistant_cfg = {}
-        else:
-            openai_assistant_cfg = copy.deepcopy(assistant_config)
+        openai_assistant_cfg = {} if assistant_config is None else copy.deepcopy(assistant_config)

        # Move the assistant related configurations to assistant_config
        # It's important to keep forward compatibility
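These two rewrites are the if/else-to-ternary simplification (ruff's SIM108): when an if/else exists only to bind one of two values to the same name, a conditional expression says the same thing in one line. A generic sketch (illustrative names):

import copy

llm_config = None  # or a dict passed in by the caller

# Four-line form.
if llm_config is None:
    cfg = {}
else:
    cfg = copy.deepcopy(llm_config)

# One-line form; copy.deepcopy still runs only when llm_config is not None.
cfg = {} if llm_config is None else copy.deepcopy(llm_config)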
@@ -70,7 +70,6 @@ def _get_last_question(self, message: Union[dict, str]):
        """Retrieves the last message from the conversation history."""
        if isinstance(message, str):
            return message
-        if isinstance(message, dict):
-            if "content" in message:
-                return message["content"]
+        if isinstance(message, dict) and "content" in message:
+            return message["content"]
        return None
5 changes: 2 additions & 3 deletions autogen/agentchat/contrib/llamaindex_conversable_agent.py
@@ -124,7 +124,6 @@ def _extract_message_and_history(
        for history_message in history:
            content = history_message.get("content", "")
            role = history_message.get("role", "user")
-            if role:
-                if role == "user" or role == "assistant":
-                    history_messages.append(ChatMessage(content=content, role=role, additional_kwargs={}))
+            if role and (role == "user" or role == "assistant"):
+                history_messages.append(ChatMessage(content=content, role=role, additional_kwargs={}))
        return message, history_messages
11 changes: 4 additions & 7 deletions autogen/agentchat/contrib/rag/document_utils.py
@@ -18,8 +18,7 @@


def is_url(url: str) -> bool:
-    """
-    Check if the string is a valid URL.
+    """Check if the string is a valid URL.

    It checks whether the URL has a valid scheme and network location.
    """
@@ -37,8 +36,7 @@ def is_url(url: str) -> bool:

@require_optional_import(["selenium", "webdriver_manager"], "rag")
def _download_rendered_html(url: str) -> str:
-    """
-    Downloads a rendered HTML page of a given URL using headless ChromeDriver.
+    """Downloads a rendered HTML page of a given URL using headless ChromeDriver.

    Args:
        url (str): URL of the page to download.
@@ -82,7 +80,7 @@ def download_url(url: Any, output_dir: Optional[Union[str, Path]] = None) -> Pat
    filename = url_path.name or "downloaded_content.html"
    if len(filename) < 5 or filename[-5:] != ".html":
        filename += ".html"
-    output_dir = Path(output_dir) if output_dir else Path(".")
+    output_dir = Path(output_dir) if output_dir else Path()
    filepath = output_dir / filename
    with filepath.open("w", encoding="utf-8") as f:
        f.write(rendered_html)
@@ -91,8 +89,7 @@ def download_url(url: Any, output_dir: Optional[Union[str, Path]] = None) -> Pat


def list_files(directory: Union[Path, str]) -> list[Path]:
-    """
-    Recursively list all files in a directory.
+    """Recursively list all files in a directory.

    This function will raise an exception if the directory does not exist.
    """
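The docstring edits in this file move the summary sentence onto the same line as the opening triple quotes, the multi-line docstring layout enforced by ruff's pydocstyle-derived D rules (likely D212, "summary should start at the first line"); the Path(".") to Path() change in download_url is a pathlib cleanup, since Path() with no argument already denotes the current directory. A before/after sketch of the docstring layout (illustrative functions):

def is_url_before(url):
    """
    Check if the string is a valid URL.
    """

def is_url_after(url):
    """Check if the string is a valid URL."""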
5 changes: 2 additions & 3 deletions autogen/agentchat/contrib/rag/parser_utils.py
@@ -27,8 +27,7 @@ def docling_parse_docs( # type: ignore[no-any-unimported]
    input_file_path: Union[Path, str],
    output_dir_path: Union[Path, str],
) -> list["ConversionResult"]:
-    """
-    Convert documents into a Deep Search document format using EasyOCR
+    """Convert documents into a Deep Search document format using EasyOCR
    with CPU only, and export the document and its tables to the specified
    output directory.
@@ -84,7 +83,7 @@ def docling_parse_docs( # type: ignore[no-any-unimported]
    for res in conv_results:
        out_path = Path(output_dir_path)
        doc_filename = res.input.file.stem
-        _log.info(f"Document {res.input.file.name} converted.\nSaved markdown output to: {str(out_path)}")
+        _log.info(f"Document {res.input.file.name} converted.\nSaved markdown output to: {out_path!s}")
        _log.debug(res.document._export_to_indented_text(max_text_len=16))
        # Export Docling document format to markdowndoc:
        with (out_path / f"{doc_filename}.md").open("w") as fp:
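The logging change swaps {str(out_path)} for {out_path!s}: inside an f-string the !s, !r, and !a conversions replace explicit str(), repr(), and ascii() calls, and ruff flags the explicit call (RUF010). For example (illustrative path):

from pathlib import Path

out_path = Path("parsed_docs")
print(f"Saved markdown output to: {str(out_path)}")  # flagged form
print(f"Saved markdown output to: {out_path!s}")     # preferred form, same output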
2 changes: 1 addition & 1 deletion autogen/agentchat/contrib/retrieve_user_proxy_agent.py
@@ -285,7 +285,7 @@ def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str =
        self._custom_text_types = self._retrieve_config.get("custom_text_types", TEXT_FORMATS)
        self._recursive = self._retrieve_config.get("recursive", True)
        self._context_max_tokens = self._retrieve_config.get("context_max_tokens", self._max_tokens * 0.8)
-        self._collection = True if self._docs_path is None else False  # whether the collection is created
+        self._collection = self._docs_path is None  # whether the collection is created
        self._ipython = get_ipython()
        self._doc_idx = -1  # the index of the current used doc
        self._results = []  # the results of the current query
16 changes: 7 additions & 9 deletions autogen/agentchat/contrib/society_of_mind_agent.py
@@ -7,6 +7,7 @@
# ruff: noqa: E722
import copy
import traceback
+from contextlib import suppress
from typing import Callable, Literal, Optional, Union

from ... import Agent, ConversableAgent, GroupChat, GroupChatManager, OpenAIWrapper
@@ -110,15 +111,12 @@ def _llm_response_preparer(self, prompt, messages):
                del message["tool_calls"]
            if "tool_responses" in message:
                del message["tool_responses"]
-            if "function_call" in message:
-                if message["content"] == "":
-                    try:
-                        message["content"] = (
-                            message["function_call"]["name"] + "(" + message["function_call"]["arguments"] + ")"
-                        )
-                    except KeyError:
-                        pass
-                del message["function_call"]
+            if "function_call" in message and message["content"] == "":
+                with suppress(KeyError):
+                    message["content"] = (
+                        message["function_call"]["name"] + "(" + message["function_call"]["arguments"] + ")"
+                    )
+                del message["function_call"]

            # Add the modified message to the transcript
            _messages.append(message)
9 changes: 3 additions & 6 deletions autogen/agentchat/contrib/swarm_agent.py
@@ -72,9 +72,7 @@ class ON_CONDITION: # noqa: N801
    def __post_init__(self):
        # Ensure valid types
        if self.target is not None:
-            assert isinstance(self.target, SwarmAgent) or isinstance(self.target, dict), (
-                "'target' must be a SwarmAgent or a Dict"
-            )
+            assert isinstance(self.target, (SwarmAgent, dict)), "'target' must be a SwarmAgent or a Dict"

        # Ensure they have a condition
        assert isinstance(self.condition, str) and self.condition.strip(), "'condition' must be a non-empty string"
@@ -134,9 +132,8 @@ def _prepare_swarm_agents(

    # Ensure all agents in hand-off after-works are in the passed in agents list
    for agent in agents:
-        if agent.after_work is not None:
-            if isinstance(agent.after_work.agent, SwarmAgent):
-                assert agent.after_work.agent in agents, "Agent in hand-off must be in the agents list"
+        if agent.after_work is not None and isinstance(agent.after_work.agent, SwarmAgent):
+            assert agent.after_work.agent in agents, "Agent in hand-off must be in the agents list"

    tool_execution = SwarmAgent(
        name=__TOOL_EXECUTOR_NAME__,
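Besides another nested-if collapse in _prepare_swarm_agents, the assert rewrite uses the fact that isinstance accepts a tuple of types, so isinstance(x, A) or isinstance(x, B) merges into one call (ruff's SIM101, or a closely related rule). Sketch with stand-in classes:

class Stub: ...

target = {"agent_name": "researcher"}

# Two calls joined with "or".
ok = isinstance(target, Stub) or isinstance(target, dict)
# One call with a tuple of types; True if the object matches any member.
ok = isinstance(target, (Stub, dict))
assert ok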
12 changes: 4 additions & 8 deletions autogen/agentchat/contrib/vectordb/chromadb.py
@@ -17,9 +17,8 @@
    import chromadb.utils.embedding_functions as ef
    from chromadb.api.models.Collection import Collection

-if result.is_successful:
-    if chromadb.__version__ < "0.4.15":
-        raise ImportError("Please upgrade chromadb to version 0.4.15 or later.")
+if result.is_successful and chromadb.__version__ < "0.4.15":
+    raise ImportError("Please upgrade chromadb to version 0.4.15 or later.")


CHROMADB_MAX_BATCH_SIZE = os.environ.get("CHROMADB_MAX_BATCH_SIZE", 40000)
@@ -193,10 +192,7 @@ def insert_docs(self, docs: list[Document], collection_name: str = None, upsert:
            embeddings = None
        else:
            embeddings = [doc.get("embedding") for doc in docs]
-        if docs[0].get("metadata") is None:
-            metadatas = None
-        else:
-            metadatas = [doc.get("metadata") for doc in docs]
+        metadatas = None if docs[0].get("metadata") is None else [doc.get("metadata") for doc in docs]
        self._batch_insert(collection, embeddings, ids, metadatas, documents, upsert)

    def update_docs(self, docs: list[Document], collection_name: str = None) -> None:
@@ -291,7 +287,7 @@ def _chroma_get_results_to_list_documents(data_dict) -> list[Document]:

    for i in range(len(data_dict[keys[0]])):
        sub_dict = {}
-        for key in data_dict.keys():
+        for key in data_dict:
            if data_dict[key] is not None and len(data_dict[key]) > i:
                sub_dict[key[:-1]] = data_dict[key][i]
        results.append(sub_dict)
2 changes: 1 addition & 1 deletion autogen/agentchat/contrib/vectordb/mongodb.py
@@ -321,7 +321,7 @@ def insert_docs(
                text_batch = []
                metadata_batch = []
                size = 0
-            i += 1
+            i += 1  # noqa: SIM113
        if text_batch:
            result_ids.update(self._insert_batch(collection, text_batch, metadata_batch, id_batch))  # type: ignore
            input_ids.update(id_batch)
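SIM113 asks for enumerate instead of a manually incremented loop counter; presumably because the counter also interacts with the surrounding batching logic, the commit keeps it and opts out with "# noqa: SIM113" on that line. What the rule normally suggests, as a sketch:

docs = ["alpha", "beta", "gamma"]

# Manual counter, the pattern SIM113 flags.
i = 0
for doc in docs:
    print(i, doc)
    i += 1

# enumerate keeps the index in step without the separate statement.
for i, doc in enumerate(docs):
    print(i, doc)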
5 changes: 1 addition & 4 deletions autogen/agentchat/contrib/vectordb/pgvectordb.py
@@ -833,10 +833,7 @@ def insert_docs(self, docs: list[Document], collection_name: str = None, upsert:
            embeddings = None
        else:
            embeddings = [doc.get("embedding") for doc in docs]
-        if docs[0].get("metadata") is None:
-            metadatas = None
-        else:
-            metadatas = [doc.get("metadata") for doc in docs]
+        metadatas = None if docs[0].get("metadata") is None else [doc.get("metadata") for doc in docs]

        self._batch_insert(collection, embeddings, ids, metadatas, documents, upsert)
