Skip to content

Commit d0ffb72

Browse files
committed
chore(langchain): cleanup langchain_v1 ruff config
1 parent cdae9e4 commit d0ffb72

31 files changed: +380 additions, −476 deletions

libs/langchain_v1/langchain/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
__version__ = "1.0.0a6"
66

77

8-
def __getattr__(name: str) -> Any: # noqa: ANN401
8+
def __getattr__(name: str) -> Any:
99
"""Get an attribute from the package.
1010
1111
TODO: will be removed in a future alpha version.

libs/langchain_v1/langchain/_internal/_prompts.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424

2525

2626
def resolve_prompt(
27-
prompt: str | None | Callable[[StateT, Runtime[ContextT]], list[MessageLikeRepresentation]],
27+
prompt: str | Callable[[StateT, Runtime[ContextT]], list[MessageLikeRepresentation]] | None,
2828
state: StateT,
2929
runtime: Runtime[ContextT],
3030
default_user_content: str,
@@ -86,9 +86,9 @@ def custom_prompt(state, runtime):
8686

8787
async def aresolve_prompt(
8888
prompt: str
89-
| None
9089
| Callable[[StateT, Runtime[ContextT]], list[MessageLikeRepresentation]]
91-
| Callable[[StateT, Runtime[ContextT]], Awaitable[list[MessageLikeRepresentation]]],
90+
| Callable[[StateT, Runtime[ContextT]], Awaitable[list[MessageLikeRepresentation]]]
91+
| None,
9292
state: StateT,
9393
runtime: Runtime[ContextT],
9494
default_user_content: str,

libs/langchain_v1/langchain/agents/middleware/prompt_caching.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -16,19 +16,19 @@ class AnthropicPromptCachingMiddleware(AgentMiddleware):
1616

1717
def __init__(
1818
self,
19-
type: Literal["ephemeral"] = "ephemeral",
19+
cache_type: Literal["ephemeral"] = "ephemeral",
2020
ttl: Literal["5m", "1h"] = "5m",
2121
min_messages_to_cache: int = 0,
2222
) -> None:
2323
"""Initialize the middleware with cache control settings.
2424
2525
Args:
26-
type: The type of cache to use, only "ephemeral" is supported.
26+
cache_type: The type of cache to use, only "ephemeral" is supported.
2727
ttl: The time to live for the cache, only "5m" and "1h" are supported.
2828
min_messages_to_cache: The minimum number of messages until the cache is used,
2929
default is 0.
3030
"""
31-
self.type = type
31+
self.cache_type = cache_type
3232
self.ttl = ttl
3333
self.min_messages_to_cache = min_messages_to_cache
3434

@@ -39,27 +39,27 @@ def modify_model_request( # type: ignore[override]
3939
"""Modify the model request to add cache control blocks."""
4040
try:
4141
from langchain_anthropic import ChatAnthropic
42-
except ImportError:
42+
except ImportError as e:
4343
msg = (
4444
"AnthropicPromptCachingMiddleware caching middleware only supports "
4545
"Anthropic models."
4646
"Please install langchain-anthropic."
4747
)
48-
raise ValueError(msg)
48+
raise ValueError(msg) from e
4949

5050
if not isinstance(request.model, ChatAnthropic):
5151
msg = (
5252
"AnthropicPromptCachingMiddleware caching middleware only supports "
5353
f"Anthropic models, not instances of {type(request.model)}"
5454
)
55-
raise ValueError(msg)
55+
raise TypeError(msg)
5656

5757
messages_count = (
5858
len(request.messages) + 1 if request.system_prompt else len(request.messages)
5959
)
6060
if messages_count < self.min_messages_to_cache:
6161
return request
6262

63-
request.model_settings["cache_control"] = {"type": self.type, "ttl": self.ttl}
63+
request.model_settings["cache_control"] = {"type": self.cache_type, "ttl": self.ttl}
6464

6565
return request

libs/langchain_v1/langchain/agents/middleware/summarization.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@
4848
<messages>
4949
Messages to summarize:
5050
{messages}
51-
</messages>""" # noqa: E501
51+
</messages>"""
5252

5353
SUMMARY_PREFIX = "## Previous conversation summary:"
5454

@@ -229,7 +229,7 @@ def _create_summary(self, messages_to_summarize: list[AnyMessage]) -> str:
229229
try:
230230
response = self.model.invoke(self.summary_prompt.format(messages=trimmed_messages))
231231
return cast("str", response.content).strip()
232-
except Exception as e: # noqa: BLE001
232+
except Exception as e:
233233
return f"Error generating summary: {e!s}"
234234

235235
def _trim_messages_for_summary(self, messages: list[AnyMessage]) -> list[AnyMessage]:
@@ -244,5 +244,5 @@ def _trim_messages_for_summary(self, messages: list[AnyMessage]) -> list[AnyMess
244244
allow_partial=True,
245245
include_system=True,
246246
)
247-
except Exception: # noqa: BLE001
247+
except Exception:
248248
return messages[-_DEFAULT_FALLBACK_MESSAGE_COUNT:]

libs/langchain_v1/langchain/agents/middleware_agent.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ def _resolve_schema(schemas: set[type], schema_name: str, omit_flag: str | None
7272
def _extract_metadata(type_: type) -> list:
7373
"""Extract metadata from a field type, handling Required/NotRequired and Annotated wrappers."""
7474
# Handle Required[Annotated[...]] or NotRequired[Annotated[...]]
75-
if get_origin(type_) in (Required, NotRequired):
75+
if get_origin(type_) in {Required, NotRequired}:
7676
inner_type = get_args(type_)[0]
7777
if get_origin(inner_type) is Annotated:
7878
return list(get_args(inner_type)[1:])
@@ -133,7 +133,7 @@ def _handle_structured_output_error(
133133
ResponseT = TypeVar("ResponseT")
134134

135135

136-
def create_agent( # noqa: PLR0915
136+
def create_agent(
137137
*,
138138
model: str | BaseChatModel,
139139
tools: Sequence[BaseTool | Callable | dict[str, Any]] | ToolNode | None = None,
@@ -290,8 +290,7 @@ def _handle_model_output(output: AIMessage) -> dict[str, Any]:
290290

291291
tool_message_content = (
292292
response_format.tool_message_content
293-
if response_format.tool_message_content
294-
else f"Returning structured response: {structured_response}"
293+
or f"Returning structured response: {structured_response}"
295294
)
296295

297296
return {
@@ -305,13 +304,13 @@ def _handle_model_output(output: AIMessage) -> dict[str, Any]:
305304
],
306305
"response": structured_response,
307306
}
308-
except Exception as exc: # noqa: BLE001
307+
except Exception as exc:
309308
exception = StructuredOutputValidationError(tool_call["name"], exc)
310309
should_retry, error_message = _handle_structured_output_error(
311310
exception, response_format
312311
)
313312
if not should_retry:
314-
raise exception
313+
raise exception from exc
315314

316315
return {
317316
"messages": [

libs/langchain_v1/langchain/agents/react_agent.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -122,9 +122,9 @@ def _get_prompt_runnable(prompt: Prompt | None) -> Runnable:
122122
lambda state: _get_state_value(state, "messages"), name=PROMPT_RUNNABLE_NAME
123123
)
124124
elif isinstance(prompt, str):
125-
_system_message: BaseMessage = SystemMessage(content=prompt)
125+
system_message: BaseMessage = SystemMessage(content=prompt)
126126
prompt_runnable = RunnableCallable(
127-
lambda state: [_system_message, *_get_state_value(state, "messages")],
127+
lambda state: [system_message, *_get_state_value(state, "messages")],
128128
name=PROMPT_RUNNABLE_NAME,
129129
)
130130
elif isinstance(prompt, SystemMessage):
@@ -220,7 +220,7 @@ def __init__(
220220
"The `model` parameter should not have pre-bound tools, "
221221
"simply pass the model and tools separately."
222222
)
223-
raise ValueError(msg)
223+
raise TypeError(msg)
224224

225225
self._setup_tools()
226226
self._setup_state_schema()
@@ -397,13 +397,13 @@ def _handle_single_structured_output(
397397
"structured_response": structured_response,
398398
}
399399
)
400-
except Exception as exc: # noqa: BLE001
400+
except Exception as exc:
401401
exception = StructuredOutputValidationError(tool_call["name"], exc)
402402

403403
should_retry, error_message = self._handle_structured_output_error(exception)
404404

405405
if not should_retry:
406-
raise exception
406+
raise exception from exc
407407

408408
return Command(
409409
update={
@@ -583,7 +583,7 @@ def _are_more_steps_needed(state: StateT, response: BaseMessage) -> bool:
583583
remaining_steps is not None # type: ignore[return-value]
584584
and (
585585
(remaining_steps < 1 and all_tools_return_direct)
586-
or (remaining_steps < 2 and has_tool_calls)
586+
or (remaining_steps < 2 and has_tool_calls) # noqa: PLR2004
587587
)
588588
)
589589

@@ -1188,7 +1188,7 @@ def check_weather(location: str) -> str:
11881188
response_format = ToolStrategy(
11891189
schema=response_format,
11901190
)
1191-
elif isinstance(response_format, tuple) and len(response_format) == 2:
1191+
elif isinstance(response_format, tuple) and len(response_format) == 2: # noqa: PLR2004
11921192
msg = "Passing a 2-tuple as response_format is no longer supported. "
11931193
raise ValueError(msg)
11941194

libs/langchain_v1/langchain/agents/structured_output.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -217,7 +217,7 @@ def __init__(
217217

218218
def _iter_variants(schema: Any) -> Iterable[Any]:
219219
"""Yield leaf variants from Union and JSON Schema oneOf."""
220-
if get_origin(schema) in (UnionType, Union):
220+
if get_origin(schema) in {UnionType, Union}:
221221
for arg in get_args(schema):
222222
yield from _iter_variants(arg)
223223
return

0 commit comments

Comments (0)