Commit aaadf05

lints (#8987)
1 parent e842ba1 commit aaadf05

File tree: 6 files changed (+49 additions, -38 deletions)

dspy/adapters/types/base_type.py

Lines changed: 1 addition & 1 deletion
@@ -88,7 +88,6 @@ def parse_stream_chunk(cls, chunk: ModelResponseStream) -> Optional["Type"]:
         """
         return None
 
-
     @classmethod
     def parse_lm_response(cls, response: str | dict[str, Any]) -> Optional["Type"]:
         """Parse a LM response into the custom type.
@@ -101,6 +100,7 @@ def parse_lm_response(cls, response: str | dict[str, Any]) -> Optional["Type"]:
         """
         return None
 
+
 def split_message_content_for_custom_types(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
     """Split user message content into a list of content blocks.
 

dspy/adapters/types/citation.py

Lines changed: 4 additions & 7 deletions
@@ -54,6 +54,7 @@ class AnswerWithSources(Signature):
 
     class Citation(Type):
         """Individual citation with character location information."""
+
         type: str = "char_location"
         cited_text: str
         document_index: int
@@ -73,7 +74,7 @@ def format(self) -> dict[str, Any]:
                 "cited_text": self.cited_text,
                 "document_index": self.document_index,
                 "start_char_index": self.start_char_index,
-                "end_char_index": self.end_char_index
+                "end_char_index": self.end_char_index,
             }
 
             if self.document_title:
@@ -134,9 +135,7 @@ def validate_input(cls, data: Any):
             return data
 
         # Handle case where data is a list of dicts with citation info
-        if isinstance(data, list) and all(
-            isinstance(item, dict) and "cited_text" in item for item in data
-        ):
+        if isinstance(data, list) and all(isinstance(item, dict) and "cited_text" in item for item in data):
            return {"citations": [cls.Citation(**item) for item in data]}
 
         # Handle case where data is a dict
@@ -147,8 +146,7 @@ def validate_input(cls, data: Any):
                 if isinstance(citations_data, list):
                     return {
                         "citations": [
-                            cls.Citation(**item) if isinstance(item, dict) else item
-                            for item in citations_data
+                            cls.Citation(**item) if isinstance(item, dict) else item for item in citations_data
                         ]
                     }
             elif "cited_text" in data:
@@ -197,7 +195,6 @@ def parse_stream_chunk(cls, chunk) -> Optional["Citations"]:
             pass
         return None
 
-
    @classmethod
    def parse_lm_response(cls, response: str | dict[str, Any]) -> Optional["Citations"]:
        """Parse a LM response into Citations.

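Note on the validate_input hunks above: the reflowed branches keep the same normalization behavior. As a rough sketch (simplified; it returns plain dicts instead of constructing cls.Citation objects, and the function name is illustrative), the validator accepts either a bare list of citation dicts or a dict holding a "citations" list:

    # Simplified sketch of the input shapes Citations.validate_input normalizes.
    # In the real validator, each dict is converted with cls.Citation(**item).
    def normalize_citations_input(data):
        # Bare list of dicts, each carrying "cited_text" -> wrap under "citations".
        if isinstance(data, list) and all(isinstance(item, dict) and "cited_text" in item for item in data):
            return {"citations": list(data)}
        # Dict that already has a "citations" list -> keep that list as-is.
        if isinstance(data, dict) and isinstance(data.get("citations"), list):
            return {"citations": list(data["citations"])}
        return data

    # Both inputs normalize to the same shape.
    print(normalize_citations_input([{"cited_text": "Hello", "document_index": 0}]))
    print(normalize_citations_input({"citations": [{"cited_text": "Hello", "document_index": 0}]}))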
dspy/clients/lm.py

Lines changed: 2 additions & 4 deletions
@@ -88,7 +88,6 @@ def __init__(
         model_pattern = re.match(r"^(?:o[1345]|gpt-5)(?:-(?:mini|nano))?", model_family)
 
         if model_pattern:
-
             if (temperature and temperature != 1.0) or (max_tokens and max_tokens < 16000):
                 raise ValueError(
                     "OpenAI's reasoning models require passing temperature=1.0 or None and max_tokens >= 16000 or None to "
@@ -228,9 +227,7 @@ def thread_function_wrapper():
 
         return job
 
-    def reinforce(
-        self, train_kwargs
-    ) -> ReinforceJob:
+    def reinforce(self, train_kwargs) -> ReinforceJob:
         # TODO(GRPO Team): Should we return an initialized job here?
         from dspy import settings as settings
 
@@ -482,6 +479,7 @@ def _convert_chat_request_to_responses_request(request: dict[str, Any]):
 
     return request
 
+
 def _get_headers(headers: dict[str, Any] | None = None):
     headers = headers or {}
     return {

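Note on the first hunk above: the model_pattern regex gates the temperature/max_tokens check to OpenAI reasoning-model families. A quick standalone illustration of what it matches (the example model names are arbitrary, not taken from this commit):

    import re

    # Regex from the hunk above: o1/o3/o4/o5 or gpt-5, optionally suffixed with -mini or -nano.
    pattern = re.compile(r"^(?:o[1345]|gpt-5)(?:-(?:mini|nano))?")

    for name in ["o1", "o3-mini", "gpt-5-nano", "gpt-5", "gpt-4o"]:
        print(name, bool(pattern.match(name)))
    # o1, o3-mini, gpt-5-nano, and gpt-5 match; gpt-4o does not.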
dspy/streaming/streaming_listener.py

Lines changed: 4 additions & 2 deletions
@@ -287,8 +287,9 @@ def _output_type(self) -> type | None:
         return None
 
 
-
-def find_predictor_for_stream_listeners(program: "Module", stream_listeners: list[StreamListener]) -> dict[int, list[StreamListener]]:
+def find_predictor_for_stream_listeners(
+    program: "Module", stream_listeners: list[StreamListener]
+) -> dict[int, list[StreamListener]]:
     """Find the predictor for each stream listener.
 
     This is a utility function to automatically find the predictor for each stream listener. It is used when some
@@ -337,6 +338,7 @@ def find_predictor_for_stream_listeners(program: "Module", stream_listeners: lis
         predict_id_to_listener[id(listener.predict)].append(listener)
     return predict_id_to_listener
 
+
 def _is_streamable(field_type: type | None) -> bool:
     if field_type is None:
         return False

tests/adapters/test_json_adapter.py

Lines changed: 28 additions & 13 deletions
@@ -334,8 +334,16 @@ class MySignature(dspy.Signature):
     adapter = dspy.JSONAdapter()
     messages = adapter.format(MySignature, [], {"document": document_wrapper})
 
-    expected_doc1_content = {"type": "document", "source": {"type": "text", "media_type": "text/plain", "data": "Hello, world!"}, "citations": {"enabled": True}}
-    expected_doc2_content = {"type": "document", "source": {"type": "text", "media_type": "text/plain", "data": "Hello, world 2!"}, "citations": {"enabled": True}}
+    expected_doc1_content = {
+        "type": "document",
+        "source": {"type": "text", "media_type": "text/plain", "data": "Hello, world!"},
+        "citations": {"enabled": True},
+    }
+    expected_doc2_content = {
+        "type": "document",
+        "source": {"type": "text", "media_type": "text/plain", "data": "Hello, world 2!"},
+        "citations": {"enabled": True},
+    }
 
     assert expected_doc1_content in messages[1]["content"]
     assert expected_doc2_content in messages[1]["content"]
@@ -643,6 +651,7 @@ class TestSignature(dspy.Signature):
     _, second_call_kwargs = mock_completion.call_args_list[1]
     assert second_call_kwargs.get("response_format") == {"type": "json_object"}
 
+
 def test_json_adapter_json_mode_no_structured_outputs():
     class TestSignature(dspy.Signature):
         question: str = dspy.InputField()
@@ -651,11 +660,15 @@ class TestSignature(dspy.Signature):
     dspy.configure(lm=dspy.LM(model="openai/gpt-4o", cache=False), adapter=dspy.JSONAdapter())
     program = dspy.Predict(TestSignature)
 
-    with mock.patch("litellm.completion") as mock_completion, \
-        mock.patch("litellm.get_supported_openai_params") as mock_get_supported_openai_params, \
-        mock.patch("litellm.supports_response_schema") as mock_supports_response_schema:
+    with (
+        mock.patch("litellm.completion") as mock_completion,
+        mock.patch("litellm.get_supported_openai_params") as mock_get_supported_openai_params,
+        mock.patch("litellm.supports_response_schema") as mock_supports_response_schema,
+    ):
         # Call a model that allows json but not structured outputs
-        mock_completion.return_value = ModelResponse(choices=[Choices(message=Message(content="{'answer': 'Test output'}"))])
+        mock_completion.return_value = ModelResponse(
+            choices=[Choices(message=Message(content="{'answer': 'Test output'}"))]
+        )
         mock_get_supported_openai_params.return_value = ["response_format"]
         mock_supports_response_schema.return_value = False
 
@@ -676,11 +689,15 @@ class TestSignature(dspy.Signature):
 
     program = dspy.Predict(TestSignature)
 
-    with mock.patch("litellm.acompletion") as mock_acompletion, \
-        mock.patch("litellm.get_supported_openai_params") as mock_get_supported_openai_params, \
-        mock.patch("litellm.supports_response_schema") as mock_supports_response_schema:
+    with (
+        mock.patch("litellm.acompletion") as mock_acompletion,
+        mock.patch("litellm.get_supported_openai_params") as mock_get_supported_openai_params,
+        mock.patch("litellm.supports_response_schema") as mock_supports_response_schema,
+    ):
         # Call a model that allows json but not structured outputs
-        mock_acompletion.return_value = ModelResponse(choices=[Choices(message=Message(content="{'answer': 'Test output'}"))])
+        mock_acompletion.return_value = ModelResponse(
+            choices=[Choices(message=Message(content="{'answer': 'Test output'}"))]
+        )
         mock_get_supported_openai_params.return_value = ["response_format"]
         mock_supports_response_schema.return_value = False
 
@@ -890,9 +907,7 @@ class TestSignature(dspy.Signature):
                 "type": "message",
                 "role": "assistant",
                 "status": "completed",
-                "content": [
-                    {"type": "output_text", "text": '{"answer": "Washington, D.C."}', "annotations": []}
-                ],
+                "content": [{"type": "output_text", "text": '{"answer": "Washington, D.C."}', "annotations": []}],
             },
         ),
     ],

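Note on the mock.patch hunks above (and the similar ones in tests/clients/test_lm.py below): the lint replaces backslash-continued with statements with the parenthesized form. A minimal before/after sketch, using placeholder context managers rather than the ones from the tests; the parenthesized grouping requires a recent CPython (documented from Python 3.10):

    from contextlib import nullcontext

    # Before: backslash line continuations.
    with nullcontext() as a, \
         nullcontext() as b, \
         nullcontext() as c:
        pass

    # After: one parenthesized group, one context manager per line, trailing comma allowed.
    with (
        nullcontext() as a,
        nullcontext() as b,
        nullcontext() as c,
    ):
        pass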
tests/clients/test_lm.py

Lines changed: 10 additions & 11 deletions
@@ -30,7 +30,7 @@ def make_response(output_blocks):
         model="openai/dspy-test-model",
         object="response",
         output=output_blocks,
-        metadata = {},
+        metadata={},
         parallel_tool_calls=False,
         temperature=1.0,
         tool_choice="auto",
@@ -107,9 +107,11 @@ def test_disabled_cache_skips_cache_key(monkeypatch):
     cache = dspy.cache
 
     try:
-        with mock.patch.object(cache, "cache_key", wraps=cache.cache_key) as cache_key_spy, \
-            mock.patch.object(cache, "get", wraps=cache.get) as cache_get_spy, \
-            mock.patch.object(cache, "put", wraps=cache.put) as cache_put_spy:
+        with (
+            mock.patch.object(cache, "cache_key", wraps=cache.cache_key) as cache_key_spy,
+            mock.patch.object(cache, "get", wraps=cache.get) as cache_get_spy,
+            mock.patch.object(cache, "put", wraps=cache.put) as cache_put_spy,
+        ):
 
             def fake_completion(*, cache, num_retries, retry_strategy, **request):
                 return ModelResponse(
@@ -315,6 +317,7 @@ def test_reasoning_model_token_parameter():
     assert "max_tokens" in lm.kwargs
     assert lm.kwargs["max_tokens"] == 1000
 
+
 @pytest.mark.parametrize("model_name", ["openai/o1", "openai/gpt-5-nano"])
 def test_reasoning_model_requirements(model_name):
     # Should raise assertion error if temperature or max_tokens requirements not met
@@ -516,6 +519,7 @@ def test_disable_history():
         model="openai/gpt-4o-mini",
     )
 
+
 def test_responses_api():
     api_response = make_response(
         output_blocks=[
@@ -562,20 +566,15 @@ def test_responses_api():
 
 
 def test_lm_replaces_system_with_developer_role():
-    with mock.patch(
-        "dspy.clients.lm.litellm_responses_completion", return_value={"choices": []}
-    ) as mock_completion:
+    with mock.patch("dspy.clients.lm.litellm_responses_completion", return_value={"choices": []}) as mock_completion:
         lm = dspy.LM(
             "openai/gpt-4o-mini",
             cache=False,
             model_type="responses",
             use_developer_role=True,
         )
         lm.forward(messages=[{"role": "system", "content": "hi"}])
-        assert (
-            mock_completion.call_args.kwargs["request"]["messages"][0]["role"]
-            == "developer"
-        )
+        assert mock_completion.call_args.kwargs["request"]["messages"][0]["role"] == "developer"
 
 
 def test_responses_api_tool_calls(litellm_test_server):
