From 1fe2cd980d55391d6c810d1e8a2767efd3d35e6f Mon Sep 17 00:00:00 2001
From: Adrien Brault
Date: Fri, 5 Sep 2025 00:48:03 +0200
Subject: [PATCH] fix: reasoning LM dump_state

Optimized program .json files saved with a reasoning model fail to load
because the JSON stores max_completion_tokens instead of max_tokens.
dump_state() now normalizes the key back to max_tokens so the saved
state can be loaded as LM kwargs again.
---
 dspy/clients/lm.py       |  7 ++++++-
 tests/clients/test_lm.py | 24 ++++++++++++++++++++++++
 2 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/dspy/clients/lm.py b/dspy/clients/lm.py
index 3c133a7c03..2bb0e0a21c 100644
--- a/dspy/clients/lm.py
+++ b/dspy/clients/lm.py
@@ -280,7 +280,12 @@ def dump_state(self):
             "launch_kwargs",
             "train_kwargs",
         ]
-        return {key: getattr(self, key) for key in state_keys} | self.kwargs
+
+        kwargs = dict(self.kwargs)  # copy so the pop below cannot mutate the live kwargs
+        if "max_completion_tokens" in kwargs:
+            kwargs["max_tokens"] = kwargs.pop("max_completion_tokens")
+
+        return {key: getattr(self, key) for key in state_keys} | kwargs
 
     def _check_truncation(self, results):
         if self.model_type != "responses" and any(c.finish_reason == "length" for c in results["choices"]):
diff --git a/tests/clients/test_lm.py b/tests/clients/test_lm.py
index 54068e512d..41cf603b60 100644
--- a/tests/clients/test_lm.py
+++ b/tests/clients/test_lm.py
@@ -326,6 +326,30 @@ def test_dump_state():
     }
 
 
+def test_dump_state_with_reasoning_model():
+    lm = dspy.LM(
+        model="openai/gpt-5",
+        model_type="chat",
+        temperature=1,
+        max_tokens=32000,
+        num_retries=10,
+        launch_kwargs={"temperature": 1},
+        train_kwargs={"temperature": 5},
+    )
+
+    assert lm.dump_state() == {
+        "model": "openai/gpt-5",
+        "model_type": "chat",
+        "temperature": 1,
+        "max_tokens": 32000,
+        "num_retries": 10,
+        "cache": True,
+        "finetuning_model": None,
+        "launch_kwargs": {"temperature": 1},
+        "train_kwargs": {"temperature": 5},
+    }
+
+
 def test_exponential_backoff_retry():
     time_counter = []
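
-- 
Illustration (not part of the patch): a minimal, self-contained sketch of
the key normalization that dump_state applies above. normalize_for_dump and
the literal kwargs dict are hypothetical stand-ins for the method and for
self.kwargs on a reasoning model:

    def normalize_for_dump(kwargs: dict) -> dict:
        # Copy first so popping the key cannot mutate the caller's dict.
        kwargs = dict(kwargs)
        if "max_completion_tokens" in kwargs:
            kwargs["max_tokens"] = kwargs.pop("max_completion_tokens")
        return kwargs

    # Reasoning models keep the limit under max_completion_tokens at runtime,
    # but per the commit message the saved state must expose max_tokens so it
    # can be fed back to dspy.LM when the optimized program is loaded.
    state = normalize_for_dump({"temperature": 1, "max_completion_tokens": 32000})
    assert state == {"temperature": 1, "max_tokens": 32000}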