Skip to content

Commit 8501bc8

Browse files
authored Mar 12, 2025
Support for answer iterations in the gen_answer flow (#911)
1 parent 68d8201 commit 8501bc8

File tree

4 files changed

+47
-1
lines changed

4 files changed

+47
-1
lines changed
 

‎paperqa/docs.py

+6
Original file line numberDiff line numberDiff line change
@@ -778,6 +778,11 @@ async def aquery( # noqa: PLR0912
778778
answer_reasoning = None
779779
else:
780780
with set_llm_session_ids(session.id):
781+
prior_answer_prompt = ""
782+
if prompt_config.answer_iteration_prompt and session.answer:
783+
prior_answer_prompt = prompt_config.answer_iteration_prompt.format(
784+
prior_answer=session.answer
785+
)
781786
messages = [
782787
Message(role="system", content=prompt_config.system),
783788
Message(
@@ -787,6 +792,7 @@ async def aquery( # noqa: PLR0912
787792
answer_length=answer_config.answer_length,
788793
question=session.question,
789794
example_citation=prompt_config.EXAMPLE_CITATION,
795+
prior_answer_prompt=prior_answer_prompt,
790796
),
791797
),
792798
]

‎paperqa/prompts.py

+10
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,15 @@
2121
# 1. Lead to complete tool being called with has_successful_answer=False
2222
# 2. Can be used for unit testing
2323
CANNOT_ANSWER_PHRASE = "I cannot answer"
24+
25+
answer_iteration_prompt_template = (
26+
"You are iterating on a prior answer, with a potentially different context:\n\n"
27+
"{prior_answer}\n\n"
28+
"Create a new answer only using keys and data from the included context."
29+
" You cannot use context keys from the prior answer which are not "
30+
"also included in the above context.\n\n"
31+
)
32+
2433
qa_prompt = (
2534
"Answer the question below with the context.\n\n"
2635
"Context (with relevance scores):\n\n{context}\n\n----\n\n"
@@ -37,6 +46,7 @@
3746
"so there may be inaccuracies or ambiguities. If quotes are present and "
3847
"relevant, use them in the answer. This answer will go directly onto "
3948
"Wikipedia, so do not add any extraneous information.\n\n"
49+
"{prior_answer_prompt}"
4050
"Answer ({answer_length}):"
4151
)
4252

‎paperqa/settings.py

+8
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@
4242
from paperqa.prompts import (
4343
CONTEXT_INNER_PROMPT,
4444
CONTEXT_OUTER_PROMPT,
45+
answer_iteration_prompt_template,
4546
citation_prompt,
4647
default_system_prompt,
4748
env_reset_prompt,
@@ -265,6 +266,13 @@ class PromptSettings(BaseModel):
265266

266267
summary: str = summary_prompt
267268
qa: str = qa_prompt
269+
answer_iteration_prompt: str | None = Field(
270+
default=answer_iteration_prompt_template,
271+
description=(
272+
"Prompt to inject existing prior answers into the qa prompt to allow the model to iterate. "
273+
"If None, then no prior answers will be injected."
274+
),
275+
)
268276
select: str = select_paper_prompt
269277
pre: str | None = Field(
270278
default=None,

‎tests/test_paperqa.py

+23-1
Original file line numberDiff line numberDiff line change
@@ -539,7 +539,29 @@ def test_location_awareness(docs_fixture) -> None:
539539

540540

541541
def test_query(docs_fixture) -> None:
542-
docs_fixture.query("Is XAI usable in chemistry?")
542+
settings = Settings(prompts={"answer_iteration_prompt": None})
543+
docs_fixture.query("Is XAI usable in chemistry?", settings=settings)
544+
545+
546+
def test_query_with_iteration(docs_fixture) -> None:
547+
# we store these results to check that the prompts are OK
548+
my_results: list[LLMResult] = []
549+
# explicitly set the prompt to use QA iterations
550+
settings = Settings()
551+
llm = settings.get_llm()
552+
llm.llm_result_callback = my_results.append
553+
prior_answer = "No, it isn't usable in chemistry."
554+
question = "Is XAI usable in chemistry?"
555+
prior_session = PQASession(question=question, answer=prior_answer)
556+
docs_fixture.query(prior_session, llm_model=llm, settings=settings)
557+
assert prior_answer in cast(
558+
"str", my_results[-1].prompt[1].content # type: ignore[union-attr, index]
559+
), "prior answer not in prompt"
560+
# run without a prior session to check that the flow works correctly
561+
docs_fixture.query(question, llm_model=llm, settings=settings)
562+
assert settings.prompts.answer_iteration_prompt[:10] not in cast( # type: ignore[index]
563+
"str", my_results[-1].prompt[1].content # type: ignore[union-attr, index]
564+
), "prior answer prompt should not be inserted"
543565

544566

545567
def test_llmresult_callback(docs_fixture: Docs) -> None:

0 commit comments

Comments (0)
Please sign in to comment.