
Commit bdec53e

Minor prompt adjustments and docs (#1107)
1 parent 2c5e82f commit bdec53e

2 files changed (+18, -14 lines)


src/paperqa/prompts.py

Lines changed: 7 additions & 6 deletions
@@ -7,9 +7,10 @@
     " answer the question, instead summarize to give evidence to help answer the"
     " question. Stay detailed; report specific numbers, equations, or direct quotes"
     ' (marked with quotation marks). Reply "Not applicable" if the excerpt is'
-    " irrelevant. At the end of your response, provide an integer score from 1-10 on a"
-    " newline indicating relevance to question. Do not explain your score.\n\nRelevant"
-    " Information Summary ({summary_length}):"
+    " irrelevant. At the end of your response,"
+    " provide an integer score from 1-10 on a newline indicating relevance to question."  # Don't use 0-10 since we mention "not applicable" instead  # noqa: E501
+    " Do not explain your score."
+    "\n\nRelevant Information Summary ({summary_length}):"
 )
 # This prompt template integrates with `text` variable of the above `summary_prompt`
 text_with_tables_prompt_template = (
@@ -111,14 +112,14 @@
     " Your summary, combined with many others,"
     " will be given to the model to generate an answer."
     " Respond with the following JSON format:"
-    '\n\n{{\n "summary": "...",\n "relevance_score": "..."\n "used_images"\n}}'
+    '\n\n{{\n "summary": "...",\n "relevance_score": 0-10,\n "used_images"\n}}'
     "\n\nwhere `summary` is relevant information from the text - {summary_length} words."
     " `relevance_score` is an integer 0-10 for the relevance of `summary` to the question."
     " `used_images` is a boolean flag indicating"
     " if any images present in a multimodal message were used,"
     " and if no images were present it should be false."
-    "\n\nThe excerpt may or may not contain relevant information. If not, leave `summary` empty, "
-    "and make `relevance_score` be 0."
+    "\n\nThe excerpt may or may not contain relevant information."
+    " If not, leave `summary` empty, and make `relevance_score` be 0."
 )
 
 env_system_prompt = (
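
Note on the template syntax above: the doubled braces {{ and }} are str.format escapes, so they render as literal braces in the final prompt while placeholders such as {summary_length} get substituted. The following is a minimal, self-contained sketch (plain Python, not code from this repository; the variable name and the example model reply are hypothetical) of how the JSON-format portion renders and how a conforming reply could be parsed:

import json

# Hypothetical variable holding just the JSON-format portion of the prompt above.
json_format_tail = (
    " Respond with the following JSON format:"
    '\n\n{{\n "summary": "...",\n "relevance_score": 0-10,\n "used_images"\n}}'
    "\n\nwhere `summary` is relevant information from the text - {summary_length} words."
)

# {{ / }} collapse to literal braces; {summary_length} is filled in.
print(json_format_tail.format(summary_length="about 100"))

# A hypothetical model reply that follows the requested format.
reply = '{"summary": "Reports a 12% efficiency gain.", "relevance_score": 8, "used_images": false}'
parsed = json.loads(reply)
assert isinstance(parsed["relevance_score"], int) and 0 <= parsed["relevance_score"] <= 10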

tests/test_paperqa.py

Lines changed: 11 additions & 8 deletions
@@ -529,14 +529,17 @@ async def test_json_evidence(docs_fixture: Docs) -> None:
     settings = Settings.from_name("fast")
     settings.prompts.use_json = True
     settings.prompts.summary_json_system = (
-        "Provide a summary of the excerpt that could help answer the question based on"
-        " the excerpt. The excerpt may be irrelevant. Do not directly answer the"
-        " question - only summarize relevant information. Respond with the following"
-        ' JSON format:\n\n {{\n"summary": "...",\n"author_name":'
-        ' "...",\n"relevance_score": "..."}}\n\n where `summary` is relevant'
-        " information from text - about 100 words words, `author_name` specifies the"
-        " author , and `relevance_score` is the relevance of `summary` to answer the"
-        " question (integer out of 10)."
+        "Provide a summary of the relevant information"
+        " that could help answer the question based on the excerpt."
+        " Your summary, combined with many others,"
+        " will be given to the model to generate an answer."
+        " Respond with the following JSON format:"
+        '\n\n{{\n "summary": "...",\n "author_name": "...",\n "relevance_score": 0-10,\n}}'
+        "\n\nwhere `summary` is relevant information from the text - about 100 words."
+        " `author_name` specifies the author."
+        " `relevance_score` is an integer 0-10 for the relevance of `summary` to the question."
+        "\n\nThe excerpt may or may not contain relevant information."
+        " If not, leave `summary` empty, and make `relevance_score` be 0."
     )
     orig_acompletion = litellm.acompletion
     has_made_bad_json_context = False
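
For readers adapting this change: the test above shows the supported pattern for overriding the JSON evidence-summary prompt through the settings object. Below is a minimal usage sketch along the same lines; the Docs.aadd/Docs.aquery calls, the file path, and the question text are assumptions based on paper-qa's documented usage rather than part of this diff, and the override string is abbreviated.

import asyncio

from paperqa import Docs, Settings


async def main() -> None:
    # Same pattern as the test: start from the "fast" preset, enable JSON-mode
    # evidence summaries, and override the summary system prompt.
    settings = Settings.from_name("fast")
    settings.prompts.use_json = True
    settings.prompts.summary_json_system = (
        "Provide a summary of the relevant information"
        " that could help answer the question based on the excerpt."
        " Respond with the following JSON format:"
        '\n\n{{\n "summary": "...",\n "relevance_score": 0-10\n}}'
        "\n\nwhere `summary` is relevant information from the text"
        " and `relevance_score` is an integer 0-10."
    )

    docs = Docs()
    await docs.aadd("my_paper.pdf")  # assumed API; path is hypothetical
    answer = await docs.aquery("What is the main finding?", settings=settings)
    print(answer)


asyncio.run(main())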

0 commit comments
