
Commit 8b47b69

Made model name customizable
Parent: 7173e05

4 files changed: +8, -6 lines

README.md

Lines changed: 2 additions & 2 deletions
@@ -101,9 +101,9 @@ print(answer.formatted_answer)
 
 ## FAQ
 
-### How is this different from gpt-index?
+### How is this different from LlamaIndex?
 
-It's not that different! This is similar to the tree response method in GPT-index. I just have included some prompts I find useful, readers that give page numbers/line numbers, and am focused on one tasks - answering technical questions with cited sources.
+It's not that different! This is similar to the tree response method in LlamaIndex. I just have included some prompts I find useful, readers that give page numbers/line numbers, and am focused on one tasks - answering technical questions with cited sources.
 
 ### How is this different from LangChain?

paperqa/docs.py

Lines changed: 3 additions & 1 deletion
@@ -64,6 +64,7 @@ def __init__(
         summary_llm: Optional[LLM] = None,
         name: str = "default",
         index_path: Optional[Path] = None,
+        model_name: str = 'gpt-3.5-turbo'
     ) -> None:
         """Initialize the collection of documents.
 
@@ -75,13 +76,14 @@ def __init__(
             summary_llm: The language model to use for summarizing documents. If None, llm is used.
             name: The name of the collection.
             index_path: The path to the index file IF pickled. If None, defaults to using name in $HOME/.paperqa/name
+            model_name: The name of the model to use assuming OpenAI. Default - gpt-3.5-turbo
         """
         self.docs = dict()
         self.chunk_size_limit = chunk_size_limit
         self.keys = set()
         self._faiss_index = None
         if llm is None:
-            llm = OpenAIChat(temperature=0.1, max_tokens=512, prefix_messages=chat_pref)
+            llm = OpenAIChat(temperature=0.1, max_tokens=512, prefix_messages=chat_pref, model_name=model_name)
         if summary_llm is None:
             summary_llm = llm
         self.update_llm(llm, summary_llm)
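
The docs.py change above exposes the model choice through the Docs constructor, which forwards it to OpenAIChat whenever no llm is supplied. A minimal usage sketch under that assumption; the PDF path and question are placeholders, and "gpt-4" stands in for whichever OpenAI chat model name you want:

import paperqa

# Pick the underlying OpenAI chat model instead of the default "gpt-3.5-turbo".
docs = paperqa.Docs(model_name="gpt-4")

# Index a local paper and ask a question, following the README usage pattern.
docs.add("example_paper.pdf")  # placeholder path to a local PDF
answer = docs.query("What methods were used in this study?")
print(answer.formatted_answer)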

paperqa/version.py

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-__version__ = "0.0.21"
+__version__ = "0.0.30"

tests/test_paperqa.py

Lines changed: 2 additions & 2 deletions
@@ -94,14 +94,14 @@ def test_docs_pickle():
         list(
             docs.get_evidence(
                 paperqa.Answer("What date is flag day in Canada?"),
-                k=1,
+                k=3,
                 max_sources=1,
             )
         )[-1].context,
         list(
             docs2.get_evidence(
                 paperqa.Answer("What date is flag day in Canada?"),
-                k=1,
+                k=3,
                 max_sources=1,
             )
         )[-1].context,
