Skip to content

Commit d4f3ca4

Browse files
Chatbot improvements (#188)
1 parent 39a8ef3 commit d4f3ca4

File tree

4 files changed

+19
-7
lines changed

4 files changed

+19
-7
lines changed

example-apps/chatbot-rag-app/api/chat.py

+7-3
Original file line numberDiff line numberDiff line change
@@ -38,17 +38,21 @@ def ask_question(question, session_id):
38 38
condense_question_prompt = render_template(
39 39
'condense_question_prompt.txt', question=question,
40 40
chat_history=chat_history.messages)
41-
question = get_llm().invoke(condense_question_prompt).content
41+
condensed_question = get_llm().invoke(condense_question_prompt).content
42+
else:
43+
condensed_question = question
42 44

45+
current_app.logger.debug('Condensed question: %s', condensed_question)
43 46
current_app.logger.debug('Question: %s', question)
44 47

45-
docs = store.as_retriever().invoke(question)
48+
docs = store.as_retriever().invoke(condensed_question)
46 49
for doc in docs:
47 50
doc_source = {**doc.metadata, 'page_content': doc.page_content}
48 51
current_app.logger.debug('Retrieved document passage from: %s', doc.metadata['name'])
49 52
yield f'data: {SOURCE_TAG} {json.dumps(doc_source)}\n\n'
50 53

51-
qa_prompt = render_template('rag_prompt.txt', question=question, docs=docs)
54+
qa_prompt = render_template('rag_prompt.txt', question=question, docs=docs,
55+
chat_history=chat_history.messages)
52 56

53 57
answer = ''
54 58
for chunk in get_llm().stream(qa_prompt):

example-apps/chatbot-rag-app/api/templates/rag_prompt.txt

+6-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
Use the following passages to answer the user's question.
1+
Use the following passages and chat history to answer the user's question.
2 2
Each passage has a NAME which is the title of the document. After your answer, leave a blank line and then give the source name of the passages you answered from. Put them in a comma separated list, prefixed with SOURCES:.
3 3

4 4
Example:
@@ -22,5 +22,10 @@ PASSAGE:
22 22

23 23
{% endfor -%}
24 24
----
25+
Chat history:
26+
{% for dialogue_turn in chat_history -%}
27+
{% if dialogue_turn.type == 'human' %}Question: {{ dialogue_turn.content }}{% elif dialogue_turn.type == 'ai' %}Response: {{ dialogue_turn.content }}{% endif %}
28+
{% endfor -%}
29+
25 30
Question: {{ question }}
26 31
Response:

example-apps/chatbot-rag-app/data/index_data.py

+3
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,9 @@ def main():
91 91
es_connection=elasticsearch_client,
92 92
index_name=INDEX,
93 93
strategy=ElasticsearchStore.SparseVectorRetrievalStrategy(model_id=ELSER_MODEL),
94+
bulk_kwargs={
95+
'request_timeout': 60,
96+
},
94 97
)
95 98

96 99

example-apps/chatbot-rag-app/frontend/src/store/provider.tsx

+3-3
Original file line numberDiff line numberDiff line change
@@ -238,7 +238,7 @@ export const thunkActions = {
238 238
dispatch(
239239
actions.updateMessage({
240 240
id: conversationId,
241-
content: message.replace(/SOURCES: (.+)+/, ''),
241+
content: message.replace(/SOURCES:(.+)*/, ''),
242 242
})
243 243
)
244 244
}
@@ -300,8 +300,8 @@ const parseSources = (
300 300
message: string
301 301
) => {
302 302
message = message.replaceAll("\"", "");
303-
const match = message.match(/SOURCES: (.+)+/)
304-
if (match) {
303+
const match = message.match(/SOURCES:(.+)*/)
304+
if (match && match[1]) {
305 305
return match[1].split(',').map(element => {
306 306
return element.trim();
307 307
});

0 commit comments

Comments (0)