Skip to content

Commit c1b3d5d

Browse files
dlqqq and brichet authored
Dedicate one set of chat handlers per room (#9)
* create new set of chat handlers per room

* make YChat an instance attribute on BaseChatHandler

* revert changes to chat handlers

* pre-commit

* use room_id local var

Co-authored-by: Nicolas Brichet <[email protected]>

---------

Co-authored-by: Nicolas Brichet <[email protected]>
1 parent 7498eb5 commit c1b3d5d

File tree

13 files changed

+210
-202
lines changed

13 files changed

+210
-202
lines changed

packages/jupyter-ai-test/jupyter_ai_test/test_slash_commands.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
from jupyter_ai.chat_handlers.base import BaseChatHandler, SlashCommandRoutingType
22
from jupyter_ai.models import HumanChatMessage
3-
from jupyterlab_chat.ychat import YChat
43

54

65
class TestSlashCommand(BaseChatHandler):
@@ -26,5 +25,5 @@ class TestSlashCommand(BaseChatHandler):
2625
def __init__(self, *args, **kwargs):
2726
super().__init__(*args, **kwargs)
2827

29-
async def process_message(self, message: HumanChatMessage, chat: YChat):
30-
self.reply("This is the `/test` slash command.", chat)
28+
async def process_message(self, message: HumanChatMessage):
29+
self.reply("This is the `/test` slash command.")

packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py

+7-8
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,8 @@
11
import argparse
2-
from typing import Dict, Optional, Type
2+
from typing import Dict, Type
33

44
from jupyter_ai.models import HumanChatMessage
55
from jupyter_ai_magics.providers import BaseProvider
6-
from jupyterlab_chat.ychat import YChat
76
from langchain.chains import ConversationalRetrievalChain
87
from langchain.memory import ConversationBufferWindowMemory
98
from langchain_core.prompts import PromptTemplate
@@ -60,32 +59,32 @@ def create_llm_chain(
6059
verbose=False,
6160
)
6261

63-
async def process_message(self, message: HumanChatMessage, chat: Optional[YChat]):
64-
args = self.parse_args(message, chat)
62+
async def process_message(self, message: HumanChatMessage):
63+
args = self.parse_args(message)
6564
if args is None:
6665
return
6766
query = " ".join(args.query)
6867
if not query:
69-
self.reply(f"{self.parser.format_usage()}", chat, message)
68+
self.reply(f"{self.parser.format_usage()}", message)
7069
return
7170

7271
self.get_llm_chain()
7372

7473
try:
75-
with self.pending("Searching learned documents", message, chat=chat):
74+
with self.pending("Searching learned documents", message):
7675
assert self.llm_chain
7776
# TODO: migrate this class to use a LCEL `Runnable` instead of
7877
# `Chain`, then remove the below ignore comment.
7978
result = await self.llm_chain.acall( # type:ignore[attr-defined]
8079
{"question": query}
8180
)
8281
response = result["answer"]
83-
self.reply(response, chat, message)
82+
self.reply(response, message)
8483
except AssertionError as e:
8584
self.log.error(e)
8685
response = """Sorry, an error occurred while reading the from the learned documents.
8786
If you have changed the embedding provider, try deleting the existing index by running
8887
`/learn -d` command and then re-submitting the `learn <directory>` to learn the documents,
8988
and then asking the question again.
9089
"""
91-
self.reply(response, chat, message)
90+
self.reply(response, message)

0 commit comments

Comments
 (0)