 from server.api.constants import SUPABASE_AUDIO_MESSAGES_BUCKET_NAME, LLM
 from server.api.utils import add_memories, authorize_user, get_stream_content
 from prisma import Prisma, enums, types
-from datetime import datetime
+from pydantic import BaseModel
+from datetime import datetime, timedelta
 from server.api.analytics import track_sent_message
 from server.agent.index import generate_response
 from server.logger.index import fetch_logger
@@ -92,8 +93,12 @@ async def call_update_chat(
     audio_messages_enabled: bool,
     audio_id: Optional[str],
 ):
-    new_user_message = next(msg for msg in reversed(messages) if msg["role"] == "user")
-    new_user_message = message_to_fixed_string_content(new_user_message)["content"]
+    # We default the new_user_message to empty if the length of the messages array is 1
+    # This handles the case where the agent is sending the first message in the conversation to greet the user
+    new_user_message = ''
+    if len(messages) > 1:
+        new_user_message = next(msg for msg in reversed(messages) if msg["role"] == "user")
+        new_user_message = message_to_fixed_string_content(new_user_message)["content"]

     data = {
         "new_user_message": new_user_message,
@@ -129,10 +134,9 @@ def stream_and_update_chat(
     user_first_name: str,
     user_gender: str,
     audio_messages_enabled: bool,
-    audio_id: Optional[str] = None,
-    skip_final_processing: Optional[bool] = False,
+    audio_id: str,
+    skip_final_processing: bool,
 ):
-    user_message_timestamp = datetime.now()
     client = OpenAI(
         api_key=os.environ.get("OPENAI_API_KEY"),
     )
@@ -171,6 +175,8 @@ def stream_and_update_chat(
             content = choice.delta.content
             agent_response += content

+    user_message_timestamp = datetime.now()
+    agent_message_timestamp = user_message_timestamp - timedelta(seconds=1)
     # Run asynchronous operations in a separate thread, which is necessary to prevent the main
     # thread from getting blocked during synchronous tasks with high latency, like network requests.
     # This is important when streaming voice responses because the voice will pause in the middle of
@@ -192,6 +198,7 @@ def stream_and_update_chat(
                 user_message_timestamp=user_message_timestamp,
                 audio_messages_enabled=audio_messages_enabled,
                 audio_id=audio_id,
+                agent_message_timestamp=agent_message_timestamp,
             )
         ),
         daemon=True,
@@ -208,9 +215,8 @@ async def final_processing_coroutine(
     user_message_timestamp: datetime,
     audio_messages_enabled: bool,
     audio_id: Optional[str],
+    agent_message_timestamp: datetime,
 ) -> None:
-    agent_message_timestamp = datetime.now()
-
     await call_update_chat(
         messages=messages,
         agent_response=agent_response,
@@ -256,6 +262,7 @@ def stream_text(
         user_gender=user_gender,
         audio_messages_enabled=audio_messages_enabled,
         audio_id=audio_id,
+        skip_final_processing=False,
     )
     for chunk in stream:
         for choice in chunk.choices:
@@ -349,6 +356,7 @@ def sync_function():
             user_id=user_id,
             chat_type="type",
             user_message_timestamp=user_message_timestamp,
+            agent_message_timestamp=datetime.now(),
             audio_messages_enabled=audio_messages_enabled,
             audio_id=audio_id,
         )
@@ -435,15 +443,16 @@ async def handle_update_chat(request: UpdateChatRequest):
     audio_messages_enabled = request.audio_messages_enabled

     # Create new user chat message
-    await prisma.chatmessages.create(
-        data=types.ChatMessagesCreateInput(
-            chatId=chat_id,
-            role=enums.OpenAIRole.user,
-            content=new_user_message,
-            created=datetime.fromtimestamp(request.user_message_timestamp),
-            displayType="text",
+    if len(new_user_message) > 0:
+        await prisma.chatmessages.create(
+            data=types.ChatMessagesCreateInput(
+                chatId=chat_id,
+                role=enums.OpenAIRole.user,
+                content=new_user_message,
+                created=datetime.fromtimestamp(request.user_message_timestamp),
+                displayType="text",
+            )
         )
-    )

     display_type = "audio" if audio_messages_enabled else "text"
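A note on the threading pattern the diff's comment describes: the asynchronous final processing (database writes, analytics, memory updates) is handed to a daemon thread running its own event loop via asyncio.run, so the main thread can keep streaming tokens to the client, and the agent message timestamp is now computed up front and passed into that coroutine rather than taken whenever it finally runs. Below is a minimal, self-contained sketch of that pattern under stated assumptions: finalize_chat and stream_reply are hypothetical stand-ins for illustration only, not the repository's actual functions, which take many more arguments.

import asyncio
import threading
import time
from datetime import datetime, timedelta


async def finalize_chat(agent_response: str,
                        user_message_timestamp: datetime,
                        agent_message_timestamp: datetime) -> None:
    # Placeholder for the slow awaitable bookkeeping (DB writes, analytics, memories).
    await asyncio.sleep(0.5)
    print(f"saved {len(agent_response)} chars, "
          f"user_ts={user_message_timestamp}, agent_ts={agent_message_timestamp}")


def stream_reply(chunks):
    agent_response = ""
    for chunk in chunks:
        agent_response += chunk
        yield chunk  # keep streaming to the caller without waiting on any I/O

    # Timestamps are fixed once the full response is known, mirroring the diff's
    # approach of deriving the agent timestamp from the user timestamp with a timedelta.
    user_message_timestamp = datetime.now()
    agent_message_timestamp = user_message_timestamp - timedelta(seconds=1)

    # Run the async final processing on a daemon thread so the caller is never blocked;
    # asyncio.run gives the coroutine its own event loop inside that thread.
    threading.Thread(
        target=lambda: asyncio.run(
            finalize_chat(agent_response, user_message_timestamp, agent_message_timestamp)
        ),
        daemon=True,
    ).start()


if __name__ == "__main__":
    for piece in stream_reply(["Hello", ", ", "world!"]):
        print(piece, end="", flush=True)
    print()
    time.sleep(1)  # give the daemon thread time to finish before the process exits

Because the worker is a daemon thread, any work still in flight is abandoned when the process exits, which is why this sketch sleeps briefly at the end; a long-lived server process does not need that guard.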