// JS backend not used by default, see README for instructions.

import { NextRequest, NextResponse } from "next/server";

import type { BaseLanguageModel } from "langchain/base_language";
import type { Document } from "langchain/document";
import type { BaseRetriever } from "langchain/schema/retriever";

import { RunnableSequence, RunnableMap } from "langchain/schema/runnable";
import { HumanMessage, AIMessage, BaseMessage } from "langchain/schema";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { StringOutputParser } from "langchain/schema/output_parser";
import { PromptTemplate, ChatPromptTemplate, MessagesPlaceholder } from "langchain/prompts";

import weaviate from "weaviate-ts-client";
import { WeaviateStore } from "langchain/vectorstores/weaviate";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";

export const runtime = "edge";

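// System prompt for answer generation; "{context}" is filled with the formatted retrieved documents.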
const RESPONSE_TEMPLATE = `You are an expert programmer and problem-solver, tasked to answer any question about Langchain. Using the provided context, answer the user's question to the best of your ability using the resources provided.
Generate a comprehensive and informative answer (but no more than 80 words) for a given question based solely on the provided search results (URL and content). You must only use information from the provided search results. Use an unbiased and journalistic tone. Combine search results together into a coherent answer. Do not repeat text. Cite search results using [\${{number}}] notation. Only cite the most relevant results that answer the question accurately. Place these citations at the end of the sentence or paragraph that references them - do not put them all at the end. If different results refer to different entities with the same name, write separate answers for each entity.
If there is nothing in the context relevant to the question at hand, just say "Hmm, I'm not sure." Don't try to make up an answer.
Anything between the following \`context\` html blocks is retrieved from a knowledge bank, not part of the conversation with the user.
<context>
    {context}
</context>

REMEMBER: If there is no relevant information within the context, just say "Hmm, I'm not sure." Don't try to make up an answer. Anything between the preceding 'context' html blocks is retrieved from a knowledge bank, not part of the conversation with the user.`;

// Prompt used to condense a follow-up question plus chat history into a standalone question.
const REPHRASE_TEMPLATE = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.

Chat History:
{chat_history}
Follow Up Input: {question}
Standalone Question:`;

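// Builds a retriever over an existing Weaviate index, using OpenAI embeddings for the query vector.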
const getRetriever = async () => {
  const client = weaviate.client({
    scheme: "https",
    host: process.env.WEAVIATE_HOST!,
    apiKey: new weaviate.ApiKey(process.env.WEAVIATE_API_KEY!),
  });
  const vectorstore = await WeaviateStore.fromExistingIndex(new OpenAIEmbeddings({}), {
    client,
    indexName: process.env.WEAVIATE_INDEX_NAME!,
    textKey: "text",
    metadataKeys: ["source", "title"],
  });
  return vectorstore.asRetriever({ k: 6 });
};

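// Returns the document-fetching chain. When chat history is present, the follow-up question
// is first condensed into a standalone question before being sent to the retriever.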
const createRetrieverChain = (llm: BaseLanguageModel, retriever: BaseRetriever, useChatHistory: boolean) => {
  if (!useChatHistory) {
    return RunnableSequence.from([({ question }) => question, retriever]);
  } else {
    const CONDENSE_QUESTION_PROMPT = PromptTemplate.fromTemplate(REPHRASE_TEMPLATE);
    const condenseQuestionChain = RunnableSequence.from([
      CONDENSE_QUESTION_PROMPT,
      llm,
      new StringOutputParser(),
    ]).withConfig({
      tags: ["CondenseQuestion"],
    });
    return condenseQuestionChain.pipe(retriever);
  }
};

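// Helpers for serializing retrieved documents and chat history into prompt-friendly strings.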
const formatDocs = (docs: Document[]) => {
  return docs.map((doc, i) => `<doc id='${i}'>${doc.pageContent}</doc>`).join("\n");
};

const formatChatHistoryAsString = (history: BaseMessage[]) => {
  return history.map((message) => `${message._getType()}: ${message.content}`).join("\n");
};

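// Assembles the full chain: gather context (retrieved docs, question, chat history),
// then synthesize a cited answer with the response prompt and LLM.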
const createChain = (llm: BaseLanguageModel, retriever: BaseRetriever, useChatHistory: boolean) => {
  const retrieverChain = createRetrieverChain(llm, retriever, useChatHistory).withConfig({ tags: ["FindDocs"] });
  const context = new RunnableMap({
    steps: {
      context: RunnableSequence.from([
        ({ question, chat_history }) => ({ question, chat_history: formatChatHistoryAsString(chat_history) }),
        retrieverChain,
        formatDocs,
      ]),
      question: ({ question }) => question,
      chat_history: ({ chat_history }) => chat_history,
    },
  }).withConfig({ tags: ["RetrieveDocs"] });
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", RESPONSE_TEMPLATE],
    new MessagesPlaceholder("chat_history"),
    ["human", "{question}"],
  ]);

  const responseSynthesizerChain = prompt.pipe(llm).pipe(new StringOutputParser()).withConfig({
    tags: ["GenerateResponse"],
  });
  return context.pipe(responseSynthesizerChain);
};

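// Edge route handler: validates the request, runs the chain with `streamLog`, and streams
// run metadata, sources, and answer tokens back to the client as newline-delimited JSON.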
export async function POST(req: NextRequest) {
  try {
    const body = await req.json();
    const question = body.message;
    const chatHistory = Array.isArray(body.history) ? body.history : [];
    const conversationId = body.conversation_id;

    if (typeof question !== "string") {
      return NextResponse.json({ error: `Invalid "message" parameter.` }, { status: 400 });
    }

    const convertedChatHistory: BaseMessage[] = [];
    for (const historyMessage of chatHistory) {
      if (historyMessage.human) {
        convertedChatHistory.push(new HumanMessage({ content: historyMessage.human }));
      } else if (historyMessage.ai) {
        convertedChatHistory.push(new AIMessage({ content: historyMessage.ai }));
      }
    }

    const metadata = { conversation_id: conversationId };
    const llm = new ChatOpenAI({
      modelName: "gpt-3.5-turbo-16k",
      temperature: 0,
    });
    const retriever = await getRetriever();
    const answerChain = createChain(llm, retriever, !!convertedChatHistory.length);

    // Narrows the streamed log output down to the final output and the "FindDocs" tagged chain
    // so that sources can be streamed back selectively.
    const stream = await answerChain.streamLog({
      question,
      chat_history: convertedChatHistory,
    }, {
      metadata,
    }, {
      includeTags: ["FindDocs"],
    });

    // Only return a selection of the log output to the frontend.
    const textEncoder = new TextEncoder();
    const clientStream = new ReadableStream({
      async pull(controller) {
        const { value, done } = await stream.next();
        if (done) {
          controller.close();
        } else if (value) {
          let hasEnqueued = false;
          for (const op of value.ops) {
            if ("value" in op) {
              if (op.path === "/logs/0/final_output" && Array.isArray(op.value.output)) {
                // Retrieved documents: forward their source URLs and titles.
                const allSources = op.value.output.map((doc: Document) => {
                  return {
                    url: doc.metadata.source,
                    title: doc.metadata.title,
                  };
                });
                if (allSources.length) {
                  const chunk = textEncoder.encode(JSON.stringify({ sources: allSources }) + "\n");
                  controller.enqueue(chunk);
                  hasEnqueued = true;
                }
              } else if (op.path === "/streamed_output/-") {
                // A newly streamed token of the final answer.
                const chunk = textEncoder.encode(JSON.stringify({ tok: op.value }) + "\n");
                controller.enqueue(chunk);
                hasEnqueued = true;
              } else if (op.path === "" && op.op === "replace") {
                // The initial patch carries the run id for the whole chain invocation.
                const chunk = textEncoder.encode(JSON.stringify({ run_id: op.value.id }) + "\n");
                controller.enqueue(chunk);
                hasEnqueued = true;
              }
            }
          }
          // Enqueue an empty chunk so the stream keeps making progress even when
          // this log patch produced no client-facing output.
          if (!hasEnqueued) {
            controller.enqueue(textEncoder.encode(""));
          }
        }
      },
    });

    return new Response(clientStream);
  } catch (e: any) {
    console.error(e);
    return NextResponse.json({ error: e.message }, { status: 500 });
  }
}
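
/*
 * Illustrative consumer sketch (not part of this module): the response body is a stream of
 * newline-delimited JSON objects of the form {"run_id": ...}, {"sources": [...]}, and {"tok": "..."}.
 * The "/api/chat" path below is an assumption about where this route is mounted.
 *
 *   const res = await fetch("/api/chat", {
 *     method: "POST",
 *     body: JSON.stringify({ message: "What is a retriever?", history: [] }),
 *   });
 *   const reader = res.body!.getReader();
 *   const decoder = new TextDecoder();
 *   let buffer = "";
 *   for (;;) {
 *     const { value, done } = await reader.read();
 *     if (done) break;
 *     buffer += decoder.decode(value, { stream: true });
 *     const lines = buffer.split("\n");
 *     buffer = lines.pop() ?? "";
 *     for (const line of lines.filter(Boolean)) {
 *       const chunk = JSON.parse(line);
 *       if (chunk.run_id) console.log("run:", chunk.run_id);
 *       if (chunk.sources) console.log("sources:", chunk.sources);
 *       if (chunk.tok) console.log("token:", chunk.tok);
 *     }
 *   }
 */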