Commit 1f5596b

Adds Next.js backend (langchain-ai#182)
* Adds Next.js backend
* Update README
* Clarify README
* Fix typo
* Emoji
1 parent e8de91a commit 1f5596b

File tree

8 files changed: +405 -9347 lines changed


README.md

Lines changed: 21 additions & 14 deletions
@@ -9,26 +9,33 @@ The app leverages LangChain's streaming support and async API to update the page
 
 ## ✅ Running locally
 1. Install backend dependencies: `poetry install`.
+1. Make sure to enter your environment variables to configure the application:
+```
+export OPENAI_API_KEY=
+export WEAVIATE_URL=
+export WEAVIATE_API_KEY=
+export RECORD_MANAGER_DB_URL=
+
+# for tracing
+export LANGCHAIN_TRACING_V2=true
+export LANGCHAIN_ENDPOINT="https://api.smith.langchain.com"
+export LANGCHAIN_API_KEY=
+export LANGCHAIN_PROJECT=
+```
 1. Run `python ingest.py` to ingest LangChain docs data into the Weaviate vectorstore (only needs to be done once).
 1. You can use other [Document Loaders](https://langchain.readthedocs.io/en/latest/modules/document_loaders.html) to load your own data into the vectorstore.
-1. Run the backend with `make start`.
-1. Make sure to enter your environment variables to configure the application:
-```
-export OPENAI_API_KEY=
-export WEAVIATE_URL=
-export WEAVIATE_API_KEY=
-export RECORD_MANAGER_DB_URL=
-
-# for tracing
-export LANGCHAIN_TRACING_V2=true
-export LANGCHAIN_ENDPOINT="https://api.smith.langchain.com"
-export LANGCHAIN_API_KEY=
-export LANGCHAIN_PROJECT=
-```
+1. Start the Python backend with `poetry run make start`.
 1. Install frontend dependencies by running `cd chat-langchain`, then `yarn`.
 1. Run the frontend with `yarn dev`.
 1. Open [localhost:3000](http://localhost:3000) in your browser.
 
+## ☕ Running locally (JS backend)
+1. Follow the first three steps above to ingest LangChain docs data into the vectorstore.
+1. Install frontend dependencies by running `cd chat-langchain`, then `yarn`.
+1. Populate a `chat-langchain/.env.local` file with your own versions of keys from the `chat-langchain/.env.example` file, and set `NEXT_PUBLIC_API_BASE_URL` to `"http://localhost:3000/api"`.
+1. Run the app with `yarn dev`.
+1. Open [localhost:3000](http://localhost:3000) in your browser.
+
 ## 📚 Technical description
 
 There are two components: ingestion and question-answering.
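
The `NEXT_PUBLIC_API_BASE_URL` variable above is what lets the same frontend target either backend. As a rough sketch of the request the frontend needs to make (the `sendMessage` helper is illustrative, not part of this commit; the `/chat` path and the `{ message, history, conversation_id }` body shape come from the route handler added below):

```typescript
// Hypothetical helper, not part of this diff. Assumes the JS backend's chat
// route and the NEXT_PUBLIC_API_BASE_URL value from chat-langchain/.env.local.
const API_BASE_URL = process.env.NEXT_PUBLIC_API_BASE_URL ?? "http://localhost:3000/api";

type HistoryEntry = { human?: string; ai?: string };

async function sendMessage(message: string, history: HistoryEntry[] = []): Promise<Response> {
  // The chat route reads `message`, `history`, and optionally `conversation_id`
  // from the JSON body and answers with a newline-delimited JSON stream.
  return fetch(`${API_BASE_URL}/chat`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ message, history }),
  });
}
```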

chat-langchain/.env.example

Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
## For JS backend:

# LANGCHAIN_TRACING_V2=true
# LANGCHAIN_ENDPOINT="https://api.smith.langchain.com"
# LANGCHAIN_API_KEY="YOUR_LANGSMITH_KEY"
# LANGCHAIN_PROJECT="YOUR_PROJECT_NAME"

# NEXT_PUBLIC_API_BASE_URL="http://localhost:3000/api"
# OPENAI_API_KEY="YOUR_OPENAI_API_KEY"
# WEAVIATE_HOST="YOUR_WEAVIATE_HOST"
# WEAVIATE_API_KEY="YOUR_WEAVIATE_API_KEY"
# WEAVIATE_INDEX_NAME="YOUR_WEAVIATE_INDEX_NAME"
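
The JS backend below dereferences several of these keys with non-null assertions (for example `process.env.WEAVIATE_HOST!`), so a missing value only surfaces on the first request. A minimal fail-fast sketch, assuming you want to validate at startup (the `assertEnv` helper is hypothetical, not part of this commit):

```typescript
// Hypothetical startup guard, not part of this diff: checks the keys the
// chat route below reads with `!` assertions.
const REQUIRED_KEYS = [
  "OPENAI_API_KEY",
  "WEAVIATE_HOST",
  "WEAVIATE_API_KEY",
  "WEAVIATE_INDEX_NAME",
] as const;

export function assertEnv(): void {
  const missing = REQUIRED_KEYS.filter((key) => !process.env[key]);
  if (missing.length > 0) {
    throw new Error(`Missing required environment variables: ${missing.join(", ")}`);
  }
}
```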

chat-langchain/app/api/chat/route.ts

Lines changed: 193 additions & 0 deletions
@@ -0,0 +1,193 @@
// JS backend not used by default, see README for instructions.

import { NextRequest, NextResponse } from "next/server";

import type { BaseLanguageModel } from "langchain/base_language";
import type { Document } from "langchain/document";
import type { BaseRetriever } from "langchain/schema/retriever";

import { RunnableSequence, RunnableMap } from "langchain/schema/runnable";
import { HumanMessage, AIMessage, BaseMessage } from "langchain/schema";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { StringOutputParser } from "langchain/schema/output_parser";
import {
  PromptTemplate,
  ChatPromptTemplate,
  MessagesPlaceholder,
} from "langchain/prompts";

import weaviate from "weaviate-ts-client";
import { WeaviateStore } from "langchain/vectorstores/weaviate";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";

export const runtime = "edge";

const RESPONSE_TEMPLATE = `You are an expert programmer and problem-solver, tasked to answer any question about Langchain. Using the provided context, answer the user's question to the best of your ability using the resources provided.
Generate a comprehensive and informative answer (but no more than 80 words) for a given question based solely on the provided search results (URL and content). You must only use information from the provided search results. Use an unbiased and journalistic tone. Combine search results together into a coherent answer. Do not repeat text. Cite search results using [\${{number}}] notation. Only cite the most relevant results that answer the question accurately. Place these citations at the end of the sentence or paragraph that reference them - do not put them all at the end. If different results refer to different entities within the same name, write separate answers for each entity.
If there is nothing in the context relevant to the question at hand, just say "Hmm, I'm not sure." Don't try to make up an answer.
Anything between the following \`context\` html blocks is retrieved from a knowledge bank, not part of the conversation with the user.
<context>
{context}
<context/>

REMEMBER: If there is no relevant information within the context, just say "Hmm, I'm not sure." Don't try to make up an answer. Anything between the preceding 'context' html blocks is retrieved from a knowledge bank, not part of the conversation with the user.`;

const REPHRASE_TEMPLATE = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.

Chat History:
{chat_history}
Follow Up Input: {question}
Standalone Question:`;

const getRetriever = async () => {
  const client = weaviate.client({
    scheme: "https",
    host: process.env.WEAVIATE_HOST!,
    apiKey: new weaviate.ApiKey(process.env.WEAVIATE_API_KEY!),
  });
  const vectorstore = await WeaviateStore.fromExistingIndex(new OpenAIEmbeddings({}), {
    client,
    indexName: process.env.WEAVIATE_INDEX_NAME!,
    textKey: "text",
    metadataKeys: ["source", "title"],
  });
  return vectorstore.asRetriever({ k: 6 });
};

const createRetrieverChain = (
  llm: BaseLanguageModel,
  retriever: BaseRetriever,
  useChatHistory: boolean,
) => {
  if (!useChatHistory) {
    return RunnableSequence.from([({ question }) => question, retriever]);
  } else {
    // Condense the follow-up question plus chat history into a standalone
    // question before retrieval.
    const CONDENSE_QUESTION_PROMPT = PromptTemplate.fromTemplate(REPHRASE_TEMPLATE);
    const condenseQuestionChain = RunnableSequence.from([
      CONDENSE_QUESTION_PROMPT,
      llm,
      new StringOutputParser(),
    ]).withConfig({
      tags: ["CondenseQuestion"],
    });
    return condenseQuestionChain.pipe(retriever);
  }
};

const formatDocs = (docs: Document[]) => {
  return docs.map((doc, i) => `<doc id='${i}'>${doc.pageContent}</doc>`).join("\n");
};

const formatChatHistoryAsString = (history: BaseMessage[]) => {
  return history.map((message) => `${message._getType()}: ${message.content}`).join("\n");
};

const createChain = (
  llm: BaseLanguageModel,
  retriever: BaseRetriever,
  useChatHistory: boolean,
) => {
  const retrieverChain = createRetrieverChain(llm, retriever, useChatHistory).withConfig({
    tags: ["FindDocs"],
  });
  const context = new RunnableMap({
    steps: {
      context: RunnableSequence.from([
        ({ question, chat_history }) => ({
          question,
          chat_history: formatChatHistoryAsString(chat_history),
        }),
        retrieverChain,
        formatDocs,
      ]),
      question: ({ question }) => question,
      chat_history: ({ chat_history }) => chat_history,
    },
  }).withConfig({ tags: ["RetrieveDocs"] });
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", RESPONSE_TEMPLATE],
    new MessagesPlaceholder("chat_history"),
    ["human", "{question}"],
  ]);

  const responseSynthesizerChain = prompt.pipe(llm).pipe(new StringOutputParser()).withConfig({
    tags: ["GenerateResponse"],
  });
  return context.pipe(responseSynthesizerChain);
};

export async function POST(req: NextRequest) {
  try {
    const body = await req.json();
    const question = body.message;
    const chatHistory = Array.isArray(body.history) ? body.history : [];
    const conversationId = body.conversation_id;

    if (question === undefined || typeof question !== "string") {
      return NextResponse.json({ error: `Invalid "message" parameter.` }, { status: 400 });
    }

    const convertedChatHistory = [];
    for (const historyMessage of chatHistory) {
      if (historyMessage.human) {
        convertedChatHistory.push(new HumanMessage({ content: historyMessage.human }));
      } else if (historyMessage.ai) {
        convertedChatHistory.push(new AIMessage({ content: historyMessage.ai }));
      }
    }

    const metadata = { conversation_id: conversationId };
    const llm = new ChatOpenAI({
      modelName: "gpt-3.5-turbo-16k",
      temperature: 0,
    });
    const retriever = await getRetriever();
    const answerChain = createChain(llm, retriever, !!convertedChatHistory.length);

    // Narrows streamed log output down to the final output and the "FindDocs"-tagged
    // chain to selectively stream back sources.
    const stream = await answerChain.streamLog(
      {
        question,
        chat_history: convertedChatHistory,
      },
      {
        metadata,
      },
      {
        includeTags: ["FindDocs"],
      },
    );

    // Only return a selection of output to the frontend
    const textEncoder = new TextEncoder();
    const clientStream = new ReadableStream({
      async pull(controller) {
        const { value, done } = await stream.next();
        if (done) {
          controller.close();
        } else if (value) {
          let hasEnqueued = false;
          for (const op of value.ops) {
            if ("value" in op) {
              if (op.path === "/logs/0/final_output" && Array.isArray(op.value.output)) {
                // Retrieved documents: forward only their URLs and titles as sources.
                const allSources = op.value.output.map((doc: Document) => {
                  return {
                    url: doc.metadata.source,
                    title: doc.metadata.title,
                  };
                });
                if (allSources.length) {
                  const chunk = textEncoder.encode(JSON.stringify({ sources: allSources }) + "\n");
                  controller.enqueue(chunk);
                  hasEnqueued = true;
                }
              } else if (op.path === "/streamed_output/-") {
                // Incremental answer tokens.
                const chunk = textEncoder.encode(JSON.stringify({ tok: op.value }) + "\n");
                controller.enqueue(chunk);
                hasEnqueued = true;
              } else if (op.path === "" && op.op === "replace") {
                // The initial state replacement carries the LangSmith run ID.
                const chunk = textEncoder.encode(JSON.stringify({ run_id: op.value.id }) + "\n");
                controller.enqueue(chunk);
                hasEnqueued = true;
              }
            }
          }
          // Pull must always enqueue a value
          if (!hasEnqueued) {
            controller.enqueue(textEncoder.encode(""));
          }
        }
      },
    });

    return new Response(clientStream);
  } catch (e: any) {
    console.log(e);
    return NextResponse.json({ error: e.message }, { status: 500 });
  }
}
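
Each line of the stream this route returns is a standalone JSON object carrying exactly one of `sources`, `tok`, or `run_id` (plus empty keep-alive chunks). A minimal consumption sketch, assuming a browser `fetch` response; the parsing helper itself is illustrative and not part of this diff:

```typescript
// Hypothetical consumer for the chat route's newline-delimited JSON stream;
// the chunk shapes mirror what route.ts above enqueues.
async function readChatStream(response: Response): Promise<string> {
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  let answer = "";
  for (;;) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split("\n");
    buffer = lines.pop() ?? ""; // hold back a partial trailing line
    for (const line of lines) {
      if (!line) continue; // skip the empty chunks enqueued to satisfy pull()
      const chunk = JSON.parse(line);
      if (chunk.sources) console.log("sources:", chunk.sources);
      else if (chunk.run_id) console.log("run id:", chunk.run_id);
      else if (chunk.tok !== undefined) answer += chunk.tok; // streamed answer tokens
    }
  }
  return answer;
}
```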
Lines changed: 43 additions & 0 deletions
@@ -0,0 +1,43 @@
// JS backend not used by default, see README for instructions.

import { NextRequest, NextResponse } from "next/server";

import { Client } from "langsmith";

export const runtime = "edge";

const client = new Client();

export async function POST(req: NextRequest) {
  try {
    const body = await req.json();
    const { run_id, key = "user_score", ...rest } = body;
    if (!run_id) {
      return NextResponse.json({ error: "No LangSmith run ID provided" }, { status: 400 });
    }

    await client.createFeedback(run_id, key, rest);

    return NextResponse.json({ result: "posted feedback successfully" }, { status: 200 });
  } catch (e: any) {
    console.log(e);
    return NextResponse.json({ error: e.message }, { status: 500 });
  }
}

export async function PATCH(req: NextRequest) {
  try {
    const body = await req.json();
    const { feedback_id, score, comment } = body;
    if (feedback_id === undefined) {
      return NextResponse.json({ error: "No feedback ID provided" }, { status: 400 });
    }

    await client.updateFeedback(feedback_id, { score, comment });

    return NextResponse.json({ result: "patched feedback successfully" }, { status: 200 });
  } catch (e: any) {
    console.log(e);
    return NextResponse.json({ error: e.message }, { status: 500 });
  }
}
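
This route pairs with the `run_id` that the chat stream emits: POST records a score under the default `user_score` key (extra body fields pass through to LangSmith), and PATCH amends an existing feedback entry. A hedged usage sketch; the `/feedback` path is inferred from the handler's purpose rather than shown in this view:

```typescript
// Hypothetical callers, not part of this diff; request bodies match what the
// POST and PATCH handlers above destructure.
async function postFeedback(apiBaseUrl: string, runId: string, score: number): Promise<Response> {
  return fetch(`${apiBaseUrl}/feedback`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ run_id: runId, score }),
  });
}

async function patchFeedback(
  apiBaseUrl: string,
  feedbackId: string,
  score: number,
  comment?: string,
): Promise<Response> {
  return fetch(`${apiBaseUrl}/feedback`, {
    method: "PATCH",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ feedback_id: feedbackId, score, comment }),
  });
}
```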
Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
// JS backend not used by default, see README for instructions.

import { NextRequest, NextResponse } from "next/server";

import { Client } from "langsmith";

export const runtime = "edge";

const client = new Client();

const pollForRun = async (runId: string, retryCount = 0): Promise<string> => {
  // Quadratic backoff: retries until the run becomes readable in LangSmith.
  await new Promise((resolve) => setTimeout(resolve, retryCount * retryCount * 100));
  try {
    await client.readRun(runId);
  } catch (e) {
    return pollForRun(runId, retryCount + 1);
  }
  try {
    const sharedLink = await client.readRunSharedLink(runId);
    if (!sharedLink) {
      throw new Error("Run is not shared.");
    }
    return sharedLink;
  } catch (e) {
    // Not shared yet: create a new share link for the run.
    return client.shareRun(runId);
  }
};

export async function POST(req: NextRequest) {
  try {
    const body = await req.json();
    const { run_id } = body;
    if (run_id === undefined) {
      return NextResponse.json({ error: "No run ID provided" }, { status: 400 });
    }
    const response = await pollForRun(run_id);
    return NextResponse.json(response, { status: 200 });
  } catch (e: any) {
    console.log(e);
    return NextResponse.json({ error: e.message }, { status: 500 });
  }
}
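
Because LangSmith persists runs asynchronously, `pollForRun` backs off quadratically until the run is readable, then returns an existing share link or creates one. A hedged usage sketch (the `/get_trace` path here is an assumption for illustration, since the route's file name is not shown above):

```typescript
// Hypothetical caller, not part of this diff: exchanges a run_id from the
// chat stream for a public LangSmith share link. The route path is assumed.
async function getTraceUrl(apiBaseUrl: string, runId: string): Promise<string> {
  const response = await fetch(`${apiBaseUrl}/get_trace`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ run_id: runId }),
  });
  if (!response.ok) {
    throw new Error(`Failed to share run: ${response.status}`);
  }
  // The handler returns the share link itself as the JSON payload.
  return response.json();
}
```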
