# demo.py
import os
# Provide your OpenAI API key via the environment rather than hardcoding it,
# e.g. `export OPENAI_API_KEY=sk-...` before running this demo.
from langchain_openai import ChatOpenAI
# We will set streaming=True so that we can stream tokens
# See the streaming section for more information on this.
model = ChatOpenAI(temperature=0, streaming=True)
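# temperature=0 makes the model's choices deterministic, which helps when
# debugging an agent loop; streaming=True lets us consume tokens as they arrive.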
# Import the tool decorator used to define tools below
from langchain.tools import tool
from langgraph.prebuilt import ToolExecutor
@tool
def search(query: str) -> str:
"""Look up things online."""
return "LangChain"
@tool
def load_more_raw_text() -> str:
    """Load more raw text spans from ./raw_spans.txt."""
    with open('./raw_spans.txt', 'r') as f:
        lines = f.readlines()
    # Sample every 10th line and return them as a single newline-joined string.
    return '\n'.join(line.strip() for line in lines[::10])
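# NOTE: load_more_raw_text is defined but not registered in `tools` below,
# so the agent cannot call it as written.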
tools = [search]
tool_executor = ToolExecutor(tools)
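# ToolExecutor looks up a tool by name and runs it with the given input;
# we use it in call_tool below.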
## set up the model
from langchain_core.utils.function_calling import convert_to_openai_function
functions = [convert_to_openai_function(t) for t in tools]
model = model.bind_functions(functions)
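# bind_functions attaches the tools' OpenAI function schemas to every model
# call, so responses may carry a `function_call` in additional_kwargs.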
## define the agent state
from typing import TypedDict, Annotated, Sequence
import operator
from langchain_core.messages import BaseMessage
class AgentState(TypedDict):
messages: Annotated[Sequence[BaseMessage], operator.add]
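    # The operator.add annotation tells LangGraph to merge node outputs by
    # appending to `messages` rather than overwriting it.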
## define the nodes
##############################################################
from langgraph.prebuilt import ToolInvocation
import json
from langchain_core.messages import FunctionMessage
# Define the function that determines whether to continue or not
def should_continue(state):
messages = state['messages']
last_message = messages[-1]
# If there is no function call, then we finish
if "function_call" not in last_message.additional_kwargs:
return "end"
# Otherwise if there is, we continue
else:
return "continue"
# Define the function that calls the model
def call_model(state):
messages = state['messages']
response = model.invoke(messages)
# We return a list, because this will get added to the existing list
return {"messages": [response]}
# Define the function to execute tools
def call_tool(state):
messages = state['messages']
# Based on the continue condition
# we know the last message involves a function call
last_message = messages[-1]
    # We construct a ToolInvocation from the function_call
action = ToolInvocation(
tool=last_message.additional_kwargs["function_call"]["name"],
tool_input=json.loads(last_message.additional_kwargs["function_call"]["arguments"]),
)
# We call the tool_executor and get back a response
response = tool_executor.invoke(action)
# We use the response to create a FunctionMessage
function_message = FunctionMessage(content=str(response), name=action.tool)
# We return a list, because this will get added to the existing list
return {"messages": [function_message]}
## define the graph
##############################################################
from langgraph.graph import StateGraph, END
# Define a new graph
workflow = StateGraph(AgentState)
# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("action", call_tool)
# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.set_entry_point("agent")
# We now add a conditional edge
workflow.add_conditional_edges(
# First, we define the start node. We use `agent`.
# This means these are the edges taken after the `agent` node is called.
"agent",
# Next, we pass in the function that will determine which node is called next.
should_continue,
# Finally we pass in a mapping.
# The keys are strings, and the values are other nodes.
# END is a special node marking that the graph should finish.
# What will happen is we will call `should_continue`, and then the output of that
# will be matched against the keys in this mapping.
# Based on which one it matches, that node will then be called.
{
        # If `continue`, then we call the tool node.
"continue": "action",
# Otherwise we finish.
"end": END
}
)
# We now add a normal edge from `action` to `agent`.
# This means that after `action` is called, the `agent` node is called next.
workflow.add_edge('action', 'agent')
# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
app = workflow.compile()
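# Since `app` is a standard Runnable, invoke/stream/batch (and their async
# variants) all work on it.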
from langchain_core.messages import HumanMessage
inputs = {"messages": [HumanMessage(content="what is the weather in sf")]}
result = app.invoke(inputs)
for output in app.stream(inputs):
# stream() yields dictionaries with output keyed by node name
for key, value in output.items():
print(f"Output from node '{key}':")
print("---")
print(value)
print("\n---\n")