These traces are fully OpenTelemetry compatible and can be sent to an OpenTelemetry collector for viewing.

[![pypi](https://badge.fury.io/py/openinference-instrumentation-langchain.svg)](https://pypi.org/project/openinference-instrumentation-langchain/)

## Compatibility

This instrumentation works with:
- **LangChain 1.x** (`langchain>=1.0.0`): Modern agent framework built on LangGraph
- **LangChain Classic** (`langchain-classic>=1.0.0`): Legacy chains and tools (formerly `langchain 0.x`)
- All LangChain partner packages (`langchain-openai`, `langchain-anthropic`, `langchain-google-vertexai`, etc.)

The instrumentation hooks into `langchain-core`, which is the shared foundation used by all LangChain packages.
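Because the hooks live in `langchain-core`, a single instrument call covers 1.x agents and Classic chains alike. A minimal sketch of the lifecycle, using the standard OpenTelemetry instrumentor interface:

```python
from openinference.instrumentation.langchain import LangChainInstrumentor

instrumentor = LangChainInstrumentor()
instrumentor.instrument()  # patches langchain-core callbacks, so all LangChain packages emit spans

# ... run any agent or chain here; it is traced automatically ...

instrumentor.uninstrument()  # removes the patch and restores untraced behavior
```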

## Installation

### For LangChain 1.x (Recommended for New Projects)

```shell
pip install openinference-instrumentation-langchain langchain langchain-openai
```

### For LangChain Classic (Legacy Applications)

```shell
pip install openinference-instrumentation-langchain langchain-classic langchain-openai
```

### For Both (Migration Scenarios)

```shell
pip install openinference-instrumentation-langchain langchain langchain-classic langchain-openai
```

## Quickstart

### Example with LangChain 1.x (New Agent Framework)

Install packages needed for this demonstration.

```shell
pip install openinference-instrumentation-langchain langchain langchain-openai arize-phoenix opentelemetry-sdk opentelemetry-exporter-otlp
```

Start the Phoenix app in the background as a collector. By default, it listens on `http://localhost:6006`. You can visit the app via a browser at the same address.
```shell
python -m phoenix.server.main serve
```
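If you prefer to launch Phoenix from Python (for example, inside a notebook), the package exposes a launcher as well. A small sketch, assuming the default port:

```python
import phoenix as px

session = px.launch_app()  # serves the Phoenix UI, by default at http://localhost:6006
print(session.url)
```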
The following Python code sets up the `LangChainInstrumentor` to trace `langchain` and send the traces to Phoenix at the endpoint shown below.

```python
from langchain.agents import create_agent
from langchain_openai import ChatOpenAI
from openinference.instrumentation.langchain import LangChainInstrumentor
from opentelemetry import trace as trace_api
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk import trace as trace_sdk
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

endpoint = "http://127.0.0.1:6006/v1/traces"
tracer_provider = trace_sdk.TracerProvider()
tracer_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint)))
tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace_api.set_tracer_provider(tracer_provider)

LangChainInstrumentor().instrument()
```

To demonstrate tracing, we'll create a simple agent. First, configure your OpenAI credentials.

```python
import os

os.environ["OPENAI_API_KEY"] = "<your openai key>"
```

Now we can create an agent and run it.

```python
def get_weather(city: str) -> str:
    """Get the weather for a city."""
    return f"The weather in {city} is sunny!"


model = ChatOpenAI(model="gpt-4")
agent = create_agent(model, tools=[get_weather])
result = agent.invoke({"messages": [{"role": "user", "content": "What's the weather in Paris?"}]})
print(result)
```
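`invoke` returns the agent's final state, including the full message history. To print just the final reply, you can use the same pattern as the bundled example scripts:

```python
messages = result["messages"]
print(messages[-1].content)
```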

### Example with LangChain Classic (Legacy Chains)

For legacy applications using LangChain Classic:

```python
from langchain_classic.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

# ... (same instrumentation setup as above)

prompt_template = "Tell me a {adjective} joke"
prompt = PromptTemplate(input_variables=["adjective"], template=prompt_template)
llm = LLMChain(llm=OpenAI(), prompt=prompt, metadata={"category": "jokes"})

# Run the chain; the completion is traced along with the chain's metadata
completion = llm.predict(adjective="funny")
print(completion)
```
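The `metadata` attached to the chain is picked up by the instrumentation, so `{"category": "jokes"}` should appear as a span attribute on the resulting traces in Phoenix.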
---

A new standalone example script demonstrates a LangChain 1.x agent with several tools, traced to Phoenix:
```python
# /// script
# dependencies = [
#     "langchain>=1.0.0",
#     "langchain-openai>=0.2.0",
#     "openinference-instrumentation-langchain>=0.1.24",
#     "opentelemetry-sdk>=1.25.0",
#     "opentelemetry-exporter-otlp>=1.25.0",
# ]
# ///
import os
from typing import Literal

from langchain.agents import create_agent
from langchain_openai import ChatOpenAI
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk import trace as trace_sdk
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

from openinference.instrumentation.langchain import LangChainInstrumentor

endpoint = "http://127.0.0.1:6006/v1/traces"
tracer_provider = trace_sdk.TracerProvider()
tracer_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint)))
tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))

LangChainInstrumentor().instrument(tracer_provider=tracer_provider)


# Tools
def get_weather(city: str) -> str:
    """Get the current weather for a city."""
    weather_data = {
        "San Francisco": "Foggy, 60°F",
        "New York": "Sunny, 75°F",
        "London": "Rainy, 55°F",
        "Tokyo": "Clear, 70°F",
    }
    return weather_data.get(city, f"Weather data not available for {city}")


def calculate(operation: Literal["add", "subtract", "multiply", "divide"], a: float, b: float) -> float:
    """Perform a mathematical calculation."""
    if operation == "add":
        return a + b
    elif operation == "subtract":
        return a - b
    elif operation == "multiply":
        return a * b
    elif operation == "divide":
        if b == 0:
            return float("inf")
        return a / b
    else:
        raise ValueError(f"Unknown operation: {operation}")


def search_web(query: str) -> str:
    """Search the web for information."""
    return f"Here are the top results for '{query}': [Result 1] [Result 2] [Result 3]"


if __name__ == "__main__":
    if not os.environ.get("OPENAI_API_KEY"):
        print("Please set OPENAI_API_KEY environment variable")
        exit(1)

    model = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    agent = create_agent(
        model=model,
        tools=[get_weather, calculate, search_web],
        system_prompt=(
            "You are a helpful assistant with access to weather information, "
            "a calculator, and web search. Use these tools to help answer questions."
        ),
    )

    queries = [
        "What's the weather in San Francisco?",
        "Calculate 234 * 567",
        "What's the weather in Tokyo and multiply the temperature by 2?",
    ]

    for query in queries:
        print(f"\nQuery: {query}")
        result = agent.invoke({"messages": [{"role": "user", "content": query}]})
        messages = result.get("messages", [])
        if messages:
            print(f"Response: {messages[-1].content}")
        print()
```

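This script (and the middleware example below) carries PEP 723 inline metadata in its `# /// script` header, so a runner that understands it, such as `uv run`, can install the pinned dependencies automatically before executing the file.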
---

A second example script demonstrates custom agent middleware alongside the same instrumentation:
```python
# /// script
# dependencies = [
#     "langchain>=1.0.0",
#     "langchain-openai>=0.2.0",
#     "openinference-instrumentation-langchain>=0.1.24",
#     "opentelemetry-sdk>=1.25.0",
#     "opentelemetry-exporter-otlp>=1.25.0",
# ]
# ///
import os
from typing import Any

from langchain.agents import create_agent
from langchain.agents.middleware import AgentMiddleware
from langchain_openai import ChatOpenAI
from langgraph.runtime import Runtime
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk import trace as trace_sdk
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

from openinference.instrumentation.langchain import LangChainInstrumentor

endpoint = "http://127.0.0.1:6006/v1/traces"
tracer_provider = trace_sdk.TracerProvider()
tracer_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint)))
tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))

LangChainInstrumentor().instrument(tracer_provider=tracer_provider)


# Custom middleware
class LoggingMiddleware(AgentMiddleware):
    """Custom middleware that logs model calls."""

    def before_model(self, state: dict[str, Any], runtime: Runtime) -> dict[str, Any] | None:
        messages = state.get("messages", [])
        print(f"Calling model with {len(messages)} messages")
        return None

    def after_model(self, state: dict[str, Any], runtime: Runtime) -> dict[str, Any] | None:
        messages = state.get("messages", [])
        if messages and hasattr(messages[-1], "tool_calls") and messages[-1].tool_calls:
            print(f"Model wants to call {len(messages[-1].tool_calls)} tool(s)")
        return None


# Tools
def get_current_time() -> str:
    """Get the current time."""
    from datetime import datetime

    return datetime.now().strftime("%I:%M %p")


def get_random_fact() -> str:
    """Get a random interesting fact."""
    facts = [
        "Honey never spoils. Archaeologists have found 3000-year-old honey that's still edible.",
        "A group of flamingos is called a 'flamboyance'.",
        "The shortest war in history lasted only 38-45 minutes.",
        "Bananas are berries, but strawberries aren't.",
        "There are more stars in the universe than grains of sand on Earth.",
    ]
    import random

    return random.choice(facts)


def calculate_fibonacci(n: int) -> int:
    """Calculate the nth Fibonacci number."""
    if n < 0:
        raise ValueError("n must be non-negative")
    if n <= 1:
        return n

    a, b = 0, 1
    for _ in range(2, n + 1):
        a, b = b, a + b
    return b


if __name__ == "__main__":
    if not os.environ.get("OPENAI_API_KEY"):
        print("Please set OPENAI_API_KEY environment variable")
        exit(1)

    agent = create_agent(
        model=ChatOpenAI(model="gpt-4o-mini", temperature=0),
        tools=[get_current_time, get_random_fact, calculate_fibonacci],
        system_prompt=(
            "You are a helpful assistant with access to tools. "
            "Use them to provide accurate and interesting information."
        ),
        middleware=[LoggingMiddleware()],
    )

    queries = [
        "What time is it?",
        "Tell me an interesting fact",
        "Calculate the 10th Fibonacci number",
    ]

    for query in queries:
        print(f"\nQuery: {query}")
        result = agent.invoke({"messages": [{"role": "user", "content": query}]})
        messages = result.get("messages", [])
        if messages:
            print(f"Response: {messages[-1].content}")
        print()
```

---

The existing multi-agent example is updated to import the legacy agent patterns from `langchain-classic`; the top of the file now reads:

```python
"""
Based on https://colab.research.google.com/drive/1xDEPe2i_2rRqs7o6oNTtqA4J7Orsnvx1?usp=sharing

Requires Tavily API Key https://github.com/tavily-ai/tavily-python

This example uses LangChain Classic for legacy agent patterns (AgentExecutor).
For new projects, consider using LangChain 1.x with the new agent framework.
"""

import functools
import operator
from typing import Annotated, Sequence, TypedDict

from langchain_classic.agents import AgentExecutor, create_openai_tools_agent
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
```
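For reference, here is a hypothetical sketch of how one of this example's `AgentExecutor` workers could look on the 1.x framework (the model name and the stand-in `search` tool are assumptions, not part of the example):

```python
from langchain.agents import create_agent
from langchain_openai import ChatOpenAI


def search(query: str) -> str:
    """Stand-in for the Tavily search tool."""
    return f"Top results for {query!r}"


# create_agent subsumes the create_openai_tools_agent + AgentExecutor pair
agent = create_agent(ChatOpenAI(model="gpt-4o-mini"), tools=[search])
result = agent.invoke({"messages": [{"role": "user", "content": "What is LangGraph?"}]})
print(result["messages"][-1].content)
```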
---

The example requirements file is updated to match:

```
langchain >=1.0.0
langchain-classic >=1.0.0
langchain-experimental >=0.0.63
langchain_community >=0.2.10
langchain-openai >=0.1.19
```
---

And the test dependencies in `pyproject.toml` now cover both packages:

```toml
test = [
    "langchain_core == 0.3.50",
    "langchain >= 1.0.0",  # New 1.x agent framework
    "langchain-classic >= 1.0.0",  # Legacy chains and tools (formerly langchain 0.x)
    "langchain_openai == 0.2.14",
    "langchain-community == 0.3.15",
    "langchain-google-vertexai == 2.0.12",
]
```