Commit c1c518c

fix: langchain name + usage (#430)
* fix: langchain name + usage
* bump: version
* fix: remove log
* fix: prettier
1 parent 6dc7cb7 · commit c1c518c

3 files changed: +44 -8 lines changed

Diff for: posthog-ai/CHANGELOG.md (+4)

```diff
@@ -1,3 +1,7 @@
+# 3.3.2 - 2025-03-25
+
+- fix: langchain name mapping
+
 # 3.3.1 - 2025-03-13
 
 - fix: fix vercel output mapping and token caching
```

Diff for: posthog-ai/package.json (+1 -1)

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@posthog/ai",
-  "version": "3.3.1",
+  "version": "3.3.2",
   "description": "PostHog Node.js AI integrations",
   "repository": {
     "type": "git",
```

Diff for: posthog-ai/src/langchain/callbacks.ts (+39 -7)

```diff
@@ -82,7 +82,6 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
     parentRunId?: string,
     tags?: string[],
     metadata?: Record<string, unknown>,
-
     runType?: string,
     runName?: string
   ): void {
```
```diff
@@ -432,10 +431,18 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
       eventProperties['$ai_is_error'] = true
     } else {
       // Handle token usage
-      const [inputTokens, outputTokens] = this.parseUsage(output)
+      const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output)
       eventProperties['$ai_input_tokens'] = inputTokens
       eventProperties['$ai_output_tokens'] = outputTokens
 
+      // Add additional token data to properties
+      if (additionalTokenData.cacheReadInputTokens) {
+        eventProperties['$ai_cache_read_tokens'] = additionalTokenData.cacheReadInputTokens
+      }
+      if (additionalTokenData.reasoningTokens) {
+        eventProperties['$ai_reasoning_tokens'] = additionalTokenData.reasoningTokens
+      }
+
       // Handle generations/completions
       let completions
       if (output.generations && Array.isArray(output.generations)) {
```
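Note on the hunk above: the two new properties are only attached when the parsed usage actually carries the corresponding counts. A minimal sketch of the resulting property shape, with hypothetical token counts (the `buildEventProperties` helper is illustrative, not part of the SDK):

```ts
// Sketch only: the $ai_* property names come from the diff above; the
// helper and the sample values are hypothetical.
type AdditionalTokenData = { cacheReadInputTokens?: number; reasoningTokens?: number }

function buildEventProperties(
  inputTokens: number,
  outputTokens: number,
  additional: AdditionalTokenData
): Record<string, unknown> {
  const props: Record<string, unknown> = {
    $ai_input_tokens: inputTokens,
    $ai_output_tokens: outputTokens,
  }
  // Optional counts are attached only when present, mirroring the hunk above
  if (additional.cacheReadInputTokens) {
    props['$ai_cache_read_tokens'] = additional.cacheReadInputTokens
  }
  if (additional.reasoningTokens) {
    props['$ai_reasoning_tokens'] = additional.reasoningTokens
  }
  return props
}

// buildEventProperties(512, 64, { cacheReadInputTokens: 128 })
// -> { $ai_input_tokens: 512, $ai_output_tokens: 64, $ai_cache_read_tokens: 128 }
```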
```diff
@@ -471,14 +478,17 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
     }
   }
 
-  private _getLangchainRunName(serialized: any, ...args: any[]): string | undefined {
+  private _getLangchainRunName(serialized: any, ...args: any): string | undefined {
     if (args && args.length > 0) {
       for (const arg of args) {
         if (arg && typeof arg === 'object' && 'name' in arg) {
           return arg.name
+        } else if (arg && typeof arg === 'object' && 'runName' in arg) {
+          return arg.runName
         }
       }
     }
+
     if (serialized && serialized.name) {
       return serialized.name
     }
```
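The run-name lookup now also honors a `runName` key on the extra arguments before falling back to `serialized.name`. A self-contained sketch of that fallback order, with hypothetical inputs (`getRunName` is illustrative, not the SDK's private method):

```ts
// Sketch of the fallback order implemented in the hunk above;
// function name and sample inputs are hypothetical.
function getRunName(serialized: any, ...args: any[]): string | undefined {
  for (const arg of args) {
    if (arg && typeof arg === 'object' && 'name' in arg) {
      return arg.name // an explicit name on an argument wins
    } else if (arg && typeof arg === 'object' && 'runName' in arg) {
      return arg.runName // new: honor LangChain's runName option
    }
  }
  if (serialized && serialized.name) {
    return serialized.name // last resort: the serialized component's name
  }
  return undefined
}

console.log(getRunName({ name: 'ChatOpenAI' }, { runName: 'my-chain' })) // "my-chain"
console.log(getRunName({ name: 'ChatOpenAI' }, {}))                      // "ChatOpenAI"
```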
```diff
@@ -520,7 +530,7 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
     return messageDict
   }
 
-  private _parseUsageModel(usage: any): [number, number] {
+  private _parseUsageModel(usage: any): [number, number, Record<string, any>] {
     const conversionList: Array<[string, 'input' | 'output']> = [
       ['promptTokens', 'input'],
       ['completionTokens', 'output'],
```
```diff
@@ -548,11 +558,32 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
       { input: 0, output: 0 }
     )
 
-    return [parsedUsage.input, parsedUsage.output]
+    // Extract additional token details like cached tokens and reasoning tokens
+    const additionalTokenData: Record<string, any> = {}
+
+    // Check for cached tokens in various formats
+    if (usage.prompt_tokens_details?.cached_tokens != null) {
+      additionalTokenData.cacheReadInputTokens = usage.prompt_tokens_details.cached_tokens
+    } else if (usage.input_token_details?.cache_read != null) {
+      additionalTokenData.cacheReadInputTokens = usage.input_token_details.cache_read
+    } else if (usage.cachedPromptTokens != null) {
+      additionalTokenData.cacheReadInputTokens = usage.cachedPromptTokens
+    }
+
+    // Check for reasoning tokens in various formats
+    if (usage.completion_tokens_details?.reasoning_tokens != null) {
+      additionalTokenData.reasoningTokens = usage.completion_tokens_details.reasoning_tokens
+    } else if (usage.output_token_details?.reasoning != null) {
+      additionalTokenData.reasoningTokens = usage.output_token_details.reasoning
+    } else if (usage.reasoningTokens != null) {
+      additionalTokenData.reasoningTokens = usage.reasoningTokens
+    }
+
+    return [parsedUsage.input, parsedUsage.output, additionalTokenData]
   }
 
-  private parseUsage(response: LLMResult): [number, number] {
-    let llmUsage: [number, number] = [0, 0]
+  private parseUsage(response: LLMResult): [number, number, Record<string, any>] {
+    let llmUsage: [number, number, Record<string, any>] = [0, 0, {}]
     const llmUsageKeys = ['token_usage', 'usage', 'tokenUsage']
 
     if (response.llmOutput != null) {
```
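The extraction above normalizes three usage formats that appear in the diff: snake_case `prompt_tokens_details`/`completion_tokens_details`, `input_token_details`/`output_token_details`, and camelCase `cachedPromptTokens`/`reasoningTokens`. A standalone sketch of the same normalization (`extractTokenDetails` and the sample payloads are hypothetical):

```ts
// Sketch of the normalization added to _parseUsageModel; field names are
// taken from the diff above, everything else is illustrative.
function extractTokenDetails(usage: any): Record<string, number> {
  const details: Record<string, number> = {}

  // Cached prompt tokens, checked in the same order as the diff
  const cached =
    usage.prompt_tokens_details?.cached_tokens ?? // OpenAI-style snake_case
    usage.input_token_details?.cache_read ??      // *_token_details style
    usage.cachedPromptTokens                      // camelCase style
  if (cached != null) details.cacheReadInputTokens = cached

  // Reasoning tokens, same fallback pattern
  const reasoning =
    usage.completion_tokens_details?.reasoning_tokens ??
    usage.output_token_details?.reasoning ??
    usage.reasoningTokens
  if (reasoning != null) details.reasoningTokens = reasoning

  return details
}

// Three payload shapes, all yielding { cacheReadInputTokens: 128 }:
extractTokenDetails({ prompt_tokens_details: { cached_tokens: 128 } })
extractTokenDetails({ input_token_details: { cache_read: 128 } })
extractTokenDetails({ cachedPromptTokens: 128 })
```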
```diff
@@ -566,6 +597,7 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
     if (llmUsage[0] === 0 && llmUsage[1] === 0 && response.generations) {
       for (const generation of response.generations) {
         for (const genChunk of generation) {
+          // Check other paths for usage information
           if (genChunk.generationInfo?.usage_metadata) {
             llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata)
             return llmUsage
```
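When `llmOutput` carries none of the usage keys, `parseUsage` falls back to per-generation `generationInfo.usage_metadata`. A hypothetical response object that would exercise that path (the key names inside `usage_metadata` are assumed to match the `conversionList` entries shown above):

```ts
// Hypothetical LLMResult-shaped object; only the fields used by the
// fallback path are included, and the token key names are assumptions.
const response = {
  llmOutput: {}, // no 'token_usage' / 'usage' / 'tokenUsage' here
  generations: [
    [
      {
        text: 'Hello!',
        generationInfo: {
          usage_metadata: { promptTokens: 12, completionTokens: 3 },
        },
      },
    ],
  ],
}
// parseUsage(response) would find nothing in llmOutput and pick up the
// counts via generationInfo.usage_metadata instead.
```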
