1
+ // src/llms/grok.ts
2
+
3
+ /**
4
+ * @file Provides integration with the Grok LLM via xAI's REST API endpoint.
5
+ * @packageDocumentation
6
+ */
7
+
8
+ import { env } from 'node:process'
9
+ import { OpenAI } from 'openai'
10
+ import { err , logLLMCost } from '../utils/logging'
11
+ import type { GroqChatCompletionResponse , GrokModelType } from '../utils/types/llms'
12
+
13
+ /**
14
+ * Calls the Grok API to generate a response to a prompt and transcript.
15
+ * Uses the xAI-compatible OpenAI interface with a custom baseURL.
16
+ *
17
+ * @async
18
+ * @function callGrok
19
+ * @param {string } prompt - The prompt or instructions for Grok
20
+ * @param {string } transcript - The transcript or additional context to process
21
+ * @param {GrokModelType | string | { modelId: string } | boolean } [model='GROK_2_LATEST'] - The Grok model to use (defaults to GROK_2_LATEST).
22
+ * Note: a boolean may appear if the CLI is used like `--grok` with no model specified. We handle that by defaulting to 'grok-2-latest'.
23
+ * @throws Will throw an error if GROK_API_KEY is not set or if the API call fails
24
+ * @returns {Promise<string> } The generated text from Grok
25
+ */
26
+ export async function callGrok (
27
+ prompt : string ,
28
+ transcript : string ,
29
+ model : GrokModelType | string | { modelId : string } | boolean = 'GROK_2_LATEST'
30
+ ) : Promise < string > {
31
+ if ( ! env [ 'GROK_API_KEY' ] ) {
32
+ throw new Error ( 'GROK_API_KEY environment variable is not set. Please set it to your xAI Grok API key.' )
33
+ }
34
+
35
+ // Safely parse the model parameter, since it can be a string, object, or boolean
36
+ const modelId = typeof model === 'boolean'
37
+ ? 'grok-2-latest'
38
+ : typeof model === 'object'
39
+ ? model ?. modelId ?? 'grok-2-latest'
40
+ : typeof model === 'string'
41
+ ? model
42
+ : 'grok-2-latest'
43
+
44
+ const openai = new OpenAI ( {
45
+ apiKey : env [ 'GROK_API_KEY' ] ,
46
+ baseURL : 'https://api.x.ai/v1' ,
47
+ } )
48
+
49
+ try {
50
+ const combinedPrompt = `${ prompt } \n${ transcript } `
51
+
52
+ const response = await openai . chat . completions . create ( {
53
+ model : modelId ,
54
+ messages : [
55
+ {
56
+ role : 'user' ,
57
+ content : combinedPrompt
58
+ }
59
+ ] ,
60
+ } ) as GroqChatCompletionResponse
61
+
62
+ const firstChoice = response . choices [ 0 ]
63
+ if ( ! firstChoice || ! firstChoice . message ?. content ) {
64
+ throw new Error ( 'No valid response received from Grok API' )
65
+ }
66
+
67
+ const content = firstChoice . message . content
68
+
69
+ if ( response . usage ) {
70
+ logLLMCost ( {
71
+ modelName : modelId ,
72
+ stopReason : firstChoice . finish_reason ?? 'unknown' ,
73
+ tokenUsage : {
74
+ input : response . usage . prompt_tokens ,
75
+ output : response . usage . completion_tokens ,
76
+ total : response . usage . total_tokens
77
+ }
78
+ } )
79
+ }
80
+
81
+ return content
82
+ } catch ( error ) {
83
+ err ( `Error in callGrok: ${ ( error as Error ) . message } ` )
84
+ throw error
85
+ }
86
+ }