Skip to content

Commit d57735e

Browse files
committed
add grok model
1 parent d81178c commit d57735e

File tree

8 files changed

+138
-34
lines changed

8 files changed

+138
-34
lines changed

src/commander.ts

+1-2
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ program
6161
.option('--cohere [model]', 'Use Cohere for processing with optional model specification')
6262
.option('--mistral [model]', 'Use Mistral for processing with optional model specification')
6363
.option('--deepseek [model]', 'Use DeepSeek for processing with optional model specification')
64-
// .option('--grok [model]', 'Use Grok for processing with optional model specification')
64+
.option('--grok [model]', 'Use Grok for processing with optional model specification')
6565
.option('--fireworks [model]', 'Use Fireworks AI for processing with optional model specification')
6666
.option('--together [model]', 'Use Together AI for processing with optional model specification')
6767
.option('--groq [model]', 'Use Groq for processing with optional model specification')
@@ -79,7 +79,6 @@ program
7979
.option('--cohereApiKey <key>', 'Specify Cohere API key (overrides .env variable)')
8080
.option('--mistralApiKey <key>', 'Specify Mistral API key (overrides .env variable)')
8181
.option('--deepseekApiKey <key>', 'Specify DeepSeek API key (overrides .env variable)')
82-
// .option('--grokApiKey <key>', 'Specify Grok API key (overrides .env variable)')
8382
.option('--grokApiKey <key>', 'Specify GROK API key (overrides .env variable)')
8483
.option('--togetherApiKey <key>', 'Specify Together API key (overrides .env variable)')
8584
.option('--fireworksApiKey <key>', 'Specify Fireworks API key (overrides .env variable)')

src/llms/grok.ts

+86
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
1+
// src/llms/grok.ts
2+
3+
/**
4+
* @file Provides integration with the Grok LLM via xAI's REST API endpoint.
5+
* @packageDocumentation
6+
*/
7+
8+
import { env } from 'node:process'
9+
import { OpenAI } from 'openai'
10+
import { err, logLLMCost } from '../utils/logging'
11+
import type { GroqChatCompletionResponse, GrokModelType } from '../utils/types/llms'
12+
13+
/**
14+
* Calls the Grok API to generate a response to a prompt and transcript.
15+
* Uses the xAI-compatible OpenAI interface with a custom baseURL.
16+
*
17+
* @async
18+
* @function callGrok
19+
* @param {string} prompt - The prompt or instructions for Grok
20+
* @param {string} transcript - The transcript or additional context to process
21+
* @param {GrokModelType | string | { modelId: string } | boolean} [model='GROK_2_LATEST'] - The Grok model to use (defaults to GROK_2_LATEST).
22+
* Note: a boolean may appear if the CLI is used like `--grok` with no model specified. We handle that by defaulting to 'grok-2-latest'.
23+
* @throws Will throw an error if GROK_API_KEY is not set or if the API call fails
24+
* @returns {Promise<string>} The generated text from Grok
25+
*/
26+
export async function callGrok(
27+
prompt: string,
28+
transcript: string,
29+
model: GrokModelType | string | { modelId: string } | boolean = 'GROK_2_LATEST'
30+
): Promise<string> {
31+
if (!env['GROK_API_KEY']) {
32+
throw new Error('GROK_API_KEY environment variable is not set. Please set it to your xAI Grok API key.')
33+
}
34+
35+
// Safely parse the model parameter, since it can be a string, object, or boolean
36+
const modelId = typeof model === 'boolean'
37+
? 'grok-2-latest'
38+
: typeof model === 'object'
39+
? model?.modelId ?? 'grok-2-latest'
40+
: typeof model === 'string'
41+
? model
42+
: 'grok-2-latest'
43+
44+
const openai = new OpenAI({
45+
apiKey: env['GROK_API_KEY'],
46+
baseURL: 'https://api.x.ai/v1',
47+
})
48+
49+
try {
50+
const combinedPrompt = `${prompt}\n${transcript}`
51+
52+
const response = await openai.chat.completions.create({
53+
model: modelId,
54+
messages: [
55+
{
56+
role: 'user',
57+
content: combinedPrompt
58+
}
59+
],
60+
}) as GroqChatCompletionResponse
61+
62+
const firstChoice = response.choices[0]
63+
if (!firstChoice || !firstChoice.message?.content) {
64+
throw new Error('No valid response received from Grok API')
65+
}
66+
67+
const content = firstChoice.message.content
68+
69+
if (response.usage) {
70+
logLLMCost({
71+
modelName: modelId,
72+
stopReason: firstChoice.finish_reason ?? 'unknown',
73+
tokenUsage: {
74+
input: response.usage.prompt_tokens,
75+
output: response.usage.completion_tokens,
76+
total: response.usage.total_tokens
77+
}
78+
})
79+
}
80+
81+
return content
82+
} catch (error) {
83+
err(`Error in callGrok: ${(error as Error).message}`)
84+
throw error
85+
}
86+
}

src/server/tests/fetch-all.ts

+9-9
Original file line numberDiff line numberDiff line change
@@ -298,15 +298,15 @@ const requests = [
298298
endpoint: '/process',
299299
outputFiles: ['FILE_30.md'],
300300
},
301-
// {
302-
// data: {
303-
// type: 'video',
304-
// url: 'https://www.youtube.com/watch?v=MORMZXEaONk',
305-
// llm: 'grok',
306-
// },
307-
// endpoint: '/process',
308-
// outputFiles: ['FILE_30.md'],
309-
// },
301+
{
302+
data: {
303+
type: 'video',
304+
url: 'https://www.youtube.com/watch?v=MORMZXEaONk',
305+
llm: 'grok',
306+
},
307+
endpoint: '/process',
308+
outputFiles: ['FILE_30.md'],
309+
},
310310
{
311311
data: {
312312
type: 'video',

src/utils/globals/llms.ts

+19-5
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ import { callGemini } from '../../llms/gemini'
77
import { callCohere } from '../../llms/cohere'
88
import { callMistral } from '../../llms/mistral'
99
import { callDeepSeek } from '../../llms/deepseek'
10-
// import { callGrok } from '../../llms/grok'
10+
import { callGrok } from '../../llms/grok'
1111
import { callFireworks } from '../../llms/fireworks'
1212
import { callTogether } from '../../llms/together'
1313
import { callGroq } from '../../llms/groq'
@@ -21,7 +21,7 @@ import type {
2121
GeminiModelType,
2222
MistralModelType,
2323
DeepSeekModelType,
24-
// GrokModelType,
24+
GrokModelType,
2525
TogetherModelType,
2626
FireworksModelType,
2727
GroqModelType,
@@ -48,7 +48,7 @@ export const LLM_SERVICES: Record<string, LLMServiceConfig> = {
4848
COHERE: { name: 'Cohere', value: 'cohere' },
4949
MISTRAL: { name: 'Mistral', value: 'mistral' },
5050
DEEPSEEK: { name: 'DeepSeek', value: 'deepseek' },
51-
// GROK: { name: 'Grok', value: 'grok' },
51+
GROK: { name: 'Grok', value: 'grok' },
5252
FIREWORKS: { name: 'Fireworks AI', value: 'fireworks' },
5353
TOGETHER: { name: 'Together AI', value: 'together' },
5454
GROQ: { name: 'Groq', value: 'groq' },
@@ -112,7 +112,7 @@ export const LLM_FUNCTIONS = {
112112
cohere: callCohere,
113113
mistral: callMistral,
114114
deepseek: callDeepSeek,
115-
// grok: callGrok,
115+
grok: callGrok,
116116
fireworks: callFireworks,
117117
together: callTogether,
118118
groq: callGroq,
@@ -466,6 +466,20 @@ export const GROQ_MODELS: ModelConfig<GroqModelType> = {
466466
},
467467
}
468468

469+
/**
470+
* Configuration for Grok models, mapping model types to their display names and identifiers.
471+
 * Pricing reflects xAI's published per-1M-token rates at the time of writing; verify against current xAI pricing before relying on cost estimates.
472+
* @type {ModelConfig<GrokModelType>}
473+
*/
474+
export const GROK_MODELS: ModelConfig<GrokModelType> = {
475+
GROK_2_LATEST: {
476+
name: 'Grok 2 Latest',
477+
modelId: 'grok-2-latest',
478+
inputCostPer1M: 2.00,
479+
outputCostPer1M: 10.00
480+
},
481+
}
482+
469483
/**
470484
* Configuration for DeepSeek models, mapping model types to their display names and identifiers.
471485
* Pricing is based on publicly listed rates for DeepSeek.
@@ -497,7 +511,7 @@ export const ALL_MODELS: { [key: string]: ModelConfigValue } = {
497511
...COHERE_MODELS,
498512
...MISTRAL_MODELS,
499513
...DEEPSEEK_MODELS,
500-
// ...GROK_MODELS,
514+
...GROK_MODELS,
501515
...FIREWORKS_MODELS,
502516
...TOGETHER_MODELS,
503517
...GROQ_MODELS,

src/utils/types/llms.ts

+6-1
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ export type ModelConfigValue = {
1818
/**
1919
* Options for Language Models (LLMs) that can be used in the application.
2020
*/
21-
export type LLMServices = 'chatgpt' | 'claude' | 'cohere' | 'mistral' | 'ollama' | 'gemini' | 'deepseek' | 'fireworks' | 'together' | 'groq'
21+
export type LLMServices = 'chatgpt' | 'claude' | 'cohere' | 'mistral' | 'ollama' | 'gemini' | 'deepseek' | 'fireworks' | 'together' | 'groq' | 'grok'
2222

2323
export type LLMServiceConfig = {
2424
name: string
@@ -84,6 +84,11 @@ export type TogetherModelType = 'LLAMA_3_2_3B' | 'LLAMA_3_1_405B' | 'LLAMA_3_1_7
8484
*/
8585
export type GroqModelType = 'LLAMA_3_2_1B_PREVIEW' | 'LLAMA_3_2_3B_PREVIEW' | 'LLAMA_3_3_70B_VERSATILE' | 'LLAMA_3_1_8B_INSTANT' | 'MIXTRAL_8X7B_INSTRUCT'
8686

87+
/**
88+
* Available Grok models.
89+
*/
90+
export type GrokModelType = 'GROK_2_LATEST'
91+
8792
/**
8893
* Local model with Ollama.
8994
*/

test/all.test.ts

+6-6
Original file line numberDiff line numberDiff line change
@@ -222,12 +222,12 @@ const commands = [
222222
expectedFile: 'audio-deepseek-shownotes.md',
223223
newName: '41-deepsek-shownotes.md'
224224
},
225-
// {
226-
// // process file using Grok for LLM operations
227-
// cmd: 'npm run as -- --file "content/audio.mp3" --grok',
228-
// expectedFile: 'audio-grok-shownotes.md',
229-
// newName: '41-grok-shownotes.md'
230-
// },
225+
{
226+
// process file using Grok for LLM operations
227+
cmd: 'npm run as -- --file "content/audio.mp3" --grok',
228+
expectedFile: 'audio-grok-shownotes.md',
229+
newName: '41-grok-shownotes.md'
230+
},
231231
{
232232
// process file using Fireworks for LLM operations
233233
cmd: 'npm run as -- --file "content/audio.mp3" --fireworks',

test/docker.test.ts

+5-5
Original file line numberDiff line numberDiff line change
@@ -76,11 +76,11 @@ const commands = [
7676
expectedFile: 'audio-deepseek-shownotes.md',
7777
newName: '15-docker-three-prompts-grok-shownotes.md'
7878
},
79-
// {
80-
// cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles summary shortChapters --whisper base --grok',
81-
// expectedFile: 'audio-grok-shownotes.md',
82-
// newName: '15-docker-three-prompts-grok-shownotes.md'
83-
// },
79+
{
80+
cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles summary shortChapters --whisper base --grok',
81+
expectedFile: 'audio-grok-shownotes.md',
82+
newName: '15-docker-three-prompts-grok-shownotes.md'
83+
},
8484
{
8585
cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles summary shortChapters --whisper base --fireworks',
8686
expectedFile: 'audio-fireworks-shownotes.md',

test/services.test.ts

+6-6
Original file line numberDiff line numberDiff line change
@@ -81,12 +81,12 @@ const commands = [
8181
expectedFile: '2024-09-24-ep0-fsjam-podcast-deepseek-shownotes.md',
8282
newName: '13-deepseek-shownotes.md'
8383
},
84-
// {
85-
// // process video using Grok for LLM operations
86-
// cmd: 'npm run as -- --video "https://www.youtube.com/watch?v=MORMZXEaONk" --grok',
87-
// expectedFile: '2024-09-24-ep0-fsjam-podcast-grok-shownotes.md',
88-
// newName: '13-grok-shownotes.md'
89-
// },
84+
{
85+
// process video using Grok for LLM operations
86+
cmd: 'npm run as -- --video "https://www.youtube.com/watch?v=MORMZXEaONk" --grok',
87+
expectedFile: '2024-09-24-ep0-fsjam-podcast-grok-shownotes.md',
88+
newName: '13-grok-shownotes.md'
89+
},
9090
{
9191
// process video using Fireworks for LLM operations
9292
cmd: 'npm run as -- --video "https://www.youtube.com/watch?v=MORMZXEaONk" --fireworks',

0 commit comments

Comments
 (0)