Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .vscode/settings.json
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
{
"terminal.integrated.tabs.title": "${sequence} ${process}",
"terminal.integrated.tabs.description": "${cwd}"
"terminal.integrated.tabs.description": "${cwd}",
"snyk.advanced.autoSelectOrganization": true
}
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ const providerIconMap: Record<ProviderType, IconComponent | null> = {
'chatgpt-pro': OpenAI,
'github-copilot': Github,
'qwen-code': Qwen,
'nvidia-nim': Bot,
}

interface ProviderIconProps {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,16 @@ export const providerTemplates: ProviderTemplate[] = [
contextWindow: 1000000,
setupGuideUrl: 'https://docs.browseros.com/features/qwen-code-oauth',
},
{
id: 'nvidia-nim',
name: 'NVIDIA NIM',
defaultBaseUrl: 'https://integrate.api.nvidia.com/v1',
defaultModelId: 'nvidia/llama-3.1-nemotron-nano-8b-instruct',
supportsImages: true,
contextWindow: 128000,
apiKeyUrl: 'https://build.nvidia.com/settings/api-keys',
setupGuideUrl: 'https://docs.nvidia.com/nim/',
},
{
id: 'moonshot',
name: 'Moonshot AI',
Expand Down Expand Up @@ -161,6 +171,7 @@ export const providerTypeOptions: { value: ProviderType; label: string }[] = [
{ value: 'lmstudio', label: 'LM Studio' },
{ value: 'bedrock', label: 'AWS Bedrock' },
{ value: 'browseros', label: 'BrowserOS' },
{ value: 'nvidia-nim', label: 'NVIDIA NIM' },
]

/**
Expand Down Expand Up @@ -192,6 +203,7 @@ export const DEFAULT_BASE_URLS: Record<ProviderType, string> = {
lmstudio: 'http://localhost:1234/v1',
bedrock: '',
browseros: '',
'nvidia-nim': 'https://integrate.api.nvidia.com/v1',
}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ export type ProviderType =
| 'chatgpt-pro'
| 'github-copilot'
| 'qwen-code'
| 'nvidia-nim'

/**
* LLM Provider configuration
Expand Down
12 changes: 12 additions & 0 deletions packages/browseros-agent/apps/server/src/agent/provider-factory.ts
Original file line number Diff line number Diff line change
Expand Up @@ -179,6 +179,17 @@ function createQwenCodeFactory(
})
}

/**
 * Builds a model factory for NVIDIA NIM's OpenAI-compatible endpoint.
 *
 * @param config - Resolved agent configuration; must carry an API key.
 * @returns A factory mapping a model id to a language-model instance.
 * @throws Error when the config has no API key (NIM requires one).
 */
function createNvidiaNimFactory(
  config: ResolvedAgentConfig,
): (modelId: string) => unknown {
  const { apiKey } = config
  if (!apiKey) throw new Error('NVIDIA NIM requires apiKey')
  // OpenAI-compatible provider pointed at the NIM integration endpoint.
  const provider = createOpenAICompatible({
    name: 'nvidia-nim',
    baseURL: EXTERNAL_URLS.NVIDIA_NIM_API,
    apiKey,
  })
  return provider
}

function createGitHubCopilotFactory(
config: ResolvedAgentConfig,
): (modelId: string) => unknown {
Expand Down Expand Up @@ -218,6 +229,7 @@ const PROVIDER_FACTORIES: Record<string, ProviderFactory> = {
[LLM_PROVIDERS.CHATGPT_PRO]: createChatGPTProFactory,
[LLM_PROVIDERS.GITHUB_COPILOT]: createGitHubCopilotFactory,
[LLM_PROVIDERS.QWEN_CODE]: createQwenCodeFactory,
[LLM_PROVIDERS.NVIDIA_NIM]: createNvidiaNimFactory,
}

export function createLanguageModel(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -161,6 +161,15 @@ function createQwenCodeModel(config: ResolvedLLMConfig): LanguageModel {
})(config.model)
}

/**
 * Instantiates a LanguageModel backed by NVIDIA NIM's OpenAI-compatible API.
 *
 * @param config - Resolved LLM configuration; must carry an API key and model id.
 * @returns The language model for `config.model`.
 * @throws Error when the config has no API key (NIM requires one).
 */
function createNvidiaNimModel(config: ResolvedLLMConfig): LanguageModel {
  if (!config.apiKey) throw new Error('NVIDIA NIM requires apiKey')
  // Build the provider first, then resolve the configured model through it.
  const nimProvider = createOpenAICompatible({
    apiKey: config.apiKey,
    baseURL: EXTERNAL_URLS.NVIDIA_NIM_API,
    name: 'nvidia-nim',
  })
  return nimProvider(config.model)
}

function createGitHubCopilotModel(config: ResolvedLLMConfig): LanguageModel {
if (!config.apiKey)
throw new Error('GitHub Copilot requires OAuth authentication')
Expand Down Expand Up @@ -196,6 +205,7 @@ const PROVIDER_FACTORIES: Record<string, ProviderFactory> = {
[LLM_PROVIDERS.CHATGPT_PRO]: createChatGPTProModel,
[LLM_PROVIDERS.GITHUB_COPILOT]: createGitHubCopilotModel,
[LLM_PROVIDERS.QWEN_CODE]: createQwenCodeModel,
[LLM_PROVIDERS.NVIDIA_NIM]: createNvidiaNimModel,
}

export function createLLMProvider(config: ResolvedLLMConfig): LanguageModel {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,4 +21,5 @@ export const EXTERNAL_URLS = {
QWEN_DEVICE_CODE: 'https://chat.qwen.ai/api/v1/oauth2/device/code',
QWEN_OAUTH_TOKEN: 'https://chat.qwen.ai/api/v1/oauth2/token',
QWEN_CODE_API: 'https://portal.qwen.ai/v1',
NVIDIA_NIM_API: 'https://integrate.api.nvidia.com/v1',
} as const
3 changes: 3 additions & 0 deletions packages/browseros-agent/packages/shared/src/schemas/llm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ export const LLM_PROVIDERS = {
CHATGPT_PRO: 'chatgpt-pro',
GITHUB_COPILOT: 'github-copilot',
QWEN_CODE: 'qwen-code',
NVIDIA_NIM: 'nvidia-nim',
} as const

/**
Expand All @@ -48,6 +49,7 @@ export const LLMProviderSchema: z.ZodEnum<
'chatgpt-pro',
'github-copilot',
'qwen-code',
'nvidia-nim',
]
> = z.enum([
LLM_PROVIDERS.ANTHROPIC,
Expand All @@ -64,6 +66,7 @@ export const LLMProviderSchema: z.ZodEnum<
LLM_PROVIDERS.CHATGPT_PRO,
LLM_PROVIDERS.GITHUB_COPILOT,
LLM_PROVIDERS.QWEN_CODE,
LLM_PROVIDERS.NVIDIA_NIM,
])

export type LLMProvider = z.infer<typeof LLMProviderSchema>
Expand Down
Loading