diff --git a/.vscode/settings.json b/.vscode/settings.json index e88781a2f..7db934522 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,4 +1,5 @@ { "terminal.integrated.tabs.title": "${sequence} ${process}", - "terminal.integrated.tabs.description": "${cwd}" + "terminal.integrated.tabs.description": "${cwd}", + "snyk.advanced.autoSelectOrganization": true } diff --git a/packages/browseros-agent/apps/agent/lib/llm-providers/providerIcons.tsx b/packages/browseros-agent/apps/agent/lib/llm-providers/providerIcons.tsx index 8fd9f8892..ec95ad2cb 100644 --- a/packages/browseros-agent/apps/agent/lib/llm-providers/providerIcons.tsx +++ b/packages/browseros-agent/apps/agent/lib/llm-providers/providerIcons.tsx @@ -36,6 +36,7 @@ const providerIconMap: Record = { 'chatgpt-pro': OpenAI, 'github-copilot': Github, 'qwen-code': Qwen, + 'nvidia-nim': Bot, } interface ProviderIconProps { diff --git a/packages/browseros-agent/apps/agent/lib/llm-providers/providerTemplates.ts b/packages/browseros-agent/apps/agent/lib/llm-providers/providerTemplates.ts index 4d8799b45..6cae94dba 100644 --- a/packages/browseros-agent/apps/agent/lib/llm-providers/providerTemplates.ts +++ b/packages/browseros-agent/apps/agent/lib/llm-providers/providerTemplates.ts @@ -72,6 +72,16 @@ export const providerTemplates: ProviderTemplate[] = [ contextWindow: 1000000, setupGuideUrl: 'https://docs.browseros.com/features/qwen-code-oauth', }, + { + id: 'nvidia-nim', + name: 'NVIDIA NIM', + defaultBaseUrl: 'https://integrate.api.nvidia.com/v1', + defaultModelId: 'nvidia/llama-3.1-nemotron-nano-8b-instruct', + supportsImages: false, + contextWindow: 128000, + apiKeyUrl: 'https://build.nvidia.com/settings/api-keys', + setupGuideUrl: 'https://docs.nvidia.com/nim/', + }, { id: 'moonshot', name: 'Moonshot AI', @@ -161,6 +171,7 @@ export const providerTypeOptions: { value: ProviderType; label: string }[] = [ { value: 'lmstudio', label: 'LM Studio' }, { value: 'bedrock', label: 'AWS Bedrock' }, { value: 
'browseros', label: 'BrowserOS' }, + { value: 'nvidia-nim', label: 'NVIDIA NIM' }, ] /** @@ -192,6 +203,7 @@ export const DEFAULT_BASE_URLS: Record = { lmstudio: 'http://localhost:1234/v1', bedrock: '', browseros: '', + 'nvidia-nim': 'https://integrate.api.nvidia.com/v1', } /** diff --git a/packages/browseros-agent/apps/agent/lib/llm-providers/types.ts b/packages/browseros-agent/apps/agent/lib/llm-providers/types.ts index df537f2fb..dbc33d5de 100644 --- a/packages/browseros-agent/apps/agent/lib/llm-providers/types.ts +++ b/packages/browseros-agent/apps/agent/lib/llm-providers/types.ts @@ -17,6 +17,7 @@ export type ProviderType = | 'chatgpt-pro' | 'github-copilot' | 'qwen-code' + | 'nvidia-nim' /** * LLM Provider configuration diff --git a/packages/browseros-agent/apps/server/src/agent/provider-factory.ts b/packages/browseros-agent/apps/server/src/agent/provider-factory.ts index 263c09ed3..9bf253cbb 100644 --- a/packages/browseros-agent/apps/server/src/agent/provider-factory.ts +++ b/packages/browseros-agent/apps/server/src/agent/provider-factory.ts @@ -179,6 +179,17 @@ function createQwenCodeFactory( }) } +function createNvidiaNimFactory( + config: ResolvedAgentConfig, +): (modelId: string) => unknown { + if (!config.apiKey) throw new Error('NVIDIA NIM requires apiKey') + return createOpenAICompatible({ + name: 'nvidia-nim', + baseURL: EXTERNAL_URLS.NVIDIA_NIM_API, + apiKey: config.apiKey, + }) +} + function createGitHubCopilotFactory( config: ResolvedAgentConfig, ): (modelId: string) => unknown { @@ -218,6 +229,7 @@ const PROVIDER_FACTORIES: Record = { [LLM_PROVIDERS.CHATGPT_PRO]: createChatGPTProFactory, [LLM_PROVIDERS.GITHUB_COPILOT]: createGitHubCopilotFactory, [LLM_PROVIDERS.QWEN_CODE]: createQwenCodeFactory, + [LLM_PROVIDERS.NVIDIA_NIM]: createNvidiaNimFactory, } export function createLanguageModel( diff --git a/packages/browseros-agent/apps/server/src/lib/clients/llm/provider.ts b/packages/browseros-agent/apps/server/src/lib/clients/llm/provider.ts index 
8018ae69e..6969244fd 100644 --- a/packages/browseros-agent/apps/server/src/lib/clients/llm/provider.ts +++ b/packages/browseros-agent/apps/server/src/lib/clients/llm/provider.ts @@ -161,6 +161,15 @@ function createQwenCodeModel(config: ResolvedLLMConfig): LanguageModel { })(config.model) } +function createNvidiaNimModel(config: ResolvedLLMConfig): LanguageModel { + if (!config.apiKey) throw new Error('NVIDIA NIM requires apiKey') + return createOpenAICompatible({ + name: 'nvidia-nim', + baseURL: EXTERNAL_URLS.NVIDIA_NIM_API, + apiKey: config.apiKey, + })(config.model) +} + function createGitHubCopilotModel(config: ResolvedLLMConfig): LanguageModel { if (!config.apiKey) throw new Error('GitHub Copilot requires OAuth authentication') @@ -196,6 +205,7 @@ const PROVIDER_FACTORIES: Record = { [LLM_PROVIDERS.CHATGPT_PRO]: createChatGPTProModel, [LLM_PROVIDERS.GITHUB_COPILOT]: createGitHubCopilotModel, [LLM_PROVIDERS.QWEN_CODE]: createQwenCodeModel, + [LLM_PROVIDERS.NVIDIA_NIM]: createNvidiaNimModel, } export function createLLMProvider(config: ResolvedLLMConfig): LanguageModel { diff --git a/packages/browseros-agent/packages/shared/src/constants/urls.ts b/packages/browseros-agent/packages/shared/src/constants/urls.ts index 7ee114c56..6dfb528f9 100644 --- a/packages/browseros-agent/packages/shared/src/constants/urls.ts +++ b/packages/browseros-agent/packages/shared/src/constants/urls.ts @@ -21,4 +21,5 @@ export const EXTERNAL_URLS = { QWEN_DEVICE_CODE: 'https://chat.qwen.ai/api/v1/oauth2/device/code', QWEN_OAUTH_TOKEN: 'https://chat.qwen.ai/api/v1/oauth2/token', QWEN_CODE_API: 'https://portal.qwen.ai/v1', + NVIDIA_NIM_API: 'https://integrate.api.nvidia.com/v1', } as const diff --git a/packages/browseros-agent/packages/shared/src/schemas/llm.ts b/packages/browseros-agent/packages/shared/src/schemas/llm.ts index 45e7cc029..7344c5d90 100644 --- a/packages/browseros-agent/packages/shared/src/schemas/llm.ts +++ b/packages/browseros-agent/packages/shared/src/schemas/llm.ts @@ 
-27,6 +27,7 @@ export const LLM_PROVIDERS = { CHATGPT_PRO: 'chatgpt-pro', GITHUB_COPILOT: 'github-copilot', QWEN_CODE: 'qwen-code', + NVIDIA_NIM: 'nvidia-nim', } as const /** @@ -48,6 +49,7 @@ export const LLMProviderSchema: z.ZodEnum< 'chatgpt-pro', 'github-copilot', 'qwen-code', + 'nvidia-nim', ] > = z.enum([ LLM_PROVIDERS.ANTHROPIC, @@ -64,6 +66,7 @@ export const LLMProviderSchema: z.ZodEnum< LLM_PROVIDERS.CHATGPT_PRO, LLM_PROVIDERS.GITHUB_COPILOT, LLM_PROVIDERS.QWEN_CODE, + LLM_PROVIDERS.NVIDIA_NIM, ]) export type LLMProvider = z.infer