diff --git a/packages/app/src/custom-elements.d.ts b/packages/app/src/custom-elements.d.ts
index e4ea0d6cebd..075c1614f78 120000
--- a/packages/app/src/custom-elements.d.ts
+++ b/packages/app/src/custom-elements.d.ts
@@ -1 +1 @@
-../../ui/src/custom-elements.d.ts
\ No newline at end of file
+///
\ No newline at end of file
diff --git a/packages/opencode/src/provider/model-cache.ts b/packages/opencode/src/provider/model-cache.ts
index 1dc0c4d1969..efd4b62c470 100644
--- a/packages/opencode/src/provider/model-cache.ts
+++ b/packages/opencode/src/provider/model-cache.ts
@@ -167,6 +167,10 @@ export namespace ModelCache {
if (providerID === "apertis") {
return fetchApertisModels(options)
}
+
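+ // LM Studio exposes an OpenAI-compatible /v1 API, so its model list can be fetched with the generic OpenAI-compatible fetcher below.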
+ if (providerID === "lmstudio") {
+ return fetchOpenAICompatibleModels(options)
+ }
// kilocode_change end
// Other providers not implemented yet
@@ -177,52 +181,59 @@ export namespace ModelCache {
// kilocode_change start
const APERTIS_BASE_URL = "https://api.apertis.ai/v1"
- async function fetchApertisModels(options: any): Promise<Record<string, any>> {
- const baseURL = options.baseURL ?? APERTIS_BASE_URL
+ async function fetchOpenAICompatibleModels(options: { baseURL?: string; apiKey?: string }): Promise<Record<string, any>> {
+ const baseURL = options.baseURL ?? "http://127.0.0.1:1234/v1"
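+ // Unlike the previous Apertis-only path, a missing API key is not treated as an error: local OpenAI-compatible servers such as LM Studio typically accept unauthenticated requests, so the Authorization header is sent only when a key is configured.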
const apiKey = options.apiKey
- if (!apiKey) {
- log.debug("no API key for apertis, skipping model fetch")
- return {}
- }
-
const url = `${baseURL.replace(/\/+$/, "")}/models`
- const response = await fetch(url, {
- headers: {
- Authorization: `Bearer ${apiKey}`,
- },
- signal: AbortSignal.timeout(10_000),
- })
- if (!response.ok) {
- log.error("apertis model fetch failed", { status: response.status })
- return {}
- }
+ try {
+ const response = await Bun.fetch(url, {
+ method: "GET",
+ headers: apiKey ? { Authorization: `Bearer ${apiKey}` } : {},
+ signal: AbortSignal.timeout(10_000),
+ })
- const json = (await response.json()) as { data?: Array<{ id: string; owned_by?: string }> }
- const models: Record<string, any> = {}
-
- for (const model of json.data ?? []) {
- models[model.id] = {
- id: model.id,
- name: model.id,
- family: model.owned_by ?? "",
- release_date: "",
- attachment: true,
- reasoning: false,
- temperature: true,
- tool_call: true,
- cost: { input: 0, output: 0 },
- limit: { context: 128000, output: 4096 },
- options: {},
- modalities: {
- input: ["text", "image"],
- output: ["text"],
- },
+ if (!response.ok) {
+ log.error("openai-compatible model fetch failed", { url, status: response.status })
+ return {}
}
+
+ const json = await response.json() as { data?: Array<{ id: string; owned_by?: string }> }
+
+ const models: Record<string, any> = {}
+ for (const model of json.data ?? []) {
+ models[model.id] = {
+ id: model.id,
+ name: model.id,
+ family: model.owned_by ?? "",
+ release_date: "",
+ attachment: true,
+ reasoning: false,
+ temperature: true,
+ tool_call: true,
+ cost: { input: 0, output: 0 },
+ limit: { context: 128000, output: 4096 },
+ options: {},
+ modalities: {
+ input: ["text", "image"],
+ output: ["text"],
+ },
+ }
+ }
+
+ return models
+ } catch (error) {
+ log.error("openai-compatible model fetch error", { url, error })
+ return {}
}
+ }
- return models
+ async function fetchApertisModels(options: any): Promise<Record<string, any>> {
+ return fetchOpenAICompatibleModels({
+ baseURL: options.baseURL ?? APERTIS_BASE_URL,
+ apiKey: options.apiKey,
+ })
}
// kilocode_change end
@@ -310,6 +321,36 @@ export namespace ModelCache {
hasBaseURL: !!options.baseURL,
})
}
+
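+ // Resolve LM Studio credentials in increasing order of precedence: provider config, stored auth, then the LMSTUDIO_API_KEY / LMSTUDIO_BASE_URL environment variables.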
+ if (providerID === "lmstudio") {
+ const config = await Config.get()
+ const providerConfig = config.provider?.[providerID]
+ if (providerConfig?.options?.apiKey) {
+ options.apiKey = providerConfig.options.apiKey
+ }
+ if (providerConfig?.options?.baseURL) {
+ options.baseURL = providerConfig.options.baseURL
+ }
+
+ const auth = await Auth.get(providerID)
+ if (auth && auth.type === "api") {
+ options.apiKey = auth.key
+ }
+
+ const env = process.env
+ if (env.LMSTUDIO_API_KEY) {
+ options.apiKey = env.LMSTUDIO_API_KEY
+ }
+ if (env.LMSTUDIO_BASE_URL) {
+ options.baseURL = env.LMSTUDIO_BASE_URL
+ }
+
+ log.debug("lmstudio auth options resolved", {
+ providerID,
+ hasKey: !!options.apiKey,
+ hasBaseURL: !!options.baseURL,
+ })
+ }
// kilocode_change end
return options
diff --git a/packages/opencode/src/provider/models.ts b/packages/opencode/src/provider/models.ts
index b3c3359194b..55d482624ca 100644
--- a/packages/opencode/src/provider/models.ts
+++ b/packages/opencode/src/provider/models.ts
@@ -199,6 +199,9 @@ export async function get() {
const disabled = new Set(config.disabled_providers ?? [])
const enabled = config.enabled_providers ? new Set(config.enabled_providers) : null
const kiloAllowed = (!enabled || enabled.has("kilo")) && !disabled.has("kilo")
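+ // LM Studio follows the same gating as other providers: it must not appear in disabled_providers, and if enabled_providers is set it must be listed there.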
+ const lmstudioDisabled = disabled.has("lmstudio")
+ const lmstudioEnabled = enabled ? enabled.has("lmstudio") : true
+ const lmstudioAllowed = lmstudioEnabled && !lmstudioDisabled
if (kiloAllowed && !providers["kilo"]) {
const kiloOptions = config.provider?.kilo?.options
@@ -254,6 +257,39 @@ export async function get() {
ModelCache.refresh("apertis", apertisFetchOptions).catch(() => {})
}
}
+
+ // LM Studio dynamic model discovery
+ if (lmstudioAllowed) {
+ const lmstudioConfig = config.provider?.lmstudio?.options
+ const lmstudioBaseURL = lmstudioConfig?.baseURL ?? "http://127.0.0.1:1234/v1"
+ const lmstudioFetchOptions = {
+ ...(lmstudioConfig?.baseURL ? { baseURL: lmstudioConfig.baseURL } : {}),
+ ...(lmstudioConfig?.apiKey ? { apiKey: lmstudioConfig.apiKey } : {}),
+ }
+
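+ // Only register or update the provider when the local server actually reports models; an empty result or a failed fetch leaves any existing snapshot in place.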
+ try {
+ const lmstudioModels = await ModelCache.fetch("lmstudio", lmstudioFetchOptions).catch(() => ({}))
+ if (Object.keys(lmstudioModels).length > 0) {
+ if (!providers["lmstudio"]) {
+ providers["lmstudio"] = {
+ id: "lmstudio",
+ name: "LMStudio",
+ env: ["LMSTUDIO_API_KEY"],
+ api: lmstudioBaseURL,
+ npm: "@ai-sdk/openai-compatible",
+ models: lmstudioModels,
+ }
+ } else {
+ providers["lmstudio"].models = lmstudioModels
+ if (lmstudioConfig?.baseURL) {
+ providers["lmstudio"].api = lmstudioBaseURL
+ }
+ }
+ }
+ } catch (err) {
+ log.debug("lmstudio model fetch failed, using snapshot fallback", { error: err })
+ }
+ }
} else if (!providers["apertis"]) {
const apertisConfig = config.provider?.apertis?.options
const apertisBaseURL = apertisConfig?.baseURL ?? "https://api.apertis.ai/v1"
@@ -272,6 +308,39 @@ export async function get() {
if (Object.keys(apertisModels).length === 0) {
ModelCache.refresh("apertis", apertisFetchOptions).catch(() => {})
}
+
+ // LM Studio dynamic model discovery (when kilo is not allowed)
+ if (lmstudioAllowed) {
+ const lmstudioConfig = config.provider?.lmstudio?.options
+ const lmstudioBaseURL = lmstudioConfig?.baseURL ?? "http://127.0.0.1:1234/v1"
+ const lmstudioFetchOptions = {
+ ...(lmstudioConfig?.baseURL ? { baseURL: lmstudioConfig.baseURL } : {}),
+ ...(lmstudioConfig?.apiKey ? { apiKey: lmstudioConfig.apiKey } : {}),
+ }
+
+ try {
+ const lmstudioModels = await ModelCache.fetch("lmstudio", lmstudioFetchOptions).catch(() => ({}))
+ if (Object.keys(lmstudioModels).length > 0) {
+ if (!providers["lmstudio"]) {
+ providers["lmstudio"] = {
+ id: "lmstudio",
+ name: "LMStudio",
+ env: ["LMSTUDIO_API_KEY"],
+ api: lmstudioBaseURL,
+ npm: "@ai-sdk/openai-compatible",
+ models: lmstudioModels,
+ }
+ } else {
+ providers["lmstudio"].models = lmstudioModels
+ if (lmstudioConfig?.baseURL) {
+ providers["lmstudio"].api = lmstudioBaseURL
+ }
+ }
+ }
+ } catch (err) {
+ log.debug("lmstudio model fetch failed, using snapshot fallback", { error: err })
+ }
+ }
}
return providers