2 changes: 1 addition & 1 deletion packages/app/src/custom-elements.d.ts
117 changes: 79 additions & 38 deletions packages/opencode/src/provider/model-cache.ts
@@ -167,6 +167,10 @@ export namespace ModelCache {
if (providerID === "apertis") {
return fetchApertisModels(options)
}

if (providerID === "lmstudio") {
return fetchOpenAICompatibleModels(options)
}
// kilocode_change end

// Other providers not implemented yet
@@ -177,52 +181,59 @@ export namespace ModelCache {
// kilocode_change start
const APERTIS_BASE_URL = "https://api.apertis.ai/v1"

async function fetchApertisModels(options: any): Promise<Record<string, any>> {
const baseURL = options.baseURL ?? APERTIS_BASE_URL
async function fetchOpenAICompatibleModels(options: { baseURL?: string; apiKey?: string }): Promise<Record<string, any>> {
const baseURL = options.baseURL ?? "http://127.0.0.1:1234/v1"
const apiKey = options.apiKey

if (!apiKey) {
log.debug("no API key for apertis, skipping model fetch")
return {}
}

const url = `${baseURL.replace(/\/+$/, "")}/models`
const response = await fetch(url, {
headers: {
Authorization: `Bearer ${apiKey}`,
},
signal: AbortSignal.timeout(10_000),
})

if (!response.ok) {
log.error("apertis model fetch failed", { status: response.status })
return {}
}
try {
const response = await Bun.fetch(url, {
method: "GET",
headers: apiKey ? { Authorization: `Bearer ${apiKey}` } : {},
signal: AbortSignal.timeout(10_000),
})

const json = (await response.json()) as { data?: Array<{ id: string; owned_by?: string }> }
const models: Record<string, any> = {}

for (const model of json.data ?? []) {
models[model.id] = {
id: model.id,
name: model.id,
family: model.owned_by ?? "",
release_date: "",
attachment: true,
reasoning: false,
temperature: true,
tool_call: true,
cost: { input: 0, output: 0 },
limit: { context: 128000, output: 4096 },
options: {},
modalities: {
input: ["text", "image"],
output: ["text"],
},
if (!response.ok) {
log.error("openai-compatible model fetch failed", { url, status: response.status })
return {}
}

const json = await response.json() as { data?: Array<{ id: string; owned_by?: string }> }

const models: Record<string, any> = {}
for (const model of json.data ?? []) {
models[model.id] = {
id: model.id,
name: model.id,
family: model.owned_by ?? "",
release_date: "",
attachment: true,
reasoning: false,
temperature: true,
tool_call: true,
cost: { input: 0, output: 0 },
limit: { context: 128000, output: 4096 },
options: {},
modalities: {
input: ["text", "image"],
output: ["text"],
},
}
}

return models
} catch (error) {
log.error("openai-compatible model fetch error", { url, error })
return {}
}
}

return models
async function fetchApertisModels(options: any): Promise<Record<string, any>> {
  return fetchOpenAICompatibleModels({
    baseURL: options.baseURL ?? APERTIS_BASE_URL,
    apiKey: options.apiKey,
  })
}

Contributor:

WARNING: Refactor removes the Apertis API-key guard

Before this helper was generalized, fetchApertisModels() returned early when apiKey was missing. After this change we still call https://api.apertis.ai/v1/models without auth, and models.ts immediately schedules a second refresh() because the cached result is empty. That turns an unconfigured provider into repeated failing network traffic on every startup.
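
A minimal fix, as a sketch (not part of this PR; it assumes the file's existing log helper): keep the generalized helper but restore the guard at the Apertis call site.

async function fetchApertisModels(options: any): Promise<Record<string, any>> {
  // Sketch: restore the early return the refactor dropped, so an
  // unconfigured Apertis provider never hits the network.
  if (!options.apiKey) {
    log.debug("no API key for apertis, skipping model fetch")
    return {}
  }
  return fetchOpenAICompatibleModels({
    baseURL: options.baseURL ?? APERTIS_BASE_URL,
    apiKey: options.apiKey,
  })
}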
// kilocode_change end

@@ -310,6 +321,36 @@ export namespace ModelCache {
hasBaseURL: !!options.baseURL,
})
}

if (providerID === "lmstudio") {
const config = await Config.get()
const providerConfig = config.provider?.[providerID]
if (providerConfig?.options?.apiKey) {
options.apiKey = providerConfig.options.apiKey
}
if (providerConfig?.options?.baseURL) {
options.baseURL = providerConfig.options.baseURL
}

const auth = await Auth.get(providerID)
if (auth && auth.type === "api") {
options.apiKey = auth.key
}

const env = process.env
if (env.LMSTUDIO_API_KEY) {
options.apiKey = env.LMSTUDIO_API_KEY
}
if (env.LMSTUDIO_BASE_URL) {
options.baseURL = env.LMSTUDIO_BASE_URL
}

log.debug("lmstudio auth options resolved", {
providerID,
hasKey: !!options.apiKey,
hasBaseURL: !!options.baseURL,
})
}
// kilocode_change end

return options
69 changes: 69 additions & 0 deletions packages/opencode/src/provider/models.ts
@@ -199,6 +199,9 @@ export async function get() {
const disabled = new Set(config.disabled_providers ?? [])
const enabled = config.enabled_providers ? new Set(config.enabled_providers) : null
const kiloAllowed = (!enabled || enabled.has("kilo")) && !disabled.has("kilo")
const lmstudioDisabled = disabled.has("lmstudio")
const lmstudioEnabled = enabled ? enabled.has("lmstudio") : true
const lmstudioAllowed = lmstudioEnabled && !lmstudioDisabled

if (kiloAllowed && !providers["kilo"]) {
const kiloOptions = config.provider?.kilo?.options
@@ -254,6 +257,39 @@ export async function get() {
ModelCache.refresh("apertis", apertisFetchOptions).catch(() => {})
}
}

// LM Studio dynamic model discovery
if (lmstudioAllowed) {
const lmstudioConfig = config.provider?.lmstudio?.options
Collaborator:
This logic is largely duplicated by the block below; please generalize if possible.
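
One way to address this, as a sketch: hoist the block into a helper both branches call. registerLMStudioProvider is hypothetical (not in this PR) and assumes the file's existing ModelCache and log imports.

async function registerLMStudioProvider(providers: Record<string, any>, config: any): Promise<void> {
  const opts = config.provider?.lmstudio?.options
  const baseURL = opts?.baseURL ?? "http://127.0.0.1:1234/v1"
  const fetchOptions = {
    ...(opts?.baseURL ? { baseURL: opts.baseURL } : {}),
    ...(opts?.apiKey ? { apiKey: opts.apiKey } : {}),
  }
  try {
    const models = await ModelCache.fetch("lmstudio", fetchOptions).catch(() => ({}))
    if (Object.keys(models).length === 0) return
    if (!providers["lmstudio"]) {
      providers["lmstudio"] = {
        id: "lmstudio",
        name: "LMStudio",
        env: ["LMSTUDIO_API_KEY"],
        api: baseURL,
        npm: "@ai-sdk/openai-compatible",
        models,
      }
    } else {
      // Preserve the current behavior: refresh models, and only move the
      // endpoint when an explicit baseURL was configured.
      providers["lmstudio"].models = models
      if (opts?.baseURL) providers["lmstudio"].api = baseURL
    }
  } catch (err) {
    log.debug("lmstudio model fetch failed, using snapshot fallback", { error: err })
  }
}

Both call sites would then collapse to await registerLMStudioProvider(providers, config).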

const lmstudioBaseURL = lmstudioConfig?.baseURL ?? "http://127.0.0.1:1234/v1"
Contributor:
WARNING: LMSTUDIO_BASE_URL can fetch models from one endpoint and send requests to another

ModelCache.fetch("lmstudio", ...) resolves baseURL from config, auth, and LMSTUDIO_BASE_URL, but lmstudioBaseURL here only reads config.provider?.lmstudio?.options. If a user sets only LMSTUDIO_BASE_URL, discovery will hit that URL while provider.api still stays on http://127.0.0.1:1234/v1, so the selected model can be listed from one server and invoked against another.
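
A sketch of one possible fix: resolve the discovery URL with the same precedence ModelCache.fetch ends up applying (environment variable over config over default), so the listed and invoked endpoints cannot diverge.

// Sketch: keep discovery and provider.api pointed at the same endpoint.
const lmstudioBaseURL =
  process.env.LMSTUDIO_BASE_URL ?? lmstudioConfig?.baseURL ?? "http://127.0.0.1:1234/v1"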

const lmstudioFetchOptions = {
...(lmstudioConfig?.baseURL ? { baseURL: lmstudioConfig.baseURL } : {}),
...(lmstudioConfig?.apiKey ? { apiKey: lmstudioConfig.apiKey } : {}),
}

try {
const lmstudioModels = await ModelCache.fetch("lmstudio", lmstudioFetchOptions).catch(() => ({}))
if (Object.keys(lmstudioModels).length > 0) {
if (!providers["lmstudio"]) {
providers["lmstudio"] = {
id: "lmstudio",
name: "LMStudio",
env: ["LMSTUDIO_API_KEY"],
api: lmstudioBaseURL,
npm: "@ai-sdk/openai-compatible",
models: lmstudioModels,
}
} else {
providers["lmstudio"].models = lmstudioModels
if (lmstudioConfig?.baseURL) {
providers["lmstudio"].api = lmstudioBaseURL
}
}
}
} catch (err) {
log.debug("lmstudio model fetch failed, using snapshot fallback", { error: err })
}
}
} else if (!providers["apertis"]) {
const apertisConfig = config.provider?.apertis?.options
const apertisBaseURL = apertisConfig?.baseURL ?? "https://api.apertis.ai/v1"
Expand All @@ -272,6 +308,39 @@ export async function get() {
if (Object.keys(apertisModels).length === 0) {
ModelCache.refresh("apertis", apertisFetchOptions).catch(() => {})
}

Collaborator:
We can't accept these changes without kilocode_change markers and a changelog entry.

// LM Studio dynamic model discovery (when kilo is not allowed)
if (lmstudioAllowed) {
const lmstudioConfig = config.provider?.lmstudio?.options
const lmstudioBaseURL = lmstudioConfig?.baseURL ?? "http://127.0.0.1:1234/v1"
const lmstudioFetchOptions = {
...(lmstudioConfig?.baseURL ? { baseURL: lmstudioConfig.baseURL } : {}),
...(lmstudioConfig?.apiKey ? { apiKey: lmstudioConfig.apiKey } : {}),
}

try {
const lmstudioModels = await ModelCache.fetch("lmstudio", lmstudioFetchOptions).catch(() => ({}))
Collaborator:
Should we log errors?
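
One option, as a sketch: log inside the catch instead of discarding the error, reusing the file's log helper.

const lmstudioModels = await ModelCache.fetch("lmstudio", lmstudioFetchOptions).catch((err) => {
  // Sketch: surface the failure before falling back to an empty model map.
  log.error("lmstudio model fetch failed", { error: err })
  return {}
})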

if (Object.keys(lmstudioModels).length > 0) {
if (!providers["lmstudio"]) {
providers["lmstudio"] = {
id: "lmstudio",
name: "LMStudio",
env: ["LMSTUDIO_API_KEY"],
api: lmstudioBaseURL,
npm: "@ai-sdk/openai-compatible",
models: lmstudioModels,
}
} else {
providers["lmstudio"].models = lmstudioModels
if (lmstudioConfig?.baseURL) {
providers["lmstudio"].api = lmstudioBaseURL
}
}
}
} catch (err) {
log.debug("lmstudio model fetch failed, using snapshot fallback", { error: err })
}
}
}

return providers