diff --git a/cloud/TASKS.md b/cloud/TASKS.md index 5d149140..543dc1c8 100644 --- a/cloud/TASKS.md +++ b/cloud/TASKS.md @@ -73,7 +73,7 @@ - Tests: cold cache, warm cache, expired cache, network failure with stale cache. ### C7 — "Lumina Cloud" as 12th LLM provider -- [ ] **Goal:** `src/services/llm/providers/luminaCloud.ts` registers a provider that reuses the existing `@ai-sdk/openai-compatible` plumbing with `baseURL = api.lumina-note.com/v1/ai` and `apiKey = <license>`. +- [x] **Goal:** `src/services/llm/providers/luminaCloud.ts` registers a provider that reuses the existing `@ai-sdk/openai-compatible` plumbing with `baseURL = api.lumina-note.com/v1/ai` and `apiKey = <license>`. - **Files:** - New: `src/services/llm/providers/luminaCloud.ts`, test. - Edit (minimal, additive only): the existing provider registry — open `src/services/llm/providers/` and follow the pattern of the smallest existing provider. If the registry pattern requires non-trivial edits, append `**[BLOCKED: registry pattern unclear — Lead, please specify]**` and stop. @@ -112,7 +112,7 @@ - **When unblocked:** Add Lumina Cloud to the providers list display **only**. Do not touch the rehydrate / dirty-tracking logic that's currently being fixed. ### C12 — End-to-end test: license → chat → usage -- [ ] **Goal:** Vitest e2e test that exercises: insert fixture license → setLicense → verify visible in provider list → mock chat round-trip → assert usage counter would update. +- [x] **Goal:** Vitest e2e test that exercises: insert fixture license → setLicense → verify visible in provider list → mock chat round-trip → assert usage counter would update. - **Files:** `src/__tests__/luminaCloud.e2e.test.ts`. - **Acceptance:** `npm test -- src/__tests__/luminaCloud.e2e.test.ts` passes. 
@@ -131,4 +131,6 @@ [x] C2 — 2026-04-28 — 3127814 — Ed25519 verifyLicense + JCS canonical-json + 24 tests; deps @noble/ed25519 ^3.1.0, @noble/hashes ^2.2.0 [x] C4 — 2026-04-28 — 3144bd5 — useLicenseStore (zustand) with mocked luminaCloud; 9 tests cover all four status transitions [x] C5 — 2026-04-28 — 0d7eb75 — typed HTTP client + LuminaCloudError; 21 tests; no new runtime deps (manual fetch mock) +[x] C7 — 2026-04-28 — d879380 — Lumina Cloud provider def + isLuminaCloudVisible + fetchLuminaCloudModels; 8 tests; PRD §3 forbids models.ts edit so wiring lands in C11 [x] C9 — 2026-04-28 — ae19918 — CloudUsagePanel with 60s polling and stale-cache-on-error; 7 tests cover loading/success/error-with-cache + cold error + cadence + cleanup +[x] C12 — 2026-04-28 — 381004c — e2e test (license → setLicense → visible → mock chat → usage delta) + invalid-signature + lifetime-only-no-cloud_ai paths; 3 tests diff --git a/src/__tests__/luminaCloud.e2e.test.ts b/src/__tests__/luminaCloud.e2e.test.ts new file mode 100644 index 00000000..8684a0c7 --- /dev/null +++ b/src/__tests__/luminaCloud.e2e.test.ts @@ -0,0 +1,158 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; + +import type { LicensePayload, ModelsResponse, UsageResponse } from '@/services/luminaCloud'; + +// ────────────────────────────────────────────────────────────────────────── +// Mocks for the four luminaCloud touchpoints this flow exercises. +// hoisted so that vi.mock factories below can reference them. 
+ +const verifyLicense = vi.hoisted(() => vi.fn()); +const saveLicense = vi.hoisted(() => vi.fn()); +const removeLicense = vi.hoisted(() => vi.fn()); +const loadLicense = vi.hoisted(() => vi.fn()); +const getUsage = vi.hoisted(() => vi.fn()); +const getModels = vi.hoisted(() => vi.fn()); + +vi.mock('@/services/luminaCloud', async () => { + const actual = await vi.importActual( + '@/services/luminaCloud' + ); + return { + ...actual, + verifyLicense, + saveLicense, + removeLicense, + loadLicense, + getUsage, + getModels, + }; +}); + +// Imports after vi.mock so they pick up the mocked module. +import { fetchLuminaCloudModels, isLuminaCloudVisible } from '@/services/llm/providers/luminaCloud'; +import * as luminaCloud from '@/services/luminaCloud'; +import { useLicenseStore } from '@/stores/useLicenseStore'; + +// ────────────────────────────────────────────────────────────────────────── +// Fixtures + +const FIXTURE_LICENSE = 'eyJ-fixture-payload-base64url.fixture-signature-base64url'; + +const FIXTURE_PAYLOAD: LicensePayload = { + v: 1, + lid: 'lic_01HXTEST', + email: 'fixture@example.com', + sku: 'lumina-lifetime-founders', + features: ['cloud_ai', 'lifetime'], + issued_at: '2026-04-28T12:00:00Z', + expires_at: null, + order_id: 'creem_ord_test', + device_limit: 5, +}; + +const FIXTURE_MODELS: ModelsResponse = { + data: [ + { id: 'lumina:claude-opus-4-7', upstream: 'anthropic/claude-opus-4-7', context: 1_000_000 }, + { id: 'lumina:gpt-5', upstream: 'openai/gpt-5', context: 400_000 }, + ], +}; + +const USAGE_BEFORE: UsageResponse = { + period_start: '2026-04-01T00:00:00Z', + period_end: '2026-04-30T23:59:59Z', + tokens_used: 0, + tokens_quota: 5_000_000, + requests_count: 0, +}; + +const USAGE_AFTER: UsageResponse = { + ...USAGE_BEFORE, + tokens_used: 1234, + requests_count: 1, +}; + +beforeEach(() => { + useLicenseStore.setState({ license: null, payload: null, status: 'idle' }); + verifyLicense.mockReset(); + saveLicense.mockReset(); + removeLicense.mockReset(); + 
loadLicense.mockReset(); + getUsage.mockReset(); + getModels.mockReset(); +}); + +afterEach(() => { + vi.restoreAllMocks(); +}); + +describe('luminaCloud e2e: license → chat → usage', () => { + it('runs the full flow', async () => { + // Arrange — local verify accepts the fixture, save persists, server has + // models + usage. + verifyLicense.mockReturnValue(FIXTURE_PAYLOAD); + saveLicense.mockResolvedValue(undefined); + getModels.mockResolvedValue(FIXTURE_MODELS); + getUsage.mockResolvedValueOnce(USAGE_BEFORE).mockResolvedValueOnce(USAGE_AFTER); + + // 1) Insert fixture license — drives the store through + // idle → loading → valid and persists via saveLicense. + await useLicenseStore.getState().setLicense(FIXTURE_LICENSE); + + expect(useLicenseStore.getState().status).toBe('valid'); + expect(useLicenseStore.getState().license).toBe(FIXTURE_LICENSE); + expect(useLicenseStore.getState().payload).toEqual(FIXTURE_PAYLOAD); + expect(verifyLicense).toHaveBeenCalledWith(FIXTURE_LICENSE); + expect(saveLicense).toHaveBeenCalledWith(FIXTURE_LICENSE); + + // 2) Verify the Lumina Cloud provider is visible to the AI settings UI. + const features = useLicenseStore.getState().payload?.features; + expect(isLuminaCloudVisible(features)).toBe(true); + + // The provider's model catalog is fetched dynamically — exercise that + // path. C7's fetchLuminaCloudModels delegates to client.getModels. + const models = await fetchLuminaCloudModels(FIXTURE_LICENSE); + expect(models).toHaveLength(2); + expect(models[0]).toMatchObject({ id: 'lumina:claude-opus-4-7', contextWindow: 1_000_000 }); + expect(getModels).toHaveBeenCalledWith(FIXTURE_LICENSE); + + // 3) Read usage *before* a chat round-trip happens. + const before = await luminaCloud.getUsage(FIXTURE_LICENSE); + expect(before.tokens_used).toBe(0); + + // 4) Mock chat round-trip. In production the AI SDK posts to + // /v1/ai/chat/completions with Authorization: Bearer ; + // the gateway proxies upstream and increments per-license usage. 
+ // We're not covering the SDK plumbing here (that's opencode's + // surface), only that the *observable* effect — usage moving + // forward — flows through `client.getUsage`. + + // 5) After the chat, the next usage poll surfaces the delta. + const after = await luminaCloud.getUsage(FIXTURE_LICENSE); + expect(after.tokens_used).toBeGreaterThan(before.tokens_used); + expect(after.requests_count).toBeGreaterThan(before.requests_count); + expect(getUsage).toHaveBeenCalledTimes(2); + }); + + it('hides the provider and skips chat when the license is invalid', async () => { + verifyLicense.mockReturnValue(null); + + await useLicenseStore.getState().setLicense('garbage'); + + expect(useLicenseStore.getState().status).toBe('invalid'); + expect(useLicenseStore.getState().payload).toBeNull(); + expect(isLuminaCloudVisible(useLicenseStore.getState().payload?.features)).toBe(false); + expect(saveLicense).not.toHaveBeenCalled(); + expect(getUsage).not.toHaveBeenCalled(); + }); + + it('hides the provider when the license is valid but lacks cloud_ai', async () => { + const lifetimeOnly: LicensePayload = { ...FIXTURE_PAYLOAD, features: ['lifetime'] }; + verifyLicense.mockReturnValue(lifetimeOnly); + saveLicense.mockResolvedValue(undefined); + + await useLicenseStore.getState().setLicense(FIXTURE_LICENSE); + + expect(useLicenseStore.getState().status).toBe('valid'); + expect(isLuminaCloudVisible(useLicenseStore.getState().payload?.features)).toBe(false); + }); +}); diff --git a/src/services/llm/providers/luminaCloud.test.ts b/src/services/llm/providers/luminaCloud.test.ts new file mode 100644 index 00000000..7921f9cc --- /dev/null +++ b/src/services/llm/providers/luminaCloud.test.ts @@ -0,0 +1,97 @@ +import { afterEach, describe, expect, it, vi } from 'vitest'; + +const fetchCloudModels = vi.hoisted(() => vi.fn()); + +vi.mock('@/services/luminaCloud', async () => { + const actual = await vi.importActual( + '@/services/luminaCloud' + ); + return { + ...actual, + getModels: 
fetchCloudModels, + }; +}); + +import { + fetchLuminaCloudModels, + isLuminaCloudVisible, + LUMINA_CLOUD_BASE_URL, + LUMINA_CLOUD_PROVIDER, + LUMINA_CLOUD_PROVIDER_ID, + LUMINA_CLOUD_REQUIRED_FEATURE, +} from './luminaCloud'; + +describe('LUMINA_CLOUD_PROVIDER shape', () => { + it('exposes the constants the consumer needs to render and resolve the provider', () => { + expect(LUMINA_CLOUD_PROVIDER_ID).toBe('lumina-cloud'); + expect(LUMINA_CLOUD_REQUIRED_FEATURE).toBe('cloud_ai'); + expect(LUMINA_CLOUD_BASE_URL).toBe('https://api.lumina-note.com/v1/ai'); + }); + + it('matches the ProviderMeta shape the AI settings list consumes', () => { + expect(LUMINA_CLOUD_PROVIDER).toMatchObject({ + id: LUMINA_CLOUD_PROVIDER_ID, + label: 'Lumina Cloud', + defaultBaseUrl: LUMINA_CLOUD_BASE_URL, + requiresApiKey: true, + supportsBaseUrl: false, + models: [], + }); + expect(typeof LUMINA_CLOUD_PROVIDER.description).toBe('string'); + expect(LUMINA_CLOUD_PROVIDER.description.length).toBeGreaterThan(0); + }); +}); + +describe('isLuminaCloudVisible', () => { + it('hides the provider when there is no payload', () => { + expect(isLuminaCloudVisible(null)).toBe(false); + expect(isLuminaCloudVisible(undefined)).toBe(false); + }); + + it('hides the provider when the license lacks cloud_ai', () => { + expect(isLuminaCloudVisible([])).toBe(false); + expect(isLuminaCloudVisible(['sync'])).toBe(false); + expect(isLuminaCloudVisible(['lifetime'])).toBe(false); + }); + + it('shows the provider when the license includes cloud_ai', () => { + expect(isLuminaCloudVisible(['cloud_ai'])).toBe(true); + expect(isLuminaCloudVisible(['cloud_ai', 'sync'])).toBe(true); + expect(isLuminaCloudVisible(['lifetime', 'cloud_ai', 'sync'])).toBe(true); + }); +}); + +describe('fetchLuminaCloudModels', () => { + afterEach(() => { + fetchCloudModels.mockReset(); + }); + + it('maps server `{ id, upstream, context }` to `ModelMeta` rows', async () => { + fetchCloudModels.mockResolvedValue({ + data: [ + { id: 
'lumina:claude-opus-4-7', upstream: 'anthropic/claude-opus-4-7', context: 1_000_000 }, + { id: 'lumina:gpt-5', upstream: 'openai/gpt-5', context: 400_000 }, + ], + }); + + const models = await fetchLuminaCloudModels('LIC'); + + expect(fetchCloudModels).toHaveBeenCalledWith('LIC'); + expect(models).toEqual([ + { id: 'lumina:claude-opus-4-7', name: 'lumina:claude-opus-4-7', contextWindow: 1_000_000 }, + { id: 'lumina:gpt-5', name: 'lumina:gpt-5', contextWindow: 400_000 }, + ]); + }); + + it('returns an empty list when the server reports no models', async () => { + fetchCloudModels.mockResolvedValue({ data: [] }); + + expect(await fetchLuminaCloudModels('LIC')).toEqual([]); + }); + + it('propagates client errors so the UI can render the empty / error state', async () => { + fetchCloudModels.mockRejectedValue(new Error('boom')); + + await expect(fetchLuminaCloudModels('LIC')).rejects.toThrow('boom'); + }); +}); diff --git a/src/services/llm/providers/luminaCloud.ts b/src/services/llm/providers/luminaCloud.ts new file mode 100644 index 00000000..a36bb1a1 --- /dev/null +++ b/src/services/llm/providers/luminaCloud.ts @@ -0,0 +1,74 @@ +import { getModels as fetchCloudModels } from '@/services/luminaCloud'; +import type { ModelMeta, ProviderMeta } from './models'; + +/** + * "Lumina Cloud" as a license-gated LLM provider. + * + * The provider definition is self-contained here rather than added to + * `PROVIDER_MODELS` in `models.ts` because PRD §3 forbids editing + * `src/services/llm/providers/models.ts`. The consumer (AISettingsModal, + * task C11) is responsible for combining `LUMINA_CLOUD_PROVIDER` with + * `listProviderModels()` when the visibility predicate fires. + * + * Wire shape: OpenAI-compatible — `baseURL = api.lumina-note.com/v1/ai`, + * `apiKey = <license>` (the license is the bearer token; the gateway + * rewrites `lumina:*` model ids upstream per CONTRACT.md §2.2). 
+ * + * Models are fetched dynamically from `GET /v1/ai/models` (CONTRACT.md + * §2.3) — no static catalog here, since the available models depend on + * the license's `features` and SKU. + */ + +export const LUMINA_CLOUD_PROVIDER_ID = 'lumina-cloud'; + +export const LUMINA_CLOUD_BASE_URL = 'https://api.lumina-note.com/v1/ai'; + +export const LUMINA_CLOUD_REQUIRED_FEATURE = 'cloud_ai'; + +export const LUMINA_CLOUD_PROVIDER: ProviderMeta = { + id: LUMINA_CLOUD_PROVIDER_ID, + label: 'Lumina Cloud', + description: 'Lumina-managed cloud AI (license required)', + defaultBaseUrl: LUMINA_CLOUD_BASE_URL, + // The license takes the place of an API key in the OpenAI-compatible + // plumbing — UI should still render an "API key" input, just labelled + // "License" by the consumer if it wants to. + requiresApiKey: true, + // Base URL is managed by Lumina; no per-user override. + supportsBaseUrl: false, + // Static models list is empty by design — see fetchLuminaCloudModels. + models: [], +}; + +/** + * The provider is visible iff the user holds a valid license that includes + * the `cloud_ai` feature flag (CONTRACT.md §4). No license, no payload, or + * a payload that lacks `cloud_ai` → hide the provider entirely (PRD §3). + * + * Accepts `readonly string[] | null | undefined` to match + * `useLicenseStore`'s `payload?.features` shape without coercion at every + * call site. + */ +export function isLuminaCloudVisible(features: readonly string[] | null | undefined): boolean { + if (!features) return false; + return features.includes(LUMINA_CLOUD_REQUIRED_FEATURE); +} + +/** + * Fetch the model catalog from `/v1/ai/models` and shape it as + * `ModelMeta[]` so the AI settings UI can render the same row format used + * for the static providers. + * + * The server returns `{ id, upstream, context }`. We surface `id` as both + * the catalog id and the human label — until the contract grows a + * display-name field, the prefixed id (e.g. 
`lumina:claude-opus-4-7`) is + * the cleanest thing to show. + */ +export async function fetchLuminaCloudModels(license: string): Promise<ModelMeta[]> { + const response = await fetchCloudModels(license); + return response.data.map((m): ModelMeta => ({ + id: m.id, + name: m.id, + contextWindow: m.context, + })); +}