diff --git a/.changeset/ao-migrate-v3-dry-run.md b/.changeset/ao-migrate-v3-dry-run.md new file mode 100644 index 0000000000..53bff57510 --- /dev/null +++ b/.changeset/ao-migrate-v3-dry-run.md @@ -0,0 +1,12 @@ +--- +"@aoagents/ao-core": minor +"@aoagents/ao-cli": minor +--- + +Add `ao migrate` (replaces `ao migrate-storage`). Inventories the AO storage tree, detects identity-system drift (V1 bare-basename projectIds, doubled-prefix and storageKey-prefixed tmux names, numbered orchestrators, legacy workspacePaths, observability-dir leaks, stranded `~/.worktrees/` leaves, same-repo duplicate registrations, lingering `storageKey` schema fields) and prints a step-by-step V3 plan plus a structured JSON record (`--json [--output <path>]`). + +Execution is gated in this release: `ao migrate --execute` and `ao migrate --rollback` print a feedback notice and exit 1. The intent is to collect dry-run output from real users before any disk writes land. + +`ao migrate-storage` is removed from the CLI registry; the V1→V2 helpers stay internal in `@aoagents/ao-core` so the new `ao migrate --dry-run` can detect and report on V1 hash directories. The `ao start` legacy-storage warning now points at `ao migrate --dry-run`. + +Public API additions in `@aoagents/ao-core`: `inventoryV3`, `planV3`, `formatBytes`, plus types `V3Inventory`, `V3Plan`, `V3Step`, `V3Issue`, `V3IssueKind`, `V3ProjectInventory`, `V3StrandedWorktree`, `V3LiveTmuxSession`, `V3DuplicateRepo`, `V3InventoryOptions`. 
diff --git a/packages/cli/src/commands/migrate-storage.ts b/packages/cli/src/commands/migrate-storage.ts deleted file mode 100644 index 04f450439f..0000000000 --- a/packages/cli/src/commands/migrate-storage.ts +++ /dev/null @@ -1,43 +0,0 @@ -import type { Command } from "commander"; -import chalk from "chalk"; -import { migrateStorage, rollbackStorage } from "@aoagents/ao-core"; - -export function registerMigrateStorage(program: Command): void { - program - .command("migrate-storage") - .description( - "Migrate storage from legacy hash-based layout to projects/{projectId}/ layout", - ) - .option("--dry-run", "Report what would be done without making changes") - .option("--force", "Migrate even if active tmux sessions are detected") - .option("--rollback", "Reverse a previous migration (restores .migrated directories)") - .action( - async (opts: { dryRun?: boolean; force?: boolean; rollback?: boolean }) => { - try { - if (opts.rollback) { - await rollbackStorage({ - dryRun: opts.dryRun, - log: (msg) => console.log(msg), - }); - } else { - const result = await migrateStorage({ - dryRun: opts.dryRun, - force: opts.force, - log: (msg) => console.log(msg), - }); - - if (result.projects === 0 && !opts.dryRun) { - console.log(chalk.green("\nNothing to migrate — already on V2 layout.")); - } else { - console.log(chalk.green("\nMigration complete.")); - } - } - } catch (err) { - console.error( - chalk.red(err instanceof Error ? 
err.message : String(err)), - ); - process.exit(1); - } - }, - ); -} diff --git a/packages/cli/src/commands/migrate.ts b/packages/cli/src/commands/migrate.ts new file mode 100644 index 0000000000..5d2e040231 --- /dev/null +++ b/packages/cli/src/commands/migrate.ts @@ -0,0 +1,197 @@ +import type { Command } from "commander"; +import chalk from "chalk"; +import { writeFileSync } from "node:fs"; + +import { + formatBytes, + getAoBaseDir, + getGlobalConfigPath, + inventoryV3, + planV3, + type V3Plan, +} from "@aoagents/ao-core"; +import { homedir } from "node:os"; +import { join } from "node:path"; + +import { getCliVersion } from "../options/version.js"; + +interface MigrateOptions { + dryRun?: boolean; + json?: boolean; + output?: string; + execute?: boolean; + rollback?: boolean; +} + +const FEEDBACK_ISSUE_URL = + "https://github.com/ComposioHQ/agent-orchestrator/issues/new?title=ao+migrate+dry-run+output"; + +const GATED_MESSAGE = ` +${chalk.bold.red("ao migrate execution is gated in v0.6.0.")} + +This release ships ${chalk.cyan("--dry-run")} only so we can review real-world plan output +before the migration touches any disk on user machines. + +Please share dry-run output: + 1. ${chalk.dim("ao migrate --json --output ~/ao-migrate-plan.json")} + 2. Open an issue with that file attached: ${FEEDBACK_ISSUE_URL} + +Execution unlocks in v0.6.1. +`; + +export function registerMigrate(program: Command): void { + program + .command("migrate") + .description( + "Inventory + plan storage migration to V3 (one-format identity, one prefix allocator, observability inside projects). 
Dry-run only in v0.6.0.", ) + .option("--dry-run", "Inventory + plan only (default)", true) + .option("--json", "Emit V3Plan as JSON to stdout") + .option("--output <path>", "Write the V3Plan record to a file instead of stdout") + .option("--execute", "[gated] Apply the plan to disk") + .option("--rollback", "[gated] Reverse a previous migration") + .action(async (opts: MigrateOptions) => { + if (opts.execute || opts.rollback) { + process.stderr.write(GATED_MESSAGE + "\n"); + process.exit(1); + } + + const aoBaseDir = getAoBaseDir(); + const globalConfigPath = getGlobalConfigPath(); + const legacyWorktreeRoot = join(homedir(), ".worktrees"); + + const inventory = await inventoryV3({ + aoBaseDir, + globalConfigPath, + legacyWorktreeRoot, + }); + + const plan = planV3(inventory, getCliVersion()); + + if (opts.json) { + const json = JSON.stringify(plan, null, 2); + if (opts.output) { + writeFileSync(opts.output, json + "\n", "utf-8"); + process.stdout.write(`Plan written to ${opts.output}\n`); + } else { + process.stdout.write(json + "\n"); + } + return; + } + + printHumanPlan(plan); + + if (opts.output) { + writeFileSync(opts.output, JSON.stringify(plan, null, 2) + "\n", "utf-8"); + process.stdout.write( + `\n${chalk.dim("Full JSON record written to:")} ${opts.output}\n`, + ); + } + }); +} + +function printHumanPlan(plan: V3Plan): void { + const out = process.stdout; + + out.write(`\n${chalk.bold("ao migrate v3")} ${chalk.dim(`(dry-run · ${plan.aoVersion})`)}\n`); + out.write(`${chalk.dim("Scanned:")} ${plan.inventory.aoBaseDir}\n`); + out.write(`${chalk.dim("At:")} ${plan.generatedAt}\n\n`); + + // Inventory summary + out.write(`${chalk.bold("Inventory")}\n`); + out.write(` Projects: ${plan.inventory.projects.length}\n`); + const v1 = plan.inventory.projects.filter((p) => p.layout === "v1-bare").length; + const v2 = plan.inventory.projects.filter((p) => p.layout === "v2-hashed").length; + out.write(` V1 bare-basename: ${v1}\n`); + out.write(` V2 hashed: ${v2}\n`); + 
out.write(` Sessions: ${plan.inventory.totals.sessions}\n`); + out.write(` Worktrees: ${plan.inventory.totals.worktrees}\n`); + out.write( + ` Observability dirs: ${plan.inventory.observability.rootLevelDirCount}` + + ` (${formatBytes(plan.inventory.observability.bytes)})\n`, + ); + out.write(` Stranded worktrees: ${plan.inventory.strandedWorktrees.length}\n`); + out.write(` Bare hash dirs: ${plan.inventory.bareHashDirs.length}\n`); + out.write(` .migrated dirs: ${plan.inventory.migratedDirs.length}\n`); + out.write(` Live tmux sessions: ${plan.inventory.liveTmuxSessions.length}\n`); + out.write(` Same-repo duplicates: ${plan.inventory.duplicateRepos.length}\n`); + out.write(` V1 hash dirs (legacy): ${plan.inventory.v1HashDirs.length}\n`); + out.write(` Total bytes: ${formatBytes(plan.inventory.totals.bytes)}\n\n`); + + // Issues by project + const projectsWithIssues = plan.inventory.projects.filter((p) => p.issues.length > 0); + if (projectsWithIssues.length > 0) { + out.write(`${chalk.bold("Per-project issues")}\n`); + for (const p of projectsWithIssues) { + out.write(` ${chalk.cyan(p.projectId)} ${chalk.dim(`[${p.layout}]`)}\n`); + for (const issue of p.issues) { + out.write(` ${chalk.yellow("•")} ${issue.detail}\n`); + } + } + out.write("\n"); + } + + // Global config issues + if (plan.inventory.globalConfigIssues.length > 0) { + out.write(`${chalk.bold("Global config issues")}\n`); + for (const issue of plan.inventory.globalConfigIssues) { + out.write(` ${chalk.yellow("•")} ${issue.detail}\n`); + } + out.write("\n"); + } + + // Plan steps + out.write(`${chalk.bold("Plan")} ${chalk.dim("(would execute these in order if unlocked)")}\n`); + if (plan.steps.length === 0) { + out.write( + ` ${chalk.green("Nothing to do — disk is already V3-compliant.")}\n\n`, + ); + } else { + for (const step of plan.steps) { + out.write(` ${chalk.bold(step.order + ".")} ${step.title} ${chalk.dim(`(${step.count})`)}\n`); + out.write(` ${chalk.dim(step.description)}\n`); + if 
(step.details.length > 0 && step.details.length <= 8) { + for (const detail of step.details) { + out.write(` - ${detail}\n`); + } + } else if (step.details.length > 8) { + for (const detail of step.details.slice(0, 6)) { + out.write(` - ${detail}\n`); + } + out.write(` ${chalk.dim(`… ${step.details.length - 6} more`)}\n`); + } + } + out.write("\n"); + } + + // Totals + out.write(`${chalk.bold("Totals")}\n`); + out.write(` Projects to re-key: ${plan.totals.projectsToRekey}\n`); + out.write(` Sessions to rewrite: ${plan.totals.sessionsToRewrite}\n`); + out.write(` Tmux renames: ${plan.totals.tmuxRenames}\n`); + out.write(` Worktree adoptions: ${plan.totals.worktreeAdoptions}\n`); + out.write(` Orchestrators to normalize: ${plan.totals.orchestratorsToNormalize}\n`); + out.write(` Observability dirs to GC: ${plan.totals.observabilityDirsToCollapse}\n`); + out.write(` Bare hash dirs to remove: ${plan.totals.bareHashDirsToRemove}\n`); + out.write(` storageKey fields to strip: ${plan.totals.storageKeyFieldsToStrip}\n`); + out.write( + ` Estimated bytes freed: ~${formatBytes(plan.totals.estimatedBytesFreed)}\n\n`, + ); + + // Warnings + if (plan.warnings.length > 0) { + out.write(`${chalk.bold.yellow("Warnings")}\n`); + for (const w of plan.warnings) { + out.write(` ${chalk.yellow("⚠")} ${w}\n`); + } + out.write("\n"); + } + + // Footer + out.write(`${chalk.dim("─".repeat(60))}\n`); + out.write(`${chalk.bold("Execution is gated in v0.6.0.")}\n`); + out.write( + `Share this plan at: ${chalk.cyan(FEEDBACK_ISSUE_URL)}\n` + + `${chalk.dim("Execution unlocks in v0.6.1.")}\n\n`, + ); +} diff --git a/packages/cli/src/lib/startup-preflight.ts b/packages/cli/src/lib/startup-preflight.ts index 311ba2e4b0..bbcc204e19 100644 --- a/packages/cli/src/lib/startup-preflight.ts +++ b/packages/cli/src/lib/startup-preflight.ts @@ -156,7 +156,7 @@ export function warnAboutLegacyStorage(): void { chalk.yellow( `\n ⚠ Found ${nonEmptyDirCount} legacy storage director${nonEmptyDirCount === 1 ? 
"y" : "ies"} that need${nonEmptyDirCount === 1 ? "s" : ""} migration.\n` + ` Sessions stored in the old format won't appear until migrated.\n` + - ` Run ${chalk.bold("ao migrate-storage")} to upgrade (use ${chalk.bold("--dry-run")} to preview).\n`, + ` Run ${chalk.bold("ao migrate --dry-run")} to preview the V3 plan. Execution unlocks in v0.6.1.\n`, ), ); } catch { diff --git a/packages/cli/src/program.ts b/packages/cli/src/program.ts index 12ffb0858b..0c00bb040f 100644 --- a/packages/cli/src/program.ts +++ b/packages/cli/src/program.ts @@ -14,7 +14,7 @@ import { registerUpdate } from "./commands/update.js"; import { registerSetup } from "./commands/setup.js"; import { registerPlugin } from "./commands/plugin.js"; import { registerProjectCommand } from "./commands/project.js"; -import { registerMigrateStorage } from "./commands/migrate-storage.js"; +import { registerMigrate } from "./commands/migrate.js"; import { registerCompletion } from "./commands/completion.js"; import { registerEvents } from "./commands/events.js"; import { getConfigInstruction } from "./lib/config-instruction.js"; @@ -46,7 +46,7 @@ export function createProgram(): Command { registerSetup(program); registerPlugin(program); registerProjectCommand(program); - registerMigrateStorage(program); + registerMigrate(program); registerCompletion(program); registerEvents(program); diff --git a/packages/core/src/__tests__/migration-v3.test.ts b/packages/core/src/__tests__/migration-v3.test.ts new file mode 100644 index 0000000000..a9c7804bd0 --- /dev/null +++ b/packages/core/src/__tests__/migration-v3.test.ts @@ -0,0 +1,591 @@ +import { describe, it, expect, beforeEach, afterEach } from "vitest"; +import { mkdirSync, writeFileSync, rmSync } from "node:fs"; +import { join } from "node:path"; +import { tmpdir } from "node:os"; +import { stringify as stringifyYaml } from "yaml"; + +import { inventoryV3, planV3 } from "../migration/v3.js"; + +// 
--------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +function createTempDir(): string { + const dir = join( + tmpdir(), + `ao-v3-test-${Date.now()}-${Math.random().toString(36).slice(2)}`, + ); + mkdirSync(dir, { recursive: true }); + return dir; +} + +function writeGlobalConfig( + aoBaseDir: string, + projects: Record<string, Record<string, unknown>>, +): string { + const configPath = join(aoBaseDir, "config.yaml"); + writeFileSync( + configPath, + stringifyYaml({ + port: 3000, + defaults: {}, + projects, + }), + "utf-8", + ); + return configPath; +} + +interface SessionFixture { + sessionId: string; + tmuxName?: string; + workspacePath?: string; + branch?: string; + kind?: "worker" | "orchestrator"; +} + +function writeSessionJson( + projectsDir: string, + projectId: string, + fixture: SessionFixture, +): void { + const sessionsDir = join(projectsDir, projectId, "sessions"); + mkdirSync(sessionsDir, { recursive: true }); + const meta = { + sessionId: fixture.sessionId, + kind: fixture.kind ?? "worker", + project: projectId, + tmuxName: fixture.tmuxName ?? fixture.sessionId, + branch: fixture.branch ?? `session/${fixture.sessionId}`, + workspacePath: + fixture.workspacePath ?? + join(projectsDir, projectId, "worktrees", fixture.sessionId), + agent: "claude-code", + createdAt: "2026-05-07T08:43:35.402Z", + }; + writeFileSync( + join(sessionsDir, `${fixture.sessionId}.json`), + JSON.stringify(meta, null, 2), + "utf-8", + ); +} + +function writeOrchestratorJson( + projectsDir: string, + projectId: string, + fixture: SessionFixture, +): void { + const meta = { + sessionId: fixture.sessionId, + kind: "orchestrator", + project: projectId, + tmuxName: fixture.tmuxName ?? fixture.sessionId, + branch: fixture.branch ?? `orchestrator/${fixture.sessionId}`, + runtimeHandle: { + id: fixture.tmuxName ?? 
fixture.sessionId, + runtimeName: "tmux", + data: { + workspacePath: + fixture.workspacePath ?? + join(projectsDir, projectId, "worktrees", fixture.sessionId), + }, + }, + agent: "claude-code", + createdAt: "2026-04-25T17:32:56.275Z", + }; + mkdirSync(join(projectsDir, projectId), { recursive: true }); + writeFileSync( + join(projectsDir, projectId, "orchestrator.json"), + JSON.stringify(meta, null, 2), + "utf-8", + ); +} + +function writeObservabilityDir(aoBaseDir: string, hash: string): void { + const dir = join(aoBaseDir, `${hash}-observability`, "processes"); + mkdirSync(dir, { recursive: true }); + writeFileSync( + join(dir, "session-manager-12345.json"), + JSON.stringify( + { + component: "session-manager", + pid: 12345, + projectId: "some-project", + traces: [], + }, + null, + 2, + ), + "utf-8", + ); +} + +// --------------------------------------------------------------------------- +// inventoryV3 +// --------------------------------------------------------------------------- + +describe("inventoryV3", () => { + let aoBaseDir: string; + let projectsDir: string; + + beforeEach(() => { + aoBaseDir = createTempDir(); + projectsDir = join(aoBaseDir, "projects"); + mkdirSync(projectsDir, { recursive: true }); + }); + + afterEach(() => { + rmSync(aoBaseDir, { recursive: true, force: true }); + }); + + it("returns an empty inventory when aoBaseDir does not exist", async () => { + const missing = join(aoBaseDir, "missing"); + const inv = await inventoryV3({ aoBaseDir: missing, skipTmux: true }); + expect(inv.projects).toHaveLength(0); + expect(inv.totals.bytes).toBe(0); + expect(inv.observability.rootLevelDirCount).toBe(0); + }); + + it("classifies V2 hashed and V1 bare-basename projects correctly", async () => { + // V2 hashed + mkdirSync(join(projectsDir, "agent-orchestrator_a1b2c3d4e5", "sessions"), { + recursive: true, + }); + // V1 bare + mkdirSync(join(projectsDir, "agent-orchestrator", "sessions"), { + recursive: true, + }); + + const configPath = 
writeGlobalConfig(aoBaseDir, { + "agent-orchestrator_a1b2c3d4e5": { + path: "/Users/x/v2/agent-orchestrator", + sessionPrefix: "ao", + repo: { + owner: "ComposioHQ", + name: "agent-orchestrator", + platform: "github", + originUrl: "https://github.com/composiohq/agent-orchestrator", + }, + }, + "agent-orchestrator": { + path: "/Users/x/v1/agent-orchestrator", + sessionPrefix: "ao", + storageKey: "7dc54da05c9e", + repo: { + owner: "ComposioHQ", + name: "agent-orchestrator", + platform: "github", + originUrl: "https://github.com/composiohq/agent-orchestrator", + }, + }, + }); + + const inv = await inventoryV3({ + aoBaseDir, + globalConfigPath: configPath, + skipTmux: true, + }); + + expect(inv.projects).toHaveLength(2); + const v2 = inv.projects.find((p) => p.layout === "v2-hashed"); + const v1 = inv.projects.find((p) => p.layout === "v1-bare"); + expect(v2?.projectId).toBe("agent-orchestrator_a1b2c3d4e5"); + expect(v1?.projectId).toBe("agent-orchestrator"); + expect(v1?.rekeyTo).toMatch(/^agent-orchestrator_[0-9a-f]{10}$/); + expect(v1?.storageKeyField).toBe("7dc54da05c9e"); + + // Issues raised for V1 + expect(v1?.issues.some((i) => i.kind === "v1-bare-basename")).toBe(true); + expect(v1?.issues.some((i) => i.kind === "storageKey-field-present")).toBe(true); + }); + + it("flags duplicate repos by originUrl", async () => { + mkdirSync(join(projectsDir, "agent-orchestrator", "sessions"), { + recursive: true, + }); + mkdirSync(join(projectsDir, "agent-orchestrator_168566536d", "sessions"), { + recursive: true, + }); + + const configPath = writeGlobalConfig(aoBaseDir, { + "agent-orchestrator": { + path: "/Users/x/clones/c1/agent-orchestrator", + sessionPrefix: "ao", + repo: { + owner: "ComposioHQ", + name: "agent-orchestrator", + platform: "github", + originUrl: "https://github.com/composiohq/agent-orchestrator", + }, + }, + "agent-orchestrator_168566536d": { + path: "/Users/x/clones/c2/agent-orchestrator", + sessionPrefix: "ao2", + repo: { + owner: "ComposioHQ", + 
name: "agent-orchestrator", + platform: "github", + originUrl: "https://github.com/composiohq/agent-orchestrator", + }, + }, + }); + + const inv = await inventoryV3({ + aoBaseDir, + globalConfigPath: configPath, + skipTmux: true, + }); + + expect(inv.duplicateRepos).toHaveLength(1); + expect(inv.duplicateRepos[0].projectIds.sort()).toEqual([ + "agent-orchestrator", + "agent-orchestrator_168566536d", + ]); + }); + + it("counts observability dir leak", async () => { + writeObservabilityDir(aoBaseDir, "0149ff87f4a5"); + writeObservabilityDir(aoBaseDir, "03706227e15e"); + writeObservabilityDir(aoBaseDir, "fea10426c4ba"); + + const inv = await inventoryV3({ aoBaseDir, skipTmux: true }); + + expect(inv.observability.rootLevelDirCount).toBe(3); + expect(inv.observability.bytes).toBeGreaterThan(0); + expect(inv.observability.oldestModifiedAt).toBeTypeOf("string"); + }); + + it("detects bare hash dirs and .migrated dirs", async () => { + mkdirSync(join(aoBaseDir, "111111111114"), { recursive: true }); + mkdirSync(join(aoBaseDir, "111111111114.migrated"), { recursive: true }); + + const inv = await inventoryV3({ aoBaseDir, skipTmux: true }); + + expect(inv.bareHashDirs).toEqual(["111111111114"]); + expect(inv.migratedDirs).toEqual(["111111111114.migrated"]); + }); + + it("flags numbered orchestrators", async () => { + const projectId = "agent-orchestrator_a1b2c3d4e5"; + mkdirSync(join(projectsDir, projectId, "sessions"), { recursive: true }); + writeSessionJson(projectsDir, projectId, { + sessionId: "ao-orchestrator-1", + kind: "orchestrator", + }); + writeSessionJson(projectsDir, projectId, { + sessionId: "ao-orchestrator-2", + kind: "orchestrator", + }); + + const configPath = writeGlobalConfig(aoBaseDir, { + [projectId]: { path: "/Users/x/repo", sessionPrefix: "ao" }, + }); + + const inv = await inventoryV3({ + aoBaseDir, + globalConfigPath: configPath, + skipTmux: true, + }); + + const project = inv.projects[0]; + 
expect(project.orchestratorVariants).toContain("ao-orchestrator-1"); + expect(project.orchestratorVariants).toContain("ao-orchestrator-2"); + expect(project.issues.filter((i) => i.kind === "numbered-orchestrator").length).toBe(2); + }); + + it("flags doubled-prefix tmux name in metadata", async () => { + const projectId = "agent-orchestrator_a1b2c3d4e5"; + writeSessionJson(projectsDir, projectId, { + sessionId: "ao-orchestrator", + tmuxName: "ao-ao-orchestrator", // doubled + kind: "orchestrator", + }); + + const configPath = writeGlobalConfig(aoBaseDir, { + [projectId]: { path: "/Users/x/repo", sessionPrefix: "ao" }, + }); + + const inv = await inventoryV3({ + aoBaseDir, + globalConfigPath: configPath, + skipTmux: true, + }); + + const project = inv.projects[0]; + expect(project.legacyTmuxNamesInMetadata).toBe(1); + expect(project.issues.some((i) => i.kind === "doubled-prefix-tmux")).toBe(true); + }); + + it("flags storageKey-prefixed tmux name in orchestrator.json", async () => { + const projectId = "agent-orchestrator"; + writeOrchestratorJson(projectsDir, projectId, { + sessionId: "ao-orchestrator", + tmuxName: "66c66786e971-agent-orchestrator-ao-orchestrator-8", + workspacePath: "/Users/x/.worktrees/agent-orchestrator/ao-orchestrator-8", + }); + + const configPath = writeGlobalConfig(aoBaseDir, { + [projectId]: { path: "/Users/x/repo", sessionPrefix: "ao", storageKey: "66c66786e971" }, + }); + + const inv = await inventoryV3({ + aoBaseDir, + globalConfigPath: configPath, + skipTmux: true, + }); + + const project = inv.projects[0]; + expect(project.liveOrchestratorTmuxName).toBe( + "66c66786e971-agent-orchestrator-ao-orchestrator-8", + ); + expect(project.issues.some((i) => i.kind === "legacy-tmux-in-metadata")).toBe(true); + expect(project.issues.some((i) => i.kind === "legacy-workspace-path")).toBe(true); + }); + + it("flags stranded worktrees in legacy ~/.worktrees/", async () => { + const projectId = "agent-orchestrator_a1b2c3d4e5"; + 
mkdirSync(join(projectsDir, projectId, "sessions"), { recursive: true }); + + const configPath = writeGlobalConfig(aoBaseDir, { + [projectId]: { path: "/Users/x/repo", sessionPrefix: "ao" }, + }); + + // Set up legacy worktree tree + const legacyRoot = createTempDir(); + mkdirSync(join(legacyRoot, "agent-orchestrator", "ao-101"), { recursive: true }); + mkdirSync(join(legacyRoot, "agent-orchestrator", "ao-102"), { recursive: true }); + + const inv = await inventoryV3({ + aoBaseDir, + globalConfigPath: configPath, + legacyWorktreeRoot: legacyRoot, + skipTmux: true, + }); + + expect(inv.strandedWorktrees).toHaveLength(2); + expect(inv.strandedWorktrees[0].candidateProjectId).toBe(projectId); + expect(inv.strandedWorktrees[0].candidateSessionId).toMatch(/^ao-10\d$/); + + rmSync(legacyRoot, { recursive: true, force: true }); + }); + + it("flags global config storageKey fields", async () => { + mkdirSync(join(projectsDir, "p1"), { recursive: true }); + mkdirSync(join(projectsDir, "p2_a1b2c3d4e5"), { recursive: true }); + + const configPath = writeGlobalConfig(aoBaseDir, { + p1: { path: "/x", sessionPrefix: "p1", storageKey: "aaaaaaaaaaaa" }, + p2_a1b2c3d4e5: { path: "/y", sessionPrefix: "p2", storageKey: "bbbbbbbbbbbb" }, + }); + + const inv = await inventoryV3({ + aoBaseDir, + globalConfigPath: configPath, + skipTmux: true, + }); + + const storageKeyIssues = inv.globalConfigIssues.filter( + (i) => i.kind === "storageKey-field-present", + ); + expect(storageKeyIssues).toHaveLength(2); + }); + + it("flags registry entries that have no on-disk project dir", async () => { + const configPath = writeGlobalConfig(aoBaseDir, { + "missing-on-disk": { path: "/x", sessionPrefix: "m" }, + }); + + const inv = await inventoryV3({ + aoBaseDir, + globalConfigPath: configPath, + skipTmux: true, + }); + + const stranded = inv.globalConfigIssues.filter( + (i) => i.kind === "stranded-legacy-hash-dir", + ); + expect(stranded).toHaveLength(1); + 
expect(stranded[0].ref).toBe("missing-on-disk"); + }); +}); + +// --------------------------------------------------------------------------- +// planV3 +// --------------------------------------------------------------------------- + +describe("planV3", () => { + let aoBaseDir: string; + let projectsDir: string; + + beforeEach(() => { + aoBaseDir = createTempDir(); + projectsDir = join(aoBaseDir, "projects"); + mkdirSync(projectsDir, { recursive: true }); + }); + + afterEach(() => { + rmSync(aoBaseDir, { recursive: true, force: true }); + }); + + it("returns minimal plan for clean V2-only disk", async () => { + mkdirSync(join(projectsDir, "p1_a1b2c3d4e5", "sessions"), { recursive: true }); + + const configPath = writeGlobalConfig(aoBaseDir, { + p1_a1b2c3d4e5: { path: "/x", sessionPrefix: "p1" }, + }); + + const inv = await inventoryV3({ + aoBaseDir, + globalConfigPath: configPath, + skipTmux: true, + }); + const plan = planV3(inv, "0.6.0"); + + expect(plan.totals.projectsToRekey).toBe(0); + expect(plan.totals.sessionsToRewrite).toBe(0); + expect(plan.totals.tmuxRenames).toBe(0); + expect(plan.totals.worktreeAdoptions).toBe(0); + expect(plan.totals.observabilityDirsToCollapse).toBe(0); + + // Always includes identity.json + counter steps + expect(plan.steps.find((s) => s.id === "write-identity-json")).toBeDefined(); + expect(plan.steps.find((s) => s.id === "reconcile-counter")).toBeDefined(); + expect(plan.steps.find((s) => s.id === "dead-export-manifest")).toBeDefined(); + + // Should NOT include conditional steps when there's nothing to do + expect(plan.steps.find((s) => s.id === "rekey-v1-entries")).toBeUndefined(); + expect(plan.steps.find((s) => s.id === "rename-tmux-sessions")).toBeUndefined(); + expect(plan.steps.find((s) => s.id === "collapse-observability")).toBeUndefined(); + }); + + it("emits rekey + same-repo merge steps for the user's actual disk shape", async () => { + // Simulate the user's real situation: V1 bare 'agent-orchestrator' + V2 hashed 
sibling + mkdirSync(join(projectsDir, "agent-orchestrator", "sessions"), { recursive: true }); + mkdirSync(join(projectsDir, "agent-orchestrator_168566536d", "sessions"), { + recursive: true, + }); + + const configPath = writeGlobalConfig(aoBaseDir, { + "agent-orchestrator": { + path: "/Users/x/clones/clone-1/agent-orchestrator", + sessionPrefix: "ao", + storageKey: "7dc54da05c9e", + repo: { + owner: "ComposioHQ", + name: "agent-orchestrator", + platform: "github", + originUrl: "https://github.com/composiohq/agent-orchestrator", + }, + }, + "agent-orchestrator_168566536d": { + path: "/Users/x/clones/clone-2/agent-orchestrator", + sessionPrefix: "ao2", + repo: { + owner: "ComposioHQ", + name: "agent-orchestrator", + platform: "github", + originUrl: "https://github.com/composiohq/agent-orchestrator", + }, + }, + }); + + const inv = await inventoryV3({ + aoBaseDir, + globalConfigPath: configPath, + skipTmux: true, + }); + const plan = planV3(inv, "0.6.0"); + + // V1 entry needs re-key + expect(plan.totals.projectsToRekey).toBe(1); + const rekey = plan.steps.find((s) => s.id === "rekey-v1-entries"); + expect(rekey?.count).toBe(1); + expect(rekey?.details[0]).toMatch(/^agent-orchestrator → agent-orchestrator_[0-9a-f]{10}$/); + + // Same-repo merge surfaced + expect(plan.steps.find((s) => s.id === "same-repo-merge")).toBeDefined(); + expect(plan.warnings.some((w) => w.includes("same-repo duplicate"))).toBe(true); + + // storageKey strip step + expect(plan.totals.storageKeyFieldsToStrip).toBe(1); + expect(plan.steps.find((s) => s.id === "strip-storage-key")).toBeDefined(); + }); + + it("includes orchestrator-normalize step when numbered orchestrators present", async () => { + const projectId = "agent-orchestrator_a1b2c3d4e5"; + writeSessionJson(projectsDir, projectId, { + sessionId: "ao-orchestrator-1", + kind: "orchestrator", + }); + writeSessionJson(projectsDir, projectId, { + sessionId: "ao-orchestrator-2", + kind: "orchestrator", + }); + writeSessionJson(projectsDir, 
projectId, { + sessionId: "ao-orchestrator-3", + kind: "orchestrator", + }); + + const configPath = writeGlobalConfig(aoBaseDir, { + [projectId]: { path: "/x", sessionPrefix: "ao" }, + }); + + const inv = await inventoryV3({ + aoBaseDir, + globalConfigPath: configPath, + skipTmux: true, + }); + const plan = planV3(inv, "0.6.0"); + + expect(plan.totals.orchestratorsToNormalize).toBe(3); + expect(plan.steps.find((s) => s.id === "normalize-orchestrators")).toBeDefined(); + }); + + it("includes observability-collapse step when leak present", async () => { + writeObservabilityDir(aoBaseDir, "0149ff87f4a5"); + writeObservabilityDir(aoBaseDir, "03706227e15e"); + + const inv = await inventoryV3({ aoBaseDir, skipTmux: true }); + const plan = planV3(inv, "0.6.0"); + + expect(plan.totals.observabilityDirsToCollapse).toBe(2); + const step = plan.steps.find((s) => s.id === "collapse-observability"); + expect(step).toBeDefined(); + expect(step?.count).toBe(2); + }); + + it("includes adopt-worktrees step when stranded worktrees present", async () => { + const projectId = "agent-orchestrator_a1b2c3d4e5"; + mkdirSync(join(projectsDir, projectId, "sessions"), { recursive: true }); + + const configPath = writeGlobalConfig(aoBaseDir, { + [projectId]: { path: "/x", sessionPrefix: "ao" }, + }); + + const legacyRoot = createTempDir(); + mkdirSync(join(legacyRoot, "agent-orchestrator", "ao-101"), { recursive: true }); + + const inv = await inventoryV3({ + aoBaseDir, + globalConfigPath: configPath, + legacyWorktreeRoot: legacyRoot, + skipTmux: true, + }); + const plan = planV3(inv, "0.6.0"); + + expect(plan.totals.worktreeAdoptions).toBe(1); + expect(plan.steps.find((s) => s.id === "adopt-stranded-worktrees")).toBeDefined(); + + rmSync(legacyRoot, { recursive: true, force: true }); + }); + + it("schemaVersion + aoVersion present on the plan", async () => { + const inv = await inventoryV3({ aoBaseDir, skipTmux: true }); + const plan = planV3(inv, "0.6.0"); + + 
expect(plan.schemaVersion).toBe(3); + expect(plan.aoVersion).toBe("0.6.0"); + expect(plan.generatedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + expect(plan.inventory.schemaVersion).toBe(3); + }); +}); diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index 0c39e7a13e..f6f4d350e5 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -360,6 +360,7 @@ export { } from "./portfolio-routing.js"; // Storage V2 migration — one-time converter from hash-based to projectId-based layout +// (Internal helpers; the V1→V3 path is now driven by ao migrate.) export { migrateStorage, rollbackStorage, @@ -373,6 +374,21 @@ export type { HashDirEntry, } from "./migration/storage-v2.js"; +// Storage V3 migration — inventory + plan generator (dry-run only in v0.6.0) +export { inventoryV3, planV3, formatBytes } from "./migration/v3.js"; +export type { + V3Inventory, + V3Plan, + V3Step, + V3Issue, + V3IssueKind, + V3ProjectInventory, + V3StrandedWorktree, + V3LiveTmuxSession, + V3DuplicateRepo, + InventoryOptions as V3InventoryOptions, +} from "./migration/v3.js"; + export { atomicWriteFileSync } from "./atomic-write.js"; // Activity event logging — structured diagnostic event trail diff --git a/packages/core/src/migration/v3.ts b/packages/core/src/migration/v3.ts new file mode 100644 index 0000000000..b0015b8345 --- /dev/null +++ b/packages/core/src/migration/v3.ts @@ -0,0 +1,1033 @@ +/** + * Storage V3 inventory + plan generator. + * + * Pure functions over the AO base directory. NO file system writes. + * The CLI command (`ao migrate`) consumes this output to render the human plan + * and the JSON record. Execution is gated until v0.6.1. + * + * Design context: + * - V3 keeps the V2 `{basename}_{hash10}` projectId format but applies it + * uniformly. V1 bare-basename entries get re-keyed; V2 entries pass through. 
+ * - Identifies leaks (observability dirs, stranded worktrees) and metadata + * drift (doubled-prefix tmux names, legacy storageKey-prefixed names, + * numbered orchestrators) so the dry-run output can be reviewed before any + * execution lands. + * - Reuses inventoryHashDirs from storage-v2.ts for V1 detection so we keep + * the V1→V3 path in one PR (rather than V1→V2→V3 separately). + */ + +import { + existsSync, + readFileSync, + readdirSync, + statSync, + type Stats, +} from "node:fs"; +import { join } from "node:path"; +import { parse as parseYaml } from "yaml"; + +import { generateExternalId } from "../global-config.js"; +import { + detectActiveSessions, + inventoryHashDirs, + type HashDirEntry, +} from "./storage-v2.js"; + +// --------------------------------------------------------------------------- +// Public types +// --------------------------------------------------------------------------- + +export type V3IssueKind = + | "v1-bare-basename" + | "storageKey-field-present" + | "doubled-prefix-tmux" + | "legacy-tmux-in-metadata" + | "legacy-workspace-path" + | "numbered-orchestrator" + | "stranded-worktree" + | "duplicate-repo" + | "observability-leak" + | "stranded-legacy-hash-dir"; + +export interface V3Issue { + kind: V3IssueKind; + detail: string; + ref?: string; +} + +export interface V3ProjectInventory { + projectId: string; + layout: "v1-bare" | "v2-hashed"; + rekeyTo: string | null; // proposed V3 id (null if already V2) + path: string | null; + realpath: string | null; + originUrl: string | null; + sessionPrefix: string | null; + storageKeyField: string | null; + sessionsCount: number; + archiveCount: number; + worktreesCount: number; + orchestratorVariants: string[]; + liveOrchestratorTmuxName: string | null; + legacyTmuxNamesInMetadata: number; + legacyWorkspacePathsInMetadata: number; + bytes: number; + issues: V3Issue[]; +} + +export interface V3StrandedWorktree { + path: string; + branch: string | null; + candidateProjectId: string | null; 
+ candidateSessionId: string | null; +} + +export interface V3LiveTmuxSession { + name: string; + convention: "v3" | "doubled-prefix" | "legacy-storagekey" | "unknown"; +} + +export interface V3DuplicateRepo { + originUrl: string; + projectIds: string[]; +} + +export interface V3Inventory { + schemaVersion: 3; + scannedAt: string; + aoBaseDir: string; + totals: { + bytes: number; + sessions: number; + worktrees: number; + observabilityDirs: number; + }; + projects: V3ProjectInventory[]; + observability: { + rootLevelDirCount: number; + bytes: number; + oldestModifiedAt: string | null; + }; + strandedWorktrees: V3StrandedWorktree[]; + bareHashDirs: string[]; + migratedDirs: string[]; + liveTmuxSessions: V3LiveTmuxSession[]; + duplicateRepos: V3DuplicateRepo[]; + v1HashDirs: HashDirEntry[]; + globalConfigIssues: V3Issue[]; +} + +export interface V3Step { + order: number; + id: string; + title: string; + description: string; + count: number; + details: string[]; +} + +export interface V3Plan { + schemaVersion: 3; + generatedAt: string; + aoVersion: string; + inventory: V3Inventory; + steps: V3Step[]; + totals: { + projectsToRekey: number; + sessionsToRewrite: number; + tmuxRenames: number; + worktreeAdoptions: number; + orchestratorsToNormalize: number; + observabilityDirsToCollapse: number; + bareHashDirsToRemove: number; + storageKeyFieldsToStrip: number; + estimatedBytesFreed: number; + }; + warnings: string[]; + blockers: string[]; +} + +export interface InventoryOptions { + /** Base directory to scan. Defaults to `~/.agent-orchestrator`. */ + aoBaseDir: string; + /** Path to global config (`config.yaml`). */ + globalConfigPath?: string; + /** Optional override for the legacy worktree root (`~/.worktrees`). */ + legacyWorktreeRoot?: string; + /** Skip tmux probe (faster + offline tests). 
*/ + skipTmux?: boolean; +} + +// --------------------------------------------------------------------------- +// Constants +// --------------------------------------------------------------------------- + +/** V2 tmux name pattern: {prefix}-{N}, {prefix}-orchestrator, {prefix}-orchestrator-{N}. */ +const V3_TMUX_PATTERN = /^[a-z0-9][a-zA-Z0-9_-]*(?:-\d+|-orchestrator(?:-\d+)?)$/; + +/** Legacy storageKey-prefixed tmux: {12-hex}-... */ +const LEGACY_STORAGEKEY_TMUX = /^[0-9a-f]{12}-/; + +/** Doubled prefix: {prefix}-{prefix}-orchestrator (the ao-ao-orchestrator bug). */ +function isDoubledPrefix(name: string, knownPrefixes: Set): boolean { + for (const prefix of knownPrefixes) { + if (name === `${prefix}-${prefix}-orchestrator`) return true; + if (name.startsWith(`${prefix}-${prefix}-`)) return true; + } + return false; +} + +// --------------------------------------------------------------------------- +// Inventory +// --------------------------------------------------------------------------- + +export async function inventoryV3(opts: InventoryOptions): Promise { + const { aoBaseDir, globalConfigPath, legacyWorktreeRoot, skipTmux } = opts; + const scannedAt = new Date().toISOString(); + + if (!existsSync(aoBaseDir)) { + return emptyInventory(aoBaseDir, scannedAt); + } + + const globalConfig = readGlobalConfigRaw(globalConfigPath); + const projectsRoot = join(aoBaseDir, "projects"); + + // Walk projects/{id}/ + const projects: V3ProjectInventory[] = []; + const knownPrefixes = new Set(); + + if (existsSync(projectsRoot) && statSync(projectsRoot).isDirectory()) { + for (const projectId of readdirSync(projectsRoot)) { + if (projectId.startsWith(".")) continue; + const projectDir = join(projectsRoot, projectId); + let projectStat: Stats; + try { + projectStat = statSync(projectDir); + } catch { + continue; + } + if (!projectStat.isDirectory()) continue; + + const inv = inventoryProject(projectId, projectDir, globalConfig); + projects.push(inv); + if 
(inv.sessionPrefix) knownPrefixes.add(inv.sessionPrefix); + } + } + + // Observability leak + const observability = inventoryObservabilityDirs(aoBaseDir); + + // Bare hash dirs + .migrated + const bareHashDirs: string[] = []; + const migratedDirs: string[] = []; + for (const name of readdirSync(aoBaseDir)) { + if (/^[0-9a-f]{12}$/.test(name)) bareHashDirs.push(name); + if (/\.migrated$/.test(name)) migratedDirs.push(name); + } + + // Stranded worktrees in ~/.worktrees/ + const strandedWorktrees = inventoryStrandedWorktrees(legacyWorktreeRoot, projects); + + // Live tmux sessions + const liveTmuxSessions = skipTmux + ? [] + : await inventoryLiveTmuxSessions(knownPrefixes); + + // Duplicate repos by originUrl + const duplicateRepos = inventoryDuplicateRepos(projects); + + // V1 hash dirs (for V1→V3 in one pass) + const v1HashDirs = inventoryHashDirs(aoBaseDir, globalConfigPath); + + // Global config issues + const globalConfigIssues = inventoryGlobalConfigIssues(globalConfig, projects); + + // Totals + const totals = { + bytes: + projects.reduce((sum, p) => sum + p.bytes, 0) + + observability.bytes, + sessions: projects.reduce((sum, p) => sum + p.sessionsCount, 0), + worktrees: projects.reduce((sum, p) => sum + p.worktreesCount, 0), + observabilityDirs: observability.rootLevelDirCount, + }; + + return { + schemaVersion: 3, + scannedAt, + aoBaseDir, + totals, + projects, + observability, + strandedWorktrees, + bareHashDirs, + migratedDirs, + liveTmuxSessions, + duplicateRepos, + v1HashDirs, + globalConfigIssues, + }; +} + +function emptyInventory(aoBaseDir: string, scannedAt: string): V3Inventory { + return { + schemaVersion: 3, + scannedAt, + aoBaseDir, + totals: { bytes: 0, sessions: 0, worktrees: 0, observabilityDirs: 0 }, + projects: [], + observability: { rootLevelDirCount: 0, bytes: 0, oldestModifiedAt: null }, + strandedWorktrees: [], + bareHashDirs: [], + migratedDirs: [], + liveTmuxSessions: [], + duplicateRepos: [], + v1HashDirs: [], + globalConfigIssues: 
[], + }; +} + +// --------------------------------------------------------------------------- +// Per-project inventory +// --------------------------------------------------------------------------- + +interface RawGlobalConfig { + projects: Record>; +} + +function readGlobalConfigRaw(globalConfigPath?: string): RawGlobalConfig { + if (!globalConfigPath || !existsSync(globalConfigPath)) { + return { projects: {} }; + } + try { + const text = readFileSync(globalConfigPath, "utf-8"); + const parsed = parseYaml(text) as Record | null; + const projects = + parsed && typeof parsed["projects"] === "object" && parsed["projects"] + ? (parsed["projects"] as Record>) + : {}; + return { projects }; + } catch { + return { projects: {} }; + } +} + +function isV2HashedId(projectId: string): boolean { + // V2 format: {sanitized basename, max 30}_{10 hex} + return /^[a-z0-9][a-z0-9_-]{0,29}_[0-9a-f]{10}$/.test(projectId); +} + +function inventoryProject( + projectId: string, + projectDir: string, + globalConfig: RawGlobalConfig, +): V3ProjectInventory { + const issues: V3Issue[] = []; + const layout: "v1-bare" | "v2-hashed" = isV2HashedId(projectId) + ? "v2-hashed" + : "v1-bare"; + + const registryEntry = globalConfig.projects[projectId]; + const path = typeof registryEntry?.["path"] === "string" ? registryEntry["path"] : null; + const realpath = path; // resolving here would do FS work; keep raw for inventory + const repo = registryEntry?.["repo"] as Record | undefined; + const originUrl = typeof repo?.["originUrl"] === "string" ? repo["originUrl"] : null; + const sessionPrefix = + typeof registryEntry?.["sessionPrefix"] === "string" + ? registryEntry["sessionPrefix"] + : null; + const storageKeyField = + typeof registryEntry?.["storageKey"] === "string" ? 
registryEntry["storageKey"] : null; + + // Re-key target (for V1 → V3); null if already V2 + let rekeyTo: string | null = null; + if (layout === "v1-bare") { + if (path) { + rekeyTo = generateExternalId(path, originUrl); + } else { + // Without a path we can't compute; flag it + rekeyTo = null; + } + issues.push({ + kind: "v1-bare-basename", + detail: `Project "${projectId}" uses bare-basename layout; would re-key to "${rekeyTo ?? "(unable: no path)"}".`, + ref: projectId, + }); + } + + if (storageKeyField) { + issues.push({ + kind: "storageKey-field-present", + detail: `Project "${projectId}" still has the legacy storageKey field "${storageKeyField}" in config.yaml.`, + ref: projectId, + }); + } + + // Walk sessions/ + const sessionsDir = join(projectDir, "sessions"); + let sessionsCount = 0; + let archiveCount = 0; + const orchestratorVariants: string[] = []; + let legacyTmuxNamesInMetadata = 0; + let legacyWorkspacePathsInMetadata = 0; + + if (existsSync(sessionsDir)) { + for (const entry of readdirSync(sessionsDir)) { + if (entry === "archive") { + archiveCount = countNonHidden(join(sessionsDir, "archive")); + continue; + } + if (entry.startsWith(".")) continue; + const sessionPath = join(sessionsDir, entry); + let stat: Stats; + try { + stat = statSync(sessionPath); + } catch { + continue; + } + if (!stat.isFile()) continue; + + sessionsCount += 1; + + const sessionMeta = readSessionMeta(sessionPath); + const sessionId = sessionMeta?.["sessionId"] ?? entry.replace(/\.json$/, ""); + + // Numbered orchestrator detection: matches {prefix}-orchestrator-{N} + if (typeof sessionId === "string" && /-orchestrator-\d+$/.test(sessionId)) { + orchestratorVariants.push(sessionId); + issues.push({ + kind: "numbered-orchestrator", + detail: `Session "${sessionId}" uses a numbered orchestrator suffix; should normalize to "${sessionPrefix ?? 
""}-orchestrator".`, + ref: sessionPath, + }); + } else if ( + typeof sessionId === "string" && + sessionPrefix && + sessionId === `${sessionPrefix}-orchestrator` + ) { + orchestratorVariants.push(sessionId); + } + + // Legacy tmuxName / workspacePath checks + const tmuxName = sessionMeta?.["tmuxName"]; + if (typeof tmuxName === "string") { + if (LEGACY_STORAGEKEY_TMUX.test(tmuxName)) { + legacyTmuxNamesInMetadata += 1; + issues.push({ + kind: "legacy-tmux-in-metadata", + detail: `Session JSON has legacy storageKey-prefixed tmuxName "${tmuxName}".`, + ref: sessionPath, + }); + } else if ( + sessionPrefix && + tmuxName.startsWith(`${sessionPrefix}-${sessionPrefix}-`) + ) { + legacyTmuxNamesInMetadata += 1; + issues.push({ + kind: "doubled-prefix-tmux", + detail: `Session JSON has doubled-prefix tmuxName "${tmuxName}".`, + ref: sessionPath, + }); + } + } + + const workspacePath = sessionMeta?.["workspacePath"]; + if (typeof workspacePath === "string" && workspacePath.includes("/.worktrees/")) { + legacyWorkspacePathsInMetadata += 1; + issues.push({ + kind: "legacy-workspace-path", + detail: `Session JSON workspacePath points at legacy ~/.worktrees/ tree: "${workspacePath}".`, + ref: sessionPath, + }); + } + } + } + + // orchestrator.json (singleton, alongside sessions/) + const orchestratorJson = join(projectDir, "orchestrator.json"); + let liveOrchestratorTmuxName: string | null = null; + if (existsSync(orchestratorJson)) { + const meta = readSessionMeta(orchestratorJson); + const tmuxName = meta?.["tmuxName"]; + if (typeof tmuxName === "string") { + liveOrchestratorTmuxName = tmuxName; + if (LEGACY_STORAGEKEY_TMUX.test(tmuxName)) { + legacyTmuxNamesInMetadata += 1; + issues.push({ + kind: "legacy-tmux-in-metadata", + detail: `orchestrator.json has legacy storageKey-prefixed tmuxName "${tmuxName}".`, + ref: orchestratorJson, + }); + } + if ( + sessionPrefix && + tmuxName.startsWith(`${sessionPrefix}-${sessionPrefix}-`) + ) { + issues.push({ + kind: 
"doubled-prefix-tmux", + detail: `orchestrator.json has doubled-prefix tmuxName "${tmuxName}".`, + ref: orchestratorJson, + }); + } + } + const ws = meta?.["runtimeHandle"]; + if ( + ws && + typeof ws === "object" && + typeof (ws as Record)["data"] === "object" + ) { + const wsPath = ( + (ws as Record)["data"] as Record + )["workspacePath"]; + if (typeof wsPath === "string" && wsPath.includes("/.worktrees/")) { + legacyWorkspacePathsInMetadata += 1; + issues.push({ + kind: "legacy-workspace-path", + detail: `orchestrator.json runtimeHandle.data.workspacePath points at legacy ~/.worktrees/ tree: "${wsPath}".`, + ref: orchestratorJson, + }); + } + } + } + + // Worktrees + const worktreesDir = join(projectDir, "worktrees"); + let worktreesCount = 0; + if (existsSync(worktreesDir)) { + for (const entry of readdirSync(worktreesDir)) { + if (entry.startsWith(".")) continue; + try { + if (statSync(join(worktreesDir, entry)).isDirectory()) worktreesCount += 1; + } catch { + // ignore + } + } + } + + const bytes = directoryBytes(projectDir); + + return { + projectId, + layout, + rekeyTo, + path, + realpath, + originUrl, + sessionPrefix, + storageKeyField, + sessionsCount, + archiveCount, + worktreesCount, + orchestratorVariants, + liveOrchestratorTmuxName, + legacyTmuxNamesInMetadata, + legacyWorkspacePathsInMetadata, + bytes, + issues, + }; +} + +function readSessionMeta(filePath: string): Record | null { + try { + const content = readFileSync(filePath, "utf-8").trim(); + if (!content) return null; + if (content.startsWith("{")) { + return JSON.parse(content) as Record; + } + return null; + } catch { + return null; + } +} + +function countNonHidden(dir: string): number { + if (!existsSync(dir)) return 0; + try { + return readdirSync(dir).filter((n) => !n.startsWith(".")).length; + } catch { + return 0; + } +} + +function directoryBytes(dir: string): number { + let total = 0; + try { + for (const entry of readdirSync(dir)) { + const p = join(dir, entry); + let s: Stats; + 
try { + s = statSync(p); + } catch { + continue; + } + if (s.isDirectory()) { + total += directoryBytes(p); + } else if (s.isFile()) { + total += s.size; + } + } + } catch { + // ignore unreadable + } + return total; +} + +// --------------------------------------------------------------------------- +// Observability leak inventory +// --------------------------------------------------------------------------- + +function inventoryObservabilityDirs(aoBaseDir: string): { + rootLevelDirCount: number; + bytes: number; + oldestModifiedAt: string | null; +} { + let count = 0; + let bytes = 0; + let oldest: number | null = null; + + for (const name of readdirSync(aoBaseDir)) { + if (!name.endsWith("-observability")) continue; + const p = join(aoBaseDir, name); + let s: Stats; + try { + s = statSync(p); + } catch { + continue; + } + if (!s.isDirectory()) continue; + count += 1; + bytes += directoryBytes(p); + const mtime = s.mtimeMs; + if (oldest === null || mtime < oldest) oldest = mtime; + } + + return { + rootLevelDirCount: count, + bytes, + oldestModifiedAt: oldest === null ? 
null : new Date(oldest).toISOString(), + }; +} + +// --------------------------------------------------------------------------- +// Stranded worktrees +// --------------------------------------------------------------------------- + +function inventoryStrandedWorktrees( + legacyWorktreeRoot: string | undefined, + projects: V3ProjectInventory[], +): V3StrandedWorktree[] { + if (!legacyWorktreeRoot || !existsSync(legacyWorktreeRoot)) return []; + const out: V3StrandedWorktree[] = []; + + for (const projectName of readdirSync(legacyWorktreeRoot)) { + const projectDir = join(legacyWorktreeRoot, projectName); + let s: Stats; + try { + s = statSync(projectDir); + } catch { + continue; + } + if (!s.isDirectory()) continue; + + for (const wtName of readdirSync(projectDir)) { + const wtPath = join(projectDir, wtName); + try { + if (!statSync(wtPath).isDirectory()) continue; + } catch { + continue; + } + + // Try to resolve a candidate project + session by sessionPrefix and worktree name. + const candidate = projects.find( + (p) => + p.sessionPrefix !== null && + (wtName.startsWith(`${p.sessionPrefix}-`) || + wtName === `${p.sessionPrefix}-orchestrator`), + ); + + out.push({ + path: wtPath, + branch: null, // could read via `git -C wtPath branch --show-current`; defer + candidateProjectId: candidate?.projectId ?? null, + candidateSessionId: candidate ? 
wtName : null, + }); + } + } + + return out; +} + +// --------------------------------------------------------------------------- +// Live tmux sessions +// --------------------------------------------------------------------------- + +async function inventoryLiveTmuxSessions( + knownPrefixes: Set, +): Promise { + let sessionNames: string[]; + try { + sessionNames = await detectActiveSessions(Array.from(knownPrefixes)); + } catch { + sessionNames = []; + } + + return sessionNames.map((name) => ({ + name, + convention: classifyTmuxName(name, knownPrefixes), + })); +} + +function classifyTmuxName( + name: string, + knownPrefixes: Set, +): V3LiveTmuxSession["convention"] { + if (LEGACY_STORAGEKEY_TMUX.test(name)) return "legacy-storagekey"; + if (isDoubledPrefix(name, knownPrefixes)) return "doubled-prefix"; + if (V3_TMUX_PATTERN.test(name)) return "v3"; + return "unknown"; +} + +// --------------------------------------------------------------------------- +// Duplicate repos +// --------------------------------------------------------------------------- + +function inventoryDuplicateRepos(projects: V3ProjectInventory[]): V3DuplicateRepo[] { + const byOrigin = new Map(); + for (const p of projects) { + if (!p.originUrl) continue; + const ids = byOrigin.get(p.originUrl) ?? 
[]; + ids.push(p.projectId); + byOrigin.set(p.originUrl, ids); + } + const out: V3DuplicateRepo[] = []; + for (const [originUrl, projectIds] of byOrigin) { + if (projectIds.length > 1) { + out.push({ originUrl, projectIds }); + } + } + return out; +} + +// --------------------------------------------------------------------------- +// Global config issues +// --------------------------------------------------------------------------- + +function inventoryGlobalConfigIssues( + globalConfig: RawGlobalConfig, + projects: V3ProjectInventory[], +): V3Issue[] { + const issues: V3Issue[] = []; + + // Per-project storageKey strip + for (const [pid, entry] of Object.entries(globalConfig.projects)) { + if (typeof entry["storageKey"] === "string") { + issues.push({ + kind: "storageKey-field-present", + detail: `Global config project "${pid}" still has storageKey="${entry["storageKey"]}".`, + ref: pid, + }); + } + } + + // Project IDs in registry but not on disk + const onDisk = new Set(projects.map((p) => p.projectId)); + for (const pid of Object.keys(globalConfig.projects)) { + if (!onDisk.has(pid)) { + issues.push({ + kind: "stranded-legacy-hash-dir", + detail: `Project "${pid}" is in config.yaml but no projects/${pid}/ directory exists.`, + ref: pid, + }); + } + } + + return issues; +} + +// --------------------------------------------------------------------------- +// Plan generation +// --------------------------------------------------------------------------- + +export function planV3(inventory: V3Inventory, aoVersion: string): V3Plan { + const generatedAt = new Date().toISOString(); + const steps: V3Step[] = []; + + // Step 1: Re-key V1 entries + const v1Projects = inventory.projects.filter((p) => p.layout === "v1-bare"); + if (v1Projects.length > 0) { + steps.push({ + order: 1, + id: "rekey-v1-entries", + title: "Re-key V1 bare-basename projects to V2 format", + description: + "Compute generateExternalId(realpath, originUrl) for each V1 project; rename 
projects/{old} → projects/{new}; update config.yaml registry key.", + count: v1Projects.length, + details: v1Projects.map( + (p) => `${p.projectId} → ${p.rekeyTo ?? "(unable: missing path)"}`, + ), + }); + } + + // Step 2: Same-repo merge prompt + if (inventory.duplicateRepos.length > 0) { + steps.push({ + order: 2, + id: "same-repo-merge", + title: "Detect same-repo dual registrations", + description: + "Projects sharing the same originUrl are candidates for merging. User confirms; default keeps both.", + count: inventory.duplicateRepos.length, + details: inventory.duplicateRepos.map( + (d) => `${d.originUrl}: ${d.projectIds.join(" + ")}`, + ), + }); + } + + // Step 3: Path renames (covered by step 1 for V1 entries) + + // Step 4: Write identity.json (per project) + steps.push({ + order: 4, + id: "write-identity-json", + title: "Write identity.json into each project directory", + description: + "Per project: write projects/{id}/identity.json with displayName, originUrl, path, realpath, sessionPrefix, repo, defaultBranch, schemaVersion: 3.", + count: inventory.projects.length, + details: inventory.projects.map((p) => `projects/${p.rekeyTo ?? p.projectId}/identity.json`), + }); + + // Step 5: Reconcile session counter + steps.push({ + order: 5, + id: "reconcile-counter", + title: "Write .next-session-id.json per project", + description: + "Scan sessions/, find max(N) per prefix, advance counter, write .next-session-id.json. The remote scan stays as a reconciler-only fallback.", + count: inventory.projects.length, + details: inventory.projects.map( + (p) => `projects/${p.rekeyTo ?? 
p.projectId}/.next-session-id.json`, + ), + }); + + // Step 6: Rewrite session JSONs (legacy tmux + workspace paths) + const sessionsToRewrite = + inventory.projects.reduce( + (sum, p) => sum + p.legacyTmuxNamesInMetadata + p.legacyWorkspacePathsInMetadata, + 0, + ); + if (sessionsToRewrite > 0) { + steps.push({ + order: 6, + id: "rewrite-session-metadata", + title: "Rewrite session JSONs with stale tmuxName / workspacePath", + description: + "For every session whose tmuxName uses legacy storageKey prefix, doubled prefix, or whose workspacePath points at ~/.worktrees/, rewrite to V3 format. tmuxName ≡ sessionId.", + count: sessionsToRewrite, + details: inventory.projects + .filter( + (p) => p.legacyTmuxNamesInMetadata + p.legacyWorkspacePathsInMetadata > 0, + ) + .map( + (p) => + `${p.projectId}: ${p.legacyTmuxNamesInMetadata} legacy tmux + ${p.legacyWorkspacePathsInMetadata} legacy paths`, + ), + }); + } + + // Step 7: Tmux session renames (live) + const tmuxRenames = inventory.liveTmuxSessions.filter( + (t) => t.convention === "doubled-prefix" || t.convention === "legacy-storagekey", + ); + if (tmuxRenames.length > 0) { + steps.push({ + order: 7, + id: "rename-tmux-sessions", + title: "Rename live tmux sessions to V3 names", + description: + "tmux rename-session for each non-V3 live session. 
Failure to rename = warn + continue (user can re-attach manually).", + count: tmuxRenames.length, + details: tmuxRenames.map((t) => `${t.name} (${t.convention})`), + }); + } + + // Step 8: Adopt stranded worktrees + if (inventory.strandedWorktrees.length > 0) { + steps.push({ + order: 8, + id: "adopt-stranded-worktrees", + title: "Adopt stranded ~/.worktrees/ leaves into projects/{id}/worktrees/", + description: + "For each leaf in ~/.worktrees/{name}/{sid}: find session JSON whose branch matches; mv into projects/{id}/worktrees/{sid}; rewrite workspacePath; run git worktree repair.", + count: inventory.strandedWorktrees.length, + details: inventory.strandedWorktrees.map( + (w) => + `${w.path} → ${ + w.candidateProjectId + ? `projects/${w.candidateProjectId}/worktrees/${w.candidateSessionId ?? "?"}` + : "(no candidate match — adopt with explicit flag)" + }`, + ), + }); + } + + // Step 8b: Normalize numbered orchestrators + const projectsWithNumberedOrchestrator = inventory.projects.filter( + (p) => p.orchestratorVariants.some((v) => /-orchestrator-\d+$/.test(v)), + ); + if (projectsWithNumberedOrchestrator.length > 0) { + const totalNumbered = projectsWithNumberedOrchestrator.reduce( + (sum, p) => + sum + p.orchestratorVariants.filter((v) => /-orchestrator-\d+$/.test(v)).length, + 0, + ); + steps.push({ + order: 9, + id: "normalize-orchestrators", + title: "Normalize numbered orchestrators to one-per-project", + description: + "For each project with multiple {prefix}-orchestrator-N entries, pick the most recent live one as canonical {prefix}-orchestrator and archive the rest. 
Detection regex tightens to ^{prefix}-orchestrator$.", + count: totalNumbered, + details: projectsWithNumberedOrchestrator.map( + (p) => + `${p.projectId}: ${p.orchestratorVariants.filter((v) => /-orchestrator-\d+$/.test(v)).length} numbered variants`, + ), + }); + } + + // Step 9: Collapse observability + if (inventory.observability.rootLevelDirCount > 0) { + steps.push({ + order: 10, + id: "collapse-observability", + title: "Collapse root-level *-observability dirs into projects/{id}/observability/", + description: + "Read each obs JSON's projectId field and route into the matching project; unattributable files go to ~/.agent-orchestrator/observability/orphan/. Remove emptied {hash}-observability dirs.", + count: inventory.observability.rootLevelDirCount, + details: [ + `${inventory.observability.rootLevelDirCount} dirs (~${formatBytes( + inventory.observability.bytes, + )} of obs data)`, + ], + }); + } + + // Step 10: Strip storageKey + const storageKeyFieldsToStrip = inventory.globalConfigIssues.filter( + (i) => i.kind === "storageKey-field-present", + ).length; + if (storageKeyFieldsToStrip > 0) { + steps.push({ + order: 11, + id: "strip-storage-key", + title: "Strip storageKey field from config.yaml entries", + description: + "Walk config.yaml, remove storageKey from every project entry. Bump global schemaVersion to 3.", + count: storageKeyFieldsToStrip, + details: inventory.globalConfigIssues + .filter((i) => i.kind === "storageKey-field-present") + .map((i) => i.ref ?? 
"") + .filter((r) => r), + }); + } + + // Step 11: GC bare hash + .migrated dirs + if (inventory.bareHashDirs.length + inventory.migratedDirs.length > 0) { + steps.push({ + order: 12, + id: "gc-stranded-dirs", + title: "GC bare hash and .migrated directories", + description: + "Delete bare {12-hex}/ and {12-hex}.migrated/ at root after safety check (must be empty or completed-migration markers).", + count: inventory.bareHashDirs.length + inventory.migratedDirs.length, + details: [...inventory.bareHashDirs, ...inventory.migratedDirs], + }); + } + + // Step 12: Dead-export manifest + steps.push({ + order: 13, + id: "dead-export-manifest", + title: "Write dead-export manifest for follow-up deletion PR", + description: + "Emit migrations/v3-{ts}.dead-exports.txt listing exports from @aoagents/ao-core with zero non-test callers. Migrator does NOT delete code.", + count: 12, + details: [ + "deriveStorageKey", + "generateTmuxName", + "parseTmuxName", + "getProjectBaseDir", + "getSessionsDir", + "getWorktreesDir", + "getFeedbackReportsDir", + "getArchiveDir", + "getOriginFilePath", + "validateAndStoreOrigin", + "requireStorageKey", + "generateConfigHash", + ], + }); + + // Totals + const totals = { + projectsToRekey: v1Projects.length, + sessionsToRewrite, + tmuxRenames: tmuxRenames.length, + worktreeAdoptions: inventory.strandedWorktrees.length, + orchestratorsToNormalize: projectsWithNumberedOrchestrator.reduce( + (sum, p) => + sum + p.orchestratorVariants.filter((v) => /-orchestrator-\d+$/.test(v)).length, + 0, + ), + observabilityDirsToCollapse: inventory.observability.rootLevelDirCount, + bareHashDirsToRemove: inventory.bareHashDirs.length, + storageKeyFieldsToStrip, + estimatedBytesFreed: + inventory.observability.bytes + + inventory.bareHashDirs.length * 1024 + + inventory.migratedDirs.length * 1024, + }; + + // Warnings + blockers (informational; --execute is gated regardless) + const warnings: string[] = []; + const blockers: string[] = []; + + if 
(inventory.liveTmuxSessions.length > 0) { + warnings.push( + `${inventory.liveTmuxSessions.length} live tmux session(s) detected. Execution would refuse without --force.`, + ); + } + if (inventory.duplicateRepos.length > 0) { + warnings.push( + `${inventory.duplicateRepos.length} same-repo duplicate(s) detected. Execution would prompt; default merges.`, + ); + } + if (inventory.v1HashDirs.length > 0 && v1Projects.length === 0) { + warnings.push( + `${inventory.v1HashDirs.length} legacy hash directory layout(s) detected outside the registry. Manual review recommended.`, + ); + } + + return { + schemaVersion: 3, + generatedAt, + aoVersion, + inventory, + steps, + totals, + warnings, + blockers, + }; +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +function formatBytes(bytes: number): string { + if (bytes < 1024) return `${bytes} B`; + if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(1)} KB`; + if (bytes < 1024 * 1024 * 1024) return `${(bytes / (1024 * 1024)).toFixed(1)} MB`; + return `${(bytes / (1024 * 1024 * 1024)).toFixed(2)} GB`; +} + +export { formatBytes };