diff --git a/README.md b/README.md index c339c97..073283f 100644 --- a/README.md +++ b/README.md @@ -102,7 +102,7 @@ Analysis.use(startAnalysis, { token: process.env.T_ANALYSIS_TOKEN }); ``` -If you want to use the Debugger with -D, make sure you have a **.swcrc** file with sourceMaps activated. This repository contains a .swcrc.example file if you prefer to just copy to your folder. +`tagoio run` executes your TypeScript file directly using Node's native experimental-transform-types runtime, so no build step or loader configuration is required. The `-d` flag forwards to Node so you can attach a debugger. ## Working with Environments @@ -198,6 +198,37 @@ Having a `tagoconfig.json` file is essential for executing several commands, suc ``` +## Using in CI/CD Pipelines + +Deploy every analysis from `tagoconfig.json` directly from a GitHub Actions workflow — no need to maintain a custom deploy script per project: + +```yaml +- name: Install TagoIO CLI and builder + run: npm install -g @tago-io/cli @tago-io/builder + +- name: Deploy analyses to TagoIO + run: tagoio deploy --all --env production -t ${{ secrets.TAGOIO_TOKEN }} --silent +``` + +The flag combination: +- `--all` — deploys every analysis registered in `tagoconfig.json` without any interactive prompt +- `--env, --environment` — picks the environment block from `tagoconfig.json` +- `-t, --token` — a TagoIO token. Accepts either a **profile token** or an **external-analysis token** (see permissions below). Bypasses the local `.tago-lock` file, which doesn't exist in CI runners +- `--silent` — skips confirmation prompts + +Together, the command runs fully non-interactively — suitable for any CI/CD system. No call to `tagoio init` or `tagoio login` is required before deploy, but your repository **must** include a pre-configured `tagoconfig.json` mapping each analysis file to its analysis ID. 
+ +### Required permissions when using an external-analysis token + +Profile tokens always have full access and need no extra setup. If you'd rather use a scoped external-analysis token (recommended for least-privilege CI pipelines), create an Access Management rule in TagoIO with the **Analysis** resource type and the following permissions enabled: + +- **Access Analysis** +- **Edit Analysis** +- **Upload Analysis Script** + +Attach that rule to the token you pass via `-t, --token`. Without these permissions, `tagoio deploy` will fail with an Authorization Denied error from the TagoIO API. + + ## License TagoIO SDK for JavaScript in the browser and Node.js is released under the [Apache-2.0 License](https://github.com/tagoio-cli/blob/master/LICENSE.md). diff --git a/package-lock.json b/package-lock.json index 39a657a..83667ed 100644 --- a/package-lock.json +++ b/package-lock.json @@ -20,6 +20,7 @@ "ora": "^9.4.0", "prompts": "^2.4.2", "string-comparison": "^1.3.0", + "tsx": "^4.21.0", "unzipper": "^0.12.3" }, "bin": { @@ -136,6 +137,422 @@ "tslib": "^2.4.0" } }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.7.tgz", + "integrity": "sha512-EKX3Qwmhz1eMdEJokhALr0YiD0lhQNwDqkPYyPhiSwKrh7/4KRjQc04sZ8db+5DVVnZ1LmbNDI1uAMPEUBnQPg==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.7.tgz", + "integrity": "sha512-jbPXvB4Yj2yBV7HUfE2KHe4GJX51QplCN1pGbYjvsyCZbQmies29EoJbkEc+vYuU5o45AfQn37vZlyXy4YJ8RQ==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.7", + "resolved": 
"https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.7.tgz", + "integrity": "sha512-62dPZHpIXzvChfvfLJow3q5dDtiNMkwiRzPylSCfriLvZeq0a1bWChrGx/BbUbPwOrsWKMn8idSllklzBy+dgQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.7.tgz", + "integrity": "sha512-x5VpMODneVDb70PYV2VQOmIUUiBtY3D3mPBG8NxVk5CogneYhkR7MmM3yR/uMdITLrC1ml/NV1rj4bMJuy9MCg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.7.tgz", + "integrity": "sha512-5lckdqeuBPlKUwvoCXIgI2D9/ABmPq3Rdp7IfL70393YgaASt7tbju3Ac+ePVi3KDH6N2RqePfHnXkaDtY9fkw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.7.tgz", + "integrity": "sha512-rYnXrKcXuT7Z+WL5K980jVFdvVKhCHhUwid+dDYQpH+qu+TefcomiMAJpIiC2EM3Rjtq0sO3StMV/+3w3MyyqQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.7.tgz", + "integrity": "sha512-B48PqeCsEgOtzME2GbNM2roU29AMTuOIN91dsMO30t+Ydis3z/3Ngoj5hhnsOSSwNzS+6JppqWsuhTp6E82l2w==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.7", 
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.7.tgz", + "integrity": "sha512-jOBDK5XEjA4m5IJK3bpAQF9/Lelu/Z9ZcdhTRLf4cajlB+8VEhFFRjWgfy3M1O4rO2GQ/b2dLwCUGpiF/eATNQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.7.tgz", + "integrity": "sha512-RkT/YXYBTSULo3+af8Ib0ykH8u2MBh57o7q/DAs3lTJlyVQkgQvlrPTnjIzzRPQyavxtPtfg0EopvDyIt0j1rA==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.7.tgz", + "integrity": "sha512-RZPHBoxXuNnPQO9rvjh5jdkRmVizktkT7TCDkDmQ0W2SwHInKCAV95GRuvdSvA7w4VMwfCjUiPwDi0ZO6Nfe9A==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.7.tgz", + "integrity": "sha512-GA48aKNkyQDbd3KtkplYWT102C5sn/EZTY4XROkxONgruHPU72l+gW+FfF8tf2cFjeHaRbWpOYa/uRBz/Xq1Pg==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.7.tgz", + "integrity": "sha512-a4POruNM2oWsD4WKvBSEKGIiWQF8fZOAsycHOt6JBpZ+JN2n2JH9WAv56SOyu9X5IqAjqSIPTaJkqN8F7XOQ5Q==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.7", 
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.7.tgz", + "integrity": "sha512-KabT5I6StirGfIz0FMgl1I+R1H73Gp0ofL9A3nG3i/cYFJzKHhouBV5VWK1CSgKvVaG4q1RNpCTR2LuTVB3fIw==", + "cpu": [ + "mips64el" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.7.tgz", + "integrity": "sha512-gRsL4x6wsGHGRqhtI+ifpN/vpOFTQtnbsupUF5R5YTAg+y/lKelYR1hXbnBdzDjGbMYjVJLJTd2OFmMewAgwlQ==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.7.tgz", + "integrity": "sha512-hL25LbxO1QOngGzu2U5xeXtxXcW+/GvMN3ejANqXkxZ/opySAZMrc+9LY/WyjAan41unrR3YrmtTsUpwT66InQ==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.7.tgz", + "integrity": "sha512-2k8go8Ycu1Kb46vEelhu1vqEP+UeRVj2zY1pSuPdgvbd5ykAw82Lrro28vXUrRmzEsUV0NzCf54yARIK8r0fdw==", + "cpu": [ + "s390x" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.7.tgz", + "integrity": "sha512-hzznmADPt+OmsYzw1EE33ccA+HPdIqiCRq7cQeL1Jlq2gb1+OyWBkMCrYGBJ+sxVzve2ZJEVeePbLM2iEIZSxA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": 
"0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.7.tgz", + "integrity": "sha512-b6pqtrQdigZBwZxAn1UpazEisvwaIDvdbMbmrly7cDTMFnw/+3lVxxCTGOrkPVnsYIosJJXAsILG9XcQS+Yu6w==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.7.tgz", + "integrity": "sha512-OfatkLojr6U+WN5EDYuoQhtM+1xco+/6FSzJJnuWiUw5eVcicbyK3dq5EeV/QHT1uy6GoDhGbFpprUiHUYggrw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.7.tgz", + "integrity": "sha512-AFuojMQTxAz75Fo8idVcqoQWEHIXFRbOc1TrVcFSgCZtQfSdc1RXgB3tjOn/krRHENUB4j00bfGjyl2mJrU37A==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.7.tgz", + "integrity": "sha512-+A1NJmfM8WNDv5CLVQYJ5PshuRm/4cI6WMZRg1by1GwPIQPCTs1GLEUHwiiQGT5zDdyLiRM/l1G0Pv54gvtKIg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.7.tgz", + "integrity": "sha512-+KrvYb/C8zA9CU/g0sR6w2RBw7IGc5J2BPnc3dYc5VJxHCSF1yNMxTV5LQ7GuKteQXZtspjFbiuW5/dOj7H4Yw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + 
"node_modules/@esbuild/sunos-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.7.tgz", + "integrity": "sha512-ikktIhFBzQNt/QDyOL580ti9+5mL/YZeUPKU2ivGtGjdTYoqz6jObj6nOMfhASpS4GU4Q/Clh1QtxWAvcYKamA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.7.tgz", + "integrity": "sha512-7yRhbHvPqSpRUV7Q20VuDwbjW5kIMwTHpptuUzV+AA46kiPze5Z7qgt6CLCK3pWFrHeNfDd1VKgyP4O+ng17CA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.7.tgz", + "integrity": "sha512-SmwKXe6VHIyZYbBLJrhOoCJRB/Z1tckzmgTLfFYOfpMAx63BJEaL9ExI8x7v0oAO3Zh6D/Oi1gVxEYr5oUCFhw==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.7.tgz", + "integrity": "sha512-56hiAJPhwQ1R4i+21FVF7V8kSD5zZTdHcVuRFMW0hn753vVfQN8xlx4uOPT4xoGH0Z/oVATuR82AiqSTDIpaHg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, "node_modules/@jridgewell/resolve-uri": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", @@ -1618,6 +2035,47 @@ "node": ">= 0.4" } }, + "node_modules/esbuild": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.7.tgz", + "integrity": 
"sha512-IxpibTjyVnmrIQo5aqNpCgoACA/dTKLTlhMHihVHhdkxKyPO1uBBthumT0rdHmcsk9uMonIWS0m4FljWzILh3w==", + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.7", + "@esbuild/android-arm": "0.27.7", + "@esbuild/android-arm64": "0.27.7", + "@esbuild/android-x64": "0.27.7", + "@esbuild/darwin-arm64": "0.27.7", + "@esbuild/darwin-x64": "0.27.7", + "@esbuild/freebsd-arm64": "0.27.7", + "@esbuild/freebsd-x64": "0.27.7", + "@esbuild/linux-arm": "0.27.7", + "@esbuild/linux-arm64": "0.27.7", + "@esbuild/linux-ia32": "0.27.7", + "@esbuild/linux-loong64": "0.27.7", + "@esbuild/linux-mips64el": "0.27.7", + "@esbuild/linux-ppc64": "0.27.7", + "@esbuild/linux-riscv64": "0.27.7", + "@esbuild/linux-s390x": "0.27.7", + "@esbuild/linux-x64": "0.27.7", + "@esbuild/netbsd-arm64": "0.27.7", + "@esbuild/netbsd-x64": "0.27.7", + "@esbuild/openbsd-arm64": "0.27.7", + "@esbuild/openbsd-x64": "0.27.7", + "@esbuild/openharmony-arm64": "0.27.7", + "@esbuild/sunos-x64": "0.27.7", + "@esbuild/win32-arm64": "0.27.7", + "@esbuild/win32-ia32": "0.27.7", + "@esbuild/win32-x64": "0.27.7" + } + }, "node_modules/estree-walker": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", @@ -1695,7 +2153,6 @@ "version": "2.3.3", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "dev": true, "hasInstallScript": true, "license": "MIT", "optional": true, @@ -1764,6 +2221,18 @@ "node": ">= 0.4" } }, + "node_modules/get-tsconfig": { + "version": "4.14.0", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.14.0.tgz", + "integrity": "sha512-yTb+8DXzDREzgvYmh6s9vHsSVCHeC0G3PI5bEXNBHtmshPnO+S5O7qgLEOn0I5QvMy6kpZN8K1NKGyilLb93wA==", + "license": "MIT", + "dependencies": { + 
"resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, "node_modules/gopd": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", @@ -2579,6 +3048,15 @@ "util-deprecate": "~1.0.1" } }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, "node_modules/restore-cursor": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-5.1.0.tgz", @@ -2905,6 +3383,25 @@ "license": "0BSD", "optional": true }, + "node_modules/tsx": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.21.0.tgz", + "integrity": "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==", + "license": "MIT", + "dependencies": { + "esbuild": "~0.27.0", + "get-tsconfig": "^4.7.5" + }, + "bin": { + "tsx": "dist/cli.mjs" + }, + "engines": { + "node": ">=18.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + } + }, "node_modules/typescript": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/typescript/-/typescript-6.0.3.tgz", diff --git a/package.json b/package.json index ce1fda6..fd380b6 100644 --- a/package.json +++ b/package.json @@ -49,6 +49,7 @@ "ora": "^9.4.0", "prompts": "^2.4.2", "string-comparison": "^1.3.0", + "tsx": "^4.21.0", "unzipper": "^0.12.3" }, "devDependencies": { diff --git a/src/commands/analysis/deploy.test.ts b/src/commands/analysis/deploy.test.ts index 82469ac..7788df6 100644 --- a/src/commands/analysis/deploy.test.ts +++ b/src/commands/analysis/deploy.test.ts @@ -1,4 +1,3 @@ -import prompts from "prompts"; import { afterEach, beforeEach, 
describe, expect, test, vi } from "vitest"; import { makeEnvironmentConfig } from "../../test-utils/mock-config.js"; @@ -6,7 +5,7 @@ import { makeAccount } from "../../test-utils/mock-sdk.js"; import { resetInjectedPrompts } from "../../test-utils/reset-prompts.js"; const getEnvironmentConfigMock = vi.fn(); -const errorHandlerMock = vi.fn((str: unknown): void => { +const errorHandlerMock = vi.fn<(str: unknown) => void>((str) => { throw new Error(String(str)); }); const successMSGMock = vi.fn(); @@ -15,6 +14,8 @@ const statMock = vi.fn(); const unlinkMock = vi.fn(); const execSyncMock = vi.fn(); const detectRuntimeMock = vi.fn(); +const chooseAnalysisListFromConfigMock = vi.fn(); +const confirmAnalysisFromConfigMock = vi.fn(); let accountInstance: ReturnType; @@ -54,21 +55,42 @@ vi.mock("../../lib/messages.js", () => ({ highlightMSG: (s: string) => s, })); +vi.mock("../../prompt/choose-analysis-list-config.js", () => ({ + chooseAnalysisListFromConfig: (...args: unknown[]) => chooseAnalysisListFromConfigMock(...args), +})); + +vi.mock("../../prompt/confirm-analysis-list.js", () => ({ + confirmAnalysisFromConfig: (...args: unknown[]) => confirmAnalysisFromConfigMock(...args), +})); + describe("deployAnalysis", () => { const analysisList = [{ name: "scriptA", fileName: "a.ts", id: "an-1" }]; + /** Default CLI options shape — individual tests override fields as needed. 
*/ + const defaultOptions = () => ({ + environment: "prod", + silent: true, + deno: false, + node: false, + all: false, + }); + let exitSpy: ReturnType; beforeEach(() => { accountInstance = makeAccount(); getEnvironmentConfigMock.mockReset(); - errorHandlerMock.mockClear(); + errorHandlerMock.mockReset().mockImplementation((str: unknown) => { + throw new Error(String(str)); + }); successMSGMock.mockClear(); readFileMock.mockReset(); statMock.mockReset().mockResolvedValue(null); unlinkMock.mockReset(); execSyncMock.mockReset(); detectRuntimeMock.mockReset().mockReturnValue("--node"); + chooseAnalysisListFromConfigMock.mockReset(); + confirmAnalysisFromConfigMock.mockReset(); exitSpy = vi.spyOn(process, "exit").mockImplementation(((code?: number) => { throw new Error(`__exit:${code ?? 0}`); }) as never); @@ -79,11 +101,11 @@ describe("deployAnalysis", () => { exitSpy.mockRestore(); }); - test("calls errorHandler when the environment is missing", async () => { + test("errors when no profile token is available (no lock file and no --token)", async () => { getEnvironmentConfigMock.mockReturnValue(makeEnvironmentConfig({ profileToken: "" })); const { deployAnalysis } = await import("./deploy.js"); - await expect(deployAnalysis("a.ts", { environment: "prod", silent: true, deno: false, node: false })).rejects.toThrow(/Environment not found/); + await expect(deployAnalysis("a.ts", defaultOptions())).rejects.toThrow(/No profile token found/); }); test("deploys a single matched script and emits a success message", async () => { @@ -94,7 +116,7 @@ describe("deployAnalysis", () => { readFileMock.mockResolvedValue("ZmFrZS1zY3JpcHQ="); const { deployAnalysis } = await import("./deploy.js"); - await expect(deployAnalysis("scriptA", { environment: "prod", silent: true, deno: false, node: false })).rejects.toThrow(/__exit:0/); + await expect(deployAnalysis("scriptA", defaultOptions())).rejects.toThrow(/__exit:0/); expect(execSyncMock).toHaveBeenCalled(); 
expect(accountInstance.analysis.uploadScript).toHaveBeenCalledWith("an-1", expect.objectContaining({ content: "ZmFrZS1zY3JpcHQ=" })); @@ -106,31 +128,74 @@ describe("deployAnalysis", () => { accountInstance.analysis.info.mockResolvedValue({ runtime: "node" }); const { deployAnalysis } = await import("./deploy.js"); - await expect(deployAnalysis("scriptA", { environment: "prod", silent: true, deno: true, node: true })).rejects.toThrow(/Cannot specify both/); + await expect(deployAnalysis("scriptA", { ...defaultOptions(), deno: true, node: true })).rejects.toThrow(/Cannot specify both/); }); test("errors when no analysis name matches the search", async () => { getEnvironmentConfigMock.mockReturnValue(makeEnvironmentConfig({ analysisList: [] })); const { deployAnalysis } = await import("./deploy.js"); - await expect(deployAnalysis("nope", { environment: "prod", silent: true, deno: false, node: false })).rejects.toThrow(/No analysis found/); + await expect(deployAnalysis("nope", defaultOptions())).rejects.toThrow(/No analysis found/); }); - test("uses chooseAnalysisListFromConfig prompt when 'all' is passed", async () => { - getEnvironmentConfigMock.mockReturnValue(makeEnvironmentConfig({ analysisList })); + test("rejects the legacy 'all' positional with a pointer to --all", async () => { + // No env config needed — the check runs before getEnvironmentConfig. + const { deployAnalysis } = await import("./deploy.js"); + await expect(deployAnalysis("all", defaultOptions())).rejects.toThrow( + 'Did you mean "tagoio deploy --all"? 
The "all" positional argument is no longer supported.', + ); + expect(accountInstance.analysis.uploadScript).not.toHaveBeenCalled(); + }); + + test("--all deploys every analysis from the config without prompting", async () => { + const list = [ + { name: "scriptA", fileName: "a.ts", id: "an-1" }, + { name: "scriptB", fileName: "b.ts", id: "an-2" }, + ]; + getEnvironmentConfigMock.mockReturnValue(makeEnvironmentConfig({ analysisList: list })); accountInstance.analysis.info.mockResolvedValue({ runtime: "node" }); accountInstance.analysis.uploadScript.mockResolvedValue(undefined); accountInstance.analysis.edit.mockResolvedValue(undefined); readFileMock.mockResolvedValue("ZmFrZS1zY3JpcHQ="); - prompts.inject([[analysisList[0]]]); + const { deployAnalysis } = await import("./deploy.js"); + await expect(deployAnalysis("", { ...defaultOptions(), all: true })).rejects.toThrow(/__exit:0/); + + expect(accountInstance.analysis.uploadScript).toHaveBeenCalledTimes(2); + }); + + test("-t/--token overrides the lock-file token for this run", async () => { + // Simulate a CI runner: env config exists but carries no token. + getEnvironmentConfigMock.mockReturnValue(makeEnvironmentConfig({ analysisList, profileToken: "" })); + accountInstance.analysis.info.mockResolvedValue({ runtime: "node" }); + accountInstance.analysis.uploadScript.mockResolvedValue(undefined); + accountInstance.analysis.edit.mockResolvedValue(undefined); + readFileMock.mockResolvedValue("ZmFrZS1zY3JpcHQ="); const { deployAnalysis } = await import("./deploy.js"); - await expect(deployAnalysis("all", { environment: "prod", silent: true, deno: false, node: false })).rejects.toThrow(/__exit:0/); + await expect(deployAnalysis("scriptA", { ...defaultOptions(), token: "ci-token" })).rejects.toThrow(/__exit:0/); + // Upload was reached → the token override made it past the auth gate. 
expect(accountInstance.analysis.uploadScript).toHaveBeenCalled(); }); + test("--all + -t/--token works end-to-end with no lock file (CI flow)", async () => { + const list = [ + { name: "scriptA", fileName: "a.ts", id: "an-1" }, + { name: "scriptB", fileName: "b.ts", id: "an-2" }, + ]; + getEnvironmentConfigMock.mockReturnValue(makeEnvironmentConfig({ analysisList: list, profileToken: "" })); + accountInstance.analysis.info.mockResolvedValue({ runtime: "node" }); + accountInstance.analysis.uploadScript.mockResolvedValue(undefined); + accountInstance.analysis.edit.mockResolvedValue(undefined); + readFileMock.mockResolvedValue("ZmFrZS1zY3JpcHQ="); + + const { deployAnalysis } = await import("./deploy.js"); + await expect(deployAnalysis("", { ...defaultOptions(), all: true, token: "ci-token" })).rejects.toThrow(/__exit:0/); + + expect(accountInstance.analysis.uploadScript).toHaveBeenCalledTimes(2); + }); + test("bundles with deno when --deno flag is set", async () => { getEnvironmentConfigMock.mockReturnValue(makeEnvironmentConfig({ analysisList })); accountInstance.analysis.info.mockResolvedValue({ runtime: "deno" }); @@ -140,7 +205,7 @@ describe("deployAnalysis", () => { const logSpy = vi.spyOn(console, "log").mockImplementation(() => undefined); const { deployAnalysis } = await import("./deploy.js"); - await expect(deployAnalysis("scriptA", { environment: "prod", silent: true, deno: true, node: false })).rejects.toThrow(/__exit:0/); + await expect(deployAnalysis("scriptA", { ...defaultOptions(), deno: true })).rejects.toThrow(/__exit:0/); expect(execSyncMock).toHaveBeenCalledWith(expect.stringContaining("deno bundle"), expect.any(Object)); logSpy.mockRestore(); @@ -155,7 +220,7 @@ describe("deployAnalysis", () => { const logSpy = vi.spyOn(console, "log").mockImplementation(() => undefined); const { deployAnalysis } = await import("./deploy.js"); - await expect(deployAnalysis("scriptA", { environment: "prod", silent: true, deno: false, node: true 
})).rejects.toThrow(/__exit:0/); + await expect(deployAnalysis("scriptA", { ...defaultOptions(), node: true })).rejects.toThrow(/__exit:0/); expect(logSpy).toHaveBeenCalledWith("deploying with node"); logSpy.mockRestore(); @@ -171,7 +236,7 @@ describe("deployAnalysis", () => { unlinkMock.mockResolvedValue(undefined); const { deployAnalysis } = await import("./deploy.js"); - await expect(deployAnalysis("scriptA", { environment: "prod", silent: true, deno: false, node: false })).rejects.toThrow(/__exit:0/); + await expect(deployAnalysis("scriptA", defaultOptions())).rejects.toThrow(/__exit:0/); expect(unlinkMock).toHaveBeenCalled(); }); @@ -185,7 +250,7 @@ describe("deployAnalysis", () => { readFileMock.mockResolvedValue("ZmFrZS1zY3JpcHQ="); const { deployAnalysis } = await import("./deploy.js"); - await expect(deployAnalysis("scriptA", { environment: "prod", silent: true, deno: false, node: false })).rejects.toThrow(/__exit:0/); + await expect(deployAnalysis("scriptA", defaultOptions())).rejects.toThrow(/__exit:0/); expect(execSyncMock).toHaveBeenCalledWith(expect.stringContaining("nested/a.ts"), expect.any(Object)); }); @@ -198,7 +263,7 @@ describe("deployAnalysis", () => { const { deployAnalysis } = await import("./deploy.js"); // script read fails → buildScript returns early → loop finishes → process.exit() - await expect(deployAnalysis("scriptA", { environment: "prod", silent: true, deno: false, node: false })).rejects.toThrow(/__exit:0/); + await expect(deployAnalysis("scriptA", defaultOptions())).rejects.toThrow(/__exit:0/); expect(accountInstance.analysis.uploadScript).not.toHaveBeenCalled(); }); @@ -213,7 +278,7 @@ describe("deployAnalysis", () => { const { deployAnalysis } = await import("./deploy.js"); // Don't assert the exact thrown message; just that the command resolves or throws w/o upload. 
- await deployAnalysis("scriptA", { environment: "prod", silent: true, deno: false, node: false }).catch(() => undefined); + await deployAnalysis("scriptA", defaultOptions()).catch(() => undefined); expect(accountInstance.analysis.uploadScript).not.toHaveBeenCalled(); }); @@ -227,7 +292,7 @@ describe("deployAnalysis", () => { errorHandlerMock.mockImplementationOnce(() => undefined); const { deployAnalysis } = await import("./deploy.js"); - await expect(deployAnalysis("scriptA", { environment: "prod", silent: true, deno: false, node: false })).rejects.toThrow(/__exit:0/); + await expect(deployAnalysis("scriptA", defaultOptions())).rejects.toThrow(/__exit:0/); expect(errorHandlerMock).toHaveBeenCalledWith(expect.stringContaining("Script upload failed")); }); @@ -244,7 +309,7 @@ describe("deployAnalysis", () => { readFileMock.mockResolvedValue("ZmFrZS1zY3JpcHQ="); const { deployAnalysis } = await import("./deploy.js"); - await expect(deployAnalysis("scriptA", { environment: "prod", silent: true, deno: false, node: false })).rejects.toThrow(/__exit:0/); + await expect(deployAnalysis("scriptA", defaultOptions())).rejects.toThrow(/__exit:0/); // First execSync call should reference the default paths expect(execSyncMock).toHaveBeenCalled(); @@ -252,4 +317,63 @@ describe("deployAnalysis", () => { expect(cmd).toContain("./src/analysis/a.ts"); expect(cmd).toContain("./build/a.tago.js"); }); + + test("returns error when getEnvironmentConfig yields undefined", async () => { + getEnvironmentConfigMock.mockReturnValue(undefined); + + const { deployAnalysis } = await import("./deploy.js"); + await expect(deployAnalysis("scriptA", defaultOptions())).rejects.toThrow(/Environment not found/); + expect(accountInstance.analysis.uploadScript).not.toHaveBeenCalled(); + }); + + test("opens the interactive picker when no script name and --all are provided", async () => { + getEnvironmentConfigMock.mockReturnValue(makeEnvironmentConfig({ analysisList })); + 
chooseAnalysisListFromConfigMock.mockResolvedValue(analysisList); + accountInstance.analysis.info.mockResolvedValue({ runtime: "node" }); + accountInstance.analysis.uploadScript.mockResolvedValue(undefined); + accountInstance.analysis.edit.mockResolvedValue(undefined); + readFileMock.mockResolvedValue("ZmFrZS1zY3JpcHQ="); + + const { deployAnalysis } = await import("./deploy.js"); + await expect(deployAnalysis("", defaultOptions())).rejects.toThrow(/__exit:0/); + + expect(chooseAnalysisListFromConfigMock).toHaveBeenCalled(); + }); + + test("prompts for confirmation when silent is false and a name is provided", async () => { + getEnvironmentConfigMock.mockReturnValue(makeEnvironmentConfig({ analysisList })); + confirmAnalysisFromConfigMock.mockResolvedValue(analysisList); + accountInstance.analysis.info.mockResolvedValue({ runtime: "node" }); + accountInstance.analysis.uploadScript.mockResolvedValue(undefined); + accountInstance.analysis.edit.mockResolvedValue(undefined); + readFileMock.mockResolvedValue("ZmFrZS1zY3JpcHQ="); + + const { deployAnalysis } = await import("./deploy.js"); + await expect(deployAnalysis("scriptA", { ...defaultOptions(), silent: false })).rejects.toThrow(/__exit:0/); + + expect(confirmAnalysisFromConfigMock).toHaveBeenCalled(); + }); + + test("cancels with a clear error when the interactive picker returns an empty list", async () => { + getEnvironmentConfigMock.mockReturnValue(makeEnvironmentConfig({ analysisList })); + chooseAnalysisListFromConfigMock.mockResolvedValue([]); + + const { deployAnalysis } = await import("./deploy.js"); + await expect(deployAnalysis("", defaultOptions())).rejects.toThrow(/Cancelled/); + + expect(accountInstance.analysis.uploadScript).not.toHaveBeenCalled(); + }); + + test("sets run_on to 'tago' after a successful upload", async () => { + getEnvironmentConfigMock.mockReturnValue(makeEnvironmentConfig({ analysisList })); + accountInstance.analysis.info.mockResolvedValue({ runtime: "node" }); + 
accountInstance.analysis.uploadScript.mockResolvedValue(undefined); + accountInstance.analysis.edit.mockResolvedValue(undefined); + readFileMock.mockResolvedValue("ZmFrZS1zY3JpcHQ="); + + const { deployAnalysis } = await import("./deploy.js"); + await expect(deployAnalysis("scriptA", defaultOptions())).rejects.toThrow(/__exit:0/); + + expect(accountInstance.analysis.edit).toHaveBeenCalledWith("an-1", { run_on: "tago" }); + }); }); diff --git a/src/commands/analysis/deploy.ts b/src/commands/analysis/deploy.ts index 838a180..1cb8b25 100644 --- a/src/commands/analysis/deploy.ts +++ b/src/commands/analysis/deploy.ts @@ -108,36 +108,58 @@ async function buildScript(params: BuildScriptParams) { }); } +interface IDeployOptions { + environment: string; + silent: boolean; + deno: boolean; + node: boolean; + /** Deploy every analysis from tagoconfig.json without prompting (for CI/CD). */ + all: boolean; + /** Profile token for this invocation, bypassing the lock file (for CI/CD). */ + token?: string; +} + /** * Deploys an analysis script to the specified environment. Picks default environment if none is specified. * @param cmdScriptName - The name of the script to deploy. * @param options - The options for the deployment. - * @param options.environment - The environment to deploy the script to. - * @param options.silent - Whether to skip confirmation prompts. * @returns void */ -async function deployAnalysis(cmdScriptName: string, options: { environment: string; silent: boolean; deno: boolean; node: boolean }) { +async function deployAnalysis(cmdScriptName: string, options: IDeployOptions) { + if (cmdScriptName === "all") { + errorHandler('Did you mean "tagoio deploy --all"? 
The "all" positional argument is no longer supported.'); + } + const config = getEnvironmentConfig(options.environment); - if (!config || !config.profileToken) { + if (!config) { errorHandler("Environment not found"); } - // check if script has a file - let scriptList = config.analysisList.filter((x) => x.fileName); - if (!cmdScriptName || cmdScriptName === "all") { - scriptList = await chooseAnalysisListFromConfig(scriptList); - } else { - const analysisFound: IEnvironment["analysisList"][0] = searchName( - cmdScriptName, - scriptList.map((x) => ({ names: [x.name, x.fileName], value: x })), - ); - - if (!analysisFound) { - errorHandler(`No analysis found containing name: ${cmdScriptName}`); - } + if (options.token) { + config.profileToken = options.token; + } + if (!config.profileToken) { + errorHandler("No profile token found. Pass --token or run 'tagoio login'."); + } - if (!options.silent) { - scriptList = await confirmAnalysisFromConfig([analysisFound]); + // --all skips selection entirely; everything in analysisList with a fileName ships. 
+  let scriptList = config.analysisList.filter((x) => x.fileName);
+  if (!options.all) {
+    if (!cmdScriptName) {
+      scriptList = await chooseAnalysisListFromConfig(scriptList);
+    } else {
+      const analysisFound: IEnvironment["analysisList"][0] = searchName(
+        cmdScriptName,
+        scriptList.map((x) => ({ names: [x.name, x.fileName], value: x })),
+      );
+
+      if (!analysisFound) {
+        errorHandler(`No analysis found containing name: ${cmdScriptName}`);
+      }
+
+      if (!options.silent) {
+        scriptList = await confirmAnalysisFromConfig([analysisFound]);
+      }
     }
   }
 
diff --git a/src/commands/analysis/index.ts b/src/commands/analysis/index.ts
index efd74e7..d11ec85 100644
--- a/src/commands/analysis/index.ts
+++ b/src/commands/analysis/index.ts
@@ -26,16 +26,19 @@ function analysisCommands(program: Command) {
     .option("-s, --silent", "will not prompt to confirm the deploy")
     .option("--deno", "Force build for Deno runtime", false)
     .option("--node", "Force build for Node.js runtime", false)
+    .option("--all", "deploy every analysis from tagoconfig.json without prompting", false)
+    .option("-t, --token <token>", "profile token for this run (bypasses lock file, for CI/CD)")
     .action(deployAnalysis)
     .addHelpText(
       "after",
       `
Example:
-    $ tagoio deploy all
-    $ tagoio deploy all -e stage
     $ tagoio deploy dashboard-handler
     $ tagoio deploy dashboard-handler --deno
     $ tagoio deploy dashboard-handler --node
+    $ tagoio deploy --all # deploy every analysis from tagoconfig.json
+    $ tagoio deploy --all --env stage # deploy all to the stage environment
+    $ tagoio deploy --all --env prod -t $TAGOIO_TOKEN --silent # pipeline-friendly: no prompts, no lock file needed
     $ tagoio deploy --node
     $ tagoio deploy --deno`,
     );
 
diff --git a/src/commands/analysis/run-analysis.test.ts b/src/commands/analysis/run-analysis.test.ts
index 6660597..21db59a 100644
--- a/src/commands/analysis/run-analysis.test.ts
+++ b/src/commands/analysis/run-analysis.test.ts
@@ -64,8 +64,8 @@ describe("buildCMD", () => {
     const options = { tsnd: false, 
debug: false, clear: false }; const result = _buildCMD(options, "--node"); expect(result).toContain("node"); - expect(result).toContain("--watch"); - expect(result).toContain("@swc-node/register/index"); + expect(result).toContain("tsx/dist/cli.mjs"); + expect(result).toContain("watch "); expect(result).not.toContain("--inspect"); expect(result).not.toContain("--clear"); expect(result).not.toContain("tsnd"); @@ -93,9 +93,9 @@ describe("buildCMD", () => { const options = { tsnd: false, debug: true, clear: false }; const result = _buildCMD(options, "--node"); expect(result).toContain("node"); - expect(result).toContain("--watch"); + expect(result).toContain("tsx/dist/cli.mjs"); + expect(result).toContain("watch "); expect(result).toContain("--inspect"); - expect(result).toContain("@swc-node/register/index"); expect(result).not.toContain("--clear"); expect(result).not.toContain("tsnd"); }); @@ -104,8 +104,8 @@ describe("buildCMD", () => { const options = { tsnd: false, debug: false, clear: true }; const result = _buildCMD(options, "--node"); expect(result).toContain("node"); - expect(result).toContain("--watch"); - expect(result).toContain("@swc-node/register/index"); + expect(result).toContain("tsx/dist/cli.mjs"); + expect(result).toContain("watch "); expect(result).toContain("--clear"); expect(result).not.toContain("--inspect"); expect(result).not.toContain("tsnd"); diff --git a/src/commands/analysis/run-analysis.ts b/src/commands/analysis/run-analysis.ts index a3a14b1..79c7f1b 100644 --- a/src/commands/analysis/run-analysis.ts +++ b/src/commands/analysis/run-analysis.ts @@ -39,7 +39,11 @@ function _buildCMD(options: { tsnd: boolean; debug: boolean; clear: boolean }, r } default: { - cmd = `node -r ${resolveCLIPath("/node_modules/@swc-node/register/index")} --watch `; + // tsx wraps node with a CJS/ESM-aware TypeScript loader. 
Needed + // because Node's native --experimental-transform-types forces ESM + // resolution, which breaks legacy analyses that import CJS + // subpaths without a `.js` extension (e.g. "@tago-io/sdk/lib/types"). + cmd = `node ${resolveCLIPath("/node_modules/tsx/dist/cli.mjs")} watch `; if (options.debug) { cmd += "--inspect "; } diff --git a/src/commands/devices/index.ts b/src/commands/devices/index.ts index ecce340..7a02c5b 100644 --- a/src/commands/devices/index.ts +++ b/src/commands/devices/index.ts @@ -142,8 +142,8 @@ Example: "after", ` Example: - $ tagoio device-network 62151835435d540010b768c4 --n 62151835435d540010b768c4 --c 62151835435d540010b768c4 - $ tagoio nc 62151835435d540010b768c4 --n 62151835435d540010b768c4 + $ tagoio device-network 62151835435d540010b768c4 -n 62151835435d540010b768c4 -c 62151835435d540010b768c4 + $ tagoio nc 62151835435d540010b768c4 -n 62151835435d540010b768c4 -c 62151835435d540010b768c4 `, ); diff --git a/src/commands/profile/backup/resources/devices.test.ts b/src/commands/profile/backup/resources/devices.test.ts index 7787f48..ee3ad10 100644 --- a/src/commands/profile/backup/resources/devices.test.ts +++ b/src/commands/profile/backup/resources/devices.test.ts @@ -42,8 +42,9 @@ describe("restoreDevices", () => { const listMock = vi.fn().mockResolvedValue([{ id: "dev-exists" }]); const editMock = vi.fn().mockResolvedValue(undefined); - const createMock = vi.fn().mockResolvedValue(undefined); - const resources = { devices: { list: listMock, edit: editMock, create: createMock } }; + const createMock = vi.fn().mockResolvedValue({ device_id: "dev-new" }); + const paramSetMock = vi.fn().mockResolvedValue(undefined); + const resources = { devices: { list: listMock, edit: editMock, create: createMock, paramSet: paramSetMock } }; const { restoreDevices } = await import("./devices.js"); const promise = restoreDevices(resources as never, "/tmp/extract"); @@ -56,9 +57,7 @@ describe("restoreDevices", () => { }); test("increments failed count 
when create throws", async () => { - readBackupFileMock.mockReturnValue([ - { id: "dev-boom", name: "Boom", network: "n", connector: "c" }, - ]); + readBackupFileMock.mockReturnValue([{ id: "dev-boom", name: "Boom", network: "n", connector: "c" }]); const resources = { devices: { @@ -93,8 +92,10 @@ describe("restoreDevices", () => { ]); selectItemsFromBackupMock.mockResolvedValue([{ id: "dev-1", name: "One", network: "n", connector: "c" }]); - const createMock = vi.fn().mockResolvedValue(undefined); - const resources = { devices: { list: vi.fn().mockResolvedValue([]), create: createMock, edit: vi.fn() } }; + const createMock = vi.fn().mockResolvedValue({ device_id: "dev-1" }); + const resources = { + devices: { list: vi.fn().mockResolvedValue([]), create: createMock, edit: vi.fn(), paramSet: vi.fn() }, + }; const { restoreDevices } = await import("./devices.js"); const promise = restoreDevices(resources as never, "/tmp/extract", true); @@ -103,4 +104,376 @@ describe("restoreDevices", () => { expect(createMock).toHaveBeenCalledTimes(1); expect(result).toEqual({ created: 1, updated: 0, failed: 0 }); }); + + test("restores configuration parameters for created and edited devices, strips server-managed fields, and skips tokens without serie_number", async () => { + const backupDevice = (id: string) => ({ + id, + name: `Dev ${id}`, + network: "n1", + connector: "c1", + created_at: "2026-01-01T00:00:00Z", + updated_at: "2026-01-02T00:00:00Z", + last_input: "2026-01-03T00:00:00Z", + profile: "profile-x", + params: [ + { id: "p1", ref_id: id, key: "k1", value: "v1", sent: false, created_at: "2026-01-01T00:00:00Z", updated_at: "2026-01-01T00:00:00Z" }, + { id: "p2", ref_id: id, key: "k2", value: "v2", sent: false, created_at: "2026-01-01T00:00:00Z", updated_at: "2026-01-01T00:00:00Z" }, + ], + tokens: [{ token: "********-****-****-****-************a888", name: "T1", permission: "full" }], + }); + + readBackupFileMock.mockReturnValue([backupDevice("dev-new"), 
backupDevice("dev-exists")]); + + const listMock = vi.fn().mockResolvedValue([{ id: "dev-exists" }]); + const createMock = vi.fn().mockResolvedValue({ device_id: "dev-new-generated" }); + const editMock = vi.fn().mockResolvedValue(undefined); + const paramSetMock = vi.fn().mockResolvedValue(undefined); + // Edit path fetches the destination device's current params to reconcile + // by key. Fixture returns [] → every backup param is a fresh insert, no + // `id` in the payload. + const paramListMock = vi.fn().mockResolvedValue([]); + const tokenCreateMock = vi.fn(); + // Edit path calls tokenList to look up existing serials; the fixture device + // has no tokens with serie_number anyway, so returning [] keeps the path + // tight while proving tokenCreate is never reached. + const tokenListMock = vi.fn().mockResolvedValue([]); + const resources = { + devices: { + list: listMock, + create: createMock, + edit: editMock, + paramSet: paramSetMock, + paramList: paramListMock, + tokenCreate: tokenCreateMock, + tokenList: tokenListMock, + }, + }; + + const { restoreDevices } = await import("./devices.js"); + const promise = restoreDevices(resources as never, "/tmp/extract"); + await vi.runAllTimersAsync(); + const result = await promise; + + // paramSet runs on both the newly-created device (using the generated id) and the edited one + expect(paramSetMock).toHaveBeenCalledTimes(2); + + // paramList is only consulted on the edit path — the create path has no + // existing params to reconcile. + expect(paramListMock).toHaveBeenCalledTimes(1); + expect(paramListMock).toHaveBeenCalledWith("dev-exists"); + + // Each param payload is trimmed to the fields the API accepts — metadata + // like `ref_id` and the ISO-string timestamps would otherwise be + // rejected with "Expected date, received string". With an empty + // destination param list, no existing `id` can be reused, so both paths + // emit key/value/sent only. 
+    const expectedParamShape = [
+      { key: "k1", value: "v1", sent: false },
+      { key: "k2", value: "v2", sent: false },
+    ];
+    expect(paramSetMock).toHaveBeenCalledWith("dev-new-generated", expectedParamShape);
+    expect(paramSetMock).toHaveBeenCalledWith("dev-exists", expectedParamShape);
+
+    for (const call of paramSetMock.mock.calls) {
+      const paramPayload = call[1] as Array<Record<string, unknown>>;
+      for (const p of paramPayload) {
+        expect(p).not.toHaveProperty("id");
+        expect(p).not.toHaveProperty("ref_id");
+        expect(p).not.toHaveProperty("created_at");
+        expect(p).not.toHaveProperty("updated_at");
+      }
+    }
+
+    // Tokens without a serie_number are ephemeral credentials and are skipped.
+    // (The fixture's sole token has no serie_number.)
+    expect(tokenCreateMock).not.toHaveBeenCalled();
+
+    // Create payload must not carry server-managed fields that the API rejects
+    const createPayload = createMock.mock.calls[0][0];
+    expect(createPayload).not.toHaveProperty("id");
+    expect(createPayload).not.toHaveProperty("created_at");
+    expect(createPayload).not.toHaveProperty("updated_at");
+    expect(createPayload).not.toHaveProperty("last_input");
+    expect(createPayload).not.toHaveProperty("profile");
+    expect(createPayload).not.toHaveProperty("params");
+    expect(createPayload).not.toHaveProperty("tokens");
+
+    expect(result).toEqual({ created: 1, updated: 1, failed: 0 });
+  });
+
+  test("skips paramSet when the backup device has no params", async () => {
+    readBackupFileMock.mockReturnValue([{ id: "dev-new", name: "Bare", network: "n", connector: "c" }]);
+
+    const createMock = vi.fn().mockResolvedValue({ device_id: "dev-new-generated" });
+    const paramSetMock = vi.fn();
+    const resources = {
+      devices: { list: vi.fn().mockResolvedValue([]), create: createMock, edit: vi.fn(), paramSet: paramSetMock },
+    };
+
+    const { restoreDevices } = await import("./devices.js");
+    const promise = restoreDevices(resources as never, "/tmp/extract");
+    await vi.runAllTimersAsync();
+    await promise;
+
+    
expect(paramSetMock).not.toHaveBeenCalled(); + }); + + test("edit path skips backup params whose key already exists on the device", async () => { + // Backup has 3 params; the destination device already has 2 of them + // (by key). Expectation: only the brand-new key is sent to paramSet. + // Existing keys are left untouched — destination values win, matching + // the token restore behavior (no overwrite). Without this filter every + // re-run would duplicate the matching params. + readBackupFileMock.mockReturnValue([ + { + id: "dev-exists", + name: "Dev", + network: "n", + connector: "c", + params: [ + { key: "k_keep", value: "new_keep", sent: false }, + { key: "k_update", value: "new_update", sent: true }, + { key: "k_fresh", value: "fresh_val", sent: false }, + ], + }, + ]); + + const listMock = vi.fn().mockResolvedValue([{ id: "dev-exists" }]); + const editMock = vi.fn().mockResolvedValue(undefined); + const paramSetMock = vi.fn().mockResolvedValue(undefined); + // Destination already has params for k_keep and k_update. k_fresh is + // not yet present and is the only one that will be inserted. 
+ const paramListMock = vi.fn().mockResolvedValue([ + { id: "dst-1", key: "k_keep", value: "old_keep", sent: false }, + { id: "dst-2", key: "k_update", value: "old_update", sent: false }, + { id: "dst-3", key: "unrelated", value: "keep_me", sent: false }, + ]); + + const resources = { + devices: { + list: listMock, + create: vi.fn(), + edit: editMock, + paramSet: paramSetMock, + paramList: paramListMock, + tokenList: vi.fn().mockResolvedValue([]), + }, + }; + + const { restoreDevices } = await import("./devices.js"); + const promise = restoreDevices(resources as never, "/tmp/extract"); + await vi.runAllTimersAsync(); + const result = await promise; + + expect(paramListMock).toHaveBeenCalledWith("dev-exists"); + expect(paramSetMock).toHaveBeenCalledTimes(1); + expect(paramSetMock).toHaveBeenCalledWith("dev-exists", [{ key: "k_fresh", value: "fresh_val", sent: false }]); + expect(result).toEqual({ created: 0, updated: 1, failed: 0 }); + }); + + test("edit path skips paramSet entirely when every backup key is already on the device", async () => { + // All backup keys are already present on the destination → nothing to + // insert. paramSet should not be called at all, and the restore must + // still complete successfully. 
+ readBackupFileMock.mockReturnValue([ + { + id: "dev-exists", + name: "Dev", + network: "n", + connector: "c", + params: [ + { key: "k1", value: "new1", sent: false }, + { key: "k2", value: "new2", sent: false }, + ], + }, + ]); + + const paramSetMock = vi.fn(); + const paramListMock = vi.fn().mockResolvedValue([ + { id: "dst-1", key: "k1", value: "old1", sent: false }, + { id: "dst-2", key: "k2", value: "old2", sent: false }, + ]); + + const resources = { + devices: { + list: vi.fn().mockResolvedValue([{ id: "dev-exists" }]), + create: vi.fn(), + edit: vi.fn().mockResolvedValue(undefined), + paramSet: paramSetMock, + paramList: paramListMock, + tokenList: vi.fn().mockResolvedValue([]), + }, + }; + + const { restoreDevices } = await import("./devices.js"); + const promise = restoreDevices(resources as never, "/tmp/extract"); + await vi.runAllTimersAsync(); + const result = await promise; + + expect(paramListMock).toHaveBeenCalledWith("dev-exists"); + expect(paramSetMock).not.toHaveBeenCalled(); + expect(result).toEqual({ created: 0, updated: 1, failed: 0 }); + }); + + test("edit path skips tokens whose serie_number already exists on the device", async () => { + readBackupFileMock.mockReturnValue([ + { + id: "dev-exists", + name: "Dev", + network: "n", + connector: "c", + tokens: [ + // Already on the device → should be skipped to avoid + // "serial_number already exists" from the API. + { token: "********-a", name: "Already Here", permission: "full", serie_number: "SN-KEEP", expire_time: null }, + // Not on the device → should be (re)created. + { token: "********-b", name: "Brand New", permission: "full", serie_number: "SN-NEW", expire_time: null }, + ], + }, + ]); + + // Device already exists in the destination profile → edit path. 
+ const listMock = vi.fn().mockResolvedValue([{ id: "dev-exists" }]); + const editMock = vi.fn().mockResolvedValue(undefined); + const tokenCreateMock = vi.fn().mockResolvedValue(undefined); + // tokenList returns a token with SN-KEEP — the backup's SN-KEEP should be skipped. + const tokenListMock = vi.fn().mockResolvedValue([ + { serie_number: "SN-KEEP" }, + { serie_number: null }, // defensive: tokens without serial shouldn't affect the filter + ]); + + const resources = { + devices: { + list: listMock, + create: vi.fn(), + edit: editMock, + paramSet: vi.fn(), + tokenCreate: tokenCreateMock, + tokenList: tokenListMock, + }, + }; + + const { restoreDevices } = await import("./devices.js"); + const promise = restoreDevices(resources as never, "/tmp/extract"); + await vi.runAllTimersAsync(); + const result = await promise; + + // tokenList is queried once per device on the edit path + expect(tokenListMock).toHaveBeenCalledWith("dev-exists", expect.objectContaining({ fields: ["serie_number"] })); + + // Only SN-NEW is created; SN-KEEP is skipped because it's already on the device. 
+ expect(tokenCreateMock).toHaveBeenCalledTimes(1); + expect(tokenCreateMock).toHaveBeenCalledWith("dev-exists", expect.objectContaining({ serie_number: "SN-NEW", name: "Brand New" })); + + expect(result).toEqual({ created: 0, updated: 1, failed: 0 }); + }); + + test("create path skips tokenList and creates every token with a serie_number directly", async () => { + readBackupFileMock.mockReturnValue([ + { + id: "dev-new", + name: "Dev", + network: "n", + connector: "c", + tokens: [{ token: "********-a", name: "T1", permission: "full", serie_number: "SN-1", expire_time: null }], + }, + ]); + + const createMock = vi.fn().mockResolvedValue({ device_id: "dev-new-generated" }); + const tokenCreateMock = vi.fn().mockResolvedValue(undefined); + const tokenListMock = vi.fn(); + + const resources = { + devices: { + list: vi.fn().mockResolvedValue([]), + create: createMock, + edit: vi.fn(), + paramSet: vi.fn(), + tokenCreate: tokenCreateMock, + tokenList: tokenListMock, + }, + }; + + const { restoreDevices } = await import("./devices.js"); + const promise = restoreDevices(resources as never, "/tmp/extract"); + await vi.runAllTimersAsync(); + await promise; + + // Brand-new device → no existing tokens possible → skip the tokenList call. 
+ expect(tokenListMock).not.toHaveBeenCalled(); + expect(tokenCreateMock).toHaveBeenCalledWith("dev-new-generated", expect.objectContaining({ serie_number: "SN-1" })); + }); + + test("recreates tokens with serie_number, skips those without, and tolerates per-token failures", async () => { + readBackupFileMock.mockReturnValue([ + { + id: "dev-new", + name: "Dev", + network: "n", + connector: "c", + tokens: [ + // Kept — has a serie_number + { token: "********-a", name: "Serial Token", permission: "full", serie_number: "SN-1", expire_time: null, created_at: "2026-01-01T00:00:00Z" }, + // Skipped — no serie_number (ephemeral credential) + { token: "********-b", name: "Ephemeral", permission: "read", serie_number: null, expire_time: null, created_at: "2026-01-01T00:00:00Z" }, + // Kept — failure on this one should not abort the next token + { token: "********-c", name: "Boom", permission: "full", serie_number: "SN-2", expire_time: null, created_at: "2026-01-01T00:00:00Z" }, + // Kept — should still run even after the failure above + { token: "********-d", name: "Survivor", permission: "full", serie_number: "SN-3", expire_time: null, created_at: "2026-01-01T00:00:00Z" }, + ], + }, + ]); + + const createMock = vi.fn().mockResolvedValue({ device_id: "dev-new-generated" }); + const tokenCreateMock = vi + .fn() + // First token: ok + .mockResolvedValueOnce(undefined) + // Second kept token: throws — but the loop continues + .mockRejectedValueOnce(new Error("token create failed")) + // Third kept token: ok + .mockResolvedValueOnce(undefined); + + const resources = { + devices: { + list: vi.fn().mockResolvedValue([]), + create: createMock, + edit: vi.fn(), + paramSet: vi.fn(), + tokenCreate: tokenCreateMock, + }, + }; + + // Silence the per-token error log so it doesn't pollute test output + const errSpy = vi.spyOn(console, "error").mockImplementation(() => undefined); + + const { restoreDevices } = await import("./devices.js"); + const promise = restoreDevices(resources as 
never, "/tmp/extract"); + await vi.runAllTimersAsync(); + const result = await promise; + + // 3 of the 4 tokens have a serie_number → 3 calls; the second one rejected. + expect(tokenCreateMock).toHaveBeenCalledTimes(3); + + // Payload is trimmed to what tokenCreate accepts — masked `token` and + // `created_at` from the backup are dropped. + expect(tokenCreateMock).toHaveBeenNthCalledWith(1, "dev-new-generated", { + name: "Serial Token", + permission: "full", + serie_number: "SN-1", + expire_time: undefined, + }); + expect(tokenCreateMock).toHaveBeenNthCalledWith(3, "dev-new-generated", { + name: "Survivor", + permission: "full", + serie_number: "SN-3", + expire_time: undefined, + }); + + // Device creation is still counted as successful despite the failed token — + // token failures are logged, not fatal. + expect(result).toEqual({ created: 1, updated: 0, failed: 0 }); + expect(errSpy).toHaveBeenCalledWith(expect.stringContaining('Failed to recreate token "Boom"')); + errSpy.mockRestore(); + }); }); diff --git a/src/commands/profile/backup/resources/devices.ts b/src/commands/profile/backup/resources/devices.ts index af18c2e..eec6dd0 100644 --- a/src/commands/profile/backup/resources/devices.ts +++ b/src/commands/profile/backup/resources/devices.ts @@ -1,4 +1,4 @@ -import { DeviceInfo, Resources } from "@tago-io/sdk"; +import { ConfigurationParams, DeviceInfo, Resources, TokenData } from "@tago-io/sdk"; import { queue } from "async"; import ora, { type Ora } from "ora"; @@ -21,18 +21,109 @@ async function fetchExistingDeviceIds(resources: Resources): Promise return new Set(devices.map((d) => d.id)); } +/** + * Strips fields that the TagoIO API rejects or manages on its own (IDs,timestamps, tokens, + * and config parameters — the last two are restored via separate endpoints). + * Returns the subset safe to send to `resources.devices.create` / `resources.devices.edit`. 
+ */
+function stripDeviceFields(device: DeviceInfo) {
+  const {
+    id: _id,
+    created_at: _created_at,
+    updated_at: _updated_at,
+    last_input: _last_input,
+    profile: _profile,
+    params: _params,
+    tokens: _tokens,
+    ...deviceData
+  } = device as DeviceInfo & { params?: ConfigurationParams[]; tokens?: TokenData[] };
+  return deviceData;
+}
+
+/**
+ * Restores configuration parameters for a device using the dedicated `paramSet` endpoint.
+ *
+ * On the edit path (`deviceExists`), the device's current params are fetched
+ * and any backup param whose `key` is already present on the destination is
+ * skipped — `paramSet` inserts a new row, so without this filter every re-run
+ * would duplicate the matching params. Existing values are left untouched.
+ */
+async function restoreDeviceParams(resources: Resources, deviceId: string, device: DeviceInfo & { params?: ConfigurationParams[] }, deviceExists: boolean) {
+  const params = device.params;
+  if (!params || params.length === 0) {
+    return;
+  }
+
+  let existingKeys = new Set<string>();
+  if (deviceExists) {
+    const currentParams = await resources.devices.paramList(deviceId);
+    existingKeys = new Set(currentParams.map((p) => p.key));
+  }
+
+  const payload = params.filter((p) => !existingKeys.has(p.key)).map(({ key, value, sent }) => ({ key, value, sent }));
+  if (payload.length === 0) {
+    return;
+  }
+  await resources.devices.paramSet(deviceId, payload);
+}
+
+/**
+ * Recreates device tokens from the backup using `tokenCreate`. Only tokens
+ * that carry a `serie_number` are recreated — the serial number is what
+ * identifies the physical device and is the reason to preserve the token at
+ * all. Tokens without a serie_number are ephemeral credentials and are
+ * intentionally skipped.
+ *
+ * When `deviceExists` is true (edit path), the current tokens on the
+ * destination device are fetched first. Any backup token whose
+ * `serie_number` is already present on the device is skipped. 
+ *
+ * The token's actual value cannot be restored: the backup stores it masked
+ * (e.g. `********-****-****-****-************a888`), so the new token has
+ * a different value. Integrations relying on the old token value must be
+ * updated.
+ */
+async function restoreDeviceTokens(resources: Resources, deviceId: string, device: DeviceInfo & { tokens?: TokenData[] }, deviceExists: boolean) {
+  const tokens = device.tokens;
+  if (!tokens || tokens.length === 0) {
+    return;
+  }
+
+  let existingSerials = new Set<string>();
+  if (deviceExists) {
+    const currentTokens = await resources.devices.tokenList(deviceId, { amount: 10000, fields: ["serie_number"] });
+    existingSerials = new Set(currentTokens.map((t) => t.serie_number).filter((s): s is string => Boolean(s)));
+  }
+
+  for (const token of tokens) {
+    if (!token.serie_number) {
+      continue;
+    }
+    if (existingSerials.has(token.serie_number)) {
+      continue;
+    }
+
+    try {
+      await resources.devices.tokenCreate(deviceId, {
+        name: token.name,
+        permission: token.permission,
+        serie_number: token.serie_number,
+        expire_time: token.expire_time || undefined,
+      });
+    } catch (error) {
+      console.error(`\nFailed to recreate token "${token.name}" for device "${device.name}": ${getErrorMessage(error)}`);
+    }
+  }
+}
+
 /** Processes a single device creation task. */
-async function processCreateTask(
-  resources: Resources,
-  task: RestoreTask,
-  result: RestoreResult,
-  spinner: Ora
-): Promise<void> {
+async function processCreateTask(resources: Resources, task: RestoreTask, result: RestoreResult, spinner: Ora): Promise<void> {
   const { device } = task;
   try {
-    const { ...deviceData } = device;
-    await resources.devices.create(deviceData);
+    const { device_id } = await resources.devices.create(stripDeviceFields(device));
+    await restoreDeviceParams(resources, device_id, device, false);
+    await restoreDeviceTokens(resources, device_id, device, false);
     result.created++;
     spinner.text = `Restoring devices... 
(${result.created} created, ${result.updated} updated)`;
     await new Promise((resolve) => setTimeout(resolve, DELAY_BETWEEN_REQUESTS_MS));
@@ -43,17 +134,14 @@
 }
 
 /** Processes a single device edit task. */
-async function processEditTask(
-  resources: Resources,
-  task: RestoreTask,
-  result: RestoreResult,
-  spinner: Ora
-): Promise<void> {
+async function processEditTask(resources: Resources, task: RestoreTask, result: RestoreResult, spinner: Ora): Promise<void> {
   const { device } = task;
   try {
-    const { id, network: _network, connector: _connector, updated_at: _updated_at, ...deviceData } = device;
-    await resources.devices.edit(id, deviceData);
+    const { network: _network, connector: _connector, ...deviceData } = stripDeviceFields(device);
+    await resources.devices.edit(device.id, deviceData);
+    await restoreDeviceParams(resources, device.id, device, true);
+    await restoreDeviceTokens(resources, device.id, device, true);
     result.updated++;
     spinner.text = `Restoring devices... (${result.created} created, ${result.updated} updated)`;
     await new Promise((resolve) => setTimeout(resolve, DELAY_BETWEEN_REQUESTS_MS));