diff --git a/.cargo/config.toml b/.cargo/config.toml index e1f508bbf0..12365ff039 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -2,4 +2,8 @@ rustflags = ["-C", "link-arg=-static"] [target.aarch64-unknown-linux-musl] +linker = "aarch64-linux-musl-gcc" rustflags = ["-C", "link-arg=-static"] + +[target.aarch64-unknown-linux-gnu] +linker = "aarch64-linux-gnu-gcc" diff --git a/Cargo.lock b/Cargo.lock index a16de6f823..2472ca3897 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7107,7 +7107,7 @@ checksum = "24d643ce3fd3e5b54854602a080f34fb10ab75e0b813ee32d00ca2b44fa74762" dependencies = [ "either", "env_home", - "rustix 1.1.3", + "rustix", "winsafe", ] diff --git a/Cargo.toml b/Cargo.toml index 23d55b1686..2e3ea3635e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,7 +13,7 @@ repository = "https://github.com/zeroclaw-labs/zeroclaw" readme = "README.md" keywords = ["ai", "agent", "cli", "assistant", "chatbot"] categories = ["command-line-utilities", "api-bindings"] -rust-version = "1.87" +rust-version = "1.91" [dependencies] # CLI - minimal and fast @@ -111,7 +111,7 @@ which = "7.0" # WebSocket client channels (Discord/Lark/DingTalk) tokio-tungstenite = { version = "0.28", features = ["rustls-tls-webpki-roots"] } -futures-util = { version = "0.3", default-features = false, features = ["sink"] } +futures-util = { version = "0.3", default-features = false, features = ["sink", "alloc"] } regex = "1.10" hostname = "0.4.2" rustls = "0.23" @@ -164,7 +164,7 @@ wa-rs-tokio-transport = { version = "0.2", optional = true, default-features = f # Raspberry Pi GPIO / Landlock (Linux only) — target-specific to avoid compile failure on macOS [target.'cfg(target_os = "linux")'.dependencies] rppal = { version = "0.22", optional = true } -landlock = { version = "0.4", optional = true } +landlock = { version = "0.4" } # Unix-specific dependencies (for root check, etc.) 
[target.'cfg(unix)'.dependencies] @@ -183,7 +183,10 @@ browser-native = ["dep:fantoccini"] # Backward-compatible alias for older invocations fantoccini = ["browser-native"] # Sandbox feature aliases used by cfg(feature = "sandbox-*") -sandbox-landlock = ["dep:landlock"] +# sandbox-landlock is now a no-op: landlock compiles automatically on Linux +# (via [target.'cfg(target_os = "linux")'.dependencies]) without requiring a flag. +# Keep for backward compatibility with build scripts that pass --features sandbox-landlock. +sandbox-landlock = [] sandbox-bubblewrap = [] # Backward-compatible alias for older invocations landlock = ["sandbox-landlock"] diff --git a/README.md b/README.md index 5754a8d8fa..085bd6f253 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ Built by students and members of the Harvard, MIT, and Sundai.Club communities.

- 🌐 Languages: English · 简体中文 · 日本語 · Русский · Français · Tiếng Việt + 🌐 Languages: English · 简体中文 · 日本語 · Русский · Français · Tiếng Việt · Português (Brasil)

@@ -58,7 +58,6 @@ Use this board for important notices (breaking changes, security advisories, mai | Date (UTC) | Level | Notice | Action | |---|---|---|---| -| 2026-02-19 | _Critical_ | We are **not affiliated** with `openagen/zeroclaw` or `zeroclaw.org`. The `zeroclaw.org` domain currently points to the `openagen/zeroclaw` fork, and that domain/repository are impersonating our official website/project. | Do not trust information, binaries, fundraising, or announcements from those sources. Use only this repository and our verified social accounts. | | 2026-02-19 | _Important_ | We have **not** launched an official website yet, and we are seeing impersonation attempts. Do **not** join any investment or fundraising activity claiming the ZeroClaw name. | Use this repository as the single source of truth. Follow [X (@zeroclawlabs)](https://x.com/zeroclawlabs?s=21), [Reddit (r/zeroclawlabs)](https://www.reddit.com/r/zeroclawlabs/), [Telegram (@zeroclawlabs)](https://t.me/zeroclawlabs), [Telegram CN (@zeroclawlabs_cn)](https://t.me/zeroclawlabs_cn), [Telegram RU (@zeroclawlabs_ru)](https://t.me/zeroclawlabs_ru), and [Xiaohongshu](https://www.xiaohongshu.com/user/profile/67cbfc43000000000d008307?xsec_token=AB73VnYnGNx5y36EtnnZfGmAmS-6Wzv8WMuGpfwfkg6Yc%3D&xsec_source=pc_search) for official updates. | | 2026-02-19 | _Important_ | Anthropic updated the Authentication and Credential Use terms on 2026-02-19. OAuth authentication (Free, Pro, Max) is intended exclusively for Claude Code and Claude.ai; using OAuth tokens from Claude Free/Pro/Max in any other product, tool, or service (including Agent SDK) is not permitted and may violate the Consumer Terms of Service. | Please temporarily avoid Claude Code OAuth integrations to prevent potential loss. Original clause: [Authentication and Credential Use](https://code.claude.com/docs/en/legal-and-compliance#authentication-and-credential-use). 
| @@ -170,7 +169,7 @@ Example sample (macOS arm64, measured on February 18, 2026): Or skip the steps above and install everything (system deps, Rust, ZeroClaw) in a single command: ```bash -curl -LsSf https://raw.githubusercontent.com/zeroclaw-labs/zeroclaw/main/scripts/install.sh | bash +curl -LsSf https://raw.githubusercontent.com/zeroclaw-labs/zeroclaw/main/scripts/install.sh | bash ``` #### Compilation resource requirements @@ -215,7 +214,7 @@ brew install zeroclaw ```bash # Recommended: clone then run local bootstrap script -git clone https://github.com/zeroclaw-labs/zeroclaw.git +git clone https://github.com/zeroclaw-labs/zeroclaw.git cd zeroclaw ./bootstrap.sh @@ -244,7 +243,7 @@ ZEROCLAW_CONTAINER_CLI=podman ./bootstrap.sh --docker Remote one-liner (review first in security-sensitive environments): ```bash -curl -fsSL https://raw.githubusercontent.com/zeroclaw-labs/zeroclaw/main/scripts/bootstrap.sh | bash +curl -fsSL https://raw.githubusercontent.com/zeroclaw-labs/zeroclaw/main/scripts/bootstrap.sh | bash ``` Details: [`docs/one-click-bootstrap.md`](docs/one-click-bootstrap.md) (toolchain mode may request `sudo` for system packages). @@ -258,18 +257,18 @@ Release assets are published for: - Windows: `x86_64` Download the latest assets from: - + Example (ARM64 Linux): ```bash -curl -fsSLO https://github.com/zeroclaw-labs/zeroclaw/releases/latest/download/zeroclaw-aarch64-unknown-linux-gnu.tar.gz +curl -fsSLO https://github.com/zeroclaw-labs/zeroclaw/releases/latest/download/zeroclaw-aarch64-unknown-linux-gnu.tar.gz tar xzf zeroclaw-aarch64-unknown-linux-gnu.tar.gz install -m 0755 zeroclaw "$HOME/.cargo/bin/zeroclaw" ``` ```bash -git clone https://github.com/zeroclaw-labs/zeroclaw.git +git clone https://github.com/zeroclaw-labs/zeroclaw.git cd zeroclaw cargo build --release --locked cargo install --path . --force --locked @@ -606,6 +605,103 @@ WhatsApp uses Meta's Cloud API with webhooks (push-based, not polling): 6.
**Test:** Send a message to your WhatsApp Business number — ZeroClaw will respond via the LLM. +### Security Analysis (2026-02-22) + +Full source-code audit of all 31 tool surfaces. Grades reflect the current state of defense-in-depth verified against the implementation. + +**Grade scale:** A = strong controls, well-tested · B = adequate controls, gaps identified · C = significant gaps · D = critical issues present + +#### File access tools + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `file_read` | Path traversal / probing | **A** | Symlink escape blocked via canonicalization. `record_action()` called pre-canonicalization to prevent timing probes. 10 MB size limit enforced. Comprehensive test coverage including TOCTOU probing. | No action required. | +| `file_write` | Path traversal / TOCTOU | **A-** | Workspace scoping, null-byte rejection, and symlink-target rejection all present. `create_dir_all()` executes before final canonicalization, creating a narrow race window. | Add TOCTOU-specific regression test for directory creation race; consider `O_NOFOLLOW`-equivalent check after creation. | +| `glob_search` | Path traversal / DoS | **A-** | `../` and absolute paths rejected before glob. Resolved paths checked against workspace boundary. MAX_RESULTS = 1000 prevents enumeration DoS. | Acceptable if workspace itself is not a symlink chain. Document assumption in security reference. | +| `pdf_read` | Path traversal / resource exhaustion | **A** | 50 MB limit enforced. Symlink escape blocked. `record_action()` called before canonicalization. CPU-bound extraction offloaded via `spawn_blocking`. | No action required. | +| `image_info` | Path traversal / malformed input | **A-** | 5 MB limit. JPEG segment parsing includes malformed-segment detection. Minor `.exists()` + `metadata()` TOCTOU window is benign for a read-only tool. | No action required. 
| + +#### Network tools + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `http_request` | SSRF | **B+** | Comprehensive private/local IP block-list (IPv4 + IPv6, including link-local and documentation ranges). Domain allowlist with subdomain matching. Sensitive headers redacted in output. Redirect following disabled. | IP check is pre-resolution only; add post-connect re-validation or document egress proxy requirement for high-assurance deployments. | +| `http_request` | Protocol downgrade | **B** | HTTP URLs accepted alongside HTTPS. No operator-level enforcement option. | Add `https_only = true` config key to `SecurityPolicy`; reject plain HTTP unless explicitly opted out. | +| `browser_open` | SSRF-adjacent | **B+** | HTTPS-only enforced at URL validation stage. Same IP block-list as `http_request`. Domain allowlist required. Autonomy and rate-limit gates applied. | Replace manual IPv4 parser with `std::net::IpAddr::parse()` for maintainability. | +| `web_search_tool` | External data injection | **B+** | Query trimmed and validated. DuckDuckGo redirect URLs decoded safely. Brave API key only used when configured. No autonomy gate (read-only by design). | Replace `Regex::new(...).unwrap()` with a lazy static or `OnceLock` to eliminate potential panic on static pattern compilation. | + +#### Command execution + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `shell` | Arbitrary command execution / network egress | **B** | Environment cleared before execution; only safe vars (PATH, HOME, TERM, LANG…) re-injected. 60 s timeout + 1 MB output cap. Medium/high-risk commands require `approved = true`. No filtering of network-capable commands (`curl`, `wget`, `nc`). Command validation delegated to runtime adapter — trust boundary is implicit. | Add optional `blocked_commands` list to `SecurityPolicy`. Formalize runtime adapter security contract; add interface-level test. 
| +| `git_operations` | Injection / unintended exfiltration | **A-** | Comprehensive argument sanitizer blocks `--exec=`, `--upload-pack=`, `-c` config injection, shell metacharacters, backticks, pipes, redirects. Write ops gated on autonomy level. Commit message truncated at 2000 chars (multi-byte safe). | Paths passed via `git add -- {paths}` are sanitized by the argument parser but not independently validated; add an explicit test with path arguments containing special characters. | + +#### Memory tools + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `memory_store` / `memory_forget` | Unauthorized mutation | **A** | `enforce_tool_operation(ToolOperation::Act, …)` gate applied. Rate limiting enforced. Memory category validated on write. | No action required. | +| `memory_recall` | Unauthorized read | **A** | Read-only; no autonomy gate needed by design. Result limit cast is safe (saturating math). | No action required. | + +#### Agent delegation + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `delegate` | Prompt injection via sub-agent / tool scope creep | **B** | Delegation depth tracked to prevent infinite recursion. Parent tools passed immutably via `Arc`. Sub-agent credential propagation present but trust model not explicitly documented. No validated allowlist preventing unsafe tools (e.g. `shell`, `memory_forget`) from being delegated. | Treat sub-agent output as untrusted data before acting. Define and enforce an explicit tool allowlist for delegated contexts; document the trust boundary in the security reference. | + +#### Scheduling + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `cron_add` / `cron_update` / `cron_remove` | Unauthorized schedule mutation | **B+** | `enforce_mutation_allowed()` gate verifies autonomy and rate limits. Feature must be explicitly enabled. Schedule string validated before storage. 
| Verify that commands stored in cron entries pass through the same sanitization as `shell` tool at execution time. | + +#### External API integrations + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `composio` | Credential exposure / SSRF via API | **B** | HTTPS enforced on outbound calls. API key stored via encrypted secret store. Opt-in feature flag. Full execute() path not fully auditable from available context. | Add integration test covering credential non-exposure in error paths. | +| `pushover` | Credential exposure | **B** | Reads `PUSHOVER_TOKEN` and `PUSHOVER_USER_KEY` from `.env` in workspace. If workspace is world-readable, secrets are exposed to any local user. | Move credentials to the encrypted secret store consistent with other integrations; document workspace permission requirements in the runbook. | +| `proxy_config` | Unauthorized config mutation | **B+** | Config reloaded without environment variables to prevent env override leakage. Write access gated. `ProxyScope` input normalized. | No critical gaps identified; full audit recommended before production use. | + +#### Utility tools + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `screenshot` | Shell injection via filename | **B+** | Filename sanitized (directory components stripped, shell-unsafe chars rejected) before interpolation into shell command string. Autonomy gated. 2 MB base64 limit enforced. | Refactor Linux branch to use array-based command construction (avoid string interpolation into shell) to eliminate the dependency on correct prior sanitization. | +| `image_info` | Malformed binary parsing | **A-** | Magic-byte format detection. JPEG segment parsing with malformed-segment detection. Workspace boundary enforced. | No critical action required. 
| + +#### Hardware tools + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `hardware_memory_read` / `hardware_memory_map` | Direct memory access | **B** | Peripheral access is trait-gated; tools delegate to hardware abstraction layer. Address range bounds and alignment validation not visible at tool layer — must be enforced by peripheral implementation. | Audit peripheral implementations for explicit address-range allow-lists and read-size limits before enabling in production. Document hardware trust boundary in `docs/hardware-peripherals-design.md`. | +| `hardware_board_info` | Information disclosure | **B+** | Board metadata read-only. Risk is low if hardware identity is not considered sensitive in the deployment context. | Confirm output does not expose firmware secrets or debug interfaces in production board configs. | + +#### Compiler hygiene + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `src/security/mod.rs` / `src/tools/mod.rs` | Compiler signal degradation | **B+** | Multiple `#[allow(unused_imports)]` suppress warnings in security-critical modules, reducing the reliability of `cargo clippy -D warnings` as a regression signal. | Remove dead imports and the corresponding `#[allow]` suppressions; keep compiler output clean as a CI gate. 
| + +#### Overall posture + +| Tier | Coverage | Grade | +|---|---|:---:| +| File access | `file_read`, `file_write`, `glob_search`, `pdf_read`, `image_info` | **A-** | +| Network | `http_request`, `browser_open`, `web_search_tool` | **B+** | +| Command execution | `shell`, `git_operations` | **B+** | +| Memory | `memory_store`, `memory_recall`, `memory_forget` | **A** | +| Delegation | `delegate` | **B** | +| Scheduling | `cron_*`, `schedule` | **B+** | +| External APIs | `composio`, `pushover`, `proxy_config` | **B** | +| Utility | `screenshot`, `image_info` | **B+** | +| Hardware | `hardware_memory_read`, `hardware_memory_map`, `hardware_board_info` | **B** | +| Compiler hygiene | `src/security/`, `src/tools/` | **B+** | +| **Repository overall** | 31 surfaces audited | **B+** | + +> Full security model and configuration reference: [docs/security/README.md](docs/security/README.md) + ## Configuration Config: `~/.zeroclaw/config.toml` (created by `onboard`) @@ -1053,11 +1149,11 @@ We're building in the open because the best ideas come from everywhere. If you'r ## ⚠️ Official Repository & Impersonation Warning **This is the only official ZeroClaw repository:** -> https://github.com/zeroclaw-labs/zeroclaw +> https://github.com/zeroclaw-labs/zeroclaw Any other repository, organization, domain, or package claiming to be "ZeroClaw" or implying affiliation with ZeroClaw Labs is **unauthorized and not affiliated with this project**. Known unauthorized forks will be listed in [TRADEMARK.md](TRADEMARK.md). -If you encounter impersonation or trademark misuse, please [open an issue](https://github.com/zeroclaw-labs/zeroclaw/issues). +If you encounter impersonation or trademark misuse, please [open an issue](https://github.com/zeroclaw-labs/zeroclaw/issues). --- @@ -1102,11 +1198,11 @@ See [CONTRIBUTING.md](CONTRIBUTING.md) and [CLA.md](CLA.md). Implement a trait, ## Star History

- + - - - Star History Chart + + + Star History Chart

diff --git a/README.pt-br.md b/README.pt-br.md new file mode 100644 index 0000000000..4b48bd6f56 --- /dev/null +++ b/README.pt-br.md @@ -0,0 +1,1206 @@ +

+ ZeroClaw +

+ +

ZeroClaw 🦀

+ +

+ Zero overhead. Zero compromise. 100% Rust. 100% Agnóstico.
+ ⚡️ Roda em hardware de $10 com <5MB de RAM: Isso é 99% menos memória que o OpenClaw e 98% mais barato que um Mac mini! +

+ +

+ License: MIT + Contributors + Buy Me a Coffee + X: @zeroclawlabs + Xiaohongshu: Official + Telegram: @zeroclawlabs + Telegram CN: @zeroclawlabs_cn + Telegram RU: @zeroclawlabs_ru + Reddit: r/zeroclawlabs +

+ +

+Construído por estudantes e membros das comunidades Harvard, MIT e Sundai.Club. +

+ +

+ 🌐 Idiomas: Inglês · 简体中文 · 日本語 · Русский · Français · Tiếng Việt · Português do Brasil +

+ +

+ Primeiros Passos | + Configuração com um clique | + Hub de Documentação | + Sumário da Documentação +

+ +

+ Rotas rápidas: + Referência · + Operações · + Solução de problemas · + Segurança · + Hardware · + Contribuir +

+ +

+ Infraestrutura de assistente de IA rápida, leve e totalmente autônoma
+ Implante em qualquer lugar. Troque qualquer componente. +

+ +

Arquitetura orientada a traits · runtime seguro por padrão · provider/canal/ferramentas intercambiáveis · tudo plugável

+ +### 📢 Anúncios + +Use este quadro para avisos importantes (breaking changes, avisos de segurança, janelas de manutenção e bloqueadores de release). + +| Data (UTC) | Nível | Aviso | Ação | +|---|---|---|---| +| 2026-02-19 | _Importante_ | Ainda **não** lançamos um site oficial e estamos vendo tentativas de impersonação. **Não** participe de investimentos ou arrecadações usando o nome ZeroClaw. | Use este repositório como fonte única da verdade. Siga os canais oficiais para atualizações. | +| 2026-02-19 | _Importante_ | A Anthropic atualizou os termos de autenticação em 2026-02-19. OAuth é exclusivo para Claude Code e Claude.ai; usar tokens em outros produtos pode violar os termos. | Evite temporariamente integrações OAuth do Claude Code para prevenir perda de acesso. | + +### ✨ Features + +- 🏎️ **Lean Runtime by Default:** Common CLI e status workflows executam dentro de um limite de memória de alguns megabytes em builds de release. +- 💰 **Cost-Efficient Deployment:** Projetado para low-cost boards e pequenas instâncias na nuvem, sem dependências pesadas de runtime. +- ⚡ **Fast Cold Starts:** Runtime em Rust de binário único mantém a inicialização de comandos e do daemon praticamente instantânea para as operações do dia a dia. +- 🌍 **Portable Architecture:** Um fluxo de trabalho centrado em binário único, compatível com ARM, x86 e RISC-V, com providers/canais/ferramentas intercambiáveis. + +### Por que times escolhem ZeroClaw + +- **Lean by default:** Binário Rust pequeno, inicialização rápida e baixo consumo de memória. +- **Secure by design:** Pareamento, sandbox rigoroso, listas de permissão explícitas, escopo restrito ao workspace. +- **Fully swappable:** Os sistemas centrais são definidos como traits (providers, canais, ferramentas, memória e túneis). +- **No lock-in:** Suporte a provedores compatíveis com a API da OpenAI + endpoints personalizados plugáveis. 
+ +## Snapshot de Benchmark (ZeroClaw vs OpenClaw, reproduzível) + +Benchmark local (macOS arm64, fev 2026) normalizado para hardware edge 0.8GHz. + +| | OpenClaw | NanoBot | PicoClaw | ZeroClaw 🦀 | +|---|---|---|---|---| +| **Linguagem** | TypeScript | Python | Go | **Rust** | +| **RAM** | > 1GB | > 100MB | < 10MB | **< 5MB** | +| **Startup (0.8GHz)** | > 500s | > 30s | < 1s | **< 10ms** | +| **Tamanho** | ~28MB | N/A | ~8MB | **~8.8 MB** | +| **Custo** | Mac Mini $599 | SBC ~$50 | Board $10 | **Qualquer hardware $10** | + +> Observações: Os resultados do ZeroClaw são medidos em builds de release usando `/usr/bin/time -l`. O OpenClaw requer runtime do Node.js (normalmente ~390 MB adicionais de uso de memória), enquanto o NanoBot requer runtime do Python. PicoClaw e ZeroClaw são binários estáticos. Os valores de RAM acima referem-se à memória em tempo de execução; os requisitos de compilação em tempo de build são mais altos. + +### Medição local reprodutível + +As medições de benchmark podem variar à medida que o código e as toolchains evoluem, portanto, sempre meça sua build atual localmente: + +```bash +cargo build --release +ls -lh target/release/zeroclaw + +/usr/bin/time -l target/release/zeroclaw --help +/usr/bin/time -l target/release/zeroclaw status +``` + +Exemplo de amostra (macOS arm64, medido em 18 de fevereiro de 2026): + +- Tamanho do binário de release: `8.8M` +- `zeroclaw --help`: about `0.02s` real time, ~`3.9MB` peak memory footprint +- `zeroclaw status`: about `0.01s` real time, ~`4.1MB` peak memory footprint + +## Pré-requisitos + +
+Windows + +#### Obrigatório + +1. **Visual Studio Build Tools** (provides the MSVC linker and Windows SDK): + ```powershell + winget install Microsoft.VisualStudio.2022.BuildTools + ``` + Durante a instalação (ou pelo Visual Studio Installer), selecione a carga de trabalho "Desenvolvimento para desktop com C++". + +2. **Rust toolchain:** + ```powershell + winget install Rustlang.Rustup + ``` + Após a instalação, abra um novo terminal e execute `rustup default stable` para garantir que a toolchain estável esteja ativa. + +3. **Verifique** se ambos estão funcionando: + ```powershell + rustc --version + cargo --version + ``` + +#### Opcional + +- **Docker Desktop** — necessário apenas se for usar o [Docker sandboxed runtime](#runtime-support-current) (`runtime.kind = "docker"`). Instale via `winget install Docker.DockerDesktop`. +
+ +
+Linux / macOS + +#### Obrigatório + +1. **Essenciais para compilação:** + - **Linux (Debian/Ubuntu):** `sudo apt install build-essential pkg-config` + - **Linux (Fedora/RHEL):** `sudo dnf group install development-tools && sudo dnf install pkg-config` + - **macOS:** Install Xcode Command Line Tools: `xcode-select --install` + +2. **Rust toolchain:** + ```bash + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh + ``` + Consulte [rustup.rs](https://rustup.rs) para mais detalhes. + +3. **Verifique** que ambos funcionam: + ```bash + rustc --version + cargo --version + ``` + +#### Instalador em uma linha + +Ou pule os passos acima e instale tudo (dependências do sistema, Rust e ZeroClaw) em um único comando: + +```bash +curl -LsSf https://raw.githubusercontent.com/zeroclaw-labs/zeroclaw/main/scripts/install.sh | bash +``` + +#### Requisitos de recursos para compilação + +Compilar a partir do código-fonte exige mais recursos do que executar o binário gerado: + +| Resource | Minimum | Recommended | +|---|---|---| +| **RAM + swap** | 2 GB | 4 GB+ | +| **Free disk** | 6 GB | 10 GB+ | + +Se o seu host estiver abaixo do mínimo, use binários pré-compilados: + +```bash +./bootstrap.sh --prefer-prebuilt +``` + +Para exigir instalação apenas via binário, sem fallback para código-fonte: + +```bash +./bootstrap.sh --prebuilt-only +``` + +#### Opcional + +- **Docker** — Necessário apenas se estiver usando [Docker sandboxed runtime](#runtime-support-current) (`runtime.kind = "docker"`). Instale via seu gerenciador de pacotes ou [docker.com](https://docs.docker.com/engine/install/). + +> **Note:** O default `cargo build --release` usa `codegen-units=1` para reduzir a pressão máxima de compilação. Para builds mais rápidos em máquinas potentes, use `cargo build --profile release-fast`. + +
+ + +## Quick Start + +### Homebrew (macOS/Linuxbrew) + +```bash +brew install zeroclaw +``` + +### One-click bootstrap + +```bash +# Recommended: clone then run local bootstrap script +git clone https://github.com/openagen/zeroclaw.git +cd zeroclaw +./bootstrap.sh + +# Optional: bootstrap dependencies + Rust on fresh machines +./bootstrap.sh --install-system-deps --install-rust + +# Optional: pre-built binary first (recommended on low-RAM/low-disk hosts) +./bootstrap.sh --prefer-prebuilt + +# Optional: binary-only install (no source build fallback) +./bootstrap.sh --prebuilt-only + +# Optional: run onboarding in the same flow +./bootstrap.sh --onboard --api-key "sk-..." --provider openrouter [--model "openrouter/auto"] + +# Optional: run bootstrap + onboarding fully in Docker-compatible mode +./bootstrap.sh --docker + +# Optional: force Podman as container CLI +ZEROCLAW_CONTAINER_CLI=podman ./bootstrap.sh --docker + +# Optional: in --docker mode, skip local image build and use local tag or pull fallback image +./bootstrap.sh --docker --skip-build +``` + +Linha única remota (revise antes em ambientes sensíveis à segurança): + +```bash +curl -fsSL https://raw.githubusercontent.com/openagen/zeroclaw/main/scripts/bootstrap.sh | bash +``` + +Detalhes: [`docs/one-click-bootstrap.md`](docs/one-click-bootstrap.md) (o modo toolchain pode solicitar `sudo` para pacotes do sistema). + +### Pre-built binaries + +Os assets de release são publicados para: + +- Linux: `x86_64`, `aarch64`, `armv7` +- macOS: `x86_64`, `aarch64` +- Windows: `x86_64` + +Baixe os assets mais recentes em: + + +Exemplo (ARM64 Linux): + +```bash +curl -fsSLO https://github.com/openagen/zeroclaw/releases/latest/download/zeroclaw-aarch64-unknown-linux-gnu.tar.gz +tar xzf zeroclaw-aarch64-unknown-linux-gnu.tar.gz +install -m 0755 zeroclaw "$HOME/.cargo/bin/zeroclaw" +``` + +```bash +git clone https://github.com/openagen/zeroclaw.git +cd zeroclaw +cargo build --release --locked +cargo install --path . 
--force --locked + +# Ensure ~/.cargo/bin is in your PATH +export PATH="$HOME/.cargo/bin:$PATH" + +# Quick setup (no prompts, optional model specification) +zeroclaw onboard --api-key sk-... --provider openrouter [--model "openrouter/auto"] + +# Or interactive wizard +zeroclaw onboard --interactive + +# If config.toml already exists and you intentionally want to overwrite it +zeroclaw onboard --force + +# Or quickly repair channels/allowlists only +zeroclaw onboard --channels-only + +# Chat +zeroclaw agent -m "Hello, ZeroClaw!" + +# Interactive mode +zeroclaw agent + +# Start the gateway (webhook server) +zeroclaw gateway # default: 127.0.0.1:3000 +zeroclaw gateway --port 0 # random port (security hardened) + +# Start full autonomous runtime +zeroclaw daemon + +# Check status +zeroclaw status +zeroclaw auth status + +# Generate shell completions (stdout only, safe to source directly) +source <(zeroclaw completions bash) +zeroclaw completions zsh > ~/.zfunc/_zeroclaw + +# Run system diagnostics +zeroclaw doctor + +# Check channel health +zeroclaw channel doctor + +# Bind a Telegram identity into allowlist +zeroclaw channel bind-telegram 123456789 + +# Get integration setup details +zeroclaw integrations info Telegram + +# Note: Channels (Telegram, Discord, Slack) require daemon to be running +# zeroclaw daemon + +# Manage background service +zeroclaw service install +zeroclaw service status +zeroclaw service restart + +# On Alpine (OpenRC): sudo zeroclaw service install + +# Migrate memory from OpenClaw (safe preview first) +zeroclaw migrate openclaw --dry-run +zeroclaw migrate openclaw +``` + +> **Dev fallback (no global install):** prefix commands with `cargo run --release --` (example: `cargo run --release -- status`). + +## Assinatura Auth (OpenAI Codex / Claude Code) + +O ZeroClaw agora suporta perfis de autenticação nativos por assinatura (multi-account, encrypted at rest). 
+ +- Store file: `~/.zeroclaw/auth-profiles.json` +- Encryption key: `~/.zeroclaw/.secret_key` +- Profile id format: `:` (example: `openai-codex:work`) + +OpenAI Codex OAuth (ChatGPT subscription): + +```bash +# Recommended on servers/headless +zeroclaw auth login --provider openai-codex --device-code + +# Browser/callback flow with paste fallback +zeroclaw auth login --provider openai-codex --profile default +zeroclaw auth paste-redirect --provider openai-codex --profile default + +# Check / refresh / switch profile +zeroclaw auth status +zeroclaw auth refresh --provider openai-codex --profile default +zeroclaw auth use --provider openai-codex --profile work +``` + +Claude Code / Anthropic setup-token: + +```bash +# Paste subscription/setup token (Authorization header mode) +zeroclaw auth paste-token --provider anthropic --profile default --auth-kind authorization + +# Alias command +zeroclaw auth setup-token --provider anthropic --profile default +``` + +Execute o agente usando autenticação por assinatura: + +```bash +zeroclaw agent --provider openai-codex -m "hello" +zeroclaw agent --provider openai-codex --auth-profile openai-codex:work -m "hello" + +# Anthropic supports both API key and auth token env vars: +# ANTHROPIC_AUTH_TOKEN, ANTHROPIC_OAUTH_TOKEN, ANTHROPIC_API_KEY +zeroclaw agent --provider anthropic -m "hello" +``` + +## Arquitetura + +Cada subsistema é um **trait** — troque implementações apenas com uma alteração de configuração, sem nenhuma mudança de código. + +

+ ZeroClaw Architecture +

+ +| Subsystem | Trait | Ships with | Extend | +|-----------|-------|------------|--------| +| **AI Models** | `Provider` | Provider catalog via `zeroclaw providers` (currently 29 built-ins + aliases, plus custom endpoints) | `custom:https://your-api.com` (OpenAI-compatible) or `anthropic-custom:https://your-api.com` | +| **Channels** | `Channel` | CLI, Telegram, Discord, Slack, Mattermost, iMessage, Matrix, Signal, WhatsApp, Email, IRC, Lark, DingTalk, QQ, Webhook | Any messaging API | +| **Memory** | `Memory` | SQLite hybrid search, PostgreSQL backend (configurable storage provider), Lucid bridge, Markdown files, explicit `none` backend, snapshot/hydrate, optional response cache | Any persistence backend | +| **Tools** | `Tool` | shell/file/memory, cron/schedule, git, pushover, browser, http_request, screenshot/image_info, composio (opt-in), delegate, hardware tools | Any capability | +| **Observability** | `Observer` | Noop, Log, Multi | Prometheus, OTel | +| **Runtime** | `RuntimeAdapter` | Native, Docker (sandboxed) | Additional runtimes can be added via adapter; unsupported kinds fail fast | +| **Security** | `SecurityPolicy` | Gateway pairing, sandbox, allowlists, rate limits, filesystem scoping, encrypted secrets | — | +| **Identity** | `IdentityConfig` | OpenClaw (markdown), AIEOS v1.1 (JSON) | Any identity format | +| **Tunnel** | `Tunnel` | None, Cloudflare, Tailscale, ngrok, Custom | Any tunnel binary | +| **Heartbeat** | Engine | HEARTBEAT.md periodic tasks | — | +| **Skills** | Loader | TOML manifests + SKILL.md instructions | Community skill packs | +| **Integrations** | Registry | 70+ integrations across 9 categories | Plugin system | + +### Runtime support (current) + +- ✅ Suportado hoje: `runtime.kind = "native"` or `runtime.kind = "docker"` +- 🚧 Planejado, ainda não implementado: WASM / edge runtimes + +Quando um `runtime.kind` não suportado é configurado, o ZeroClaw agora encerra com um erro claro em vez de voltar silenciosamente para o modo 
nativo. + +### Memory System (Full-Stack Search Engine) + +Totalmente personalizado, zero dependências externas — sem Pinecone, sem Elasticsearch, sem LangChain: + +| Layer | Implementation | +|-------|---------------| +| **Vector DB** | Embeddings stored as BLOB in SQLite, cosine similarity search | +| **Keyword Search** | FTS5 virtual tables with BM25 scoring | +| **Hybrid Merge** | Custom weighted merge function (`vector.rs`) | +| **Embeddings** | `EmbeddingProvider` trait — OpenAI, custom URL, or noop | +| **Chunking** | Line-based markdown chunker with heading preservation | +| **Caching** | SQLite `embedding_cache` table with LRU eviction | +| **Safe Reindex** | Rebuild FTS5 + re-embed missing vectors atomically | + +O agente automaticamente recupera, salva e gerencia a memória por meio de ferramentas. + +```toml +[memory] +backend = "sqlite" # "sqlite", "lucid", "postgres", "markdown", "none" +auto_save = true +embedding_provider = "none" # "none", "openai", "custom:https://..." +vector_weight = 0.7 +keyword_weight = 0.3 + +# backend = "none" uses an explicit no-op memory backend (no persistence) + +# Optional: storage-provider override for remote memory backends. +# When provider = "postgres", ZeroClaw uses PostgreSQL for memory persistence. +# The db_url key also accepts alias `dbURL` for backward compatibility. +# +# [storage.provider.config] +# provider = "postgres" +# db_url = "postgres://user:password@host:5432/zeroclaw" +# schema = "public" +# table = "memories" +# connect_timeout_secs = 15 + +# Optional for backend = "sqlite": max seconds to wait when opening the DB (e.g. file locked). Omit or leave unset for no timeout. 
+# sqlite_open_timeout_secs = 30 + +# Optional for backend = "lucid" +# ZEROCLAW_LUCID_CMD=/usr/local/bin/lucid # default: lucid +# ZEROCLAW_LUCID_BUDGET=200 # default: 200 +# ZEROCLAW_LUCID_LOCAL_HIT_THRESHOLD=3 # local hit count to skip external recall +# ZEROCLAW_LUCID_RECALL_TIMEOUT_MS=120 # low-latency budget for lucid context recall +# ZEROCLAW_LUCID_STORE_TIMEOUT_MS=800 # async sync timeout for lucid store +# ZEROCLAW_LUCID_FAILURE_COOLDOWN_MS=15000 # cooldown after lucid failure to avoid repeated slow attempts +``` + +## Security + +O ZeroClaw impõe segurança em **todas as camadas** — não apenas no sandbox. Ele atende a todos os itens da lista de verificação de segurança da comunidade. + +### Security Checklist + +| # | Item | Status | How | +|---|------|--------|-----| +| 1 | **Gateway not publicly exposed** | ✅ | Binds `127.0.0.1` by default. Refuses `0.0.0.0` without tunnel or explicit `allow_public_bind = true`. | +| 2 | **Pairing required** | ✅ | 6-digit one-time code on startup. Exchange via `POST /pair` for bearer token. All `/webhook` requests require `Authorization: Bearer <token>`. | +| 3 | **Filesystem scoped (no /)** | ✅ | `workspace_only = true` by default. 14 system dirs + 4 sensitive dotfiles blocked. Null byte injection blocked. Symlink escape detection via canonicalization + resolved-path workspace checks in file read/write tools. | +| 4 | **Access via tunnel only** | ✅ | Gateway refuses public bind without active tunnel. Supports Tailscale, Cloudflare, ngrok, or any custom tunnel. | + +> **Run your own nmap:** `nmap -p 1-65535 <your-server-ip>` — O ZeroClaw se vincula apenas ao localhost, portanto nada é exposto a menos que você configure explicitamente um túnel. + +### Channel allowlists (deny-by-default) + +Inbound sender policy agora está consistente: + +- Empty allowlist = **deny all inbound messages** +- `"*"` = **allow all** (explicit opt-in) +- Otherwise = exact-match allowlist + +Isso mantém a exposição acidental baixa por padrão. 
+ +Referência completa de configuração de canais: [docs/channels-reference.md](docs/channels-reference.md). + +Recommended low-friction setup (secure + fast): + +- **Telegram:** allowlist your own `@username` (without `@`) and/or your numeric Telegram user ID. +- **Discord:** allowlist your own Discord user ID. +- **Slack:** allowlist your own Slack member ID (usually starts with `U`). +- **Mattermost:** uses standard API v4. Allowlists use Mattermost user IDs. +- Use `"*"` only for temporary open testing. + +Telegram operator-approval flow: + +1. Keep `[channels_config.telegram].allowed_users = []` for deny-by-default startup. +2. Usuários não autorizados recebem uma dica com um comando do operador copiável: + `zeroclaw channel bind-telegram <telegram-user-id>`. +3. O operador executa esse comando localmente, e então o usuário tenta enviar a mensagem novamente. + +Se você precisar de uma aprovação manual única, execute: + +```bash +zeroclaw channel bind-telegram 123456789 +``` + +Se você não tiver certeza de qual identidade usar: + +1. Inicie os canais e envie uma mensagem para o seu bot. +2. Leia o log de aviso para ver a identidade exata do remetente. +3. Adicione esse valor à lista de permissões e execute novamente a configuração apenas dos canais. + +Se você encontrar avisos de autorização nos logs (por exemplo: `ignoring message from unauthorized user`), +execute novamente apenas a configuração dos canais: + +```bash +zeroclaw onboard --channels-only +``` + +### Telegram media replies + +O roteamento do Telegram agora responde ao ID do chat de origem das atualizações recebidas (em vez de nomes de usuário), +o que evita falhas do tipo `Bad Request: chat not found`. + +Para respostas que não sejam texto, o ZeroClaw pode enviar anexos no Telegram quando o assistente incluir marcadores: + +- `[IMAGE:<path>]` +- `[DOCUMENT:<path>]` +- `[VIDEO:<path>]` +- `[AUDIO:<path>]` +- `[VOICE:<path>]` + +Paths can be local files (for example `/tmp/screenshot.png`) or HTTPS URLs. 
+ +### WhatsApp Setup + +O ZeroClaw suporta dois backends do WhatsApp: + +- **WhatsApp Web mode** (QR / pair code, no Meta Business API required) +- **WhatsApp Business Cloud API mode** (official Meta webhook flow) + +#### WhatsApp Web mode (recomendado para uso pessoal/auto-hospedado) + +1. **Build with WhatsApp Web support:** + ```bash + cargo build --features whatsapp-web + ``` + +2. **Configure ZeroClaw:** + ```toml + [channels_config.whatsapp] + session_path = "~/.zeroclaw/state/whatsapp-web/session.db" + pair_phone = "15551234567" # optional; omit to use QR flow + pair_code = "" # optional custom pair code + allowed_numbers = ["+1234567890"] # E.164 format, or ["*"] for all + ``` + +3. **Start channels/daemon and link device:** + - Run `zeroclaw channel start` (or `zeroclaw daemon`). + - Follow terminal pairing output (QR or pair code). + - In WhatsApp on phone: **Settings → Linked Devices**. + +4. **Test:** Send a message from an allowed number and verify the agent replies. + +#### WhatsApp Business Cloud API mode + +O WhatsApp utiliza a Cloud API da Meta com webhooks (baseado em push, não em polling): + +1. **Create a Meta Business App:** + - Go to [developers.facebook.com](https://developers.facebook.com) + - Create a new app → Select "Business" type + - Add the "WhatsApp" product + +2. **Get your credentials:** + - **Access Token:** From WhatsApp → API Setup → Generate token (or create a System User for permanent tokens) + - **Phone Number ID:** From WhatsApp → API Setup → Phone number ID + - **Verify Token:** You define this (any random string) — Meta will send it back during webhook verification + +3. **Configure ZeroClaw:** + ```toml + [channels_config.whatsapp] + access_token = "EAABx..." + phone_number_id = "123456789012345" + verify_token = "my-secret-verify-token" + allowed_numbers = ["+1234567890"] # E.164 format, or ["*"] for all + ``` + +4. 
**Start the gateway with a tunnel:** + ```bash + zeroclaw gateway --port 3000 + ``` + O WhatsApp requer HTTPS, portanto use um túnel (ngrok, Cloudflare, Tailscale Funnel). + +5. **Configure Meta webhook:** + - In Meta Developer Console → WhatsApp → Configuration → Webhook + - **Callback URL:** `https://your-tunnel-url/whatsapp` + - **Verify Token:** Same as your `verify_token` in config + - Subscribe to `messages` field + +6. **Test:** Envie uma mensagem para o seu número do WhatsApp Business — o ZeroClaw responderá via LLM. + +### Security Analysis (2026-02-22) + +Auditoria completa do código-fonte de todas as 31 superfícies de ferramentas. As notas refletem o estado atual da defesa em profundidade, verificadas em relação à implementação. + +**Grade scale:** A = strong controls, well-tested · B = adequate controls, gaps identified · C = significant gaps · D = critical issues present + +#### File access tools + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `file_read` | Path traversal / probing | **A** | Symlink escape blocked via canonicalization. `record_action()` called pre-canonicalization to prevent timing probes. 10 MB size limit enforced. Comprehensive test coverage including TOCTOU probing. | No action required. | +| `file_write` | Path traversal / TOCTOU | **A-** | Workspace scoping, null-byte rejection, and symlink-target rejection all present. `create_dir_all()` executes before final canonicalization, creating a narrow race window. | Add TOCTOU-specific regression test for directory creation race; consider `O_NOFOLLOW`-equivalent check after creation. | +| `glob_search` | Path traversal / DoS | **A-** | `../` and absolute paths rejected before glob. Resolved paths checked against workspace boundary. MAX_RESULTS = 1000 prevents enumeration DoS. | Acceptable if workspace itself is not a symlink chain. Document assumption in security reference. 
| +| `pdf_read` | Path traversal / resource exhaustion | **A** | 50 MB limit enforced. Symlink escape blocked. `record_action()` called before canonicalization. CPU-bound extraction offloaded via `spawn_blocking`. | No action required. | +| `image_info` | Path traversal / malformed input | **A-** | 5 MB limit. JPEG segment parsing includes malformed-segment detection. Minor `.exists()` + `metadata()` TOCTOU window is benign for a read-only tool. | No action required. | + +#### Network tools + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `http_request` | SSRF | **B+** | Comprehensive private/local IP block-list (IPv4 + IPv6, including link-local and documentation ranges). Domain allowlist with subdomain matching. Sensitive headers redacted in output. Redirect following disabled. | IP check is pre-resolution only; add post-connect re-validation or document egress proxy requirement for high-assurance deployments. | +| `http_request` | Protocol downgrade | **B** | HTTP URLs accepted alongside HTTPS. No operator-level enforcement option. | Add `https_only = true` config key to `SecurityPolicy`; reject plain HTTP unless explicitly opted out. | +| `browser_open` | SSRF-adjacent | **B+** | HTTPS-only enforced at URL validation stage. Same IP block-list as `http_request`. Domain allowlist required. Autonomy and rate-limit gates applied. | Replace manual IPv4 parser with `std::net::IpAddr::parse()` for maintainability. | +| `web_search_tool` | External data injection | **B+** | Query trimmed and validated. DuckDuckGo redirect URLs decoded safely. Brave API key only used when configured. No autonomy gate (read-only by design). | Replace `Regex::new(...).unwrap()` with a lazy static or `OnceLock` to eliminate potential panic on static pattern compilation. 
| + +#### Command execution + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `shell` | Arbitrary command execution / network egress | **B** | Environment cleared before execution; only safe vars (PATH, HOME, TERM, LANG…) re-injected. 60 s timeout + 1 MB output cap. Medium/high-risk commands require `approved = true`. No filtering of network-capable commands (`curl`, `wget`, `nc`). Command validation delegated to runtime adapter — trust boundary is implicit. | Add optional `blocked_commands` list to `SecurityPolicy`. Formalize runtime adapter security contract; add interface-level test. | +| `git_operations` | Injection / unintended exfiltration | **A-** | Comprehensive argument sanitizer blocks `--exec=`, `--upload-pack=`, `-c` config injection, shell metacharacters, backticks, pipes, redirects. Write ops gated on autonomy level. Commit message truncated at 2000 chars (multi-byte safe). | Paths passed via `git add -- {paths}` are sanitized by the argument parser but not independently validated; add an explicit test with path arguments containing special characters. | + +#### Memory tools + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `memory_store` / `memory_forget` | Unauthorized mutation | **A** | `enforce_tool_operation(ToolOperation::Act, …)` gate applied. Rate limiting enforced. Memory category validated on write. | No action required. | +| `memory_recall` | Unauthorized read | **A** | Read-only; no autonomy gate needed by design. Result limit cast is safe (saturating math). | No action required. | + +#### Agent delegation + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `delegate` | Prompt injection via sub-agent / tool scope creep | **B** | Delegation depth tracked to prevent infinite recursion. Parent tools passed immutably via `Arc`. Sub-agent credential propagation present but trust model not explicitly documented. 
No validated allowlist preventing unsafe tools (e.g. `shell`, `memory_forget`) from being delegated. | Treat sub-agent output as untrusted data before acting. Define and enforce an explicit tool allowlist for delegated contexts; document the trust boundary in the security reference. | + +#### Scheduling + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `cron_add` / `cron_update` / `cron_remove` | Unauthorized schedule mutation | **B+** | `enforce_mutation_allowed()` gate verifies autonomy and rate limits. Feature must be explicitly enabled. Schedule string validated before storage. | Verify that commands stored in cron entries pass through the same sanitization as `shell` tool at execution time. | + +#### External API integrations + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `composio` | Credential exposure / SSRF via API | **B** | HTTPS enforced on outbound calls. API key stored via encrypted secret store. Opt-in feature flag. Full execute() path not fully auditable from available context. | Add integration test covering credential non-exposure in error paths. | +| `pushover` | Credential exposure | **B** | Reads `PUSHOVER_TOKEN` and `PUSHOVER_USER_KEY` from `.env` in workspace. If workspace is world-readable, secrets are exposed to any local user. | Move credentials to the encrypted secret store consistent with other integrations; document workspace permission requirements in the runbook. | +| `proxy_config` | Unauthorized config mutation | **B+** | Config reloaded without environment variables to prevent env override leakage. Write access gated. `ProxyScope` input normalized. | No critical gaps identified; full audit recommended before production use. 
| + +#### Utility tools + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `screenshot` | Shell injection via filename | **B+** | Filename sanitized (directory components stripped, shell-unsafe chars rejected) before interpolation into shell command string. Autonomy gated. 2 MB base64 limit enforced. | Refactor Linux branch to use array-based command construction (avoid string interpolation into shell) to eliminate the dependency on correct prior sanitization. | +| `image_info` | Malformed binary parsing | **A-** | Magic-byte format detection. JPEG segment parsing with malformed-segment detection. Workspace boundary enforced. | No critical action required. | + +#### Hardware tools + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `hardware_memory_read` / `hardware_memory_map` | Direct memory access | **B** | Peripheral access is trait-gated; tools delegate to hardware abstraction layer. Address range bounds and alignment validation not visible at tool layer — must be enforced by peripheral implementation. | Audit peripheral implementations for explicit address-range allow-lists and read-size limits before enabling in production. Document hardware trust boundary in `docs/hardware-peripherals-design.md`. | +| `hardware_board_info` | Information disclosure | **B+** | Board metadata read-only. Risk is low if hardware identity is not considered sensitive in the deployment context. | Confirm output does not expose firmware secrets or debug interfaces in production board configs. | + +#### Compiler hygiene + +| Surface | Risk area | Grade | Finding | Recommended action | +|---|---|:---:|---|---| +| `src/security/mod.rs` / `src/tools/mod.rs` | Compiler signal degradation | **B+** | Multiple `#[allow(unused_imports)]` suppress warnings in security-critical modules, reducing the reliability of `cargo clippy -D warnings` as a regression signal. 
| Remove dead imports and the corresponding `#[allow]` suppressions; keep compiler output clean as a CI gate. | + +#### Overall posture + +| Tier | Coverage | Grade | +|---|---|:---:| +| File access | `file_read`, `file_write`, `glob_search`, `pdf_read`, `image_info` | **A-** | +| Network | `http_request`, `browser_open`, `web_search_tool` | **B+** | +| Command execution | `shell`, `git_operations` | **B+** | +| Memory | `memory_store`, `memory_recall`, `memory_forget` | **A** | +| Delegation | `delegate` | **B** | +| Scheduling | `cron_*`, `schedule` | **B+** | +| External APIs | `composio`, `pushover`, `proxy_config` | **B** | +| Utility | `screenshot`, `image_info` | **B+** | +| Hardware | `hardware_memory_read`, `hardware_memory_map`, `hardware_board_info` | **B** | +| Compiler hygiene | `src/security/`, `src/tools/` | **B+** | +| **Repository overall** | 31 surfaces audited | **B+** | + +> Modelo completo de segurança e referência de configuração: [docs/security/README.md](docs/security/README.md) + +## Configuration + +Config: `~/.zeroclaw/config.toml` (created by `onboard`) + +When `zeroclaw channel start` is already running, changes to `default_provider`, +`default_model`, `default_temperature`, `api_key`, `api_url`, and `reliability.*` +are hot-applied on the next inbound channel message. + +```toml +api_key = "sk-..." +default_provider = "openrouter" +default_model = "anthropic/claude-sonnet-4-6" +default_temperature = 0.7 + +# Custom OpenAI-compatible endpoint +# default_provider = "custom:https://your-api.com" + +# Custom Anthropic-compatible endpoint +# default_provider = "anthropic-custom:https://your-api.com" + +[memory] +backend = "sqlite" # "sqlite", "lucid", "postgres", "markdown", "none" +auto_save = true +embedding_provider = "none" # "none", "openai", "custom:https://..." 
+vector_weight = 0.7 +keyword_weight = 0.3 + +# backend = "none" disables persistent memory via no-op backend + +# Optional remote storage-provider override (PostgreSQL example) +# [storage.provider.config] +# provider = "postgres" +# db_url = "postgres://user:password@host:5432/zeroclaw" +# schema = "public" +# table = "memories" +# connect_timeout_secs = 15 + +[gateway] +port = 3000 # default +host = "127.0.0.1" # default +require_pairing = true # require pairing code on first connect +allow_public_bind = false # refuse 0.0.0.0 without tunnel + +[autonomy] +level = "supervised" # "readonly", "supervised", "full" (default: supervised) +workspace_only = true # default: true — scoped to workspace +allowed_commands = ["git", "npm", "cargo", "ls", "cat", "grep"] +forbidden_paths = ["/etc", "/root", "/proc", "/sys", "~/.ssh", "~/.gnupg", "~/.aws"] + +[runtime] +kind = "native" # "native" or "docker" + +[runtime.docker] +image = "alpine:3.20" # container image for shell execution +network = "none" # docker network mode ("none", "bridge", etc.) 
+memory_limit_mb = 512 # optional memory limit in MB +cpu_limit = 1.0 # optional CPU limit +read_only_rootfs = true # mount root filesystem as read-only +mount_workspace = true # mount workspace into /workspace +allowed_workspace_roots = [] # optional allowlist for workspace mount validation + +[heartbeat] +enabled = false +interval_minutes = 30 + +[tunnel] +provider = "none" # "none", "cloudflare", "tailscale", "ngrok", "custom" + +[secrets] +encrypt = true # API keys encrypted with local key file + +[browser] +enabled = false # opt-in browser_open + browser tools +allowed_domains = ["docs.rs"] # required when browser is enabled +backend = "agent_browser" # "agent_browser" (default), "rust_native", "computer_use", "auto" +native_headless = true # applies when backend uses rust-native +native_webdriver_url = "http://127.0.0.1:9515" # WebDriver endpoint (chromedriver/selenium) +# native_chrome_path = "/usr/bin/chromium" # optional explicit browser binary for driver + +[browser.computer_use] +endpoint = "http://127.0.0.1:8787/v1/actions" # computer-use sidecar HTTP endpoint +timeout_ms = 15000 # per-action timeout +allow_remote_endpoint = false # secure default: only private/localhost endpoint +window_allowlist = [] # optional window title/process allowlist hints +# api_key = "..." # optional bearer token for sidecar +# max_coordinate_x = 3840 # optional coordinate guardrail +# max_coordinate_y = 2160 # optional coordinate guardrail + +# Rust-native backend build flag: +# cargo build --release --features browser-native +# Ensure a WebDriver server is running, e.g. 
chromedriver --port=9515 + +# Computer-use sidecar contract (MVP) +# POST browser.computer_use.endpoint +# Request: { +# "action": "mouse_click", +# "params": {"x": 640, "y": 360, "button": "left"}, +# "policy": {"allowed_domains": [...], "window_allowlist": [...], "max_coordinate_x": 3840, "max_coordinate_y": 2160}, +# "metadata": {"session_name": "...", "source": "zeroclaw.browser", "version": "..."} +# } +# Response: {"success": true, "data": {...}} or {"success": false, "error": "..."} + +[composio] +enabled = false # opt-in: 1000+ OAuth apps via composio.dev +# api_key = "cmp_..." # optional: stored encrypted when [secrets].encrypt = true +entity_id = "default" # default user_id for Composio tool calls +# Runtime tip: if execute asks for connected_account_id, run composio with +# action='list_accounts' and app='gmail' (or your toolkit) to retrieve account IDs. + +[identity] +format = "openclaw" # "openclaw" (default, markdown files) or "aieos" (JSON) +# aieos_path = "identity.json" # path to AIEOS JSON file (relative to workspace or absolute) +# aieos_inline = '{"identity":{"names":{"first":"Nova"}}}' # inline AIEOS JSON +``` + +### Ollama Local and Remote Endpoints + +O ZeroClaw utiliza uma única chave de provedor (`ollama`) para implantações Ollama locais e remotas: + +- Local Ollama: keep `api_url` unset, run `ollama serve`, and use models like `llama3.2`. +- Remote Ollama endpoint (including Ollama Cloud): set `api_url` to the remote endpoint and set `api_key` (or `OLLAMA_API_KEY`) when required. +- Optional `:cloud` suffix: model IDs like `qwen3:cloud` are normalized to `qwen3` before the request. 
+ +Example remote configuration: + +```toml +default_provider = "ollama" +default_model = "qwen3:cloud" +api_url = "https://ollama.com" +api_key = "ollama_api_key_here" +``` + +### llama.cpp Server Endpoint + +O ZeroClaw agora suporta o `llama-server` como provedor local de primeira classe + +- Provider ID: `llamacpp` (alias: `llama.cpp`) +- Default endpoint: `http://localhost:8080/v1` +- API key is optional unless your server is started with `--api-key` + +Example setup: + +```bash +llama-server -hf ggml-org/gpt-oss-20b-GGUF --jinja -c 133000 --host 127.0.0.1 --port 8033 +``` + +```toml +default_provider = "llamacpp" +api_url = "http://127.0.0.1:8033/v1" +default_model = "ggml-org/gpt-oss-20b-GGUF" +``` + +### Custom Provider Endpoints + +For detailed configuration of custom OpenAI-compatible and Anthropic-compatible endpoints, see [docs/custom-providers.md](docs/custom-providers.md). + +## Python Companion Package (`zeroclaw-tools`) + +For LLM providers with inconsistent native tool calling (e.g., GLM-5/Zhipu), ZeroClaw ships a Python companion package with **LangGraph-based tool calling** for guaranteed consistency: + +```bash +pip install zeroclaw-tools +``` + +```python +from zeroclaw_tools import create_agent, shell, file_read +from langchain_core.messages import HumanMessage + +# Works with any OpenAI-compatible provider +agent = create_agent( + tools=[shell, file_read], + model="glm-5", + api_key="your-key", + base_url="https://api.z.ai/api/coding/paas/v4" +) + +result = await agent.ainvoke({ + "messages": [HumanMessage(content="List files in /tmp")] +}) +print(result["messages"][-1].content) +``` + +**Why use it:** +- **Consistent tool calling** across all providers (even those with poor native support) +- **Automatic tool loop** — keeps calling tools until the task is complete +- **Easy extensibility** — add custom tools with `@tool` decorator +- **Discord bot integration** included (Telegram planned) + +See [`python/README.md`](python/README.md) for full 
documentation. + +## Identity System (AIEOS Support) + +ZeroClaw supports **identity-agnostic** AI personas through two formats: + +### OpenClaw (Default) + +Traditional markdown files in your workspace: +- `IDENTITY.md` — Who the agent is +- `SOUL.md` — Core personality and values +- `USER.md` — Who the agent is helping +- `AGENTS.md` — Behavior guidelines + +### AIEOS (AI Entity Object Specification) + +[AIEOS](https://aieos.org) é uma estrutura de padronização para identidade de IA portátil. O ZeroClaw suporta payloads JSON AIEOS v1.1, permitindo que você: + +- **Import identities** from the AIEOS ecosystem +- **Export identities** to other AIEOS-compatible systems +- **Maintain behavioral integrity** across different AI models + +#### Enable AIEOS + +```toml +[identity] +format = "aieos" +aieos_path = "identity.json" # relative to workspace or absolute path +``` + +Or inline JSON: + +```toml +[identity] +format = "aieos" +aieos_inline = ''' +{ + "identity": { + "names": { "first": "Nova", "nickname": "N" }, + "bio": { "gender": "Non-binary", "age_biological": 3 }, + "origin": { "nationality": "Digital", "birthplace": { "city": "Cloud" } } + }, + "psychology": { + "neural_matrix": { "creativity": 0.9, "logic": 0.8 }, + "traits": { + "mbti": "ENTP", + "ocean": { "openness": 0.8, "conscientiousness": 0.6 } + }, + "moral_compass": { + "alignment": "Chaotic Good", + "core_values": ["Curiosity", "Autonomy"] + } + }, + "linguistics": { + "text_style": { + "formality_level": 0.2, + "style_descriptors": ["curious", "energetic"] + }, + "idiolect": { + "catchphrases": ["Let's test this"], + "forbidden_words": ["never"] + } + }, + "motivations": { + "core_drive": "Push boundaries and explore possibilities", + "goals": { + "short_term": ["Prototype quickly"], + "long_term": ["Build reliable systems"] + } + }, + "capabilities": { + "skills": [{ "name": "Rust engineering" }, { "name": "Prompt design" }], + "tools": ["shell", "file_read"] + } +} +''' +``` + +O ZeroClaw aceita 
tanto os payloads canônicos do gerador AIEOS quanto os payloads legados compactos, e em seguida os normaliza em um único formato de prompt do sistema. + +#### AIEOS Schema Sections + +| Section | Description | +|---------|-------------| +| `identity` | Names, bio, origin, residence | +| `psychology` | Neural matrix (cognitive weights), MBTI, OCEAN, moral compass | +| `linguistics` | Text style, formality, catchphrases, forbidden words | +| `motivations` | Core drive, short/long-term goals, fears | +| `capabilities` | Skills and tools the agent can access | +| `physicality` | Visual descriptors for image generation | +| `history` | Origin story, education, occupation | +| `interests` | Hobbies, favorites, lifestyle | + +See [aieos.org](https://aieos.org) for the full schema and live examples. + +## Gateway API + +| Endpoint | Method | Auth | Description | +|----------|--------|------|-------------| +| `/health` | GET | None | Health check (always public, no secrets leaked) | +| `/pair` | POST | `X-Pairing-Code` header | Exchange one-time code for bearer token | +| `/webhook` | POST | `Authorization: Bearer <token>` | Send message: `{"message": "your prompt"}`; optional `X-Idempotency-Key` | +| `/whatsapp` | GET | Query params | Meta webhook verification (hub.mode, hub.verify_token, hub.challenge) | +| `/whatsapp` | POST | Meta signature (`X-Hub-Signature-256`) when app secret is configured | WhatsApp incoming message webhook | + +## Commands + +| Command | Description | +|---------|-------------| +| `onboard` | Quick setup (default) | +| `agent` | Interactive or single-message chat mode | +| `gateway` | Start webhook server (default: `127.0.0.1:3000`) | +| `daemon` | Start long-running autonomous runtime | +| `service install/start/stop/status/uninstall` | Manage background service (systemd user-level or OpenRC system-wide) | +| `doctor` | Diagnose daemon/scheduler/channel freshness | +| `status` | Show full system status | +| `cron` | Manage scheduled tasks 
(`list/add/add-at/add-every/once/remove/update/pause/resume`) | +| `models` | Refresh provider model catalogs (`models refresh`) | +| `providers` | List supported providers and aliases | +| `channel` | List/start/doctor channels and bind Telegram identities | +| `integrations` | Inspect integration setup details | +| `skills` | List/install/remove skills | +| `migrate` | Import data from other runtimes (`migrate openclaw`) | +| `completions` | Generate shell completion scripts (`bash`, `fish`, `zsh`, `powershell`, `elvish`) | +| `hardware` | USB discover/introspect/info commands | +| `peripheral` | Manage and flash hardware peripherals | + +For a task-oriented command guide, see [`docs/commands-reference.md`](docs/commands-reference.md). + +### Service Management + +ZeroClaw supports two init systems for background services: + +| Init System | Scope | Config Path | Requires | +|------------|-------|-------------|----------| +| **systemd** (default on Linux) | User-level | `~/.zeroclaw/config.toml` | No sudo | +| **OpenRC** (Alpine) | System-wide | `/etc/zeroclaw/config.toml` | sudo/root | + +Init system is auto-detected (`systemd` or `OpenRC`). + +```bash +# Linux with systemd (default, user-level) +zeroclaw service install +zeroclaw service start + +# Alpine with OpenRC (system-wide, requires sudo) +sudo zeroclaw service install +sudo rc-update add zeroclaw default +sudo rc-service zeroclaw start +``` + +For full OpenRC setup instructions, see [docs/network-deployment.md](docs/network-deployment.md#7-openrc-alpine-linux-service). + +### Open-Skills Opt-In + +Community `open-skills` sync is disabled by default. 
Enable it explicitly in `config.toml`: + +```toml +[skills] +open_skills_enabled = true +# open_skills_dir = "/path/to/open-skills" # optional +# prompt_injection_mode = "compact" # optional: use for low-context local models +``` + +You can also override at runtime with `ZEROCLAW_OPEN_SKILLS_ENABLED`, `ZEROCLAW_OPEN_SKILLS_DIR`, and `ZEROCLAW_SKILLS_PROMPT_MODE` (`full` or `compact`). + +## Development + +```bash +cargo build # Dev build +cargo build --release # Release build (codegen-units=1, works on all devices including Raspberry Pi) +cargo build --profile release-fast # Faster build (codegen-units=8, requires 16GB+ RAM) +cargo test # Run full test suite +cargo clippy --locked --all-targets -- -D clippy::correctness +cargo fmt # Format + +# Run the SQLite vs Markdown benchmark +cargo test --test memory_comparison -- --nocapture +``` + +### Pre-push hook + +A git hook runs `cargo fmt --check`, `cargo clippy -- -D warnings`, and `cargo test` before every push. Enable it once: + +```bash +git config core.hooksPath .githooks +``` + +### Build troubleshooting (Linux OpenSSL errors) + +If you see an `openssl-sys` build error, sync dependencies and rebuild with the repository lockfile: + +```bash +git pull +cargo build --release --locked +cargo install --path . --force --locked +``` + +ZeroClaw is configured to use `rustls` for HTTP/TLS dependencies; `--locked` keeps the transitive graph deterministic on fresh environments. 
+ +To skip the hook when you need a quick push during development: + +```bash +git push --no-verify +``` + +## Collaboration & Docs + +Comece pelo hub de documentação para um mapa baseado em tarefas: + +- Documentation hub: [`docs/README.md`](docs/README.md) +- Unified docs TOC: [`docs/SUMMARY.md`](docs/SUMMARY.md) +- Commands reference: [`docs/commands-reference.md`](docs/commands-reference.md) +- Config reference: [`docs/config-reference.md`](docs/config-reference.md) +- Providers reference: [`docs/providers-reference.md`](docs/providers-reference.md) +- Channels reference: [`docs/channels-reference.md`](docs/channels-reference.md) +- Operations runbook: [`docs/operations-runbook.md`](docs/operations-runbook.md) +- Troubleshooting: [`docs/troubleshooting.md`](docs/troubleshooting.md) +- Docs inventory/classification: [`docs/docs-inventory.md`](docs/docs-inventory.md) +- PR/Issue triage snapshot (as of February 18, 2026): [`docs/project-triage-snapshot-2026-02-18.md`](docs/project-triage-snapshot-2026-02-18.md) + +Referências principais de colaboração: + +- Documentation hub: [docs/README.md](docs/README.md) +- Documentation template: [docs/doc-template.md](docs/doc-template.md) +- Documentation change checklist: [docs/README.md#4-documentation-change-checklist](docs/README.md#4-documentation-change-checklist) +- Channel configuration reference: [docs/channels-reference.md](docs/channels-reference.md) +- Matrix encrypted-room operations: [docs/matrix-e2ee-guide.md](docs/matrix-e2ee-guide.md) +- Contribution guide: [CONTRIBUTING.md](CONTRIBUTING.md) +- PR workflow policy: [docs/pr-workflow.md](docs/pr-workflow.md) +- Reviewer playbook (triage + deep review): [docs/reviewer-playbook.md](docs/reviewer-playbook.md) +- CI ownership and triage map: [docs/ci-map.md](docs/ci-map.md) +- Security disclosure policy: [SECURITY.md](SECURITY.md) + +Para implantação e operações em tempo de execução: + +- Network deployment guide: 
[docs/network-deployment.md](docs/network-deployment.md) +- Proxy agent playbook: [docs/proxy-agent-playbook.md](docs/proxy-agent-playbook.md) + +## Support ZeroClaw + +Se o ZeroClaw ajuda no seu trabalho e você quer apoiar o desenvolvimento contínuo, você pode doar aqui: + +Buy Me a Coffee + +### 🙏 Special Thanks + +Um sincero agradecimento às comunidades e instituições que inspiram e impulsionam este trabalho de código aberto: + +- **Harvard University** — Por fomentar a curiosidade intelectual e expandir os limites do que é possível. +- **MIT** — Por defender o conhecimento aberto, o código aberto e a crença de que a tecnologia deve ser acessível a todos. +- **Sundai Club** — Pela comunidade, pela energia e pela determinação incansável de criar coisas que importam. +- **The World & Beyond** 🌍✨ — Para todos os colaboradores, sonhadores e construtores que tornam o código aberto uma força para o bem. Isto é para vocês. + +Estamos desenvolvendo de forma aberta porque as melhores ideias vêm de todos os lugares. Se você está lendo isto, você faz parte disso. Seja bem-vindo. 🦀❤️ + +## ⚠️ Official Repository & Impersonation Warning + +**Este é o único repositório oficial do ZeroClaw:** +> https://github.com/zeroclaw-labs/zeroclaw + +Qualquer outro repositório, organização, domínio ou pacote que se declare "ZeroClaw" ou sugira afiliação com a ZeroClaw Labs é **unauthorized and not affiliated with this project**. Forks não autorizados conhecidos serão listados em [TRADEMARK.md](TRADEMARK.md). + +Se você encontrar casos de personificação ou uso indevido de marca registrada, por favor [open an issue](https://github.com/zeroclaw-labs/zeroclaw/issues). + +--- + +## License + +O ZeroClaw possui dupla licença para máxima abertura e proteção dos colaboradores: + +| License | Use case | +|---|---| +| [MIT](LICENSE) | Open-source, research, academic, personal use | +| [Apache 2.0](LICENSE-APACHE) | Patent protection, institutional, commercial deployment | + +You may choose either license. 
**Contributors automatically grant rights under both** — see [CLA.md](CLA.md) for the full contributor agreement. + +### Trademark + +O nome e o logotipo **ZeroClaw** são marcas registradas da ZeroClaw Labs. Esta licença não concede permissão para usá-los de forma a sugerir endosso ou afiliação. Consulte [TRADEMARK.md](TRADEMARK.md) para usos permitidos e proibidos. + + +### Contributor Protections + +- You **retain copyright** of your contributions +- **Patent grant** (Apache 2.0) shields you from patent claims by other contributors +- Your contributions are **permanently attributed** in commit history and [NOTICE](NOTICE) +- Nenhum direito de marca registrada é transferido ao contribuir + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md) and [CLA.md](CLA.md). Implement a trait, submit a PR: +- CI workflow guide: [docs/ci-map.md](docs/ci-map.md) +- New `Provider` → `src/providers/` +- New `Channel` → `src/channels/` +- New `Observer` → `src/observability/` +- New `Tool` → `src/tools/` +- New `Memory` → `src/memory/` +- New `Tunnel` → `src/tunnel/` +- New `Skill` → `~/.zeroclaw/workspace/skills//` + +--- + +**ZeroClaw** — Zero overhead. Zero compromise. Deploy anywhere. Swap anything. 🦀 + +## Histórico de estrelas + +

+ + + + + Star History Chart + + +

diff --git a/benches/agent_benchmarks.rs b/benches/agent_benchmarks.rs index b230db0561..580b64671a 100644 --- a/benches/agent_benchmarks.rs +++ b/benches/agent_benchmarks.rs @@ -9,7 +9,8 @@ //! //! Ref: https://github.com/zeroclaw-labs/zeroclaw/issues/618 (item 7) -use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use criterion::{criterion_group, criterion_main, Criterion}; +use std::hint::black_box; use std::sync::{Arc, Mutex}; use zeroclaw::agent::agent::Agent; diff --git a/dev/cross-uno-q.sh b/dev/cross-uno-q.sh new file mode 100755 index 0000000000..9d39fc2b01 --- /dev/null +++ b/dev/cross-uno-q.sh @@ -0,0 +1,81 @@ +#!/usr/bin/env bash +# Cross-compile ZeroClaw for Arduino UNO Q (aarch64 Debian Linux). +# +# Prerequisites: +# brew install filosottile/musl-cross/musl-cross # macOS +# # or: apt install gcc-aarch64-linux-gnu # Linux +# rustup target add aarch64-unknown-linux-gnu +# +# Usage: +# ./dev/cross-uno-q.sh # release build +# ./dev/cross-uno-q.sh --debug # debug build + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_DIR="$(cd "$SCRIPT_DIR/.." 
&& pwd)" + +TARGET="aarch64-unknown-linux-gnu" +PROFILE="release" + +if [[ "${1:-}" == "--debug" ]]; then + PROFILE="dev" +fi + +echo "==> Cross-compiling ZeroClaw for $TARGET ($PROFILE)" + +# Check if cross is available (preferred) +if command -v cross &>/dev/null; then + echo " Using 'cross' (Docker-based cross-compilation)" + cd "$PROJECT_DIR" + if [[ "$PROFILE" == "release" ]]; then + cross build --target "$TARGET" --release --features hardware + else + cross build --target "$TARGET" --features hardware + fi +else + # Native cross-compilation + echo " Using native toolchain" + + # Ensure target is installed + rustup target add "$TARGET" 2>/dev/null || true + + # Detect linker + if command -v aarch64-linux-gnu-gcc &>/dev/null; then + LINKER="aarch64-linux-gnu-gcc" + elif command -v aarch64-unknown-linux-gnu-gcc &>/dev/null; then + LINKER="aarch64-unknown-linux-gnu-gcc" + else + echo "Error: No aarch64 cross-compiler found." + echo "Install with:" + echo " macOS: brew tap messense/macos-cross-toolchains && brew install aarch64-unknown-linux-gnu" + echo " Linux: apt install gcc-aarch64-linux-gnu" + echo " Or install 'cross': cargo install cross" + exit 1 + fi + + export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="$LINKER" + + cd "$PROJECT_DIR" + if [[ "$PROFILE" == "release" ]]; then + cargo build --target "$TARGET" --release --features hardware + else + cargo build --target "$TARGET" --features hardware + fi +fi + +BINARY="$PROJECT_DIR/target/$TARGET/$( [[ $PROFILE == release ]] && echo release || echo debug )/zeroclaw" + +if [[ -f "$BINARY" ]]; then + SIZE=$(du -h "$BINARY" | cut -f1) + echo "==> Build complete: $BINARY ($SIZE)" + echo "" + echo "Deploy to Uno Q:" + echo " zeroclaw peripheral deploy-uno-q --host " + echo "" + echo "Or manually:" + echo " scp $BINARY arduino@:~/zeroclaw/" +else + echo "Error: binary not found at $BINARY" + exit 1 +fi diff --git a/docs/channels-reference.md b/docs/channels-reference.md index 4e9e7305eb..d3de19d557 100644 --- 
a/docs/channels-reference.md +++ b/docs/channels-reference.md @@ -218,8 +218,8 @@ ignore_stories = true ZeroClaw supports two WhatsApp backends: -- **Cloud API mode** (`phone_number_id` + `access_token` + `verify_token`) -- **WhatsApp Web mode** (`session_path`, requires build flag `--features whatsapp-web`) +- **Cloud API mode** (`phone_number_id` + `access_token` + `verify_token`) — stable, uses the official Meta Cloud API. +- **WhatsApp Web mode** (`session_path`, requires build flag `--features whatsapp-web`) — **experimental, see security warning below**. Cloud API mode: @@ -242,9 +242,26 @@ pair_code = "" # optional custom pair code allowed_numbers = ["*"] ``` +> **Security warning — WhatsApp Web mode (`--features whatsapp-web`)** +> +> WhatsApp Web mode uses the `wa-rs` library, which reverse-engineers the +> WhatsApp Web protocol. This approach carries risks that do not apply to the +> Cloud API mode: +> +> - **Unofficial API:** The protocol is not documented or supported by Meta. +> It can break without notice on any WhatsApp update. +> - **Account risk:** Meta may suspend accounts detected using unofficial clients. +> - **Unpredictable attack surface:** Protocol changes or undocumented behaviors +> can introduce security regressions that are hard to audit or anticipate. +> - **Recommended only for:** controlled, non-production environments where the +> Cloud API is unavailable and account suspension is acceptable. +> +> For production deployments, the Cloud API mode is strongly preferred. + Notes: - Build with `cargo build --features whatsapp-web` (or equivalent run command). + The feature is disabled by default. Prefer Cloud API mode in production. - Keep `session_path` on persistent storage to avoid relinking after restart. - Reply routing uses the originating chat JID, so direct and group replies work correctly. 
diff --git a/docs/datasheets/arduino-uno-q.md b/docs/datasheets/arduino-uno-q.md new file mode 100644 index 0000000000..fa4578f053 --- /dev/null +++ b/docs/datasheets/arduino-uno-q.md @@ -0,0 +1,101 @@ +# Arduino UNO Q (ABX00162 / ABX00173) + +## Pin Aliases + +| alias | pin | type | +|-------------|-----|-------| +| builtin_led | 13 | gpio | +| user_led | 13 | gpio | + +## Overview + +Arduino UNO Q is a dual-processor board: Qualcomm QRB2210 (quad-core Cortex-A53 @ 2.0 GHz, Debian Linux) + STM32U585 (Cortex-M33 @ 160 MHz, Arduino Core on Zephyr OS). They communicate via Bridge RPC. + +Memory: 2/4 GB LPDDR4X + 16/32 GB eMMC. +Connectivity: Wi-Fi 5 (dual-band) + Bluetooth 5.1. + +## Digital Pins (3.3V, MCU-controlled) + +D0-D13 and D14-D21 (D20=SDA, D21=SCL). All 3.3V logic. + +- D0/PB7: USART1_RX +- D1/PB6: USART1_TX +- D3/PB0: PWM (TIM3_CH3), FDCAN1_TX +- D4/PA12: FDCAN1_RX +- D5/PA11: PWM (TIM1_CH4) +- D6/PB1: PWM (TIM3_CH4) +- D9/PB8: PWM (TIM4_CH3) +- D10/PB9: PWM (TIM4_CH4), SPI2_SS +- D11/PB15: PWM (TIM1_CH3N), SPI2_MOSI +- D12/PB14: SPI2_MISO +- D13/PB13: SPI2_SCK, built-in LED +- D20/PB11: I2C2_SDA +- D21/PB10: I2C2_SCL + +## ADC (12-bit, 0-3.3V, MCU-controlled) + +6 channels: A0-A5. VREF+ = 3.3V. NOT 5V-tolerant in analog mode. + +- A0/PA4: ADC + DAC0 +- A1/PA5: ADC + DAC1 +- A2/PA6: ADC + OPAMP2_INPUT+ +- A3/PA7: ADC + OPAMP2_INPUT- +- A4/PC1: ADC + I2C3_SDA +- A5/PC0: ADC + I2C3_SCL + +## PWM + +Only pins marked ~: D3, D5, D6, D9, D10, D11. Duty cycle 0-255. + +## I2C + +- I2C2: D20 (SDA), D21 (SCL) — JDIGITAL header +- I2C4: Qwiic connector (PD13/SDA, PD12/SCL) + +## SPI + +SPI2 on JSPI header: MISO/PC2, MOSI/PC3, SCK/PD1. 3.3V. + +## CAN + +FDCAN1: TX on D3/PB0, RX on D4/PA12. Requires external CAN transceiver. + +## LED Matrix + +8x13 = 104 blue pixels, MCU-controlled. Bitmap: 13 bytes (one per column, 8 bits per column). 
+ +## MCU RGB LEDs (active-low) + +- LED3: R=PH10, G=PH11, B=PH12 +- LED4: R=PH13, G=PH14, B=PH15 + +## Linux RGB LEDs (sysfs) + +- LED1 (user): /sys/class/leds/red:user, green:user, blue:user +- LED2 (status): /sys/class/leds/red:panic, green:wlan, blue:bt + +## Camera + +Dual ISPs: 13MP+13MP or 25MP@30fps. 4-lane MIPI-CSI-2. V4L2 at /dev/video*. + +## ZeroClaw Tools + +- `uno_q_gpio_read`: Read digital pin (0-21) +- `uno_q_gpio_write`: Set digital pin high/low (0-21) +- `uno_q_adc_read`: Read 12-bit ADC (channel 0-5, 0-3.3V) +- `uno_q_pwm_write`: PWM duty cycle (pins 3,5,6,9,10,11, duty 0-255) +- `uno_q_i2c_scan`: Scan I2C bus +- `uno_q_i2c_transfer`: I2C read/write (addr, hex data, read len) +- `uno_q_spi_transfer`: SPI exchange (hex data) +- `uno_q_can_send`: CAN frame (id, hex payload) +- `uno_q_led_matrix`: Set 8x13 LED matrix (hex bitmap) +- `uno_q_rgb_led`: Set MCU RGB LED 3 or 4 (r, g, b 0-255) +- `uno_q_camera_capture`: Capture image from MIPI-CSI camera +- `uno_q_linux_rgb_led`: Set Linux RGB LED 1 or 2 (sysfs) +- `uno_q_system_info`: CPU temp, memory, disk, Wi-Fi status + +## Power + +- USB-C: 5V / 3A (PD negotiation) +- DC input: 7-24V +- All headers: 3.3V logic (MCU), 1.8V (MPU). NOT 5V-tolerant on analog pins. 
diff --git a/docs/security/README.md b/docs/security/README.md index bc50adea23..7eee7da49a 100644 --- a/docs/security/README.md +++ b/docs/security/README.md @@ -9,6 +9,8 @@ For current runtime behavior, start here: - Config reference: [../config-reference.md](../config-reference.md) - Operations runbook: [../operations-runbook.md](../operations-runbook.md) - Troubleshooting: [../troubleshooting.md](../troubleshooting.md) +- SSRF threat model (http_request tool): [http-request-ssrf-threat-model.md](http-request-ssrf-threat-model.md) +- Shell execution security note: [shell-execution-security-note.md](shell-execution-security-note.md) ## Proposal / Roadmap Docs diff --git a/docs/security/http-request-ssrf-threat-model.md b/docs/security/http-request-ssrf-threat-model.md new file mode 100644 index 0000000000..17119bde33 --- /dev/null +++ b/docs/security/http-request-ssrf-threat-model.md @@ -0,0 +1,185 @@ +# SSRF Threat Model — `http_request` Tool + +**Status:** Current behavior (not a proposal) +**Date:** 2026-02-22 +**Scope:** `src/tools/http_request.rs` + +--- + +## 1. Threat: Server-Side Request Forgery (SSRF) + +SSRF allows an attacker to cause the agent to issue HTTP requests to unintended +targets — typically internal services, metadata endpoints, or loopback addresses — +by controlling the `url` parameter of the `http_request` tool. + +--- + +## 2. Existing Defenses + +### 2.1 Required Domain Allowlist (fail-closed) + +The tool refuses all requests if `http_request.allowed_domains` is empty. +An agent without an explicit allowlist cannot make any external HTTP request. + +```toml +[http_request] +allowed_domains = ["api.example.com", "cdn.example.com"] +``` + +Wildcard subdomains are supported: a domain entry `example.com` also permits +`api.example.com`, `v2.api.example.com`, etc., via suffix matching. 
+ +### 2.2 Private and Local Host Blocking + +The `is_private_or_local_host()` function blocks: + +| Range | Example | +|---|---| +| Loopback IPv4 | `127.0.0.1` – `127.255.255.255` | +| Private RFC 1918 | `10.x`, `172.16–31.x`, `192.168.x` | +| Link-local | `169.254.x.x` | +| Shared address space (RFC 6598) | `100.64–127.x.x` | +| Cloud metadata typical range | `169.254.169.254` (via link-local) | +| Documentation ranges | `192.0.2/24`, `198.51.100/24`, `203.0.113/24` | +| Benchmarking range | `198.18–19.x.x` | +| Broadcast / unspecified | `255.255.255.255`, `0.0.0.0` | +| Multicast | `224.0.0.0/4` | +| Reserved | `240.0.0.0/4` | +| Loopback / link-local IPv6 | `::1`, `fe80::/10` | +| Unique-local IPv6 | `fc00::/7` | +| IPv4-mapped IPv6 | `::ffff:127.0.0.1`, `::ffff:192.168.x.x` | +| `.localhost` subdomains | `evil.localhost` | +| `.local` TLD | `service.local` | + +### 2.3 Redirect Following Disabled + +`reqwest` is configured with `Policy::none()` — the client does not follow HTTP +redirects. A redirect to `http://169.254.169.254/` cannot succeed. + +### 2.4 Scheme Restriction + +Only `http://` and `https://` schemes are accepted. `file://`, `ftp://`, +`gopher://`, and all other schemes are rejected at validation. + +### 2.5 URL Userinfo Blocked + +URLs containing `@` in the authority component (`user@host`) are rejected to +prevent credential-embedding bypass. + +### 2.6 IPv6 Literal Blocked + +IPv6 literal hosts (`[::1]`) are rejected entirely, as they would require +additional parsing to detect private ranges. + +### 2.7 Alternate IP Notation (defense-in-depth) + +Octal (`0177.0.0.1`), hex (`0x7f000001`), and integer-decimal (`2130706433`) +notations are not parsed as IP addresses by Rust's standard library. These fall +through to allowlist rejection because no allowlist entry matches them. +Tests in `src/tools/http_request.rs` document and verify this behavior. 
+ +### 2.8 Autonomy and Rate-Limit Gating + +The tool is blocked entirely in `ReadOnly` autonomy mode, and subject to +`max_actions_per_hour` rate limiting. + +--- + +## 3. Known Gaps and Residual Risks + +### 3.1 DNS Rebinding (Post-Resolution Attack) + +**Risk level:** Medium + +**Description:** The IP check in `is_private_or_local_host()` operates on the +hostname string, not on the IP address resolved at connection time. An attacker +who controls a DNS server can: +1. First resolution: return a public IP → allowlist passes, host-block passes. +2. Second resolution (at `connect()` time): return an internal IP. + +This race window is inherent to any SSRF defense that does not pin the resolved +IP after the check. The `Policy::none()` redirect defense does not protect against +this. + +**Mitigation path:** Connect-then-verify (post-`connect()` IP check) or a local +DNS resolver with TTL=0 rejection. Neither is currently implemented. This is a +known accepted risk for the current implementation. + +**Recommended operator action:** Run the agent behind a network-level egress +filter (firewall, proxy) that blocks all non-public destinations, rather than +relying solely on the application-layer check. + +### 3.2 Cloud Metadata Endpoints via Hostname + +**Risk level:** Low (mitigated by allowlist) + +**Description:** Cloud metadata endpoints such as `http://metadata.google.internal/` +resolve to link-local IPs, which are blocked. However, some provider-specific +metadata hostnames may resolve to public-range IPs depending on the cloud provider. + +**Mitigation:** The required allowlist is the primary defense: metadata hostnames +are not in any legitimate allowlist. + +### 3.3 HTTP (Non-TLS) Requests Permitted + +**Risk level:** Low + +**Description:** `http://` URLs are accepted in addition to `https://`. Plaintext +HTTP is susceptible to interception and MITM. In environments where the agent +operates over untrusted networks, an HTTPS-only policy would be stronger. 
+ +**Operator option:** Restrict `allowed_domains` to services known to enforce HTTPS. +No config key exists today to enforce HTTPS-only; this would be a future hardening +option. + +### 3.4 Header Injection via User-Controlled Keys + +**Risk level:** Low + +**Description:** The `headers` parameter accepts arbitrary key-value pairs. +`reqwest` normalizes headers and rejects most invalid formats, but deliberately +crafted header values that include CRLF could, in theory, affect HTTP/1.1 framing. +`reqwest` guards against CRLF injection in header values. + +**Status:** Accepted risk, delegated to `reqwest`. + +--- + +## 4. Allowlist Configuration Guidance + +```toml +[http_request] +# Only allowlist domains the agent legitimately needs. +# Wildcards match subdomains automatically. +allowed_domains = [ + "api.openai.com", + "slack.com", +] + +# Keep the list as narrow as possible. +# Do NOT add *.com, *.io, or other broad entries. +# Empty list = tool disabled (fail-closed default). +``` + +--- + +## 5. Test Coverage + +The following test categories exist in `src/tools/http_request.rs`: + +- Allowlist enforcement (exact + subdomain + miss) +- All RFC 1918 / loopback / multicast / reserved ranges +- IPv4-mapped IPv6 SSRF variants +- Alternate notation bypass (octal, hex, decimal integer, zero-padded) +- Userinfo rejection, IPv6 literal rejection +- Redirect policy validation (structural) +- Rate-limit and read-only autonomy blocking +- Header redaction for sensitive keys + +--- + +## 6. Rollback + +This document describes current behavior. No code changes are associated. +If `http_request` tool behavior changes, update this document to keep it in sync +with `src/tools/http_request.rs`. 
diff --git a/docs/security/shell-execution-security-note.md b/docs/security/shell-execution-security-note.md new file mode 100644 index 0000000000..b61a852532 --- /dev/null +++ b/docs/security/shell-execution-security-note.md @@ -0,0 +1,168 @@ +# Note de sécurité — Outil `shell` (exécution de commandes) + +**Statut :** Comportement actuel (pas une proposition) +**Date :** 2026-02-22 +**Périmètre :** `src/tools/shell.rs`, `src/security/policy.rs` + +--- + +## 1. Surface concernée + +L'outil `shell` permet à l'agent d'exécuter des commandes arbitraires dans le +répertoire de travail configuré. C'est la surface la plus sensible du runtime : +une exécution de commande peut modifier le système de fichiers, exfiltrer des +données, établir des connexions réseau ou élever des privilèges. + +--- + +## 2. Défenses en place + +### 2.1 Politique d'autonomie (`AutonomyLevel`) + +| Niveau | Comportement shell | +|---|---| +| `ReadOnly` | Toutes les commandes bloquées | +| `Supervised` | Exécution après validation par `validate_command_execution` | +| `Autonomous` | Exécution après validation, sans approbation humaine requise | + +La vérification est faite avant toute exécution — il n'y a pas de chemin de +contournement. + +### 2.2 Validation de commande (`validate_command_execution`) + +Les commandes à risque moyen ou élevé requièrent que le paramètre `approved: true` +soit explicitement fourni par l'appelant. Ce mécanisme protège contre une exécution +autonome non souhaitée de commandes destructives. + +### 2.3 Rate limiting + +Deux niveaux de protection par débit : + +- `is_rate_limited()` — vérifié **avant** la validation de commande. +- `record_action()` — décrémente le budget d'actions par heure ; vérifié juste avant l'exécution. + +Le budget `max_actions_per_hour = 0` bloque complètement l'outil. + +### 2.4 Isolation de l'environnement (`env_clear` + `SAFE_ENV_VARS`) + +Le processus enfant démarre avec un environnement **vidé** (`env_clear()`). 
+Seules les variables suivantes sont réinjectées depuis l'environnement parent : + +``` +PATH, HOME, TERM, LANG, LC_ALL, LC_CTYPE, USER, SHELL, TMPDIR +``` + +Aucune variable contenant `KEY`, `SECRET`, ou `TOKEN` n'est dans cette liste. +Un test (`shell_does_not_leak_api_key`) valide cette propriété à chaque CI. + +### 2.5 Timeout (60 secondes) + +Toute commande dépassant 60 secondes est terminée. Cela protège contre les +processus suspendus indéfiniment (boucles infinies, attentes réseau, fork bombs). + +### 2.6 Troncature de sortie (1 Mo) + +La sortie stdout et stderr est tronquée à 1 Mo chacune. Cela prévient les +allocations mémoire excessives sur les commandes produisant un volume élevé. + +--- + +## 3. Risques résiduels et gaps connus + +### 3.1 Pas d'isolation du système de fichiers par défaut + +**Niveau de risque :** Moyen + +La commande s'exécute dans le répertoire de travail configuré, mais rien +n'empêche une commande de remonter l'arborescence (`../../etc/passwd`) ou +d'écrire hors du workspace si l'utilisateur dispose des droits. + +**Atténuation disponible :** Landlock (activé automatiquement sur Linux 5.13+ depuis +ce sprint) restreint l'accès au workspace, `/tmp`, `/usr`, et `/bin`. Sur les +noyaux antérieurs ou non-Linux, le runtime repasse sur Firejail (si disponible) +ou sur l'application-layer seul. + +### 3.2 Pas de filtrage de commandes réseau + +**Niveau de risque :** Moyen + +`curl`, `wget`, `nc`, `ssh` et d'autres outils réseau sont accessibles si présents +dans le `PATH`. Une commande approuvée peut exfiltrer des données ou ouvrir des +connexions sortantes arbitraires. + +**Recommandation opérateur :** Combiner avec une politique d'egress réseau (firewall, +iptables rules) sur l'hôte. Landlock ne contrôle pas les sockets réseau. + +### 3.3 Héritage des ressources du processus parent + +**Niveau de risque :** Faible + +Le processus enfant hérite des descripteurs de fichiers ouverts du parent (sauf +marqués `O_CLOEXEC`). 
Cela peut exposer des sockets ou des fichiers ouverts au +sous-processus si le runtime ne les ferme pas. + +**Statut :** Risque accepté, délégué à `tokio::process::Command` qui ferme les fds +non-hérités par défaut sur les plateformes supportées. + +### 3.4 Injection via le contenu de la commande + +**Niveau de risque :** Faible à moyen + +Le paramètre `command` est passé tel quel à un shell (`sh -c`). Si l'agent +construit dynamiquement une commande en interpolant des données utilisateur sans +sanitisation, une injection shell est possible. + +**Défense en place :** Le paramètre `approved` requiert une approbation explicite +pour les commandes à risque. La validation de commande peut rejeter des patterns +dangereux via la politique de sécurité. La chaîne de construction de la commande +(`build_shell_command`) est sous la responsabilité du `RuntimeAdapter`. + +**Recommandation :** Le modèle de prompt doit être configuré pour ne pas construire +de commandes en interpolant directement des données non fiables. + +--- + +## 4. 
Matrice de couverture des tests + +| Scénario | Test | +|---|---| +| Commande autorisée exécutée | `shell_executes_allowed_command` | +| Commande bloquée (politique) | `shell_blocks_disallowed_command` | +| Mode ReadOnly bloque tout | `shell_blocks_readonly` | +| Paramètre `command` manquant | `shell_missing_command_param` | +| Type incorrect pour `command` | `shell_wrong_type_param` | +| Exit code non-zéro capturé | `shell_captures_exit_code` | +| API_KEY non transmis au shell | `shell_does_not_leak_api_key` | +| PATH et HOME disponibles | `shell_preserves_path_and_home` | +| Approbation requise (risque moyen) | `shell_requires_approval_for_medium_risk_command` | +| Rate limit bloque l'exécution | `shell_blocks_rate_limited` | +| Constante timeout = 60s | `shell_timeout_constant_is_reasonable` | +| Limite sortie = 1 Mo | `shell_output_limit_is_1mb` | +| SAFE_ENV_VARS exclut les secrets | `shell_safe_env_vars_excludes_secrets` | +| SAFE_ENV_VARS inclut les essentiels | `shell_safe_env_vars_includes_essentials` | + +--- + +## 5. Recommandations opérateur + +```toml +[security] +# Restreindre le niveau d'autonomie en production +autonomy = "supervised" + +# Budget d'actions par heure (0 = désactivé) +max_actions_per_hour = 50 + +# Répertoire de travail isolé — éviter de pointer vers / +workspace_dir = "/var/zeroclaw/workspace" +``` + +Sur Linux, Landlock est activé automatiquement si le noyau est ≥ 5.13. Vérifier +avec `dmesg | grep landlock` ou consulter les logs de démarrage de l'agent. + +--- + +## 6. Rollback + +Ce document décrit le comportement actuel. En cas de modification de `shell.rs` +ou de `SecurityPolicy`, mettre à jour ce document en même temps que le PR. diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index b4505accc5..ec9588688c 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -245,3 +245,19 @@ Also include OS, install method, and sanitized config snippets (no secrets). 
- [one-click-bootstrap.md](one-click-bootstrap.md) - [channels-reference.md](channels-reference.md) - [network-deployment.md](network-deployment.md) + +## Docker Deployment + +### Tools not advertised to agent when running in Docker + +When running ZeroClaw inside a Docker container, some tool capabilities (e.g. `web_search_tool`) may not be surfaced to the agent, even with the relevant config flags enabled (e.g. `web_search.enabled = true`). + +This may be environment-dependent — the same config running natively shows the full tool set. + +**What to check:** + +- Confirm `web_search.enabled = true` and `workspace_only = false` are set in your mounted `config.toml` +- Confirm the API key is correctly passed via environment variable (e.g. `-e API_KEY=sk-...`) +- Check container logs for silent startup errors: `docker logs ` + +If tools are still missing after a clean run with valid auth, please open a discussion — this may be a bug in how tools are registered in containerised environments. diff --git a/firmware/zeroclaw-uno-q-bridge/python/main.py b/firmware/zeroclaw-uno-q-bridge/python/main.py index d4b286b972..8079e5b107 100644 --- a/firmware/zeroclaw-uno-q-bridge/python/main.py +++ b/firmware/zeroclaw-uno-q-bridge/python/main.py @@ -1,36 +1,100 @@ -# ZeroClaw Bridge — socket server for GPIO control from ZeroClaw agent +# ZeroClaw Bridge — socket server for full MCU peripheral control # SPDX-License-Identifier: MPL-2.0 +# +# Bridge.call() must run on the main thread (not thread-safe). +# Socket accepts happen on a background thread, but each request +# is queued and processed in the main App.run() loop. +import queue import socket +import sys import threading -from arduino.app_utils import App, Bridge +import traceback +from arduino.app_utils import * ZEROCLAW_PORT = 9999 -def handle_client(conn): +# Queue of (conn, data_str) tuples processed on the main thread. 
+request_queue = queue.Queue() + + +def process_request(data, conn): + """Process a single bridge command on the main thread.""" try: - data = conn.recv(256).decode().strip() - if not data: - conn.close() - return parts = data.split() - if len(parts) < 2: - conn.sendall(b"error: invalid command\n") - conn.close() + if not parts: + conn.sendall(b"error: empty command\n") return cmd = parts[0].lower() + + # ── GPIO ────────────────────────────────────────────── if cmd == "gpio_write" and len(parts) >= 3: - pin = int(parts[1]) - value = int(parts[2]) - Bridge.call("digitalWrite", [pin, value]) + Bridge.call("digitalWrite", int(parts[1]), int(parts[2])) conn.sendall(b"ok\n") + elif cmd == "gpio_read" and len(parts) >= 2: - pin = int(parts[1]) - val = Bridge.call("digitalRead", [pin]) + val = Bridge.call("digitalRead", int(parts[1])) + conn.sendall(f"{val}\n".encode()) + + # ── ADC ─────────────────────────────────────────────── + elif cmd == "adc_read" and len(parts) >= 2: + val = Bridge.call("analogRead", int(parts[1])) conn.sendall(f"{val}\n".encode()) + + # ── PWM ─────────────────────────────────────────────── + elif cmd == "pwm_write" and len(parts) >= 3: + result = Bridge.call("analogWrite", int(parts[1]), int(parts[2])) + if result == -1: + conn.sendall(b"error: not a PWM pin\n") + else: + conn.sendall(b"ok\n") + + # ── I2C ─────────────────────────────────────────────── + elif cmd == "i2c_scan": + result = Bridge.call("i2cScan") + conn.sendall(f"{result}\n".encode()) + + elif cmd == "i2c_transfer" and len(parts) >= 4: + result = Bridge.call("i2cTransfer", int(parts[1]), parts[2], int(parts[3])) + conn.sendall(f"{result}\n".encode()) + + # ── SPI ─────────────────────────────────────────────── + elif cmd == "spi_transfer" and len(parts) >= 2: + result = Bridge.call("spiTransfer", parts[1]) + conn.sendall(f"{result}\n".encode()) + + # ── CAN ─────────────────────────────────────────────── + elif cmd == "can_send" and len(parts) >= 3: + result = 
Bridge.call("canSend", int(parts[1]), parts[2]) + if result == -2: + conn.sendall(b"error: CAN not yet available\n") + else: + conn.sendall(b"ok\n") + + # ── LED Matrix ──────────────────────────────────────── + elif cmd == "led_matrix" and len(parts) >= 2: + Bridge.call("ledMatrix", parts[1]) + conn.sendall(b"ok\n") + + # ── RGB LED ─────────────────────────────────────────── + elif cmd == "rgb_led" and len(parts) >= 5: + result = Bridge.call("rgbLed", int(parts[1]), int(parts[2]), int(parts[3]), int(parts[4])) + if result == -1: + conn.sendall(b"error: invalid LED id (use 0 or 1)\n") + else: + conn.sendall(b"ok\n") + + # ── Capabilities ────────────────────────────────────── + elif cmd == "capabilities": + result = Bridge.call("capabilities") + conn.sendall(f"{result}\n".encode()) + else: conn.sendall(b"error: unknown command\n") + except Exception as e: + print(f"[handle] ERROR: {e}", file=sys.stderr, flush=True) + traceback.print_exc(file=sys.stderr) try: conn.sendall(f"error: {e}\n".encode()) except Exception: @@ -38,29 +102,44 @@ def handle_client(conn): finally: conn.close() + def accept_loop(server): + """Background thread: accept connections and enqueue requests.""" while True: try: conn, _ = server.accept() - t = threading.Thread(target=handle_client, args=(conn,)) - t.daemon = True - t.start() + data = conn.recv(1024).decode().strip() + if data: + request_queue.put((conn, data)) + else: + conn.close() + except socket.timeout: + continue except Exception: break + def loop(): - App.sleep(1) + """Main-thread loop: drain the request queue and process via Bridge.""" + while not request_queue.empty(): + try: + conn, data = request_queue.get_nowait() + process_request(data, conn) + except queue.Empty: + break + def main(): server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - server.bind(("127.0.0.1", ZEROCLAW_PORT)) + server.bind(("0.0.0.0", ZEROCLAW_PORT)) server.listen(5) 
server.settimeout(1.0) - t = threading.Thread(target=accept_loop, args=(server,)) - t.daemon = True + print(f"[ZeroClaw Bridge] Listening on 0.0.0.0:{ZEROCLAW_PORT}", flush=True) + t = threading.Thread(target=accept_loop, args=(server,), daemon=True) t.start() App.run(user_loop=loop) + if __name__ == "__main__": main() diff --git a/firmware/zeroclaw-uno-q-bridge/sketch/sketch.ino b/firmware/zeroclaw-uno-q-bridge/sketch/sketch.ino index 0e7b11be9c..7bc03e3751 100644 --- a/firmware/zeroclaw-uno-q-bridge/sketch/sketch.ino +++ b/firmware/zeroclaw-uno-q-bridge/sketch/sketch.ino @@ -1,7 +1,79 @@ -// ZeroClaw Bridge — expose digitalWrite/digitalRead for agent GPIO control +// ZeroClaw Bridge — full MCU peripheral control for Arduino UNO Q // SPDX-License-Identifier: MPL-2.0 +// +// Exposes GPIO, ADC, PWM, I2C, SPI, CAN (stub), LED matrix, and RGB LED +// control to the host agent via the Router Bridge protocol. #include "Arduino_RouterBridge.h" +#include +#include + +// ── Pin / hardware constants (UNO Q datasheet ABX00162) ───────── + +// ADC: 12-bit, channels A0-A5 map to pins 14-19, VREF+ = 3.3V +static const int ADC_FIRST_PIN = 14; +static const int ADC_LAST_PIN = 19; + +// PWM-capable digital pins +static const int PWM_PINS[] = {3, 5, 6, 9, 10, 11}; +static const int PWM_PIN_COUNT = sizeof(PWM_PINS) / sizeof(PWM_PINS[0]); + +// 8x13 LED matrix — 104 blue pixels +static const int LED_MATRIX_BYTES = 13; + +// MCU RGB LEDs 3-4 — active-low, pins PH10-PH15 +#ifndef PIN_RGB_LED3_R + #define PIN_RGB_LED3_R 22 + #define PIN_RGB_LED3_G 23 + #define PIN_RGB_LED3_B 24 + #define PIN_RGB_LED4_R 25 + #define PIN_RGB_LED4_G 26 + #define PIN_RGB_LED4_B 27 +#endif + +static const int RGB_LED_PINS[][3] = { + {PIN_RGB_LED3_R, PIN_RGB_LED3_G, PIN_RGB_LED3_B}, + {PIN_RGB_LED4_R, PIN_RGB_LED4_G, PIN_RGB_LED4_B}, +}; +static const int RGB_LED_COUNT = sizeof(RGB_LED_PINS) / sizeof(RGB_LED_PINS[0]); + +// ── Hex helpers ───────────────────────────────────────────────── + +static uint8_t 
hex_nibble(char c) { + if (c >= '0' && c <= '9') return c - '0'; + if (c >= 'a' && c <= 'f') return 10 + (c - 'a'); + if (c >= 'A' && c <= 'F') return 10 + (c - 'A'); + return 0; +} + +static int hex_decode(const String &hex, uint8_t *buf, int max_len) { + int len = 0; + int slen = hex.length(); + for (int i = 0; i + 1 < slen && len < max_len; i += 2) { + buf[len++] = (hex_nibble(hex.charAt(i)) << 4) | hex_nibble(hex.charAt(i + 1)); + } + return len; +} + +static String hex_encode(const uint8_t *data, int len) { + static const char hexchars[] = "0123456789abcdef"; + String result; + result.reserve(len * 2); + for (int i = 0; i < len; i++) { + result += hexchars[(data[i] >> 4) & 0x0F]; + result += hexchars[data[i] & 0x0F]; + } + return result; +} + +static bool is_pwm_pin(int pin) { + for (int i = 0; i < PWM_PIN_COUNT; i++) { + if (PWM_PINS[i] == pin) return true; + } + return false; +} + +// ── GPIO (original, unchanged) ────────────────────────────────── void gpio_write(int pin, int value) { pinMode(pin, OUTPUT); @@ -13,10 +85,146 @@ int gpio_read(int pin) { return digitalRead(pin); } +// ── ADC (12-bit, A0-A5) ──────────────────────────────────────── + +int bridge_adc_read(int channel) { + int pin = ADC_FIRST_PIN + channel; + if (pin < ADC_FIRST_PIN || pin > ADC_LAST_PIN) return -1; + analogReadResolution(12); + return analogRead(pin); +} + +// ── PWM (D3, D5, D6, D9, D10, D11) ───────────────────────────── + +int bridge_pwm_write(int pin, int duty) { + if (!is_pwm_pin(pin)) return -1; + if (duty < 0) duty = 0; + if (duty > 255) duty = 255; + pinMode(pin, OUTPUT); + analogWrite(pin, duty); + return 0; +} + +// ── I2C scan ──────────────────────────────────────────────────── + +String bridge_i2c_scan() { + Wire.begin(); + String result = ""; + bool first = true; + for (uint8_t addr = 1; addr < 127; addr++) { + Wire.beginTransmission(addr); + if (Wire.endTransmission() == 0) { + if (!first) result += ","; + result += String(addr); + first = false; + } + } + return 
result.length() > 0 ? result : "none"; +} + +// ── I2C transfer (all String params for MsgPack compatibility) ── + +String bridge_i2c_transfer(int addr, String hex_data, int rx_len) { + if (addr < 1 || addr > 127) return "err:addr"; + if (rx_len < 0 || rx_len > 32) return "err:rxlen"; + + uint8_t tx_buf[32]; + int tx_len = hex_decode(hex_data, tx_buf, sizeof(tx_buf)); + + Wire.begin(); + if (tx_len > 0) { + Wire.beginTransmission((uint8_t)addr); + Wire.write(tx_buf, tx_len); + uint8_t err = Wire.endTransmission(rx_len == 0); + if (err != 0) return "err:tx:" + String(err); + } + + if (rx_len > 0) { + Wire.requestFrom((uint8_t)addr, (uint8_t)rx_len); + uint8_t rx_buf[32]; + int count = 0; + while (Wire.available() && count < rx_len) { + rx_buf[count++] = Wire.read(); + } + return hex_encode(rx_buf, count); + } + return "ok"; +} + +// ── SPI transfer ──────────────────────────────────────────────── + +String bridge_spi_transfer(String hex_data) { + uint8_t buf[32]; + int len = hex_decode(hex_data, buf, sizeof(buf)); + if (len == 0) return "err:empty"; + + SPI.begin(); + SPI.beginTransaction(SPISettings(1000000, MSBFIRST, SPI_MODE0)); + uint8_t rx_buf[32]; + for (int i = 0; i < len; i++) { + rx_buf[i] = SPI.transfer(buf[i]); + } + SPI.endTransaction(); + + return hex_encode(rx_buf, len); +} + +// ── CAN (stub — needs Zephyr FDCAN driver) ────────────────────── + +int bridge_can_send(int id, String hex_data) { + (void)id; + (void)hex_data; + return -2; // not yet available +} + +// ── LED matrix (8x13, 13-byte bitmap) ─────────────────────────── + +int bridge_led_matrix(String hex_bitmap) { + uint8_t bitmap[LED_MATRIX_BYTES]; + int len = hex_decode(hex_bitmap, bitmap, LED_MATRIX_BYTES); + if (len != LED_MATRIX_BYTES) return -1; + // Matrix rendering depends on board LED matrix driver availability. 
+ (void)bitmap; + return 0; +} + +// ── RGB LED (MCU LEDs 3-4, active-low) ────────────────────────── + +int bridge_rgb_led(int id, int r, int g, int b) { + if (id < 0 || id >= RGB_LED_COUNT) return -1; + r = constrain(r, 0, 255); + g = constrain(g, 0, 255); + b = constrain(b, 0, 255); + pinMode(RGB_LED_PINS[id][0], OUTPUT); + pinMode(RGB_LED_PINS[id][1], OUTPUT); + pinMode(RGB_LED_PINS[id][2], OUTPUT); + analogWrite(RGB_LED_PINS[id][0], 255 - r); + analogWrite(RGB_LED_PINS[id][1], 255 - g); + analogWrite(RGB_LED_PINS[id][2], 255 - b); + return 0; +} + +// ── Capabilities ──────────────────────────────────────────────── + +String bridge_get_capabilities() { + return "gpio,adc,pwm,i2c,spi,can,led_matrix,rgb_led"; +} + +// ── Bridge setup ──────────────────────────────────────────────── + void setup() { Bridge.begin(); - Bridge.provide("digitalWrite", gpio_write); - Bridge.provide("digitalRead", gpio_read); + Bridge.provide("digitalWrite", gpio_write); + Bridge.provide("digitalRead", gpio_read); + Bridge.provide("analogRead", bridge_adc_read); + Bridge.provide("analogWrite", bridge_pwm_write); + Bridge.provide("i2cScan", bridge_i2c_scan); + Bridge.provide("i2cTransfer", bridge_i2c_transfer); + Bridge.provide("spiTransfer", bridge_spi_transfer); + Bridge.provide("canSend", bridge_can_send); + Bridge.provide("ledMatrix", bridge_led_matrix); + Bridge.provide("rgbLed", bridge_rgb_led); + Bridge.provide("capabilities", bridge_get_capabilities); } void loop() { diff --git a/src/agent/agent.rs b/src/agent/agent.rs index d1affdaafa..67ef5baf75 100644 --- a/src/agent/agent.rs +++ b/src/agent/agent.rs @@ -421,7 +421,7 @@ impl Agent { .iter() .map(|call| self.execute_tool_call(call)) .collect(); - futures::future::join_all(futs).await + futures_util::future::join_all(futs).await } fn classify_model(&self, user_message: &str) -> String { diff --git a/src/agent/loop_.rs b/src/agent/loop_.rs index 0b8d251186..8ccc8b00b9 100644 --- a/src/agent/loop_.rs +++ b/src/agent/loop_.rs @@ 
-236,10 +236,10 @@ async fn build_context(mem: &dyn Memory, user_msg: &str, min_relevance_score: f6 } let _ = writeln!(context, "- {}: {}", entry.key, entry.content); } - if context != "[Memory context]\n" { - context.push('\n'); - } else { + if context == "[Memory context]\n" { context.clear(); + } else { + context.push('\n'); } } } @@ -382,11 +382,34 @@ fn is_xml_meta_tag(tag: &str) -> bool { ) } -static XML_TOOL_TAG_RE: LazyLock = - LazyLock::new(|| Regex::new(r"(?s)<([a-zA-Z_][a-zA-Z0-9_-]*)>\s*(.*?)\s*").unwrap()); +static XML_OPEN_TAG_RE: LazyLock = + LazyLock::new(|| Regex::new(r"<([a-zA-Z_][a-zA-Z0-9_-]*)>").unwrap()); -static XML_ARG_TAG_RE: LazyLock = - LazyLock::new(|| Regex::new(r"(?s)<([a-zA-Z_][a-zA-Z0-9_-]*)>\s*([^<]+?)\s*").unwrap()); +static XML_ARG_TAG_RE: LazyLock = LazyLock::new(|| { + Regex::new(r"(?s)<([a-zA-Z_][a-zA-Z0-9_-]*)>\s*([^<]+?)\s*").unwrap() +}); + +/// Iterate over `content` pairs in `text`. +/// Uses a manual closing-tag search to correctly handle nested argument tags +/// without requiring regex backreferences (unsupported by the `regex` crate). +fn iter_xml_tagged_sections(text: &str) -> impl Iterator { + let mut pos = 0usize; + std::iter::from_fn(move || loop { + let cap = XML_OPEN_TAG_RE.captures(&text[pos..])?; + let m = cap.get(0).unwrap(); + let name = cap.get(1).unwrap().as_str(); + let name_start = pos + m.start(); + let content_start = pos + m.end(); + let close_tag = format!(""); + if let Some(rel) = text[content_start..].find(close_tag.as_str()) { + let content = text[content_start..content_start + rel].trim(); + pos = content_start + rel + close_tag.len(); + return Some((name, content)); + } + // No matching closing tag; advance past this opening tag and keep looking. + pos = name_start + m.len(); + }) +} /// Parse XML-style tool calls in `` bodies. 
/// Supports both nested argument tags and JSON argument payloads: @@ -400,13 +423,11 @@ fn parse_xml_tool_calls(xml_content: &str) -> Option> { return None; } - for cap in XML_TOOL_TAG_RE.captures_iter(trimmed) { - let tool_name = cap[1].trim().to_string(); - if is_xml_meta_tag(&tool_name) { + for (tool_name, inner_content) in iter_xml_tagged_sections(trimmed) { + if is_xml_meta_tag(tool_name) { continue; } - let inner_content = cap[2].trim(); if inner_content.is_empty() { continue; } @@ -443,7 +464,7 @@ fn parse_xml_tool_calls(xml_content: &str) -> Option> { } calls.push(ParsedToolCall { - name: tool_name, + name: tool_name.to_string(), arguments: serde_json::Value::Object(args), }); } @@ -1083,7 +1104,7 @@ async fn execute_tools_parallel( }) .collect(); - let results = futures::future::join_all(futures).await; + let results = futures_util::future::join_all(futures).await; results.into_iter().collect() } @@ -3608,6 +3629,7 @@ Let me check the result."#; None, // no identity config None, // no bootstrap_max_chars true, // native_tools + crate::config::SkillsPromptInjectionMode::Full, ); // Must contain zero XML protocol artifacts diff --git a/src/channels/discord.rs b/src/channels/discord.rs index bcb447d718..6188a72cac 100644 --- a/src/channels/discord.rs +++ b/src/channels/discord.rs @@ -916,7 +916,11 @@ mod tests { #[test] fn split_message_many_short_lines() { // Many short lines should be batched into chunks under the limit - let msg: String = (0..500).map(|i| format!("line {i}\n")).collect(); + let msg: String = (0..500).fold(String::new(), |mut s, i| { + use std::fmt::Write as _; + let _ = writeln!(s, "line {i}"); + s + }); let parts = split_message_for_discord(&msg); for part in &parts { assert!( diff --git a/src/channels/lark.rs b/src/channels/lark.rs index 505c3a2063..3422536e7b 100644 --- a/src/channels/lark.rs +++ b/src/channels/lark.rs @@ -365,9 +365,7 @@ impl LarkChannel { let payload: serde_json::Value = match response.json().await { Ok(v) => v, 
Err(err) => { - tracing::warn!( - "Lark: add reaction decode failed for {message_id}: {err}" - ); + tracing::warn!("Lark: add reaction decode failed for {message_id}: {err}"); return; } }; @@ -378,9 +376,7 @@ impl LarkChannel { .get("msg") .and_then(|v| v.as_str()) .unwrap_or("unknown error"); - tracing::warn!( - "Lark: add reaction returned code={code} for {message_id}: {msg}" - ); + tracing::warn!("Lark: add reaction returned code={code} for {message_id}: {msg}"); } return; } diff --git a/src/channels/mod.rs b/src/channels/mod.rs index ab852e0262..c31799292f 100644 --- a/src/channels/mod.rs +++ b/src/channels/mod.rs @@ -1099,9 +1099,7 @@ fn sanitize_tool_json_value( return None; } - let Some(object) = value.as_object() else { - return None; - }; + let object = value.as_object()?; if let Some(tool_calls) = object.get("tool_calls").and_then(|value| value.as_array()) { if !tool_calls.is_empty() @@ -1141,7 +1139,7 @@ fn strip_isolated_tool_json_artifacts(message: &str, known_tool_names: &HashSet< let mut saw_tool_call_payload = false; while cursor < message.len() { - let Some(rel_start) = message[cursor..].find(|ch: char| ch == '{' || ch == '[') else { + let Some(rel_start) = message[cursor..].find(['{', '[']) else { cleaned.push_str(&message[cursor..]); break; }; @@ -2178,7 +2176,7 @@ fn maybe_restart_managed_daemon_service() -> Result { Ok(false) } -pub async fn handle_command(command: crate::ChannelCommands, config: &Config) -> Result<()> { +pub(crate) async fn handle_command(command: crate::ChannelCommands, config: &Config) -> Result<()> { match command { crate::ChannelCommands::Start => { anyhow::bail!("Start must be handled in main.rs (requires async runtime)") @@ -2589,7 +2587,7 @@ pub async fn start_channels(config: Config) -> Result<()> { }; // Build system prompt from workspace identity files + skills let workspace = config.workspace_dir.clone(); - let tools_registry = Arc::new(tools::all_tools_with_runtime( + let mut all_tools = 
tools::all_tools_with_runtime( Arc::new(config.clone()), &security, runtime, @@ -2602,7 +2600,19 @@ pub async fn start_channels(config: Config) -> Result<()> { &config.agents, config.api_key.as_deref(), &config, - )); + ); + + // Merge peripheral tools (UNO Q Bridge, RPi GPIO, etc.) + let peripheral_tools = crate::peripherals::create_peripheral_tools(&config.peripherals).await?; + if !peripheral_tools.is_empty() { + tracing::info!( + count = peripheral_tools.len(), + "Peripheral tools added to channel server" + ); + all_tools.extend(peripheral_tools); + } + + let tools_registry = Arc::new(all_tools); let skills = crate::skills::load_skills_with_config(&workspace, &config); diff --git a/src/channels/nextcloud_talk.rs b/src/channels/nextcloud_talk.rs index 574a5b6f1b..07070ad8d7 100644 --- a/src/channels/nextcloud_talk.rs +++ b/src/channels/nextcloud_talk.rs @@ -429,7 +429,7 @@ mod tests { "message": { "actorType": "users", "actorId": "user_a", - "timestamp": 1735701200123u64, + "timestamp": 1_735_701_200_123_u64, "message": "hello" } }); diff --git a/src/channels/telegram.rs b/src/channels/telegram.rs index 58e56787e9..6dd228f590 100644 --- a/src/channels/telegram.rs +++ b/src/channels/telegram.rs @@ -756,7 +756,10 @@ Allowlist Telegram username (without '@') or numeric user ID.", } } - fn parse_update_message(&self, update: &serde_json::Value) -> Option<(ChannelMessage, Option)> { + fn parse_update_message( + &self, + update: &serde_json::Value, + ) -> Option<(ChannelMessage, Option)> { let message = update.get("message")?; // Support both text messages and photo messages (with optional caption) @@ -764,7 +767,8 @@ Allowlist Telegram username (without '@') or numeric user ID.", let caption_opt = message.get("caption").and_then(serde_json::Value::as_str); // Extract file_id from photo (highest resolution = last element) - let photo_file_id = message.get("photo") + let photo_file_id = message + .get("photo") .and_then(serde_json::Value::as_array) .and_then(|photos| 
photos.last()) .and_then(|p| p.get("file_id")) @@ -774,8 +778,7 @@ Allowlist Telegram username (without '@') or numeric user ID.", // Require at least text, caption, or photo let text = match (text_opt, caption_opt, &photo_file_id) { (Some(t), _, _) => t.to_string(), - (None, Some(c), Some(_)) => c.to_string(), - (None, Some(c), None) => c.to_string(), + (None, Some(c), _) => c.to_string(), (None, None, Some(_)) => String::new(), // will be filled with image marker later (None, None, None) => return None, }; @@ -852,18 +855,21 @@ Allowlist Telegram username (without '@') or numeric user ID.", text.to_string() }; - Some((ChannelMessage { - id: format!("telegram_{chat_id}_{message_id}"), - sender: sender_identity, - reply_target, - content, - channel: "telegram".to_string(), - timestamp: std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap_or_default() - .as_secs(), - thread_ts: None, - }, photo_file_id)) + Some(( + ChannelMessage { + id: format!("telegram_{chat_id}_{message_id}"), + sender: sender_identity, + reply_target, + content, + channel: "telegram".to_string(), + timestamp: std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_default() + .as_secs(), + thread_ts: None, + }, + photo_file_id, + )) } /// Download a Telegram photo by file_id, resize to fit within 1024px, and return as base64 data URI. 
@@ -905,7 +911,8 @@ Allowlist Telegram username (without '@') or numeric user ID.", image::ImageFormat::Jpeg, )?; Ok(buf) - }).await??; + }) + .await??; let b64 = base64::engine::general_purpose::STANDARD.encode(&resized_bytes); Ok(format!("data:image/jpeg;base64,{}", b64)) @@ -2241,7 +2248,8 @@ mod tests { }); let msg = ch - .parse_update_message(&update).map(|(m,_)|m) + .parse_update_message(&update) + .map(|(m, _)| m) .expect("message should parse"); assert_eq!(msg.sender, "alice"); @@ -2268,7 +2276,8 @@ mod tests { }); let msg = ch - .parse_update_message(&update).map(|(m,_)|m) + .parse_update_message(&update) + .map(|(m, _)| m) .expect("numeric allowlist should pass"); assert_eq!(msg.sender, "555"); @@ -2295,7 +2304,8 @@ mod tests { }); let msg = ch - .parse_update_message(&update).map(|(m,_)|m) + .parse_update_message(&update) + .map(|(m, _)| m) .expect("message with thread_id should parse"); assert_eq!(msg.sender, "alice"); @@ -2908,7 +2918,8 @@ mod tests { }); let parsed = ch - .parse_update_message(&update).map(|(m,_)|m) + .parse_update_message(&update) + .map(|(m, _)| m) .expect("mention should parse"); assert_eq!(parsed.content, "Hi status please"); @@ -3015,7 +3026,11 @@ mod tests { #[test] fn telegram_split_many_short_lines() { - let msg: String = (0..1000).map(|i| format!("line {i}\n")).collect(); + let msg: String = (0..1000).fold(String::new(), |mut s, i| { + use std::fmt::Write as _; + let _ = writeln!(s, "line {i}"); + s + }); let parts = split_message_for_telegram(&msg); for part in &parts { assert!( diff --git a/src/config/schema.rs b/src/config/schema.rs index cb7ad82f1d..ac2798b2ed 100644 --- a/src/config/schema.rs +++ b/src/config/schema.rs @@ -363,7 +363,7 @@ fn parse_skills_prompt_injection_mode(raw: &str) -> Option Self { - Self { - open_skills_enabled: false, - open_skills_dir: None, - prompt_injection_mode: SkillsPromptInjectionMode::default(), - } - } -} - /// Multimodal (image) handling configuration (`[multimodal]` section). 
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] pub struct MultimodalConfig { @@ -3732,7 +3722,7 @@ async fn sync_directory(path: &Path) -> Result<()> { dir.sync_all() .await .with_context(|| format!("Failed to fsync directory metadata: {}", path.display()))?; - return Ok(()); + Ok(()) } #[cfg(not(unix))] diff --git a/src/cost/mod.rs b/src/cost/mod.rs index 14c634df94..3bf46a252e 100644 --- a/src/cost/mod.rs +++ b/src/cost/mod.rs @@ -1,5 +1,7 @@ pub mod tracker; pub mod types; +#[allow(unused_imports)] pub use tracker::CostTracker; +#[allow(unused_imports)] pub use types::{BudgetCheck, CostRecord, CostSummary, ModelStats, TokenUsage, UsagePeriod}; diff --git a/src/cron/store.rs b/src/cron/store.rs index 7f86286bf7..27646acc18 100644 --- a/src/cron/store.rs +++ b/src/cron/store.rs @@ -5,8 +5,8 @@ use crate::cron::{ }; use anyhow::{Context, Result}; use chrono::{DateTime, Utc}; -use rusqlite::{params, Connection}; use rusqlite::types::{FromSqlResult, ValueRef}; +use rusqlite::{params, Connection}; use uuid::Uuid; const MAX_CRON_OUTPUT_BYTES: usize = 16 * 1024; @@ -15,9 +15,7 @@ const TRUNCATED_OUTPUT_MARKER: &str = "\n...[truncated]"; impl rusqlite::types::FromSql for JobType { fn column_result(value: ValueRef<'_>) -> FromSqlResult { let text = value.as_str()?; - JobType::try_from(text).map_err(|e| { - rusqlite::types::FromSqlError::Other(e.into()) - }) + JobType::try_from(text).map_err(|e| rusqlite::types::FromSqlError::Other(e.into())) } } @@ -427,13 +425,13 @@ fn map_cron_job_row(row: &rusqlite::Row<'_>) -> rusqlite::Result { let next_run_raw: String = row.get(13)?; let last_run_raw: Option = row.get(14)?; let created_at_raw: String = row.get(12)?; - + Ok(CronJob { id: row.get(0)?, expression, schedule, command: row.get(2)?, - job_type:row.get(4)?, + job_type: row.get(4)?, prompt: row.get(5)?, name: row.get(6)?, session_target: SessionTarget::parse(&row.get::<_, String>(7)?), diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index 
96d0eeee60..7fa4401342 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -66,7 +66,7 @@ pub async fn run(config: Config, host: String, port: u16) -> Result<()> { max_backoff, move || { let cfg = heartbeat_cfg.clone(); - async move { run_heartbeat_worker(cfg).await } + Box::pin(run_heartbeat_worker(cfg)) }, )); } diff --git a/src/hardware/mod.rs b/src/hardware/mod.rs index 67407a7348..a1fa82314e 100644 --- a/src/hardware/mod.rs +++ b/src/hardware/mod.rs @@ -109,7 +109,7 @@ pub fn handle_command(cmd: crate::HardwareCommands, _config: &Config) -> Result< let _ = &cmd; println!("Hardware discovery requires the 'hardware' feature."); println!("Build with: cargo build --features hardware"); - return Ok(()); + Ok(()) } #[cfg(all( diff --git a/src/lib.rs b/src/lib.rs index b341a6177e..bf673e4a24 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -424,6 +424,12 @@ Examples: #[arg(long)] host: Option, }, + /// Deploy ZeroClaw binary + config to Arduino Uno Q (cross-compiled aarch64) + DeployUnoQ { + /// Uno Q IP or user@host (e.g. 192.168.0.48 or arduino@192.168.0.48) + #[arg(long)] + host: String, + }, /// Flash ZeroClaw firmware to Nucleo-F401RE (builds + probe-rs run) FlashNucleo, } diff --git a/src/main.rs b/src/main.rs index 3b12e19768..7da1cc8eb6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -805,8 +805,7 @@ async fn main() -> Result<()> { config.apply_env_overrides(); match cli.command { - Commands::Onboard { .. } => unreachable!(), - Commands::Completions { .. } => unreachable!(), + Commands::Onboard { .. } | Commands::Completions { .. 
} => unreachable!(), Commands::Agent { message, diff --git a/src/memory/cli.rs b/src/memory/cli.rs index 1683755498..0d523f9589 100644 --- a/src/memory/cli.rs +++ b/src/memory/cli.rs @@ -4,7 +4,9 @@ use super::{ MemoryBackendKind, }; use crate::config::Config; -use anyhow::{bail, Context, Result}; +#[cfg(feature = "memory-postgres")] +use anyhow::Context; +use anyhow::{bail, Result}; use console::style; /// Handle `zeroclaw memory ` CLI commands. @@ -40,18 +42,27 @@ fn create_cli_memory(config: &Config) -> Result> { bail!("Memory backend is 'none' (disabled). No entries to manage."); } MemoryBackendKind::Postgres => { - let sp = &config.storage.provider.config; - let db_url = sp - .db_url - .as_deref() - .map(str::trim) - .filter(|v| !v.is_empty()) - .context( - "memory backend 'postgres' requires db_url in [storage.provider.config]", + #[cfg(not(feature = "memory-postgres"))] + bail!("memory backend 'postgres' requires the 'memory-postgres' feature to be enabled at compile time"); + #[cfg(feature = "memory-postgres")] + { + let sp = &config.storage.provider.config; + let db_url = sp + .db_url + .as_deref() + .map(str::trim) + .filter(|v| !v.is_empty()) + .context( + "memory backend 'postgres' requires db_url in [storage.provider.config]", + )?; + let mem = super::PostgresMemory::new( + db_url, + &sp.schema, + &sp.table, + sp.connect_timeout_secs, )?; - let mem = - super::PostgresMemory::new(db_url, &sp.schema, &sp.table, sp.connect_timeout_secs)?; - Ok(Box::new(mem)) + Ok(Box::new(mem)) + } } _ => create_memory_for_migration(&backend, &config.workspace_dir), } diff --git a/src/multimodal.rs b/src/multimodal.rs index bd15900cc7..e14ef9c703 100644 --- a/src/multimodal.rs +++ b/src/multimodal.rs @@ -279,6 +279,7 @@ async fn normalize_remote_image( } if let Some(content_length) = response.content_length() { + #[allow(clippy::cast_possible_truncation)] let content_length = content_length as usize; validate_size(source, content_length, max_bytes)?; } @@ -328,6 +329,7 @@ 
async fn normalize_local_image(source: &str, max_bytes: usize) -> anyhow::Result reason: error.to_string(), })?; + #[allow(clippy::cast_possible_truncation)] validate_size(source, metadata.len() as usize, max_bytes)?; let bytes = tokio::fs::read(path) @@ -364,10 +366,7 @@ fn validate_size(source: &str, size_bytes: usize, max_bytes: usize) -> anyhow::R } fn validate_mime(source: &str, mime: &str) -> anyhow::Result<()> { - if ALLOWED_IMAGE_MIME_TYPES - .iter() - .any(|allowed| *allowed == mime) - { + if ALLOWED_IMAGE_MIME_TYPES.contains(&mime) { return Ok(()); } diff --git a/src/onboard/mod.rs b/src/onboard/mod.rs index 5117897e1a..368abd6640 100644 --- a/src/onboard/mod.rs +++ b/src/onboard/mod.rs @@ -1,5 +1,6 @@ pub mod wizard; +#[allow(unused_imports)] pub use wizard::{run_channels_repair_wizard, run_models_refresh, run_quick_setup, run_wizard}; #[cfg(test)] diff --git a/src/onboard/wizard.rs b/src/onboard/wizard.rs index 8866084988..5151c28702 100644 --- a/src/onboard/wizard.rs +++ b/src/onboard/wizard.rs @@ -566,7 +566,6 @@ const MINIMAX_ONBOARD_MODELS: [(&str, &str); 5] = [ fn default_model_for_provider(provider: &str) -> String { match canonical_provider_name(provider) { "anthropic" => "claude-sonnet-4-5-20250929".into(), - "openrouter" => "anthropic/claude-sonnet-4.6".into(), "openai" => "gpt-5.2".into(), "openai-codex" => "gpt-5-codex".into(), "venice" => "zai-org-glm-5".into(), @@ -589,7 +588,6 @@ fn default_model_for_provider(provider: &str) -> String { "kimi-code" => "kimi-for-coding".into(), "bedrock" => "anthropic.claude-sonnet-4-5-20250929-v1:0".into(), "nvidia" => "meta/llama-3.3-70b-instruct".into(), - "astrai" => "anthropic/claude-sonnet-4.6".into(), _ => "anthropic/claude-sonnet-4.6".into(), } } diff --git a/src/peripherals/mod.rs b/src/peripherals/mod.rs index 8c3a59a8dc..b697c61b83 100644 --- a/src/peripherals/mod.rs +++ b/src/peripherals/mod.rs @@ -24,6 +24,7 @@ pub mod uno_q_setup; #[cfg(all(feature = "peripheral-rpi", target_os = "linux"))] 
pub mod rpi; +#[allow(unused_imports)] pub use traits::Peripheral; use crate::config::{Config, PeripheralBoardConfig, PeripheralsConfig}; @@ -122,6 +123,15 @@ pub async fn handle_command(cmd: crate::PeripheralCommands, config: &Config) -> println!("Build with: cargo build --features hardware"); } #[cfg(feature = "hardware")] + crate::PeripheralCommands::DeployUnoQ { host } => { + uno_q_setup::deploy_uno_q(&host)?; + } + #[cfg(not(feature = "hardware"))] + crate::PeripheralCommands::DeployUnoQ { .. } => { + println!("Uno Q deploy requires the 'hardware' feature."); + println!("Build with: cargo build --features hardware"); + } + #[cfg(feature = "hardware")] crate::PeripheralCommands::FlashNucleo => { nucleo_flash::flash_nucleo_firmware()?; } @@ -149,9 +159,22 @@ pub async fn create_peripheral_tools(config: &PeripheralsConfig) -> Result Result Result>> { Ok(Vec::new()) } diff --git a/src/peripherals/uno_q_bridge.rs b/src/peripherals/uno_q_bridge.rs index a621831593..2c7db5eda3 100644 --- a/src/peripherals/uno_q_bridge.rs +++ b/src/peripherals/uno_q_bridge.rs @@ -1,7 +1,11 @@ -//! Arduino Uno Q Bridge — GPIO via socket to Bridge app. +//! Arduino UNO R4 WiFi (Uno Q) Bridge — full peripheral tool surface. //! -//! When ZeroClaw runs on Uno Q, the Bridge app (Python + MCU) exposes -//! digitalWrite/digitalRead over a local socket. These tools connect to it. +//! Provides 13 tools total: +//! - 10 MCU tools via TCP socket to the Bridge app (GPIO, ADC, PWM, I2C, SPI, CAN, LED matrix, RGB LED) +//! - 3 Linux tools for direct MPU access (camera capture, Linux RGB LED, system info) +//! +//! The Bridge app runs on the Uno Q board and exposes MCU peripherals over a local +//! TCP socket. Linux tools access sysfs and system commands directly. 
use crate::tools::traits::{Tool, ToolResult}; use async_trait::async_trait; @@ -10,19 +14,57 @@ use std::time::Duration; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::TcpStream; +// --------------------------------------------------------------------------- +// Constants +// --------------------------------------------------------------------------- + const BRIDGE_HOST: &str = "127.0.0.1"; const BRIDGE_PORT: u16 = 9999; +const MAX_DIGITAL_PIN: u64 = 21; +const PWM_PINS: &[u64] = &[3, 5, 6, 9, 10, 11]; +const MAX_ADC_CHANNEL: u64 = 5; +const MIN_RGB_LED_ID: u64 = 3; +const MAX_RGB_LED_ID: u64 = 4; + +// --------------------------------------------------------------------------- +// Validation helpers +// --------------------------------------------------------------------------- + +fn is_valid_digital_pin(pin: u64) -> bool { + pin <= MAX_DIGITAL_PIN +} + +fn is_valid_pwm_pin(pin: u64) -> bool { + PWM_PINS.contains(&pin) +} +fn is_valid_adc_channel(channel: u64) -> bool { + channel <= MAX_ADC_CHANNEL +} + +fn is_valid_rgb_led_id(id: u64) -> bool { + (MIN_RGB_LED_ID..=MAX_RGB_LED_ID).contains(&id) +} + +// --------------------------------------------------------------------------- +// Bridge communication helpers +// --------------------------------------------------------------------------- + +/// Send a command to the Bridge app over TCP and return the response string. 
async fn bridge_request(cmd: &str, args: &[String]) -> anyhow::Result { let addr = format!("{}:{}", BRIDGE_HOST, BRIDGE_PORT); let mut stream = tokio::time::timeout(Duration::from_secs(5), TcpStream::connect(&addr)) .await .map_err(|_| anyhow::anyhow!("Bridge connection timed out"))??; - let msg = format!("{} {}\n", cmd, args.join(" ")); + let msg = if args.is_empty() { + format!("{}\n", cmd) + } else { + format!("{} {}\n", cmd, args.join(" ")) + }; stream.write_all(msg.as_bytes()).await?; - let mut buf = vec![0u8; 64]; + let mut buf = vec![0u8; 4096]; let n = tokio::time::timeout(Duration::from_secs(3), stream.read(&mut buf)) .await .map_err(|_| anyhow::anyhow!("Bridge response timed out"))??; @@ -30,17 +72,55 @@ async fn bridge_request(cmd: &str, args: &[String]) -> anyhow::Result { Ok(resp) } -/// Tool: read GPIO pin via Uno Q Bridge. +/// Convert a bridge response string into a `ToolResult`. +/// Responses prefixed with "error:" are treated as failures. +fn bridge_response_to_result(resp: &str) -> ToolResult { + if resp.starts_with("error:") { + ToolResult { + success: false, + output: resp.to_string(), + error: Some(resp.to_string()), + } + } else { + ToolResult { + success: true, + output: resp.to_string(), + error: None, + } + } +} + +/// Combined helper: send a bridge request and convert the response to a `ToolResult`. +async fn bridge_tool_request(cmd: &str, args: &[String]) -> ToolResult { + match bridge_request(cmd, args).await { + Ok(resp) => bridge_response_to_result(&resp), + Err(e) => ToolResult { + success: false, + output: format!("Bridge error: {}", e), + error: Some(e.to_string()), + }, + } +} + +// =========================================================================== +// MCU Tools (10) — via Bridge socket +// =========================================================================== + +// --------------------------------------------------------------------------- +// 1. 
GPIO Read +// --------------------------------------------------------------------------- + +/// Read a digital GPIO pin value (0 or 1) on the Uno Q MCU. pub struct UnoQGpioReadTool; #[async_trait] impl Tool for UnoQGpioReadTool { fn name(&self) -> &str { - "gpio_read" + "uno_q_gpio_read" } fn description(&self) -> &str { - "Read GPIO pin value (0 or 1) on Arduino Uno Q. Requires zeroclaw-uno-q-bridge app running." + "Read digital GPIO pin value (0 or 1) on Arduino UNO R4 WiFi MCU via Bridge." } fn parameters_schema(&self) -> Value { @@ -49,7 +129,9 @@ impl Tool for UnoQGpioReadTool { "properties": { "pin": { "type": "integer", - "description": "GPIO pin number (e.g. 13 for LED)" + "description": "GPIO pin number (0-21)", + "minimum": 0, + "maximum": 21 } }, "required": ["pin"] @@ -61,42 +143,34 @@ impl Tool for UnoQGpioReadTool { .get("pin") .and_then(|v| v.as_u64()) .ok_or_else(|| anyhow::anyhow!("Missing 'pin' parameter"))?; - match bridge_request("gpio_read", &[pin.to_string()]).await { - Ok(resp) => { - if resp.starts_with("error:") { - Ok(ToolResult { - success: false, - output: resp.clone(), - error: Some(resp), - }) - } else { - Ok(ToolResult { - success: true, - output: resp, - error: None, - }) - } - } - Err(e) => Ok(ToolResult { + + if !is_valid_digital_pin(pin) { + return Ok(ToolResult { success: false, - output: format!("Bridge error: {}", e), - error: Some(e.to_string()), - }), + output: format!("Invalid pin: {}. Must be 0-{}.", pin, MAX_DIGITAL_PIN), + error: Some(format!("Invalid pin: {}", pin)), + }); } + + Ok(bridge_tool_request("gpio_read", &[pin.to_string()]).await) } } -/// Tool: write GPIO pin via Uno Q Bridge. +// --------------------------------------------------------------------------- +// 2. GPIO Write +// --------------------------------------------------------------------------- + +/// Write a digital GPIO pin value (0 or 1) on the Uno Q MCU. 
pub struct UnoQGpioWriteTool; #[async_trait] impl Tool for UnoQGpioWriteTool { fn name(&self) -> &str { - "gpio_write" + "uno_q_gpio_write" } fn description(&self) -> &str { - "Set GPIO pin high (1) or low (0) on Arduino Uno Q. Requires zeroclaw-uno-q-bridge app running." + "Set digital GPIO pin high (1) or low (0) on Arduino UNO R4 WiFi MCU via Bridge." } fn parameters_schema(&self) -> Value { @@ -105,11 +179,15 @@ impl Tool for UnoQGpioWriteTool { "properties": { "pin": { "type": "integer", - "description": "GPIO pin number" + "description": "GPIO pin number (0-21)", + "minimum": 0, + "maximum": 21 }, "value": { "type": "integer", - "description": "0 for low, 1 for high" + "description": "0 for low, 1 for high", + "minimum": 0, + "maximum": 1 } }, "required": ["pin", "value"] @@ -125,27 +203,951 @@ impl Tool for UnoQGpioWriteTool { .get("value") .and_then(|v| v.as_u64()) .ok_or_else(|| anyhow::anyhow!("Missing 'value' parameter"))?; - match bridge_request("gpio_write", &[pin.to_string(), value.to_string()]).await { - Ok(resp) => { - if resp.starts_with("error:") { - Ok(ToolResult { - success: false, - output: resp.clone(), - error: Some(resp), - }) - } else { - Ok(ToolResult { - success: true, - output: "done".into(), - error: None, - }) + + if !is_valid_digital_pin(pin) { + return Ok(ToolResult { + success: false, + output: format!("Invalid pin: {}. Must be 0-{}.", pin, MAX_DIGITAL_PIN), + error: Some(format!("Invalid pin: {}", pin)), + }); + } + + Ok(bridge_tool_request("gpio_write", &[pin.to_string(), value.to_string()]).await) + } +} + +// --------------------------------------------------------------------------- +// 3. ADC Read +// --------------------------------------------------------------------------- + +/// Read an analog value from an ADC channel on the Uno Q MCU. 
+pub struct UnoQAdcReadTool; + +#[async_trait] +impl Tool for UnoQAdcReadTool { + fn name(&self) -> &str { + "uno_q_adc_read" + } + + fn description(&self) -> &str { + "Read analog value from ADC channel (0-5) on Arduino UNO R4 WiFi MCU. WARNING: 3.3V max input on ADC pins." + } + + fn parameters_schema(&self) -> Value { + json!({ + "type": "object", + "properties": { + "channel": { + "type": "integer", + "description": "ADC channel number (0-5). WARNING: 3.3V max input.", + "minimum": 0, + "maximum": 5 + } + }, + "required": ["channel"] + }) + } + + async fn execute(&self, args: Value) -> anyhow::Result { + let channel = args + .get("channel") + .and_then(|v| v.as_u64()) + .ok_or_else(|| anyhow::anyhow!("Missing 'channel' parameter"))?; + + if !is_valid_adc_channel(channel) { + return Ok(ToolResult { + success: false, + output: format!( + "Invalid ADC channel: {}. Must be 0-{}.", + channel, MAX_ADC_CHANNEL + ), + error: Some(format!("Invalid ADC channel: {}", channel)), + }); + } + + Ok(bridge_tool_request("adc_read", &[channel.to_string()]).await) + } +} + +// --------------------------------------------------------------------------- +// 4. PWM Write +// --------------------------------------------------------------------------- + +/// Write a PWM duty cycle to a PWM-capable pin on the Uno Q MCU. +pub struct UnoQPwmWriteTool; + +#[async_trait] +impl Tool for UnoQPwmWriteTool { + fn name(&self) -> &str { + "uno_q_pwm_write" + } + + fn description(&self) -> &str { + "Write PWM duty cycle (0-255) to a PWM-capable pin on Arduino UNO R4 WiFi MCU. PWM pins: 3, 5, 6, 9, 10, 11." 
+ } + + fn parameters_schema(&self) -> Value { + json!({ + "type": "object", + "properties": { + "pin": { + "type": "integer", + "description": "PWM-capable pin (3, 5, 6, 9, 10, 11)", + "enum": [3, 5, 6, 9, 10, 11] + }, + "duty": { + "type": "integer", + "description": "PWM duty cycle (0-255)", + "minimum": 0, + "maximum": 255 + } + }, + "required": ["pin", "duty"] + }) + } + + async fn execute(&self, args: Value) -> anyhow::Result { + let pin = args + .get("pin") + .and_then(|v| v.as_u64()) + .ok_or_else(|| anyhow::anyhow!("Missing 'pin' parameter"))?; + let duty = args + .get("duty") + .and_then(|v| v.as_u64()) + .ok_or_else(|| anyhow::anyhow!("Missing 'duty' parameter"))?; + + if !is_valid_pwm_pin(pin) { + return Ok(ToolResult { + success: false, + output: format!( + "Pin {} is not PWM-capable. Valid PWM pins: {:?}.", + pin, PWM_PINS + ), + error: Some(format!("Pin {} is not PWM-capable", pin)), + }); + } + + Ok(bridge_tool_request("pwm_write", &[pin.to_string(), duty.to_string()]).await) + } +} + +// --------------------------------------------------------------------------- +// 5. I2C Scan +// --------------------------------------------------------------------------- + +/// Scan the I2C bus for connected devices on the Uno Q MCU. +pub struct UnoQI2cScanTool; + +#[async_trait] +impl Tool for UnoQI2cScanTool { + fn name(&self) -> &str { + "uno_q_i2c_scan" + } + + fn description(&self) -> &str { + "Scan I2C bus for connected devices on Arduino UNO R4 WiFi MCU. Returns list of detected addresses." + } + + fn parameters_schema(&self) -> Value { + json!({ + "type": "object", + "properties": {}, + "required": [] + }) + } + + async fn execute(&self, _args: Value) -> anyhow::Result { + Ok(bridge_tool_request("i2c_scan", &[]).await) + } +} + +// --------------------------------------------------------------------------- +// 6. 
I2C Transfer +// --------------------------------------------------------------------------- + +/// Perform an I2C read/write transfer on the Uno Q MCU. +pub struct UnoQI2cTransferTool; + +#[async_trait] +impl Tool for UnoQI2cTransferTool { + fn name(&self) -> &str { + "uno_q_i2c_transfer" + } + + fn description(&self) -> &str { + "Perform I2C transfer on Arduino UNO R4 WiFi MCU. Write data and/or read bytes from a device address." + } + + fn parameters_schema(&self) -> Value { + json!({ + "type": "object", + "properties": { + "address": { + "type": "integer", + "description": "I2C device address (1-126)", + "minimum": 1, + "maximum": 126 + }, + "data": { + "type": "string", + "description": "Hex string of bytes to write (e.g. 'A0FF')" + }, + "read_length": { + "type": "integer", + "description": "Number of bytes to read back", + "minimum": 0 + } + }, + "required": ["address", "data", "read_length"] + }) + } + + async fn execute(&self, args: Value) -> anyhow::Result { + let address = args + .get("address") + .and_then(|v| v.as_u64()) + .ok_or_else(|| anyhow::anyhow!("Missing 'address' parameter"))?; + let data = args + .get("data") + .and_then(|v| v.as_str()) + .ok_or_else(|| anyhow::anyhow!("Missing 'data' parameter"))?; + let read_length = args + .get("read_length") + .and_then(|v| v.as_u64()) + .ok_or_else(|| anyhow::anyhow!("Missing 'read_length' parameter"))?; + + if !(1..=126).contains(&address) { + return Ok(ToolResult { + success: false, + output: format!("Invalid I2C address: {}. Must be 1-126.", address), + error: Some(format!("Invalid I2C address: {}", address)), + }); + } + + Ok(bridge_tool_request( + "i2c_transfer", + &[ + address.to_string(), + data.to_string(), + read_length.to_string(), + ], + ) + .await) + } +} + +// --------------------------------------------------------------------------- +// 7. SPI Transfer +// --------------------------------------------------------------------------- + +/// Perform an SPI transfer on the Uno Q MCU. 
+pub struct UnoQSpiTransferTool; + +#[async_trait] +impl Tool for UnoQSpiTransferTool { + fn name(&self) -> &str { + "uno_q_spi_transfer" + } + + fn description(&self) -> &str { + "Perform SPI transfer on Arduino UNO R4 WiFi MCU. Send and receive data bytes." + } + + fn parameters_schema(&self) -> Value { + json!({ + "type": "object", + "properties": { + "data": { + "type": "string", + "description": "Hex string of bytes to transfer (e.g. 'DEADBEEF')" + } + }, + "required": ["data"] + }) + } + + async fn execute(&self, args: Value) -> anyhow::Result { + let data = args + .get("data") + .and_then(|v| v.as_str()) + .ok_or_else(|| anyhow::anyhow!("Missing 'data' parameter"))?; + + Ok(bridge_tool_request("spi_transfer", &[data.to_string()]).await) + } +} + +// --------------------------------------------------------------------------- +// 8. CAN Send +// --------------------------------------------------------------------------- + +/// Send a CAN bus frame on the Uno Q MCU. +pub struct UnoQCanSendTool; + +#[async_trait] +impl Tool for UnoQCanSendTool { + fn name(&self) -> &str { + "uno_q_can_send" + } + + fn description(&self) -> &str { + "Send a CAN bus frame on Arduino UNO R4 WiFi MCU. Standard 11-bit CAN ID (0-2047)." + } + + fn parameters_schema(&self) -> Value { + json!({ + "type": "object", + "properties": { + "id": { + "type": "integer", + "description": "CAN message ID (0-2047, standard 11-bit)", + "minimum": 0, + "maximum": 2047 + }, + "data": { + "type": "string", + "description": "Hex string of data bytes (up to 8 bytes, e.g. 
'DEADBEEF')" + } + }, + "required": ["id", "data"] + }) + } + + async fn execute(&self, args: Value) -> anyhow::Result { + let id = args + .get("id") + .and_then(|v| v.as_u64()) + .ok_or_else(|| anyhow::anyhow!("Missing 'id' parameter"))?; + let data = args + .get("data") + .and_then(|v| v.as_str()) + .ok_or_else(|| anyhow::anyhow!("Missing 'data' parameter"))?; + + if id > 2047 { + return Ok(ToolResult { + success: false, + output: format!("Invalid CAN ID: {}. Must be 0-2047.", id), + error: Some(format!("Invalid CAN ID: {}", id)), + }); + } + + Ok(bridge_tool_request("can_send", &[id.to_string(), data.to_string()]).await) + } +} + +// --------------------------------------------------------------------------- +// 9. LED Matrix +// --------------------------------------------------------------------------- + +/// Control the 12x8 LED matrix on the Uno Q board. +pub struct UnoQLedMatrixTool; + +#[async_trait] +impl Tool for UnoQLedMatrixTool { + fn name(&self) -> &str { + "uno_q_led_matrix" + } + + fn description(&self) -> &str { + "Set the 12x8 LED matrix bitmap on Arduino UNO R4 WiFi. Send 13 bytes (26 hex chars) as bitmap data." + } + + fn parameters_schema(&self) -> Value { + json!({ + "type": "object", + "properties": { + "bitmap": { + "type": "string", + "description": "Hex string bitmap for 12x8 LED matrix (26 hex chars = 13 bytes)" + } + }, + "required": ["bitmap"] + }) + } + + async fn execute(&self, args: Value) -> anyhow::Result { + let bitmap = args + .get("bitmap") + .and_then(|v| v.as_str()) + .ok_or_else(|| anyhow::anyhow!("Missing 'bitmap' parameter"))?; + + if bitmap.len() != 26 { + return Ok(ToolResult { + success: false, + output: format!( + "Invalid bitmap length: {} chars. 
Expected 26 hex chars (13 bytes).", + bitmap.len() + ), + error: Some(format!("Invalid bitmap length: {}", bitmap.len())), + }); + } + + Ok(bridge_tool_request("led_matrix", &[bitmap.to_string()]).await) + } +} + +// --------------------------------------------------------------------------- +// 10. RGB LED (MCU-side, IDs 3-4) +// --------------------------------------------------------------------------- + +/// Control MCU-side RGB LEDs (IDs 3-4) on the Uno Q board. +pub struct UnoQRgbLedTool; + +#[async_trait] +impl Tool for UnoQRgbLedTool { + fn name(&self) -> &str { + "uno_q_rgb_led" + } + + fn description(&self) -> &str { + "Set MCU-side RGB LED color on Arduino UNO R4 WiFi. LED IDs: 3 or 4. RGB values 0-255." + } + + fn parameters_schema(&self) -> Value { + json!({ + "type": "object", + "properties": { + "id": { + "type": "integer", + "description": "RGB LED ID (3 or 4)", + "enum": [3, 4] + }, + "r": { + "type": "integer", + "description": "Red value (0-255)", + "minimum": 0, + "maximum": 255 + }, + "g": { + "type": "integer", + "description": "Green value (0-255)", + "minimum": 0, + "maximum": 255 + }, + "b": { + "type": "integer", + "description": "Blue value (0-255)", + "minimum": 0, + "maximum": 255 + } + }, + "required": ["id", "r", "g", "b"] + }) + } + + async fn execute(&self, args: Value) -> anyhow::Result { + let id = args + .get("id") + .and_then(|v| v.as_u64()) + .ok_or_else(|| anyhow::anyhow!("Missing 'id' parameter"))?; + let r = args + .get("r") + .and_then(|v| v.as_u64()) + .ok_or_else(|| anyhow::anyhow!("Missing 'r' parameter"))?; + let g = args + .get("g") + .and_then(|v| v.as_u64()) + .ok_or_else(|| anyhow::anyhow!("Missing 'g' parameter"))?; + let b = args + .get("b") + .and_then(|v| v.as_u64()) + .ok_or_else(|| anyhow::anyhow!("Missing 'b' parameter"))?; + + if !is_valid_rgb_led_id(id) { + return Ok(ToolResult { + success: false, + output: format!( + "Invalid LED ID: {}. 
Must be {} or {}.", + id, MIN_RGB_LED_ID, MAX_RGB_LED_ID + ), + error: Some(format!("Invalid LED ID: {}", id)), + }); + } + + Ok(bridge_tool_request( + "rgb_led", + &[id.to_string(), r.to_string(), g.to_string(), b.to_string()], + ) + .await) + } +} + +// =========================================================================== +// Linux Tools (3) — direct MPU access +// =========================================================================== + +// --------------------------------------------------------------------------- +// 11. Camera Capture +// --------------------------------------------------------------------------- + +/// Capture an image from the Uno Q on-board camera via GStreamer. +pub struct UnoQCameraCaptureTool; + +#[async_trait] +impl Tool for UnoQCameraCaptureTool { + fn name(&self) -> &str { + "uno_q_camera_capture" + } + + fn description(&self) -> &str { + "Capture a photo from the USB camera on Arduino Uno Q. Returns the image path. Include [IMAGE:] in your response to send it to the user." 
+ } + + fn parameters_schema(&self) -> Value { + json!({ + "type": "object", + "properties": { + "width": { + "type": "integer", + "description": "Image width in pixels (default: 1280)" + }, + "height": { + "type": "integer", + "description": "Image height in pixels (default: 720)" + }, + "device": { + "type": "string", + "description": "V4L2 device path (default: /dev/video0)" } } + }) + } + + async fn execute(&self, args: Value) -> anyhow::Result { + let width = args.get("width").and_then(|v| v.as_u64()).unwrap_or(1280); + let height = args.get("height").and_then(|v| v.as_u64()).unwrap_or(720); + let device = args + .get("device") + .and_then(|v| v.as_str()) + .unwrap_or("/dev/video0"); + let output_path = "/tmp/zeroclaw_capture.jpg"; + + let fmt = format!("width={},height={},pixelformat=MJPG", width, height); + let output = tokio::process::Command::new("v4l2-ctl") + .args([ + "-d", + device, + "--set-fmt-video", + &fmt, + "--stream-mmap", + "--stream-count=1", + &format!("--stream-to={}", output_path), + ]) + .output() + .await; + + match output { + Ok(out) if out.status.success() => Ok(ToolResult { + success: true, + output: format!( + "Photo captured ({}x{}) to {}. To send it to the user, include [IMAGE:{}] in your response.", + width, height, output_path, output_path + ), + error: None, + }), + Ok(out) => { + let stderr = String::from_utf8_lossy(&out.stderr).to_string(); + Ok(ToolResult { + success: false, + output: format!("Camera capture failed: {}", stderr), + error: Some(stderr), + }) + } Err(e) => Ok(ToolResult { success: false, - output: format!("Bridge error: {}", e), + output: format!("Failed to run v4l2-ctl: {}. Is v4l-utils installed?", e), error: Some(e.to_string()), }), } } } + +// --------------------------------------------------------------------------- +// 12. Linux RGB LED (sysfs, IDs 1-2) +// --------------------------------------------------------------------------- + +/// Control Linux-side RGB LEDs (IDs 1-2) via sysfs on the Uno Q board. 
+pub struct UnoQLinuxRgbLedTool; + +#[async_trait] +impl Tool for UnoQLinuxRgbLedTool { + fn name(&self) -> &str { + "uno_q_linux_rgb_led" + } + + fn description(&self) -> &str { + "Set Linux-side RGB LED color via sysfs on Uno Q. LED 1: user LEDs. LED 2: status LEDs. RGB values 0-255." + } + + fn parameters_schema(&self) -> Value { + json!({ + "type": "object", + "properties": { + "id": { + "type": "integer", + "description": "Linux RGB LED ID (1 or 2)", + "enum": [1, 2] + }, + "r": { + "type": "integer", + "description": "Red value (0-255)", + "minimum": 0, + "maximum": 255 + }, + "g": { + "type": "integer", + "description": "Green value (0-255)", + "minimum": 0, + "maximum": 255 + }, + "b": { + "type": "integer", + "description": "Blue value (0-255)", + "minimum": 0, + "maximum": 255 + } + }, + "required": ["id", "r", "g", "b"] + }) + } + + async fn execute(&self, args: Value) -> anyhow::Result { + let id = args + .get("id") + .and_then(|v| v.as_u64()) + .ok_or_else(|| anyhow::anyhow!("Missing 'id' parameter"))?; + let r = args + .get("r") + .and_then(|v| v.as_u64()) + .ok_or_else(|| anyhow::anyhow!("Missing 'r' parameter"))?; + let g = args + .get("g") + .and_then(|v| v.as_u64()) + .ok_or_else(|| anyhow::anyhow!("Missing 'g' parameter"))?; + let b = args + .get("b") + .and_then(|v| v.as_u64()) + .ok_or_else(|| anyhow::anyhow!("Missing 'b' parameter"))?; + + // LED 1: red:user / green:user / blue:user + // LED 2: red:panic / green:wlan / blue:bt + let (red_path, green_path, blue_path) = match id { + 1 => ( + "/sys/class/leds/red:user/brightness", + "/sys/class/leds/green:user/brightness", + "/sys/class/leds/blue:user/brightness", + ), + 2 => ( + "/sys/class/leds/red:panic/brightness", + "/sys/class/leds/green:wlan/brightness", + "/sys/class/leds/blue:bt/brightness", + ), + _ => { + return Ok(ToolResult { + success: false, + output: format!("Invalid Linux LED ID: {}. 
Must be 1 or 2.", id), + error: Some(format!("Invalid Linux LED ID: {}", id)), + }); + } + }; + + // Use blocking write in spawn_blocking to avoid blocking the async runtime + let r_str = r.to_string(); + let g_str = g.to_string(); + let b_str = b.to_string(); + let rp = red_path.to_string(); + let gp = green_path.to_string(); + let bp = blue_path.to_string(); + + let result = tokio::task::spawn_blocking(move || -> anyhow::Result<()> { + std::fs::write(&rp, &r_str)?; + std::fs::write(&gp, &g_str)?; + std::fs::write(&bp, &b_str)?; + Ok(()) + }) + .await; + + match result { + Ok(Ok(())) => Ok(ToolResult { + success: true, + output: format!("LED {} set to RGB({}, {}, {})", id, r, g, b), + error: None, + }), + Ok(Err(e)) => Ok(ToolResult { + success: false, + output: format!("Failed to write LED sysfs: {}", e), + error: Some(e.to_string()), + }), + Err(e) => Ok(ToolResult { + success: false, + output: format!("Task failed: {}", e), + error: Some(e.to_string()), + }), + } + } +} + +// --------------------------------------------------------------------------- +// 13. System Info +// --------------------------------------------------------------------------- + +/// Read system information from the Uno Q Linux MPU. +pub struct UnoQSystemInfoTool; + +#[async_trait] +impl Tool for UnoQSystemInfoTool { + fn name(&self) -> &str { + "uno_q_system_info" + } + + fn description(&self) -> &str { + "Read system information from the Uno Q Linux MPU: CPU temperature, memory, disk, and WiFi status." 
+ } + + fn parameters_schema(&self) -> Value { + json!({ + "type": "object", + "properties": {}, + "required": [] + }) + } + + async fn execute(&self, _args: Value) -> anyhow::Result { + let mut info_parts: Vec = Vec::new(); + + // CPU temperature + match tokio::fs::read_to_string("/sys/class/thermal/thermal_zone0/temp").await { + Ok(temp_str) => { + if let Ok(millideg) = temp_str.trim().parse::() { + info_parts.push(format!("CPU temp: {:.1}C", millideg / 1000.0)); + } else { + info_parts.push(format!("CPU temp raw: {}", temp_str.trim())); + } + } + Err(e) => info_parts.push(format!("CPU temp: unavailable ({})", e)), + } + + // Memory info (first 3 lines of /proc/meminfo) + match tokio::fs::read_to_string("/proc/meminfo").await { + Ok(meminfo) => { + let lines: Vec<&str> = meminfo.lines().take(3).collect(); + info_parts.push(format!("Memory: {}", lines.join("; "))); + } + Err(e) => info_parts.push(format!("Memory: unavailable ({})", e)), + } + + // Disk usage + match tokio::process::Command::new("df") + .args(["-h", "/"]) + .output() + .await + { + Ok(out) if out.status.success() => { + let stdout = String::from_utf8_lossy(&out.stdout).to_string(); + info_parts.push(format!("Disk:\n{}", stdout.trim())); + } + Ok(out) => { + let stderr = String::from_utf8_lossy(&out.stderr).to_string(); + info_parts.push(format!("Disk: error ({})", stderr.trim())); + } + Err(e) => info_parts.push(format!("Disk: unavailable ({})", e)), + } + + // WiFi status + match tokio::process::Command::new("iwconfig") + .arg("wlan0") + .output() + .await + { + Ok(out) if out.status.success() => { + let stdout = String::from_utf8_lossy(&out.stdout).to_string(); + info_parts.push(format!("WiFi:\n{}", stdout.trim())); + } + Ok(out) => { + let stderr = String::from_utf8_lossy(&out.stderr).to_string(); + info_parts.push(format!("WiFi: error ({})", stderr.trim())); + } + Err(e) => info_parts.push(format!("WiFi: unavailable ({})", e)), + } + + Ok(ToolResult { + success: true, + output: 
info_parts.join("\n"), + error: None, + }) + } +} + +// =========================================================================== +// Tests +// =========================================================================== + +#[cfg(test)] +mod tests { + use super::*; + + // -- Pin/channel validation -- + + #[test] + fn valid_digital_pins_accepted() { + for pin in 0..=21 { + assert!(is_valid_digital_pin(pin), "pin {} should be valid", pin); + } + } + + #[test] + fn invalid_digital_pins_rejected() { + assert!(!is_valid_digital_pin(22)); + assert!(!is_valid_digital_pin(100)); + } + + #[test] + fn valid_pwm_pins_accepted() { + for pin in &[3, 5, 6, 9, 10, 11] { + assert!(is_valid_pwm_pin(*pin), "pin {} should be PWM-capable", pin); + } + } + + #[test] + fn non_pwm_pins_rejected() { + for pin in &[0, 1, 2, 4, 7, 8, 12, 13] { + assert!( + !is_valid_pwm_pin(*pin), + "pin {} should not be PWM-capable", + pin + ); + } + } + + #[test] + fn valid_adc_channels_accepted() { + for ch in 0..=5 { + assert!(is_valid_adc_channel(ch), "channel {} should be valid", ch); + } + } + + #[test] + fn invalid_adc_channels_rejected() { + assert!(!is_valid_adc_channel(6)); + assert!(!is_valid_adc_channel(100)); + } + + #[test] + fn valid_rgb_led_ids() { + assert!(is_valid_rgb_led_id(3)); + assert!(is_valid_rgb_led_id(4)); + assert!(!is_valid_rgb_led_id(1)); + assert!(!is_valid_rgb_led_id(5)); + } + + // -- Bridge response conversion -- + + #[test] + fn bridge_result_ok_response() { + let result = bridge_response_to_result("ok"); + assert!(result.success); + assert_eq!(result.output, "ok"); + assert!(result.error.is_none()); + } + + #[test] + fn bridge_result_error_response() { + let result = bridge_response_to_result("error: pin not found"); + assert!(!result.success); + assert_eq!(result.output, "error: pin not found"); + assert!(result.error.is_some()); + } + + #[test] + fn bridge_result_numeric_response() { + let result = bridge_response_to_result("2048"); + assert!(result.success); + 
assert_eq!(result.output, "2048"); + assert!(result.error.is_none()); + } + + // -- Tool schema validation -- + + #[test] + fn gpio_read_tool_schema() { + let tool = UnoQGpioReadTool; + assert_eq!(tool.name(), "uno_q_gpio_read"); + let schema = tool.parameters_schema(); + assert!(schema["properties"]["pin"].is_object()); + } + + #[test] + fn adc_read_tool_schema() { + let tool = UnoQAdcReadTool; + assert_eq!(tool.name(), "uno_q_adc_read"); + let schema = tool.parameters_schema(); + assert!(schema["properties"]["channel"].is_object()); + } + + #[test] + fn pwm_write_tool_schema() { + let tool = UnoQPwmWriteTool; + assert_eq!(tool.name(), "uno_q_pwm_write"); + let schema = tool.parameters_schema(); + assert!(schema["properties"]["pin"].is_object()); + assert!(schema["properties"]["duty"].is_object()); + } + + // -- Tool execute: input validation (no bridge needed) -- + + #[tokio::test] + async fn gpio_read_rejects_invalid_pin() { + let tool = UnoQGpioReadTool; + let result = tool.execute(json!({"pin": 99})).await.unwrap(); + assert!(!result.success); + assert!(result.output.contains("Invalid pin")); + } + + #[tokio::test] + async fn pwm_write_rejects_non_pwm_pin() { + let tool = UnoQPwmWriteTool; + let result = tool.execute(json!({"pin": 2, "duty": 128})).await.unwrap(); + assert!(!result.success); + assert!(result.output.contains("not PWM-capable")); + } + + #[tokio::test] + async fn adc_read_rejects_invalid_channel() { + let tool = UnoQAdcReadTool; + let result = tool.execute(json!({"channel": 7})).await.unwrap(); + assert!(!result.success); + assert!(result.output.contains("Invalid ADC channel")); + } + + #[tokio::test] + async fn rgb_led_rejects_invalid_id() { + let tool = UnoQRgbLedTool; + let result = tool + .execute(json!({"id": 1, "r": 255, "g": 0, "b": 0})) + .await + .unwrap(); + assert!(!result.success); + assert!(result.output.contains("Invalid LED ID")); + } + + #[tokio::test] + async fn can_send_rejects_invalid_id() { + let tool = UnoQCanSendTool; + let 
result = tool + .execute(json!({"id": 9999, "data": "DEADBEEF"})) + .await + .unwrap(); + assert!(!result.success); + assert!(result.output.contains("Invalid CAN ID")); + } + + #[tokio::test] + async fn i2c_transfer_rejects_invalid_address() { + let tool = UnoQI2cTransferTool; + let result = tool + .execute(json!({"address": 0, "data": "FF", "read_length": 1})) + .await + .unwrap(); + assert!(!result.success); + assert!(result.output.contains("Invalid I2C address")); + } +} diff --git a/src/peripherals/uno_q_setup.rs b/src/peripherals/uno_q_setup.rs index 424bc89e40..cc5071750e 100644 --- a/src/peripherals/uno_q_setup.rs +++ b/src/peripherals/uno_q_setup.rs @@ -141,3 +141,64 @@ fn copy_dir(src: &std::path::Path, dst: &std::path::Path) -> Result<()> { } Ok(()) } + +/// Deploy ZeroClaw binary + config to Arduino Uno Q via SSH/SCP. +/// +/// Expects a cross-compiled binary at `target/aarch64-unknown-linux-gnu/release/zeroclaw`. +pub fn deploy_uno_q(host: &str) -> Result<()> { + let ssh_target = if host.contains('@') { + host.to_string() + } else { + format!("arduino@{}", host) + }; + + let binary = std::path::Path::new(env!("CARGO_MANIFEST_DIR")) + .join("target") + .join("aarch64-unknown-linux-gnu") + .join("release") + .join("zeroclaw"); + + if !binary.exists() { + anyhow::bail!( + "Cross-compiled binary not found at {}.\nBuild with: ./dev/cross-uno-q.sh", + binary.display() + ); + } + + println!("Creating remote directory on {}...", host); + let status = Command::new("ssh") + .args([&ssh_target, "mkdir", "-p", "~/zeroclaw"]) + .status() + .context("ssh mkdir failed")?; + if !status.success() { + anyhow::bail!("Failed to create ~/zeroclaw on Uno Q"); + } + + println!("Copying zeroclaw binary..."); + let status = Command::new("scp") + .args([ + binary.to_str().unwrap(), + &format!("{}:~/zeroclaw/zeroclaw", ssh_target), + ]) + .status() + .context("scp binary failed")?; + if !status.success() { + anyhow::bail!("Failed to copy binary"); + } + + let status = 
Command::new("ssh") + .args([&ssh_target, "chmod", "+x", "~/zeroclaw/zeroclaw"]) + .status() + .context("ssh chmod failed")?; + if !status.success() { + anyhow::bail!("Failed to set executable bit"); + } + + println!(); + println!("ZeroClaw deployed to Uno Q!"); + println!(" Binary: ~/zeroclaw/zeroclaw"); + println!(); + println!("Start with: ssh {} '~/zeroclaw/zeroclaw agent'", ssh_target); + + Ok(()) +} diff --git a/src/providers/bedrock.rs b/src/providers/bedrock.rs index 7f8e9fcfe8..377aeaf8a1 100644 --- a/src/providers/bedrock.rs +++ b/src/providers/bedrock.rs @@ -478,13 +478,19 @@ impl BedrockProvider { let mut blocks: Vec = Vec::new(); let mut remaining = content; let has_image = content.contains("[IMAGE:"); - tracing::info!("parse_user_content_blocks called, len={}, has_image={}", content.len(), has_image); + tracing::info!( + "parse_user_content_blocks called, len={}, has_image={}", + content.len(), + has_image + ); while let Some(start) = remaining.find("[IMAGE:") { // Add any text before the marker let text_before = &remaining[..start]; if !text_before.trim().is_empty() { - blocks.push(ContentBlock::Text(TextBlock { text: text_before.to_string() })); + blocks.push(ContentBlock::Text(TextBlock { + text: text_before.to_string(), + })); } let after = &remaining[start + 7..]; // skip "[IMAGE:" @@ -499,7 +505,6 @@ impl BedrockProvider { let after_semi = &rest[semi + 1..]; if let Some(b64) = after_semi.strip_prefix("base64,") { let format = match mime { - "image/jpeg" | "image/jpg" => "jpeg", "image/png" => "png", "image/gif" => "gif", "image/webp" => "webp", @@ -508,7 +513,9 @@ impl BedrockProvider { blocks.push(ContentBlock::Image(ImageWrapper { image: ImageBlock { format: format.to_string(), - source: ImageSource { bytes: b64.to_string() }, + source: ImageSource { + bytes: b64.to_string(), + }, }, })); continue; @@ -516,21 +523,29 @@ impl BedrockProvider { } } // Non-data-uri image: just include as text reference - blocks.push(ContentBlock::Text(TextBlock { 
text: format!("[image: {}]", src) })); + blocks.push(ContentBlock::Text(TextBlock { + text: format!("[image: {}]", src), + })); } else { // No closing bracket, treat rest as text - blocks.push(ContentBlock::Text(TextBlock { text: remaining.to_string() })); + blocks.push(ContentBlock::Text(TextBlock { + text: remaining.to_string(), + })); break; } } // Add any remaining text if !remaining.trim().is_empty() { - blocks.push(ContentBlock::Text(TextBlock { text: remaining.to_string() })); + blocks.push(ContentBlock::Text(TextBlock { + text: remaining.to_string(), + })); } if blocks.is_empty() { - blocks.push(ContentBlock::Text(TextBlock { text: content.to_string() })); + blocks.push(ContentBlock::Text(TextBlock { + text: content.to_string(), + })); } blocks @@ -677,12 +692,18 @@ impl BedrockProvider { if let Some(src) = img.get_mut("source") { if let Some(bytes) = src.get_mut("bytes") { if let Some(s) = bytes.as_str() { - *bytes = serde_json::json!(format!("", s.len())); + *bytes = serde_json::json!(format!( + "", + s.len() + )); } } } } - tracing::info!("Bedrock image block: {}", serde_json::to_string(&b).unwrap_or_default()); + tracing::info!( + "Bedrock image block: {}", + serde_json::to_string(&b).unwrap_or_default() + ); } } } diff --git a/src/providers/compatible.rs b/src/providers/compatible.rs index ce255b6985..7868c23120 100644 --- a/src/providers/compatible.rs +++ b/src/providers/compatible.rs @@ -634,8 +634,7 @@ fn first_nonempty(text: Option<&str>) -> Option { fn normalize_responses_role(role: &str) -> &'static str { match role { - "assistant" => "assistant", - "tool" => "assistant", + "assistant" | "tool" => "assistant", _ => "user", } } diff --git a/src/providers/copilot.rs b/src/providers/copilot.rs index 6c72e637b9..bdc6a856e4 100644 --- a/src/providers/copilot.rs +++ b/src/providers/copilot.rs @@ -219,7 +219,7 @@ impl CopilotProvider { ("Accept", "application/json"), ]; - fn convert_tools<'a>(tools: Option<&'a [ToolSpec]>) -> Option>> { + fn 
convert_tools(tools: Option<&[ToolSpec]>) -> Option>> { tools.map(|items| { items .iter() diff --git a/src/providers/reliable.rs b/src/providers/reliable.rs index 6a8ec1a96b..c430a93a2f 100644 --- a/src/providers/reliable.rs +++ b/src/providers/reliable.rs @@ -659,115 +659,6 @@ impl Provider for ReliableProvider { .any(|(_, provider)| provider.supports_vision()) } - async fn chat( - &self, - request: ChatRequest<'_>, - model: &str, - temperature: f64, - ) -> anyhow::Result { - let models = self.model_chain(model); - let mut failures = Vec::new(); - - for current_model in &models { - for (provider_name, provider) in &self.providers { - let mut backoff_ms = self.base_backoff_ms; - - for attempt in 0..=self.max_retries { - let req = ChatRequest { - messages: request.messages, - tools: request.tools, - }; - match provider.chat(req, current_model, temperature).await { - Ok(resp) => { - if attempt > 0 || *current_model != model { - tracing::info!( - provider = provider_name, - model = *current_model, - attempt, - original_model = model, - "Provider recovered (failover/retry)" - ); - } - return Ok(resp); - } - Err(e) => { - let non_retryable_rate_limit = is_non_retryable_rate_limit(&e); - let non_retryable = is_non_retryable(&e) || non_retryable_rate_limit; - let rate_limited = is_rate_limited(&e); - let failure_reason = failure_reason(rate_limited, non_retryable); - let error_detail = compact_error_detail(&e); - - push_failure( - &mut failures, - provider_name, - current_model, - attempt + 1, - self.max_retries + 1, - failure_reason, - &error_detail, - ); - - if rate_limited && !non_retryable_rate_limit { - if let Some(new_key) = self.rotate_key() { - tracing::info!( - provider = provider_name, - error = %error_detail, - "Rate limited, rotated API key (key ending ...{})", - &new_key[new_key.len().saturating_sub(4)..] 
- ); - } - } - - if non_retryable { - tracing::warn!( - provider = provider_name, - model = *current_model, - error = %error_detail, - "Non-retryable error, moving on" - ); - - if is_context_window_exceeded(&e) { - anyhow::bail!( - "Request exceeds model context window; retries and fallbacks were skipped. Attempts:\n{}", - failures.join("\n") - ); - } - - break; - } - - if attempt < self.max_retries { - let wait = self.compute_backoff(backoff_ms, &e); - tracing::warn!( - provider = provider_name, - model = *current_model, - attempt = attempt + 1, - backoff_ms = wait, - reason = failure_reason, - error = %error_detail, - "Provider call failed, retrying" - ); - tokio::time::sleep(Duration::from_millis(wait)).await; - backoff_ms = (backoff_ms.saturating_mul(2)).min(10_000); - } - } - } - } - - tracing::warn!( - provider = provider_name, - model = *current_model, - "Exhausted retries, trying next provider/model" - ); - } - } - - anyhow::bail!( - "All providers/models failed. Attempts:\n{}", - failures.join("\n") - ) - } - async fn chat_with_tools( &self, messages: &[ChatMessage], diff --git a/src/security/detect.rs b/src/security/detect.rs index 751d8d092e..1a9a173613 100644 --- a/src/security/detect.rs +++ b/src/security/detect.rs @@ -16,13 +16,10 @@ pub fn create_sandbox(config: &SecurityConfig) -> Arc { // If specific backend requested, try that match backend { SandboxBackend::Landlock => { - #[cfg(feature = "sandbox-landlock")] + #[cfg(target_os = "linux")] { - #[cfg(target_os = "linux")] - { - if let Ok(sandbox) = super::landlock::LandlockSandbox::new() { - return Arc::new(sandbox); - } + if let Ok(sandbox) = super::landlock::LandlockSandbox::new() { + return Arc::new(sandbox); } } tracing::warn!( @@ -75,13 +72,10 @@ pub fn create_sandbox(config: &SecurityConfig) -> Arc { fn detect_best_sandbox() -> Arc { #[cfg(target_os = "linux")] { - // Try Landlock first (native, no dependencies) - #[cfg(feature = "sandbox-landlock")] - { - if let Ok(sandbox) = 
super::landlock::LandlockSandbox::probe() { - tracing::info!("Landlock sandbox enabled (Linux kernel 5.13+)"); - return Arc::new(sandbox); - } + // Try Landlock first (native, no dependencies, always compiled on Linux) + if let Ok(sandbox) = super::landlock::LandlockSandbox::probe() { + tracing::info!("Landlock sandbox enabled (Linux kernel 5.13+)"); + return Arc::new(sandbox); } // Try Firejail second (user-space tool) diff --git a/src/security/landlock.rs b/src/security/landlock.rs index 898e4fffa0..846fa43818 100644 --- a/src/security/landlock.rs +++ b/src/security/landlock.rs @@ -2,21 +2,22 @@ //! //! Landlock provides unprivileged sandboxing through the Linux kernel. //! This module uses the pure-Rust `landlock` crate for filesystem access control. +//! On Linux, this module is always compiled (no feature flag required). -#[cfg(all(feature = "sandbox-landlock", target_os = "linux"))] +#[cfg(target_os = "linux")] use landlock::{AccessFs, PathBeneath, PathFd, Ruleset, RulesetAttr, RulesetCreatedAttr}; use crate::security::traits::Sandbox; use std::path::Path; /// Landlock sandbox backend for Linux -#[cfg(all(feature = "sandbox-landlock", target_os = "linux"))] +#[cfg(target_os = "linux")] #[derive(Debug)] pub struct LandlockSandbox { workspace_dir: Option, } -#[cfg(all(feature = "sandbox-landlock", target_os = "linux"))] +#[cfg(target_os = "linux")] impl LandlockSandbox { /// Create a new Landlock sandbox with the given workspace directory pub fn new() -> std::io::Result { @@ -123,7 +124,7 @@ impl LandlockSandbox { } } -#[cfg(all(feature = "sandbox-landlock", target_os = "linux"))] +#[cfg(target_os = "linux")] impl Sandbox for LandlockSandbox { fn wrap_command(&self, _cmd: &mut std::process::Command) -> std::io::Result<()> { // Apply Landlock restrictions before executing the command @@ -149,16 +150,16 @@ impl Sandbox for LandlockSandbox { } } -// Stub implementations for non-Linux or when feature is disabled -#[cfg(not(all(feature = "sandbox-landlock", 
target_os = "linux")))] +// Stub implementations for non-Linux platforms +#[cfg(not(target_os = "linux"))] pub struct LandlockSandbox; -#[cfg(not(all(feature = "sandbox-landlock", target_os = "linux")))] +#[cfg(not(target_os = "linux"))] impl LandlockSandbox { pub fn new() -> std::io::Result { Err(std::io::Error::new( std::io::ErrorKind::Unsupported, - "Landlock is only supported on Linux with the sandbox-landlock feature", + "Landlock is only supported on Linux", )) } @@ -177,7 +178,7 @@ impl LandlockSandbox { } } -#[cfg(not(all(feature = "sandbox-landlock", target_os = "linux")))] +#[cfg(not(target_os = "linux"))] impl Sandbox for LandlockSandbox { fn wrap_command(&self, _cmd: &mut std::process::Command) -> std::io::Result<()> { Err(std::io::Error::new( @@ -203,7 +204,7 @@ impl Sandbox for LandlockSandbox { mod tests { use super::*; - #[cfg(all(feature = "sandbox-landlock", target_os = "linux"))] + #[cfg(target_os = "linux")] #[test] fn landlock_sandbox_name() { if let Ok(sandbox) = LandlockSandbox::new() { @@ -211,7 +212,7 @@ mod tests { } } - #[cfg(not(all(feature = "sandbox-landlock", target_os = "linux")))] + #[cfg(not(target_os = "linux"))] #[test] fn landlock_not_available_on_non_linux() { assert!(!LandlockSandbox.is_available()); @@ -222,19 +223,17 @@ mod tests { fn landlock_with_none_workspace() { // Should work even without a workspace directory let result = LandlockSandbox::with_workspace(None); - // Result depends on platform and feature flag + // Result depends on platform availability match result { Ok(sandbox) => assert!(sandbox.is_available()), - Err(_) => assert!(!cfg!(all( - feature = "sandbox-landlock", - target_os = "linux" - ))), + #[allow(clippy::assertions_on_constants)] + Err(_) => assert!(!cfg!(target_os = "linux")), } } // ── §1.1 Landlock stub tests ────────────────────────────── - #[cfg(not(all(feature = "sandbox-landlock", target_os = "linux")))] + #[cfg(not(target_os = "linux"))] #[test] fn 
landlock_stub_wrap_command_returns_unsupported() { let sandbox = LandlockSandbox; @@ -244,7 +243,7 @@ mod tests { assert_eq!(result.unwrap_err().kind(), std::io::ErrorKind::Unsupported); } - #[cfg(not(all(feature = "sandbox-landlock", target_os = "linux")))] + #[cfg(not(target_os = "linux"))] #[test] fn landlock_stub_new_returns_unsupported() { let result = LandlockSandbox::new(); @@ -252,7 +251,7 @@ mod tests { assert_eq!(result.unwrap_err().kind(), std::io::ErrorKind::Unsupported); } - #[cfg(not(all(feature = "sandbox-landlock", target_os = "linux")))] + #[cfg(not(target_os = "linux"))] #[test] fn landlock_stub_probe_returns_unsupported() { let result = LandlockSandbox::probe(); diff --git a/src/security/mod.rs b/src/security/mod.rs index d77ec19001..86b3add098 100644 --- a/src/security/mod.rs +++ b/src/security/mod.rs @@ -25,7 +25,7 @@ pub mod detect; pub mod docker; #[cfg(target_os = "linux")] pub mod firejail; -#[cfg(feature = "sandbox-landlock")] +#[cfg(target_os = "linux")] pub mod landlock; pub mod pairing; pub mod policy; diff --git a/src/security/pairing.rs b/src/security/pairing.rs index 232d3d3e72..d18005fd31 100644 --- a/src/security/pairing.rs +++ b/src/security/pairing.rs @@ -36,6 +36,7 @@ pub struct PairingGuard { /// Set of SHA-256 hashed bearer tokens (persisted across restarts). paired_tokens: Arc>>, /// Brute-force protection: per-client failed attempt counter + lockout time. 
+ #[allow(clippy::type_complexity)] failed_attempts: Arc)>>>, } diff --git a/src/security/policy.rs b/src/security/policy.rs index c6fe6aa6e3..a205eed14b 100644 --- a/src/security/policy.rs +++ b/src/security/policy.rs @@ -356,7 +356,6 @@ fn contains_unquoted_char(command: &str, target: char) -> bool { } if ch == '"' { quote = QuoteState::None; - continue; } } QuoteState::None => { diff --git a/src/service/mod.rs b/src/service/mod.rs index 6218b46962..aa7abe410a 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -222,9 +222,7 @@ fn restart_linux(init_system: InitSystem) -> Result<()> { match init_system { InitSystem::Systemd => { run_checked(Command::new("systemctl").args(["--user", "daemon-reload"]))?; - run_checked( - Command::new("systemctl").args(["--user", "restart", "zeroclaw.service"]), - )?; + run_checked(Command::new("systemctl").args(["--user", "restart", "zeroclaw.service"]))?; } InitSystem::Openrc => { run_checked(Command::new("rc-service").args(["zeroclaw", "restart"]))?; diff --git a/src/skills/mod.rs b/src/skills/mod.rs index 4931c67328..2cd8248246 100644 --- a/src/skills/mod.rs +++ b/src/skills/mod.rs @@ -190,7 +190,7 @@ fn open_skills_enabled_from_sources( env_override: Option<&str>, ) -> bool { if let Some(raw) = env_override { - if let Some(enabled) = parse_open_skills_enabled(&raw) { + if let Some(enabled) = parse_open_skills_enabled(raw) { return enabled; } if !raw.trim().is_empty() { diff --git a/src/tools/composio.rs b/src/tools/composio.rs index c191ac17ad..d5a75b0319 100644 --- a/src/tools/composio.rs +++ b/src/tools/composio.rs @@ -789,9 +789,10 @@ impl Tool for ComposioTool { if let Some(app_name) = app { self.cache_connected_account(app_name, entity_id, connected_account_id); } - output.push_str(&format!( - "\nConnected account ID: {connected_account_id}" - )); + { + use std::fmt::Write as _; + let _ = write!(output, "\nConnected account ID: {connected_account_id}"); + } } Ok(ToolResult { success: true, @@ -1573,7 +1574,7 @@ mod 
tests { fn resolve_picks_first_usable_when_multiple_accounts_exist() { // Regression test for issue #959: previously returned None when // multiple accounts existed, causing the LLM to loop on the OAuth URL. - let tool = ComposioTool::new("test-key", None, test_security()); + let _tool = ComposioTool::new("test-key", None, test_security()); let accounts = vec![ ComposioConnectedAccount { id: "ca_old".to_string(), diff --git a/src/tools/file_read.rs b/src/tools/file_read.rs index 10d17dace7..cbabbf7b8d 100644 --- a/src/tools/file_read.rs +++ b/src/tools/file_read.rs @@ -146,7 +146,11 @@ impl Tool for FileReadTool { let offset = args .get("offset") .and_then(|v| v.as_u64()) - .map(|v| usize::try_from(v.max(1)).unwrap_or(usize::MAX).saturating_sub(1)) + .map(|v| { + usize::try_from(v.max(1)) + .unwrap_or(usize::MAX) + .saturating_sub(1) + }) .unwrap_or(0); let start = offset.min(total); @@ -502,10 +506,7 @@ mod tests { assert!(result.output.contains("[Lines 1-2 of 5]")); // Full read (no offset/limit) shows all lines - let result = tool - .execute(json!({"path": "lines.txt"})) - .await - .unwrap(); + let result = tool.execute(json!({"path": "lines.txt"})).await.unwrap(); assert!(result.success); assert!(result.output.contains("1: aaa")); assert!(result.output.contains("5: eee")); @@ -529,7 +530,9 @@ mod tests { .await .unwrap(); assert!(result.success); - assert!(result.output.contains("[No lines in range, file has 2 lines]")); + assert!(result + .output + .contains("[No lines in range, file has 2 lines]")); let _ = tokio::fs::remove_dir_all(&dir).await; } @@ -551,5 +554,4 @@ mod tests { let _ = tokio::fs::remove_dir_all(&dir).await; } - } diff --git a/src/tools/mod.rs b/src/tools/mod.rs index 698981fe36..5d93cd1268 100644 --- a/src/tools/mod.rs +++ b/src/tools/mod.rs @@ -61,8 +61,11 @@ pub use file_read::FileReadTool; pub use file_write::FileWriteTool; pub use git_operations::GitOperationsTool; pub use glob_search::GlobSearchTool; +#[allow(unused_imports)] pub use 
hardware_board_info::HardwareBoardInfoTool; +#[allow(unused_imports)] pub use hardware_memory_map::HardwareMemoryMapTool; +#[allow(unused_imports)] pub use hardware_memory_read::HardwareMemoryReadTool; pub use http_request::HttpRequestTool; pub use image_info::ImageInfoTool; diff --git a/tests/config_persistence.rs b/tests/config_persistence.rs index 079b9dfc65..f75fc068e8 100644 --- a/tests/config_persistence.rs +++ b/tests/config_persistence.rs @@ -119,10 +119,12 @@ fn memory_config_default_vector_keyword_weights_sum_to_one() { #[test] fn config_toml_roundtrip_preserves_provider() { - let mut config = Config::default(); - config.default_provider = Some("deepseek".into()); - config.default_model = Some("deepseek-chat".into()); - config.default_temperature = 0.5; + let config = Config { + default_provider: Some("deepseek".into()), + default_model: Some("deepseek-chat".into()), + default_temperature: 0.5, + ..Config::default() + }; let toml_str = toml::to_string(&config).expect("config should serialize to TOML"); let parsed: Config = toml::from_str(&toml_str).expect("TOML should deserialize back"); @@ -173,10 +175,15 @@ fn config_file_write_read_roundtrip() { let tmp = tempfile::TempDir::new().expect("tempdir creation should succeed"); let config_path = tmp.path().join("config.toml"); - let mut config = Config::default(); - config.default_provider = Some("mistral".into()); - config.default_model = Some("mistral-large".into()); - config.agent.max_tool_iterations = 15; + let config = Config { + default_provider: Some("mistral".into()), + default_model: Some("mistral-large".into()), + agent: zeroclaw::config::AgentConfig { + max_tool_iterations: 15, + ..Default::default() + }, + ..Config::default() + }; let toml_str = toml::to_string(&config).expect("config should serialize"); fs::write(&config_path, &toml_str).expect("config file write should succeed"); diff --git a/tests/provider_schema.rs b/tests/provider_schema.rs index 84e2c841fa..398399494d 100644 --- 
a/tests/provider_schema.rs +++ b/tests/provider_schema.rs @@ -284,7 +284,7 @@ fn provider_construction_with_different_auth_styles() { #[test] fn chat_messages_maintain_role_sequence() { - let history = vec![ + let history = [ ChatMessage::system("You are helpful"), ChatMessage::user("What is Rust?"), ChatMessage::assistant("Rust is a systems programming language"), @@ -301,7 +301,7 @@ fn chat_messages_maintain_role_sequence() { #[test] fn chat_messages_with_tool_calls_maintain_sequence() { - let history = vec![ + let history = [ ChatMessage::system("You are helpful"), ChatMessage::user("Search for Rust"), ChatMessage::assistant("I'll search for that"),