diff --git a/.config/nextest.toml b/.config/nextest.toml index 18ee2303d..196be1c7a 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -1,2 +1,138 @@ [profile.default] +test-threads = 6 fail-fast = false +slow-timeout = { period = "60s", terminate-after = 10 } +# retries = 1 + +# All the following tests keep failing because of zebra with the following error message: +# "Serialization/Deserialization Error: data did not match any variant of untagged enum Input at line 1 column " +# We will ignore these tests for now in CI, until zebra releases the fix + +[profile.ci] +test-threads = 6 +slow-timeout = { period = "60s", terminate-after = 30 } +default-filter = """ + not test(testnet) + & not test(client_rpcs) +""" + +[profile.ci.junit] # this can be some other profile, too +path = "junit.xml" + +[profile.quick] +# Use this profile for a fast check verifying the absence of regressions. Our best assurance against +# false negatives is to run the entire test suite. +test-threads = "num-cpus" +fail-fast = false # If a test fails against this profile, that's a surprise to be investigated +slow-timeout = { period = "60s", terminate-after = 10 } +# Excludes tests that fail in low-resource environments. Flakey tests are listed at the beginning +# of the expression, slow at the end. If a slow and flakey test is discovered it should be included +# in the flakey category.
+# On a developer laptop: +# flakey 63 == tests fail at least once +# slow 32 == >60s runtime +default-filter = """ + not ( + test(/^chain_query_interface::find_fork_point_zcashd$/) | + test(/^chain_query_interface::get_block_range_zcashd$/) | + test(/^chain_query_interface::get_raw_transaction_zcashd$/) | + test(/^chain_query_interface::get_transaction_status_zcashd$/) | + test(/^chain_query_interface::repro_flake_zcashd$/) | + test(/^chain_query_interface::repro_nfs_drain$/) | + test(/^chain_query_interface::sync_large_chain_zcashd$/) | + test(/^chain_query_interface::sync_large_chain_zebrad$/) | + test(/^zebrad::get::mempool_info$/) | + test(/^zebrad::get::mempool_stream$/) | + test(/^zebrad::get::mempool_tx$/) | + test(/^zebrad::get::raw_mempool$/) | + test(/^zebrad::get::raw_transaction$/) | + test(/^zebrad::get::taddress_balance$/) | + test(/^zebrad::get::taddress_txids$/) | + test(/^zebrad::get::taddress_utxos$/) | + test(/^zebrad::get::taddress_utxos_stream$/) | + test(/^zebrad::get::transaction_mempool$/) | + test(/^zebrad::get::transaction_mined$/) | + test(/^zebrad::get::z::subtrees_by_index$/) | + test(/^zebrad::get::z::treestate$/) | + test(/^zcashd::get::tree_state$/) | + test(/^zcashd::zcash_indexer::check_info_no_cookie$/) | + test(/^zebrad::get::get_mempool_info$/) | + test(/^zebrad::get::raw_mempool_regtest$/) | + test(/^zebrad::get::raw_transaction_regtest$/) | + test(/^zebrad::get::address_balance_regtest$/) | + test(/^zebrad::get::address_balance$/) | + test(/^zebrad::get::address_tx_ids$/) | + test(/^zebrad::get::address_utxos$/) | + test(/^zebrad::get::block$/) | + test(/^zebrad::lightwallet_indexer::get_taddress_balance$/) | + test(/^zebrad::lightwallet_indexer::get_transaction$/) | + test(/^zcashd::monitor_unverified_mempool$/) | + test(/^zcashd::sent_to::orchard$/) | + test(/^zcashd::sent_to::sapling$/) | + test(/^zebrad::fetch_service::monitor_unverified_mempool$/) | + test(/^zebrad::fetch_service::send_to::all$/) | + 
test(/^zebrad::fetch_service::send_to::orchard$/) | + test(/^zebrad::fetch_service::send_to::sapling$/) | + test(/^zebrad::fetch_service::send_to::transparent$/) | + test(/^zebrad::fetch_service::shield$/) | + test(/^zebrad::state_service::monitor_unverified_mempool$/) | + test(/^zebrad::state_service::send_to::all$/) | + test(/^zebrad::state_service::send_to::orchard$/) | + test(/^zebrad::state_service::send_to::sapling$/) | + test(/^zebrad::state_service::send_to::transparent$/) | + test(/^zebrad::state_service::shield$/) | + test(/^launch_testmanager::zcashd::zaino_clients_receive_mining_reward$/) | + test(/^launch_testmanager::zebrad::state_service::zaino_clients_receive_mining_reward_and_send$/) | + test(/^zebrad::get::block_raw$/) | + test(/^zebrad::lightwallet_indexer::get_subtree_roots$/) | + test(/^launch_testmanager::zcashd::zaino_clients$/) | + test(/^zcashd::zcash_indexer::get_block_subsidy$/) | + test(/^zebra::get::address_deltas$/) | + test(/^zebra::get::z::subtrees_by_index_regtest$/) | + test(/^zebrad::get::mining_info$/) | + test(/^zebra::get::address_balance_regtest$/) | + test(/^zebra::get::raw_transaction_regtest$/) | + test(/^zebra::get::get_mempool_info$/) | + test(/^zebra::get::raw_mempool_regtest$/) | + test(/^zcashd::zcash_indexer::get_mining_info$/) | + test(/^zebrad::process_200_blocks$/) + ) + & + not ( + test(/^zcashd::process_100_blocks$/) | + test(/^zcashd::process_200_blocks$/) | + test(/^launch_testmanager::zebrad::fetch_service::zaino_clients_receive_mining_reward_and_send$/) | + test(/^zcashd::sent_to::transparent$/) | + test(/^zcashd::shield$/) | + test(/^zcashd::sent_to::all$/) | + test(/^zcashd::get::best_blockhash$/) | + test(/^zcashd::get::block_count$/) | + test(/^zcashd::get::block_range$/) | + test(/^zcashd::get::block_range_nullifiers$/) | + test(/^zcashd::get::block_subsidy$/) | + test(/^zcashd::get::mempool_info$/) | + test(/^zcashd::get::mempool_stream$/) | + test(/^zcashd::get::mempool_tx$/) | + 
test(/^zcashd::get::raw_mempool$/) | + test(/^zcashd::get::raw_transaction$/) | + test(/^zcashd::get::taddress_balance$/) | + test(/^zcashd::get::taddress_txids$/) | + test(/^zcashd::get::taddress_utxos$/) | + test(/^zcashd::get::taddress_utxos_stream$/) | + test(/^zcashd::get::transaction_mempool$/) | + test(/^zcashd::get::transaction_mined$/) | + test(/^zcashd::get::z::get_treestate$/) | + test(/^zcashd::get::z::subtrees_by_index$/) | + test(/^zcashd::zcash_indexer::get_difficulty$/) | + test(/^zcashd::zcash_indexer::get_mempool_info$/) | + test(/^zcashd::zcash_indexer::get_raw_mempool$/) | + test(/^zcashd::zcash_indexer::get_raw_transaction$/) | + test(/^zcashd::zcash_indexer::z_get_subtrees_by_index$/) | + test(/^zcashd::zcash_indexer::z_get_treestate$/) | + test(/^launch_testmanager::zebrad::state_service::zaino_clients_receive_mining_reward$/) | + test(/^zebrad::get::peer_info$/) + ) +""" + +[profile.quick.junit] +path = "junit.xml" diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..bd3dc94d2 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,22 @@ +# Build artifacts +target/ +**/target/ +**/*.rs.bk + +# VCS / CI +.git/ +.gitignore +.github/ + +# IDE / OS +.vscode/ +.idea/ +*.swp +.DS_Store + +# Misc +**/*.log + +docker_cargo/ +container-target/ + diff --git a/.env.testing-artifacts b/.env.testing-artifacts new file mode 100644 index 000000000..a31804cf9 --- /dev/null +++ b/.env.testing-artifacts @@ -0,0 +1,8 @@ +# Rust toolchain version (for rust:-bookworm base image) +RUST_VERSION=1.92 + +# zcashd Git tag (https://github.com/zcash/zcash/releases) +ZCASH_VERSION=6.11.0 + +# zebrad version tag for pulling zfnd/zebra: +ZEBRA_VERSION=4.1.0 diff --git a/.githooks/pre-push b/.githooks/pre-push new file mode 100755 index 000000000..974814f4e --- /dev/null +++ b/.githooks/pre-push @@ -0,0 +1 @@ +makers lint diff --git a/.github/ISSUE_TEMPLATE/architecture.yaml b/.github/ISSUE_TEMPLATE/architecture.yaml new file mode 100644 index 
000000000..38922a229 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/architecture.yaml @@ -0,0 +1,63 @@ +name: "Architecture Issue" +description: Document structural concerns, design decisions, or refactoring needs +labels: ["architecture"] +body: + - type: markdown + attributes: + value: | + Use this template to document architectural concerns, design problems, or areas needing refactoring. + + These issues help track technical debt and guide future development. + + - type: textarea + id: observation + attributes: + label: Observation + description: What architectural concern have you identified? + placeholder: | + The X module is tightly coupled with Y... + The current design makes it difficult to... + validations: + required: true + + - type: textarea + id: location + attributes: + label: Location in codebase + description: Which modules, files, or components are affected? + placeholder: | + - `zaino-state/src/...` + - `chain_index::finalized_state::...` + validations: + required: true + + - type: dropdown + id: concern_type + attributes: + label: Type of concern + description: What category best describes this? + multiple: true + options: + - Coupling / Dependencies + - Abstraction boundaries + - Separation of concerns + - API design + - Module organization + - Type system / Safety + - Persistence layer + - Error handling + - Other + validations: + required: true + + - type: textarea + id: impact + attributes: + label: Impact + description: How does this affect development, maintenance, or correctness? + + - type: textarea + id: suggestion + attributes: + label: Suggested direction + description: Any ideas on how to address this? 
(optional) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml new file mode 100644 index 000000000..a272c498b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -0,0 +1,88 @@ +name: "Bug Report" +description: Report a bug or unexpected behavior +labels: ["bug"] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to report a bug! + + **Before submitting**, please check if a similar issue already exists. + + - type: dropdown + id: usage + attributes: + label: Usage mode + description: How are you using Zaino? + options: + - zainod (running the daemon) + - Library integration (using zaino-* crates) + - Both / Not sure + validations: + required: true + + - type: textarea + id: description + attributes: + label: Describe the bug + description: A clear description of what the bug is. + validations: + required: true + + - type: textarea + id: reproduce + attributes: + label: Steps to reproduce + description: List the steps to reproduce the issue. + placeholder: | + 1. Start zainod with config... + 2. Send request to... + 3. Observe error... + validations: + required: true + + - type: textarea + id: expected + attributes: + label: Expected behavior + description: What did you expect to happen? + validations: + required: true + + - type: textarea + id: environment + attributes: + label: Environment + description: | + Please include: + - Zaino version or commit hash + - Backend version (zebra/zcashd) + - OS and platform + placeholder: | + Zaino: v0.x.x or commit abc123 + Backend: Zebra 27.0.0 + OS: Linux x86_64 / macOS ARM64 + validations: + required: true + + - type: textarea + id: config + attributes: + label: Configuration + description: Paste your zainod config file (redact any sensitive values). + render: toml + + - type: textarea + id: logs + attributes: + label: Logs + description: | + Relevant log output. Use debug logging if possible (`level = "debug"` in config). + Wrap long logs in `
` tags. + render: shell + + - type: textarea + id: additional + attributes: + label: Additional context + description: Any other relevant information (config snippets, block heights, tx ids, etc.) diff --git a/.github/ISSUE_TEMPLATE/code_smell.yaml b/.github/ISSUE_TEMPLATE/code_smell.yaml new file mode 100644 index 000000000..e0da66da1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/code_smell.yaml @@ -0,0 +1,67 @@ +name: "Code Smell" +description: Flag anti-patterns, naming issues, or code that needs attention +labels: ["code-smell"] +body: + - type: markdown + attributes: + value: | + Use this to flag code that works but has problems worth addressing. + + Code smells are indicators of deeper issues - they may or may not need immediate action. + + - type: textarea + id: description + attributes: + label: What's the smell? + description: Describe the problematic pattern or code. + validations: + required: true + + - type: textarea + id: location + attributes: + label: Location + description: Where is this code? (file paths, module names, function names) + placeholder: | + `chain_index/src/finalized_state/entry.rs:42` + validations: + required: true + + - type: dropdown + id: smell_type + attributes: + label: Category + description: What type of smell is this? + multiple: true + options: + - Naming confusion + - Duplication + - Long function / God object + - Primitive obsession + - Feature envy + - Inappropriate intimacy (tight coupling) + - Dead code + - Magic numbers / strings + - Missing abstraction + - Test smell + - Other + validations: + required: true + + - type: dropdown + id: priority + attributes: + label: Severity + description: How urgently should this be addressed? + options: + - Low - Nice to fix eventually + - Medium - Should fix when touching this code + - High - Actively causing problems + validations: + required: true + + - type: textarea + id: suggestion + attributes: + label: Suggested fix + description: How might this be improved? 
(optional) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..f057e5c1a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: true +contact_links: + - name: Questions & Discussion + url: https://github.com/zingolabs/zaino/discussions + about: Ask questions or start a discussion (if Discussions is enabled) + - name: Zcash Community Forum + url: https://forum.zcashcommunity.com/ + about: General Zcash ecosystem questions diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml new file mode 100644 index 000000000..a9530233b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -0,0 +1,68 @@ +name: "Feature Request" +description: Suggest a new feature or API enhancement +labels: ["enhancement"] +body: + - type: markdown + attributes: + value: | + Thanks for suggesting an improvement! + + Feature requests help us understand what the community needs. + + - type: dropdown + id: usage + attributes: + label: Usage mode + description: How are you using Zaino? + options: + - zainod (running the daemon) + - Library integration (using zaino-* crates) + - Both / Not sure + validations: + required: true + + - type: textarea + id: problem + attributes: + label: Problem or use case + description: What problem does this solve? What are you trying to accomplish? + placeholder: I'm trying to... but currently... + validations: + required: true + + - type: textarea + id: solution + attributes: + label: Proposed solution + description: How do you envision this working? + validations: + required: true + + - type: textarea + id: alternatives + attributes: + label: Alternatives considered + description: Have you considered other approaches? What are the trade-offs? + + - type: dropdown + id: area + attributes: + label: Area + description: Which part of Zaino does this relate to? 
+ multiple: true + options: + - gRPC API (CompactTxStreamer) + - JSON-RPC API + - Indexing / Chain state + - Configuration + - Performance + - Documentation + - Other + validations: + required: true + + - type: textarea + id: additional + attributes: + label: Additional context + description: Any other relevant information, examples from other projects, etc. diff --git a/.github/ISSUE_TEMPLATE/task.yaml b/.github/ISSUE_TEMPLATE/task.yaml new file mode 100644 index 000000000..14a14f98a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/task.yaml @@ -0,0 +1,54 @@ +name: "Task" +description: Track implementation work, todos, or specific deliverables +labels: ["tracking"] +body: + - type: markdown + attributes: + value: | + Use this for tracking specific work items, implementation tasks, or deliverables. + + Tasks are actionable items with a clear definition of done. + + - type: textarea + id: description + attributes: + label: Task description + description: What needs to be done? + validations: + required: true + + - type: textarea + id: context + attributes: + label: Context + description: Why is this needed? Link to parent issues or discussions if relevant. + placeholder: | + Part of #123 + Related to the X migration... + + - type: textarea + id: acceptance + attributes: + label: Acceptance criteria + description: How do we know when this is done? + placeholder: | + - [ ] Implement X + - [ ] Add tests for Y + - [ ] Update documentation + + - type: dropdown + id: area + attributes: + label: Area + description: Which part of the codebase? 
+ multiple: true + options: + - zainod + - zaino-state + - zaino-serve + - zaino-fetch + - zaino-proto + - chain_index + - Testing / CI + - Documentation + - Other diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..b5370f11e --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1 @@ +Fixes: diff --git a/.github/workflows/auto-tag-rc.yml b/.github/workflows/auto-tag-rc.yml new file mode 100644 index 000000000..0a5241bef --- /dev/null +++ b/.github/workflows/auto-tag-rc.yml @@ -0,0 +1,41 @@ +name: Auto RC Tag + +on: + pull_request: + types: [closed] + branches: + - 'rc/*' + +jobs: + auto-tag-rc: + if: github.event.pull_request.merged == true + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: ${{ github.base_ref }} + + - name: Get next RC tag + id: tagger + run: | + VERSION="${GITHUB_BASE_REF#rc/}" + PREFIX="$VERSION-rc" + + echo "Looking for previous tags matching $PREFIX.*" + git fetch --tags + + LATEST=$(git tag -l "$PREFIX.*" | sed -E "s/.*rc\.//" | sort -n | tail -1) + NEXT=$((LATEST + 1)) + TAG="$PREFIX.$NEXT" + + echo "TAG=$TAG" >> $GITHUB_OUTPUT + + - name: Create and push tag + run: | + git config user.name github-actions + git config user.email github-actions@users.noreply.github.com + git tag ${{ steps.tagger.outputs.TAG }} + git push origin ${{ steps.tagger.outputs.TAG }} diff --git a/.github/workflows/build-n-push-ci-image.yaml b/.github/workflows/build-n-push-ci-image.yaml new file mode 100644 index 000000000..3d51f0c6d --- /dev/null +++ b/.github/workflows/build-n-push-ci-image.yaml @@ -0,0 +1,71 @@ +name: Docker Image CI/CD + +on: + workflow_dispatch: + push: + branches: + - '**' + paths: + - '.env.testing-artifacts' + - 'test_environment/*' + +jobs: + build-and-push: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses:
docker/setup-buildx-action@v3 + + - name: Log in to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN }} + + - name: Compute version-based image tag + id: taggen + run: | + source .env.testing-artifacts + export RUST_VERSION ZCASH_VERSION ZEBRA_VERSION + TAG=$(./utils/get-ci-image-tag.sh) + echo "tag=$TAG" >> "$GITHUB_OUTPUT" + + - name: Load version variables into job env + id: versions + run: | + set -a + source .env.testing-artifacts + set +a + echo "sha_short=$(git rev-parse --short HEAD)" >> "$GITHUB_OUTPUT" + echo "RUST_VERSION=$RUST_VERSION" >> "$GITHUB_ENV" + echo "ZCASH_VERSION=$ZCASH_VERSION" >> "$GITHUB_ENV" + echo "ZEBRA_VERSION=$ZEBRA_VERSION" >> "$GITHUB_ENV" + + - name: Define build target + id: target + run: | + set -a + source ./utils/helpers.sh + TARGET=$(resolve_build_target ${{ env.ZCASH_VERSION }} ${{ env.ZEBRA_VERSION }}) + echo "target=$TARGET" >> "$GITHUB_OUTPUT" + + - name: Build and Push Docker Image + uses: docker/build-push-action@v5 + with: + context: test_environment + file: test_environment/Dockerfile + platforms: linux/amd64 + target: ${{ steps.target.outputs.target }} + tags: | + zingodevops/zaino-ci:latest + zingodevops/zaino-ci:${{ steps.versions.outputs.sha_short }} + zingodevops/zaino-ci:${{ steps.taggen.outputs.tag }} + push: true + build-args: | + RUST_VERSION=${{ env.RUST_VERSION }} + ZCASH_VERSION=${{ env.ZCASH_VERSION }} + ZEBRA_VERSION=${{ env.ZEBRA_VERSION }} diff --git a/.github/workflows/ci-nightly.yaml b/.github/workflows/ci-nightly.yaml new file mode 100644 index 000000000..792ee7818 --- /dev/null +++ b/.github/workflows/ci-nightly.yaml @@ -0,0 +1,209 @@ +name: CI - Nightly + +on: + schedule: + - cron: '30 3 * * *' + workflow_dispatch: + +env: + AWS_ACCESS_KEY_ID: ${{ secrets.MINIO_ACCESS_KEY }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.MINIO_SECRET_KEY }} + AWS_REGION: us-east-1 + CACHE_ENDPOINT: 
gh-cache-hl.minio-tenants.svc.cluster.local + CACHE_PORT: 9000 + CACHE_BUCKET: gha-cache-bucket + INSECURE_CACHE: true + +jobs: + pr-checks: + uses: ./.github/workflows/ci.yml + with: + nextest-profile: ci + + compute-tag: + uses: ./.github/workflows/compute-tag.yml + + check: + name: Cargo Check + runs-on: arc-sh-runners + needs: compute-tag + container: + image: zingodevops/zaino-ci:${{ needs.compute-tag.outputs.image-tag }} + steps: + - uses: actions/checkout@v4 + + - name: MinIO Cache (cargo + target) + uses: tespkg/actions-cache@v1 + with: + endpoint: ${{ env.CACHE_ENDPOINT }} + port: ${{ env.CACHE_PORT }} + bucket: ${{ env.CACHE_BUCKET }} + insecure: ${{ env.INSECURE_CACHE }} + key: cargo-deps-${{ hashFiles('**/Cargo.lock') }} + restore-keys: cargo-deps- + path: | + ~/.cargo + target + + - name: Cargo Check + run: | + cargo check --all-features + cargo check --tests --all-features + + fmt: + name: Rustfmt + runs-on: arc-sh-runners + needs: compute-tag + container: + image: zingodevops/zaino-ci:${{ needs.compute-tag.outputs.image-tag }} + steps: + - uses: actions/checkout@v4 + + - name: MinIO Cache (cargo + target) + uses: tespkg/actions-cache@v1 + with: + endpoint: ${{ env.CACHE_ENDPOINT }} + port: ${{ env.CACHE_PORT }} + bucket: ${{ env.CACHE_BUCKET }} + insecure: ${{ env.INSECURE_CACHE }} + key: cargo-deps-${{ hashFiles('**/Cargo.lock') }} + restore-keys: cargo-deps- + path: | + ~/.cargo + target + + - name: Check formatting + run: cargo fmt --all -- --check + + clippy: + name: Clippy + runs-on: arc-sh-runners + needs: compute-tag + container: + image: zingodevops/zaino-ci:${{ needs.compute-tag.outputs.image-tag }} + steps: + - uses: actions/checkout@v4 + + - name: MinIO Cache (cargo + target) + uses: tespkg/actions-cache@v1 + with: + endpoint: ${{ env.CACHE_ENDPOINT }} + port: ${{ env.CACHE_PORT }} + bucket: ${{ env.CACHE_BUCKET }} + insecure: ${{ env.INSECURE_CACHE }} + key: cargo-deps-${{ hashFiles('**/Cargo.lock') }} + restore-keys: cargo-deps- + path: 
| + ~/.cargo + target + + - name: Run clippy + run: cargo clippy --all-targets -- --deny warnings + + doc: + name: Cargo Doc + runs-on: arc-sh-runners + needs: compute-tag + container: + image: zingodevops/zaino-ci:${{ needs.compute-tag.outputs.image-tag }} + steps: + - uses: actions/checkout@v4 + + - name: MinIO Cache (cargo + target) + uses: tespkg/actions-cache@v1 + with: + endpoint: ${{ env.CACHE_ENDPOINT }} + port: ${{ env.CACHE_PORT }} + bucket: ${{ env.CACHE_BUCKET }} + insecure: ${{ env.INSECURE_CACHE }} + key: cargo-deps-${{ hashFiles('**/Cargo.lock') }} + restore-keys: cargo-deps- + path: | + ~/.cargo + target + + - name: Check documentation + run: RUSTDOCFLAGS="-D warnings" cargo doc --no-deps --document-private-items + + whitespace: + name: Whitespace check + runs-on: arc-sh-runners + needs: compute-tag + container: + image: zingodevops/zaino-ci:${{ needs.compute-tag.outputs.image-tag }} + steps: + - uses: actions/checkout@v4 + + - name: Check trailing whitespace + run: ./utils/trailing-whitespace.sh reject + working-directory: . + + cargo-hack-check: + name: Cargo Hack Check + needs: compute-tag + container: + image: zingodevops/zaino-ci:${{ needs.compute-tag.outputs.image-tag }} + runs-on: arc-sh-runners + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: MinIO Cache (cargo + target) + uses: tespkg/actions-cache@v1 + with: + endpoint: ${{ env.CACHE_ENDPOINT }} + port: ${{ env.CACHE_PORT }} + bucket: ${{ env.CACHE_BUCKET }} + insecure: ${{ env.INSECURE_CACHE }} + key: cargo-deps-${{ hashFiles('**/Cargo.lock') }} + restore-keys: cargo-deps- + path: | + ~/.cargo + target + + - name: Install protoc + run: | + if ! 
command -v protoc; then + sudo apt-get update + sudo apt-get install -y protobuf-compiler + fi + + - name: Run cargo-hack + uses: zingolabs/infrastructure/.github/actions/cargo-hack@20485fed7a080e381130ed8120419dc81acae641 + with: + hack-subcommand: check + + cargo-hack-build: + name: Cargo Hack Build + needs: compute-tag + container: + image: zingodevops/zaino-ci:${{ needs.compute-tag.outputs.image-tag }} + runs-on: arc-sh-runners + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: MinIO Cache (cargo + target) + uses: tespkg/actions-cache@v1 + with: + endpoint: ${{ env.CACHE_ENDPOINT }} + port: ${{ env.CACHE_PORT }} + bucket: ${{ env.CACHE_BUCKET }} + insecure: ${{ env.INSECURE_CACHE }} + key: cargo-deps-${{ hashFiles('**/Cargo.lock') }} + restore-keys: cargo-deps- + path: | + ~/.cargo + target + + - name: Install protoc + run: | + if ! command -v protoc; then + sudo apt-get update + sudo apt-get install -y protobuf-compiler + fi + + - name: Run cargo-hack + uses: zingolabs/infrastructure/.github/actions/cargo-hack@20485fed7a080e381130ed8120419dc81acae641 + with: + hack-subcommand: build diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 457c7de59..3efbee055 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,139 +1,140 @@ -name: CI checks +name: CI - PR -on: [push, pull_request] +on: + workflow_call: + inputs: + nextest-profile: + type: string + default: 'quick' + workflow_dispatch: + push: + tags: + - "**" + pull_request: + types: [opened, synchronize, reopened, ready_for_review] -jobs: - test: - name: > - Test on ${{ matrix.os }} - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macOS-latest] +env: + AWS_ACCESS_KEY_ID: ${{ secrets.MINIO_ACCESS_KEY }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.MINIO_SECRET_KEY }} + AWS_REGION: us-east-1 + CACHE_ENDPOINT: gh-cache-hl.minio-tenants.svc.cluster.local + CACHE_PORT: 9000 + CACHE_BUCKET: gha-cache-bucket + 
INSECURE_CACHE: true - steps: - - uses: actions/checkout@v4 - - name: Run tests - run: > - cargo test - --release - --workspace - - name: Run slow tests - run: > - cargo test - --release - --workspace - -- --ignored - - name: Verify working directory is clean - run: git diff --exit-code +jobs: + compute-tag: + uses: ./.github/workflows/compute-tag.yml - build-latest: - name: Latest build on ${{ matrix.os }} - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macOS-latest] + build-test-artifacts: + name: Build test artifacts + needs: compute-tag + container: + image: zingodevops/zaino-ci:${{ needs.compute-tag.outputs.image-tag }} + runs-on: arc-sh-runners + if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - id: toolchain - - run: rustup override set ${{steps.toolchain.outputs.name}} - - name: Remove lockfile to build with latest dependencies - run: rm Cargo.lock - - name: Build crates - run: > - cargo build - --workspace - --all-targets - --verbose - - name: Verify working directory is clean (excluding lockfile) - run: git diff --exit-code ':!Cargo.lock' + - name: Checkout repository + uses: actions/checkout@v4 + - name: Restore cargo dependencies + id: restore-cargo + uses: tespkg/actions-cache/restore@v1 + with: + endpoint: ${{ env.CACHE_ENDPOINT }} + port: ${{ env.CACHE_PORT }} + bucket: ${{ env.CACHE_BUCKET }} + insecure: ${{ env.INSECURE_CACHE }} + key: cargo-deps-${{ hashFiles('**/Cargo.lock') }} + restore-keys: cargo-deps- + path: | + ~/.cargo + target/debug/deps - bitrot: - name: Bitrot check - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - # Build benchmarks to prevent bitrot - - name: Build benchmarks - run: cargo build --all --benches + - name: Build and archive tests + run: cargo nextest archive --verbose --workspace --all-features --archive-file nextest-archive.tar.zst - clippy: - name: Clippy (MSRV) - timeout-minutes: 30 - 
runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Run clippy - uses: actions-rs/clippy-check@v1 + - name: Save cargo dependencies + if: steps.restore-cargo.outputs.cache-hit != 'true' + uses: tespkg/actions-cache/save@v1 with: - name: Clippy (MSRV) - token: ${{ secrets.GITHUB_TOKEN }} - args: > - --all-targets - -- - -D warnings + endpoint: ${{ env.CACHE_ENDPOINT }} + port: ${{ env.CACHE_PORT }} + bucket: ${{ env.CACHE_BUCKET }} + insecure: ${{ env.INSECURE_CACHE }} + key: cargo-deps-${{ hashFiles('**/Cargo.lock') }} + path: | + ~/.cargo + target/debug/deps - clippy-beta: - name: Clippy (beta) - timeout-minutes: 30 - runs-on: ubuntu-latest - continue-on-error: true - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@beta - id: toolchain - - run: rustup override set ${{steps.toolchain.outputs.name}} - - name: Run Clippy (beta) - uses: actions-rs/clippy-check@v1 - continue-on-error: true + - name: Save nextest archive to S3 + uses: tespkg/actions-cache/save@v1 with: - name: Clippy (beta) - token: ${{ secrets.GITHUB_TOKEN }} - args: > - --all-targets - -- - -W clippy::all + endpoint: ${{ env.CACHE_ENDPOINT }} + port: ${{ env.CACHE_PORT }} + bucket: ${{ env.CACHE_BUCKET }} + insecure: ${{ env.INSECURE_CACHE }} + key: nextest-archive-${{ github.sha }} + path: nextest-archive.tar.zst - codecov: - name: Code coverage - runs-on: ubuntu-latest + test: + name: Tests + needs: [compute-tag, build-test-artifacts] + runs-on: arc-sh-runners container: - image: xd009642/tarpaulin:develop-nightly - options: --security-opt seccomp=unconfined - + image: zingodevops/zaino-ci:${{ needs.compute-tag.outputs.image-tag }} + strategy: + fail-fast: false + matrix: + partition: + # - "integration-tests::chain_cache" FIXME: this must be reintroduced when the chain index test hangs are debugged + - "integration-tests::fetch_service" + - "integration-tests::json_server" + - "integration-tests::state_service" + - "integration-tests::test_vectors" + - 
"integration-tests::wallet_to_validator" + - "zainod" + - "zaino-state" + - "zaino-testutils" steps: - uses: actions/checkout@v4 - - name: Generate coverage report - run: > - cargo tarpaulin - --engine llvm - --release - --timeout 600 - --out xml - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3.1.5 - doc-links: - name: Intra-doc links - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - run: cargo fetch - # Requires #![deny(rustdoc::broken_intra_doc_links)] in crates. - - name: Check intra-doc links - run: > - cargo doc - --all - --document-private-items + - name: Restore cargo dependencies + uses: tespkg/actions-cache/restore@v1 + with: + endpoint: ${{ env.CACHE_ENDPOINT }} + port: ${{ env.CACHE_PORT }} + bucket: ${{ env.CACHE_BUCKET }} + insecure: ${{ env.INSECURE_CACHE }} + key: cargo-deps-${{ hashFiles('**/Cargo.lock') }} + restore-keys: cargo-deps- + path: | + ~/.cargo + target/debug/deps - fmt: - name: Rustfmt - timeout-minutes: 30 - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Check formatting - run: cargo fmt --all -- --check + - name: Setup test binaries + run: | + # Run the entrypoint script with the actual working directory + /usr/local/bin/entrypoint.sh "$(pwd)" + - name: Restore nextest archive from S3 + uses: tespkg/actions-cache/restore@v1 + with: + endpoint: ${{ env.CACHE_ENDPOINT }} + port: ${{ env.CACHE_PORT }} + bucket: ${{ env.CACHE_BUCKET }} + insecure: ${{ env.INSECURE_CACHE }} + key: nextest-archive-${{ github.sha }} + path: nextest-archive.tar.zst + + - name: Run Tests (Nextest) + env: + TEST_BINARIES_DIR: ${{ github.workspace }}/test_binaries/bins + run: | + cargo nextest run --verbose --profile ${{ inputs.nextest-profile || 'quick' }} --retries 2 --no-tests=fail --archive-file nextest-archive.tar.zst \ + --workspace-remap ./ --filterset "binary_id(${{ matrix.partition }})" ${{ env.NEXTEST_FLAGS }} + + - name: Test Summary + uses: test-summary/action@v2 + with: + paths:
"target/nextest/${{ inputs.nextest-profile || 'quick' }}/junit.xml" diff --git a/.github/workflows/compute-tag.yml b/.github/workflows/compute-tag.yml new file mode 100644 index 000000000..ad48defb1 --- /dev/null +++ b/.github/workflows/compute-tag.yml @@ -0,0 +1,27 @@ +name: Compute Docker Image Tag + +on: + workflow_call: + outputs: + image-tag: + description: "The computed Docker image tag" + value: ${{ jobs.compute-tag.outputs.image-tag }} + +jobs: + compute-tag: + name: Compute Docker Image Tag + runs-on: ubuntu-latest + outputs: + image-tag: ${{ steps.taggen.outputs.tag }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Compute version-based image tag + id: taggen + run: | + source .env.testing-artifacts + export RUST_VERSION ZCASH_VERSION ZEBRA_VERSION DOCKER_DIR_HASH + TAG=$(./utils/get-ci-image-tag.sh) + echo "TAG=$TAG" + echo "tag=$TAG" >> "$GITHUB_OUTPUT" diff --git a/.github/workflows/final-tag-on-stable.yml b/.github/workflows/final-tag-on-stable.yml new file mode 100644 index 000000000..93a7e45c7 --- /dev/null +++ b/.github/workflows/final-tag-on-stable.yml @@ -0,0 +1,45 @@ +name: Final Tag on Stable + +on: + pull_request: + types: [closed] + branches: + - stable + +jobs: + tag-stable: + if: github.event.pull_request.merged == true + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: read + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Lookup merged PR and extract version + id: get-version + uses: actions/github-script@v7 + with: + script: | + const pr = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: context.payload.number, + }); + + const baseBranch = pr.data.base.ref; + const headBranch = pr.data.head.ref; + + if (!headBranch.startsWith("rc/")) throw new Error("PR is not from an rc/* branch"); + const version = headBranch.replace(/^rc\//, ""); + core.setOutput("version", version); + + - name: Create final release tag + 
run: | + git config user.name github-actions + git config user.email github-actions@users.noreply.github.com + git tag ${{ steps.get-version.outputs.version }} + git push origin ${{ steps.get-version.outputs.version }} + diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 000000000..2e1bd7093 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,104 @@ +name: Zaino Release + +on: + push: + tags: + - '[0-9]+.[0-9]+.[0-9]+' + - '[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+' + workflow_dispatch: + +jobs: + docker: + name: Docker images (build+push) + runs-on: ubuntu-latest + env: + PUSH_IMAGES: true + steps: + - uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN }} + + - name: Extract metadata for Docker (default image) + id: meta-default + uses: docker/metadata-action@v5 + with: + images: ${{ secrets.DOCKERHUB_USERNAME }}/${{ vars.DOCKER_RELEASE_REPO || 'zaino' }} + tags: | + type=semver,pattern={{version}} + + - name: Build and Push Default Image + uses: docker/build-push-action@v5 + with: + context: . 
+ platforms: linux/amd64 + push: ${{ env.PUSH_IMAGES }} + tags: ${{ steps.meta-default.outputs.tags }} + labels: ${{ steps.meta-default.outputs.labels }} + cache-from: | + type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/${{ vars.DOCKER_RELEASE_REPO || 'zaino' }}:buildcache + type=gha + cache-to: | + type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/${{ vars.DOCKER_RELEASE_REPO || 'zaino' }}:buildcache,mode=max + type=gha,mode=max + + - name: Extract metadata for Docker (no-tls image) + id: meta-no-tls + uses: docker/metadata-action@v5 + with: + images: ${{ secrets.DOCKERHUB_USERNAME }}/${{ vars.DOCKER_RELEASE_REPO || 'zaino' }} + flavor: | + suffix=-no-tls + tags: | + type=semver,pattern={{version}} + + - name: Build and Push No-TLS Image + uses: docker/build-push-action@v5 + with: + build-args: NO_TLS=true + context: . + platforms: linux/amd64 + push: ${{ env.PUSH_IMAGES }} + tags: "${{ steps.meta-no-tls.outputs.tags }}" + labels: ${{ steps.meta-no-tls.outputs.labels }} + cache-from: | + type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/${{ vars.DOCKER_RELEASE_REPO || 'zaino' }}:buildcache-no-tls + type=gha + cache-to: | + type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/${{ vars.DOCKER_RELEASE_REPO || 'zaino' }}:buildcache-no-tls,mode=max + type=gha,mode=max + + create-release: + needs: docker + name: Create GitHub Release + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Generate Changelog + id: changelog + run: | + PREVIOUS_TAG=$(git describe --tags --abbrev=0 HEAD^ 2>/dev/null || echo "") + if [ -z "$PREVIOUS_TAG" ]; then + git log --pretty=format:"* %s" > CHANGELOG.md + else + git log --pretty=format:"* %s" $PREVIOUS_TAG..HEAD > CHANGELOG.md + fi + + - name: Create Release + uses: softprops/action-gh-release@v1 + with: + files: | + CHANGELOG.md + body_path: CHANGELOG.md + draft: false + prerelease: ${{ contains(github.ref, '-rc.') }} + token: ${{ secrets.GITHUB_TOKEN }} diff --git 
a/.github/workflows/trigger-integration-tests.yml b/.github/workflows/trigger-integration-tests.yml new file mode 100644 index 000000000..dcefbccb9 --- /dev/null +++ b/.github/workflows/trigger-integration-tests.yml @@ -0,0 +1,61 @@ +name: Trigger Integration Tests + +on: + pull_request: + branches: [dev] + paths: + - "**/*.rs" + - "**/Cargo.toml" + - "**/Cargo.lock" + - .cargo/config.toml + - .github/workflows/trigger-integration-tests.yml + + push: + branches: [dev] + paths: + - "**/*.rs" + - "**/Cargo.toml" + - "**/Cargo.lock" + - .cargo/config.toml + - .github/workflows/trigger-integration-tests.yml + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: {} + +jobs: + trigger-integration: + name: Trigger integration tests + runs-on: ubuntu-latest + steps: + - name: Generate app token + id: app-token + uses: actions/create-github-app-token@29824e69f54612133e76f7eaac726eef6c875baf # v2.2.1 + with: + app-id: ${{ secrets.DISPATCH_APP_ID }} + private-key: ${{ secrets.DISPATCH_APP_PRIVATE_KEY }} + owner: zcash + repositories: integration-tests + + - name: Get requested test branch, if any + if: github.event_name == 'pull_request' + id: test-branch + env: + GH_TOKEN: ${{ github.token }} + HEAD_SHA: ${{ github.event.pull_request.head.sha }} + run: | + TEST_SHA=$(gh pr -R zingolabs/zaino list --search "${HEAD_SHA}" --json body | jq '.[0].body' | sed '/[^ ]ZIT-Revision/!d' | sed -E 's/.*ZIT-Revision: ([^\\]*)\\.*/\1/') + echo "test_sha=${TEST_SHA}" >> $GITHUB_OUTPUT + + - name: Trigger integration tests + env: + GH_TOKEN: ${{ steps.app-token.outputs.token }} + SHA: ${{ github.event.pull_request.head.sha || github.sha }} + TEST_SHA: ${{ steps.test-branch.outputs.test_sha }} + run: > + gh api repos/zcash/integration-tests/dispatches + --field event_type="zaino-interop-request" + --field client_payload[sha]="${SHA}" + --field client_payload[test_sha]="${TEST_SHA}" diff --git a/.gitignore b/.gitignore 
index ab1982cbf..592f8877e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,17 @@ +/externals /target /db /docs/cargo_docs/debug +/.helix /test_binaries/bins/* /integration-tests/chain_cache/* !/integration-tests/chain_cache/testnet_get_subtree_roots_sapling !/integration-tests/chain_cache/testnet_get_subtree_roots_orchard +.cargo +test_binaries/ +docker_cargo/**/* +!docker_cargo/**/.gitkeep +container-target/ +.local/ +.failed-tests +**/proptest-regressions/** diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..a0b0f8edd --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,193 @@ +# Changelog +All notable changes to this library will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this library adheres to Rust's notion of +[Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## Unreleased +- [808] Adopt lightclient-protocol v0.4.0 + +### Added +### Changed +- zaino-proto now references v0.4.0 files +- `zaino_fetch::jsonrpsee::response::ErrorsTimestamp` no longer supports a String + variant. +### Removed + +### Deprecated +- `zaino-fetch::chain:to_compact` in favor of `to_compact_tx` which takes an + optional height and a `PoolTypeFilter` (see zaino-proto changes) +- +## [v0.4.0] - 2025-12-03 + +### Added +- `compact_formats.CompactTxIn` +- `compact_formats.TxOut` +- `service.PoolType` +- `service.LightdInfo` has added fields `upgradeName`, `upgradeHeight`, and + `lightwalletProtocolVersion` +- `compact_formats.CompactTx` has added fields `vin` and `vout`, + which may be used to represent transparent transaction input and output data. +- `service.BlockRange` has added field `poolTypes`, which allows + the caller of service methods that take this type as input to cause returned + data to be filtered to include information only for the specified protocols. 
+ For backwards compatibility, when this field is set the default (empty) value, + servers should return Sapling and Orchard data. This field is to be ignored + when the type is used as part of a `service.TransparentAddressBlockFilter`. + +### Changed +- The `hash` field of `compact_formats.CompactTx` has been renamed to `txid`. + This is a serialization-compatible clarification, as the index of this field + in the .proto type does not change. +- `service.Exclude` has been renamed to `service.GetMempoolTxRequest` and has + an added `poolTypes` field, which allows the caller of this method to specify + which pools the resulting `CompactTx` values should contain data for. + +### Deprecated +- `service.CompactTxStreamer`: + - The `GetBlockNullifiers` and `GetBlockRangeNullifiers` methods are + deprecated. +- `zaino_fetch::FullTransaction::to_compact` deprecated in favor of `to_compact_tx` which includes + an optional for index to explicitly specify that the transaction is in the mempool and has no + index and `Vec` to filter pool types according to the transparent data changes of + lightclient-protocol v0.4.0 +- `zaino_fetch::chain::Block::to_compact` deprecated in favor of `to_compact_block` allowing callers + to specify `PoolTypeFilter` to filter pools that are included into the compact block according to + lightclient-protocol v0.4.0 +- `zaino_fetch::chain::Transaction::to_compact` deprecated in favor of `to_compact_tx` allowing callers + to specify `PoolTypFilter` to filter pools that are included into the compact transaction according + to lightclient-protocol v0.4.0. + +## [v0.3.6] - 2025-05-20 + +### Added +- `service.LightdInfo` has added field `donationAddress` +- `service.CompactTxStreamer.GetTaddressTransactions`. This duplicates + the `GetTaddressTxids` method, but is more accurately named. + +### Deprecated +- `service.CompactTxStreamer.GetTaddressTxids`. Use `GetTaddressTransactions` + instead. 
+ +## [v0.3.5] - 2023-07-03 + +### Added +- `compact_formats.ChainMetadata` +- `service.ShieldedProtocol` +- `service.GetSubtreeRootsArg` +- `service.SubtreeRoot` +- `service.CompactTxStreamer.GetBlockNullifiers` +- `service.CompactTxStreamer.GetBlockRangeNullifiers` +- `service.CompactTxStreamer.SubtreeRoots` + +### Changed +- `compact_formats.CompactBlock` has added field `chainMetadata` +- `compact_formats.CompactSaplingOutput.epk` has been renamed to `ephemeralKey` + +## [v0.3.4] - UNKNOWN + +### Added +- `service.CompactTxStreamer.GetLatestTreeState` + +## [v0.3.3] - 2022-04-02 + +### Added +- `service.TreeState` has added field `orchardTree` + +### Changed +- `service.TreeState.tree` has been renamed to `saplingTree` + +## [v0.3.2] - 2021-12-09 + +### Changed +- `compact_formats.CompactOrchardAction.encCiphertext` has been renamed to + `CompactOrchardAction.ciphertext` + +## [v0.3.1] - 2021-12-09 + +### Added +- `compact_formats.CompactOrchardAction` +- `service.CompactTxStreamer.GetMempoolTx` (removed in 0.3.0) has been reintroduced. +- `service.Exclude` (removed in 0.3.0) has been reintroduced. + +### Changed +- `compact_formats.CompactSpend` has been renamed `CompactSaplingSpend` +- `compact_formats.CompactOutput` has been renamed `CompactSaplingOutput` + +## [v0.3.0] - 2021-07-23 + +### Added +- `service.CompactTxStreamer.GetMempoolStream` + +### Removed +- `service.CompactTxStreamer.GetMempoolTx` has been replaced by `GetMempoolStream` +- `service.Exclude` has been removed as it is now unused. + +## [v0.2.4] - 2021-01-14 + +### Changed +- `service.GetAddressUtxosArg.address` has been replaced by the + repeated field `addresses`. This is a [conditionally-safe](https://protobuf.dev/programming-guides/proto3/#conditionally-safe-changes) + format change. 
+- `service.GetAddressUtxosReply` has added field `address` + +## [v0.2.3] - 2021-01-14 + +### Added +- `service.LightdInfo` has added fields: + - `estimatedHeight` + - `zcashdBuild` + - `zcashdSubversion` + +## [v0.2.2] - 2020-10-22 + +### Added +- `service.TreeState` +- `service.GetAddressUtxosArg` +- `service.GetAddressUtxosReply` +- `service.GetAddressUtxosReplyList` +- `service.CompactTxStreamer.GetTreeState` +- `service.CompactTxStreamer.GetAddressUtxos` +- `service.CompactTxStreamer.GetAddressUtxosStream` + +## [v0.2.1] - 2020-10-06 + +### Added +- `service.Address` +- `service.AddressList` +- `service.Balance` +- `service.Exclude` +- `service.CompactTxStreamer.GetTaddressBalance` +- `service.CompactTxStreamer.GetTaddressBalanceStream` +- `service.CompactTxStreamer.GetMempoolTx` +- `service.LightdInfo` has added fields: + - `gitCommit` + - `branch` + - `buildDate` + - `buildUser` + +## [v0.2.0] - 2020-04-24 + +### Added +- `service.Duration` +- `service.PingResponse` +- `service.CompactTxStreamer.Ping` + +### Removed +- `service.TransparentAddress` was removed (it was unused in any service API). + +## [v0.1.1] - 2019-11-27 + +### Added +- `service.Empty` +- `service.LightdInfo` +- `service.TransparentAddress` +- `service.TransparentAddressBlockFilter` +- `service.CompactTxStreamer.GetTaddressTxids` +- `service.CompactTxStreamer.GetLightdInfo` +- `service.RawTransaction` has added field `height` + +## [v0.1.0] - 2019-09-19 + +Initial release diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..5088e89dc --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,125 @@ +# Contributing to Zaino + +Welcome! Thank you for your interest in Zaino. We look forward to your contribution to this important part of the Zcash mainnet and testing ecosystem. 
+ +## Table of Contents +- [Getting Started](#getting-started) +- [How to Contribute Code and Documentation](#how-to-contribute) +- [How to open Bug Reports and Feature Requests](#bug-reports-and-feature-requests) +- [Local Testing](#local-testing) +- [Communication Channels](#communication-channels) +- [More Documentation](#more-documentation) +- [Software Philosophy](#software-philosophy) + +## Getting Started +To get started using Zaino, please see our [use cases document](./docs/use_cases.md) where you can find instructions for use and example use cases. + +We welcome and appreciate contributions in the form of code, documentation, bug reports and feature requests. We also generally enjoy feedback and outreach efforts. + +## Bug Reports and Feature Requests + +If you believe you have discovered a security issue and wish to disclose it non-pubicly, please contact us at: +zingodisclosure@proton.me + +Bug reports and feature requests can best be opened as [issues](https://docs.github.com/en/issues/tracking-your-work-with-issues/using-issues/creating-an-issue) on this GitHub repo. To do so you will need a [GitHub account](https://docs.github.com/en/account-and-profile). Especially for bug reports, any details you can offer will help us understand the issue better. Such details include versions or commits used in exposing the bug, what operating system is being used, and so on. + +Bug reports and feature requests can also be registered via other [communication channels](#communication-channels), but will be accepted in this way without guarantees of visibility to project software developers. + +## Communication Channels +In addition to GitHub, there is a ZingoLabs [Matrix](https://matrix.org/) channel that can be reached through [this web link](https://matrix.to/#/!cVsptZxBgWgmxWlHYB:matrix.org). Our primary languages are English and Spanish. 
+ +Other channels where you may be able to reach Zingolabs developers that include the [Zcash Community Forum](https://forum.zcashcommunity.com/) website, Bluesky, Telegram and Twitter/X (English and Spanish), Instagram (Spanish), and Zcash related Discord. + +## How to Contribute +Code and documentation are very helpful and the lifeblood of Free Software. To merge in code to this repo, one will have to have a [GitHub account](https://docs.github.com/en/account-and-profile). + +Code, being Rust, must be formatted using `rustfmt` and applying the `clippy` suggestions. +For convenience, there are scripts included in the `utils` directory which run these tools and remove trailing whitespaces. From the project's workspace root, you can run `./utils/precommit-check.sh` + +In general, PRs should be opened against [the `dev` branch](https://github.com/zingolabs/zaino/tree/dev). + +All tests must pass, see [Local Testing](#local-testing). + +Verified commits are encouraged. The best way to verify is using a GPG signature. See [this document about commit signature verification.](https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification) + +Code should be as complex as it needs to be, but no more. + +All code will be reviewed in public, as conversations on the pull request. It is very possible there will be requested changes or questions. This is not a sign of disrespect, but is necessary to keep code quality high in an important piece of software in the Zcash ecosystem. + +Documentation should be clear and accurate to your latest commit. This includes sensible and understandable doc comments. + +Contributions must be [GitHub pull requests](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests). New contributors should make PRs _from a personal fork_ of the project, _to this repo, zingolabs/zaino_. 
Generally pull requests will be against `dev`, the development branch. + +When code or documentation is still being developed and is not intended for review, the PR should be in the `Draft` state. +`Draft` pull requests cannot be merged: an `Open` PR is a PR that is "ready for review." The `Draft` state should be set as a default when working with GitHub on the web, and can be changed to `Open` status later, marking the PR as ready for review. + +All CI checks (remote testing and lints) must pass. + +Running `cargo update` may be considered as a requirement for some releases. + +PRs should be written by one developer, have a detailed review performed by a second developer, and be checked over and merged by a third. + +Certain situations may arise where experienced Zaino developers might bypass the merge-constraints, on a case-by-case basis. + +Finally, see our [Software Philosophy](#software-philosophy), and understand you are contribuing to a project with these principles at work. + +## Block Explorer Merge Requirements +This is an evolving document: the following merge requirements are intended for the specific case of Block Explorer RPCs. + +Any use of `TODO`s makes a PR invalid for merging into `dev`. + +Use of `.unwrap()` and `.expect()` are discouraged in non-test code. When used in code, an explicit comment is required explaining why the particular use of expect is okay, (eg matching on a known enum variant). +In test code, `.unwrap()` is wrong when a helper function might fail with insufficient information. + +Doc-tested doc-comments should be used to avoid stale docs, and skew from the underlying code. Quality doc-comments should include a doc-test, and with `pub` interface doc-comments should be considered nearly as a requirement. + +Error handling must be included and expose underlying information as much as and wherever possible, to assist developers and users. 
+ +Merges must minimally reflect the zcash RPC spec and include a link to the relevant zcash C++ implementation (URLs that point at the analogous logic), OR reflect the C++ implementation. + +Tests are encouraged that show parity bewteen responses from `zcash-cli` + `zcashd` and `zaino`+ a `zebra` backend, and the local cache. + +## Local Testing +Local testing requires a system with ample resources, particularly RAM. + +Tier 1 denotes the reference platform. It is the latest, updated, stable [Debian 12](https://www.debian.org/releases/bookworm/), codename Bookworm, with an AMD64 `x86_64-unknown-linux-gnu` compilation target. This can be thought of as Tier 1 or "guaranteed to build and pass all tests." + +Tier 2 platforms are platforms that are currently understood to be working as well as Tier 1, but as non-canonical sources of truth. Sometimes these platforms provide valuable insights when compared with the reference Tier 1 Debian. Therefore, using them is encouraged. + +Currently, [Arch Linux](https://archlinux.org) AMD64 `x86_64-unknown-linux-gnu` is understood to be Tier 2. + +Zaino uses [`cargo nextest`](https://nexte.st/). On the linux command line, with a system already using Rust (and `cargo`), you can install this using `cargo install cargo-nextest --locked` or from GitHub with `cargo install --git https://github.com/nextest-rs/nextest --bin cargo-nextest`. + +After installing this crate, all tests can be run locally with `cargo nextest run`. + +For more details see our [testing document](./docs/testing.md). + +## More Documentation + +To see more included documentation, please see [our docs directory](./docs/). +## Software Philosophy +We believe in the power of Free and Open Source Software (FOSS) as the best path for individual and social freedom in computing. + +Very broadly, Free Software provides a clear path to make software benefit its users. 
That is, Free Software has the possibility to be used it like a traditional tool, extending the user's capabilities, unlike closed source software which constrains usage, visability and adaptability of the user while providing some function. + +In more detail, the Free Software Foundation states FOSS allows: + +The freedom to run a program, for any purpose, + +The freedom to study how a program works and adapt it to a person’s needs. Access to the source code is a precondition for this, + +The freedom to redistribute copies so that you can help your neighbour, and + +The freedom to improve a program and release your improvements to the public, so that the whole community benefits. Access to the source code is a precondition for this. + +Developing from this philosophical perspective has several practical advantages: + +Reduced duplication of effort, + +Building upon the work of others, + +Better quality control, + +Reduced maintenance costs. + +To read more, see [this document on wikibooks](https://en.wikibooks.org/wiki/FOSS_A_General_Introduction/Preface). diff --git a/Cargo.lock b/Cargo.lock index cda1a9ed9..02805bb55 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,21 +1,21 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "addr2line" -version = "0.21.0" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aead" @@ -23,7 +23,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ - "crypto-common", + "crypto-common 0.1.7", "generic-array", ] @@ -33,18 +33,30 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", - "cpufeatures", + "cpufeatures 0.2.17", + "zeroize", +] + +[[package]] +name = "ahash" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" +dependencies = [ + "getrandom 0.2.17", + "once_cell", + "version_check", ] [[package]] name = "ahash" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", "version_check", "zerocopy", @@ -52,24 +64,64 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.4" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] [[package]] name = "allocator-api2" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] -name = "android-tzdata" -version = "0.1.1" +name = "amplify" +version = "4.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f7fb4ac7c881e54a8e7015e399b6112a2a5bc958b6c89ac510840ff20273b31" +dependencies = [ + "amplify_derive", + "amplify_num", + "ascii", + "getrandom 0.2.17", + "getrandom 0.3.4", + "wasm-bindgen", +] + +[[package]] +name = "amplify_derive" +version = "4.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a6309e6b8d89b36b9f959b7a8fa093583b94922a0f6438a24fb08936de4d428" +dependencies = [ + "amplify_syn", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "amplify_num" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99bcb75a2982047f733547042fc3968c0f460dfcf7d90b90dea3b2744580e9ad" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "amplify_syn" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" +checksum = "7736fb8d473c0d83098b5bac44df6a561e20470375cd8bcae30516dc889fd62a" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] [[package]] name = "android_system_properties" @@ -80,11 +132,17 @@ dependencies = [ "libc", ] +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "anstream" -version = "0.6.18" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" dependencies = [ "anstyle", "anstyle-parse", @@ -97,54 +155,59 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" [[package]] name = "anstyle-parse" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.2" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.6" +version = "3.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" dependencies = [ "anstyle", - "windows-sys 0.59.0", + "once_cell_polyfill", + "windows-sys 0.61.2", ] [[package]] name = "anyhow" -version = "1.0.93" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" +checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" [[package]] name = "append-only-vec" -version = "0.1.2" -source = "git+https://github.com/zancas/append-only-vec.git?branch=add_debug_impl#ec919c9c8f5429edbac69f0c2203dd01c861ce15" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2114736faba96bcd79595c700d03183f61357b9fbce14852515e59f3bee4ed4a" [[package]] name = "arc-swap" -version = "1.7.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +checksum = "f9f3647c145568cec02c42054e07bdf9a5a698e15b466fb2341bfc393cd24aa5" +dependencies = [ + "rustversion", +] [[package]] name = "arrayref" @@ -159,36 +222,168 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] -name = "async-stream" -version = "0.3.6" +name = "arti-client" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +checksum = "0a79ca5ce63b36033a5ccbfbcc7f919cbd93db61708543aa5e2e4917856205e7" dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", + "async-trait", + "cfg-if", + "derive-deftly", + "derive_builder_fork_arti", + "derive_more", + "educe", + "fs-mistrust", + "futures", + "hostname-validator", + "humantime", + "humantime-serde", + "libc", + "once_cell", + "postage", + "rand 0.9.2", + "safelog", + "serde", + "thiserror 2.0.18", + "time", + "tor-async-utils", + "tor-basic-utils", + "tor-chanmgr", + "tor-circmgr", + "tor-config", + "tor-config-path", + "tor-dircommon", + "tor-dirmgr", + "tor-error", + "tor-guardmgr", + "tor-keymgr", + "tor-linkspec", + "tor-llcrypto", + "tor-memquota", + "tor-netdir", + 
"tor-netdoc", + "tor-persist", + "tor-proto", + "tor-protover", + "tor-rtcompat", + "tracing", + "void", +] + +[[package]] +name = "ascii" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16" + +[[package]] +name = "asn1-rs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror 2.0.18", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.116", + "synstructure", ] [[package]] -name = "async-stream-impl" -version = "0.3.6" +name = "asn1-rs-impl" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", +] + +[[package]] +name = "assert_matches" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" + +[[package]] +name = "async-compression" +version = "0.4.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68650b7df54f0293fd061972a0fb05aaf4fc0879d3b3d21a638a182c5c543b9f" +dependencies = [ + "compression-codecs", + "compression-core", + "futures-io", + "pin-project-lite", ] [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", +] + +[[package]] +name = "async_executors" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a982d2f86de6137cc05c9db9a915a19886c97911f9790d04f174cede74be01a5" +dependencies = [ + "blanket", + "futures-core", + "futures-task", + "futures-util", + "pin-project", + "rustc_version", + "tokio", +] + +[[package]] +name = "asynchronous-codec" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" +dependencies = [ + "bytes", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite", +] + +[[package]] +name = "atomic" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" + +[[package]] +name = "atomic" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a89cbf775b137e9b968e67227ef7f775587cde3fd31b0d8599dbd0f598a48340" +dependencies = [ + "bytemuck", ] [[package]] @@ -199,49 +394,43 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.10.0" +version = "1.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdd82dba44d209fddb11c190e0a94b78651f95299598e472215667417a03ff1d" +checksum = "7b7b6141e96a8c160799cc2d5adecd5cbbe5054cb8c7c4af53da0f83bb7ad256" 
dependencies = [ "aws-lc-sys", - "mirai-annotations", - "paste", "zeroize", ] [[package]] name = "aws-lc-sys" -version = "0.22.0" +version = "0.37.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df7a4168111d7eb622a31b214057b8509c0a7e1794f44c546d742330dc793972" +checksum = "b092fe214090261288111db7a2b2c2118e5a7f30dc2569f1732c4069a6840549" dependencies = [ - "bindgen 0.69.5", "cc", "cmake", "dunce", "fs_extra", - "libc", - "paste", ] [[package]] name = "axum" -version = "0.7.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" +checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8" dependencies = [ - "async-trait", "axum-core", - "bytes 1.8.0", + "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", "itoa", "matchit", @@ -249,66 +438,51 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rustversion", - "serde", - "sync_wrapper 1.0.1", - "tower 0.5.1", + "serde_core", + "sync_wrapper", + "tower 0.5.3", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.4.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" dependencies = [ - "async-trait", - "bytes 1.8.0", - "futures-util", - "http 1.1.0", - "http-body 1.0.1", + "bytes", + "futures-core", + "http", + "http-body", "http-body-util", "mime", "pin-project-lite", - "rustversion", - "sync_wrapper 1.0.1", + "sync_wrapper", "tower-layer", "tower-service", ] [[package]] name = "backtrace" -version = "0.3.71" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" 
+checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" dependencies = [ "addr2line", - "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", + "windows-link 0.2.1", ] [[package]] -name = "base58" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "base64" -version = "0.21.7" +name = "base16ct" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" [[package]] name = "base64" @@ -318,15 +492,15 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.6.0" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" [[package]] name = "bech32" -version = "0.9.1" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" +checksum = "32637268377fc7b10a8c6d51de3e7fba1ce5dd371a96e342b34e6078db558e7f" [[package]] name = "bellman" @@ -358,93 +532,131 @@ dependencies = [ "serde", ] +[[package]] +name = "bincode" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36eaf5d7b090263e8150820482d5d93cd964a81e4019913c972f4edcc6edb740" +dependencies = [ + "serde", + "unty", +] + [[package]] name = "bindgen" version = 
"0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.11.0", "cexpr", "clang-sys", "itertools 0.12.1", "lazy_static", "lazycell", - "log", - "prettyplease", "proc-macro2", "quote", "regex", - "rustc-hash", + "rustc-hash 1.1.0", "shlex", - "syn 2.0.87", - "which", + "syn 2.0.116", ] [[package]] name = "bindgen" -version = "0.70.1" +version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.11.0", "cexpr", "clang-sys", "itertools 0.13.0", - "log", - "prettyplease", "proc-macro2", "quote", "regex", - "rustc-hash", + "rustc-hash 2.1.1", "shlex", - "syn 2.0.87", + "syn 2.0.116", ] [[package]] name = "bip0039" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e68a5a99c65851e7be249f5cf510c0a136f18c9bca32139576d59bd3f577b043" +checksum = "568b6890865156d9043af490d4c4081c385dd68ea10acd6ca15733d511e6b51c" dependencies = [ - "hmac", + "hmac 0.12.1", "pbkdf2", "rand 0.8.5", - "sha2 0.10.8", + "sha2 0.10.9", + "unicode-normalization", + "zeroize", +] + +[[package]] +name = "bip0039" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2da3803045ec5ba2ee8f65eb36b29115ec8a05fe519a7ae023c84770bd5f676b" +dependencies = [ + "anyhow", + "hmac 0.12.1", + "pbkdf2", + "phf 0.13.1", + "phf_codegen", + "rand 0.10.0", + "sha2 0.10.9", "unicode-normalization", "zeroize", ] [[package]] name = "bip32" -version = "0.5.2" +version = "0.6.0-pre.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa13fae8b6255872fd86f7faf4b41168661d7d78609f7bfe6771b85c6739a15b" +checksum = 
"143f5327f23168716be068f8e1014ba2ea16a6c91e8777bc8927da7b51e1df1f" dependencies = [ "bs58", - "hmac", + "hmac 0.13.0-pre.4", "rand_core 0.6.4", - "ripemd", - "secp256k1", - "sha2 0.10.8", + "ripemd 0.2.0-pre.4", + "secp256k1 0.29.1", + "sha2 0.11.0-pre.4", "subtle", "zeroize", ] [[package]] name = "bit-set" -version = "0.5.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" dependencies = [ "bit-vec", ] [[package]] name = "bit-vec" -version = "0.6.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + +[[package]] +name = "bitcoin-io" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dee39a0ee5b4095224a0cfc6bf4cc1baf0f9624b96b367e53b66d974e51d953" + +[[package]] +name = "bitcoin_hashes" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26ec84b80c482df901772e931a9a681e26a1b9ee2302edeff23cb30328745c8b" +dependencies = [ + "bitcoin-io", + "hex-conservative", +] [[package]] name = "bitflags" @@ -454,9 +666,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" [[package]] name = "bitflags-serde-legacy" @@ -464,7 +676,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b64e60c28b6d25ad92e8b367801ff9aa12b41d05fc8798055d296bace4a60cc" 
dependencies = [ - "bitflags 2.6.0", + "bitflags 2.11.0", "serde", ] @@ -480,11 +692,20 @@ dependencies = [ "wyz", ] +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "blake2b_simd" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" +checksum = "b79834656f71332577234b50bfc009996f7449e0c056884e6a02492ded0ca2f3" dependencies = [ "arrayref", "arrayvec", @@ -493,9 +714,9 @@ dependencies = [ [[package]] name = "blake2s_simd" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94230421e395b9920d23df13ea5d77a20e1725331f90fbbf6df6040b33f756ae" +checksum = "ee29928bad1e3f94c9d1528da29e07a1d3d04817ae8332de1e8b846c8439f4b3" dependencies = [ "arrayref", "arrayvec", @@ -503,12 +724,14 @@ dependencies = [ ] [[package]] -name = "block-buffer" -version = "0.9.0" +name = "blanket" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +checksum = "e0b121a9fe0df916e362fb3271088d071159cdf11db0e4182d02152850756eff" dependencies = [ - "generic-array", + "proc-macro2", + "quote", + "syn 2.0.116", ] [[package]] @@ -520,6 +743,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-buffer" +version = "0.11.0-rc.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fd016a0ddc7cb13661bf5576073ce07330a693f8608a1320b4e20561cc12cdc" +dependencies = [ + "hybrid-array", +] + [[package]] name = "bls12_381" version = "0.8.0" @@ -534,12 +766,36 @@ dependencies = [ ] [[package]] -name = "bridgetree" -version = "0.6.0" +name = "borsh" +version = "1.6.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1da5ab77c1437701eeff7c88d968729e7766172279eab0676857b3d63af7a6f" +dependencies = [ + "borsh-derive", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0686c856aa6aac0c4498f936d7d6a02df690f614c03e4d906d1018062b5c5e2c" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.116", +] + +[[package]] +name = "bounded-vec" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef977c7f8e75aa81fc589064c121ab8d32448b7939d34d58df479aa93e65ea5" +checksum = "09dc0086e469182132244e9b8d313a0742e1132da43a08c24b9dd3c18e0faf3a" dependencies = [ - "incrementalmerkletree 0.7.0", + "serde", + "thiserror 2.0.18", ] [[package]] @@ -548,75 +804,113 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ - "sha2 0.10.8", + "sha2 0.10.9", "tinyvec", ] [[package]] name = "bstr" -version = "1.11.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a68f1f47cdf0ec8ee4b941b2eee2a80cb796db73118c0dd09ac63fbe405be22" +checksum = "63044e1ae8e69f3b5a92c736ca6269b8d12fa7efe39bf34ddb06d102cf0e2cab" dependencies = [ "memchr", + "regex-automata", "serde", ] [[package]] -name = "build_utils" -version = "0.1.0" -source = "git+https://github.com/zingolabs/zingolib.git?tag=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2#95e5b0d8f9d5ee0485c6141533da2f727aeafae2" - -[[package]] -name = "build_utils" -version = "0.1.0" -source = "git+https://github.com/Oscar-Pepper/zingolib.git?branch=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2_with_output_ordering#58bf3afa55e63285063148e35deb7423535e8fd4" +name = "bumpalo" +version = "3.19.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" [[package]] -name = "bumpalo" -version = "3.16.0" +name = "by_address" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" +checksum = "64fa3c856b712db6612c019f14756e64e4bcea13337a6b33b696333a9eaa2d06" [[package]] name = "byte-slice-cast" -version = "1.2.2" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" +checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" [[package]] -name = "byteorder" -version = "1.5.0" +name = "bytecheck" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" +checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2" +dependencies = [ + "bytecheck_derive", + "ptr_meta", + "simdutf8", +] [[package]] -name = "bytes" -version = "0.4.12" +name = "bytecheck_derive" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" +checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ - "byteorder", - "iovec", + "proc-macro2", + "quote", + "syn 1.0.109", ] +[[package]] +name = "bytemuck" +version = "1.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8efb64bd706a16a1bdde310ae86b351e4d21550d98d056f22f8a7f7a2183fec" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + [[package]] name = "bytes" -version = "1.8.0" +version = 
"1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" [[package]] name = "bzip2-sys" -version = "0.1.11+1.0.8" +version = "0.1.13+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" dependencies = [ "cc", - "libc", "pkg-config", ] +[[package]] +name = "caret" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4d27042e727de6261ee6391b834c6e1adec7031a03228cc1a67f95a3d8f2202" + +[[package]] +name = "cargo-lock" +version = "10.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06acb4f71407ba205a07cb453211e0e6a67b21904e47f6ba1f9589e38f2e454" +dependencies = [ + "semver", + "serde", + "toml 0.8.23", + "url", +] + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cbc" version = "0.1.2" @@ -628,10 +922,11 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.1" +version = "1.2.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" +checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -648,15 +943,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - -[[package]] -name = "cfg-if" -version = "1.0.0" +version = "1.0.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "cfg_aliases" @@ -670,9 +959,20 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", - "cpufeatures", + "cpufeatures 0.2.17", +] + +[[package]] +name = "chacha20" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f8d983286843e49675a4b7a2d174efe136dc93a18d69130dd18198a6c167601" +dependencies = [ + "cfg-if", + "cpufeatures 0.3.0", + "rand_core 0.10.0", ] [[package]] @@ -682,7 +982,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ "aead", - "chacha20", + "chacha20 0.9.1", "cipher", "poly1305", "zeroize", @@ -690,17 +990,43 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.38" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" dependencies = [ - "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.6", + "windows-link 0.2.1", +] + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", ] [[package]] @@ -709,7 +1035,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ - "crypto-common", + "crypto-common 0.1.7", "inout", "zeroize", ] @@ -727,9 +1053,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.21" +version = "4.5.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" +checksum = "c5caf74d17c3aec5495110c34cc3f78644bfa89af6c8993ed4de2790e49b6499" dependencies = [ "clap_builder", "clap_derive", @@ -737,3871 +1063,6446 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.21" +version = "4.5.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" +checksum = "370daa45065b80218950227371916a1633217ae42b2715b2287b606dcd618e24" dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim", + "strsim 0.11.1", ] [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", ] [[package]] name = "clap_lex" -version = "0.7.3" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" +checksum = 
"3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" [[package]] name = "cmake" -version = "0.1.51" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" dependencies = [ "cc", ] +[[package]] +name = "coarsetime" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e58eb270476aa4fc7843849f8a35063e8743b4dbcdf6dd0f8ea0886980c204c2" +dependencies = [ + "libc", + "wasix", + "wasm-bindgen", +] + [[package]] name = "color-eyre" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55146f5e46f237f7423d74111267d4597b59b0dad0ffaf7303bce9945d843ad5" +checksum = "e5920befb47832a6d61ee3a3a846565cfa39b331331e68a3b1d1116630f2f26d" dependencies = [ "backtrace", - "color-spantrace", "eyre", "indenter", "once_cell", "owo-colors", - "tracing-error", ] [[package]] -name = "color-spantrace" -version = "0.2.1" +name = "colorchoice" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" + +[[package]] +name = "compression-codecs" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd6be1b2a7e382e2b98b43b2adcca6bb0e465af0bdd38123873ae61eb17a72c2" +checksum = "00828ba6fd27b45a448e57dbfe84f1029d4c9f26b368157e9a448a5f49a2ec2a" dependencies = [ - "once_cell", - "owo-colors", - "tracing-core", - "tracing-error", + "compression-core", + "flate2", + "liblzma", + "zstd", + "zstd-safe", ] [[package]] -name = "colorchoice" -version = "1.0.3" +name = "compression-core" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +checksum = 
"75984efb6ed102a0d42db99afb6c1948f0380d1d91808d5529916e6c08b49d8d" [[package]] -name = "const-oid" -version = "0.9.6" +name = "concurrent-queue" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] [[package]] -name = "constant_time_eq" -version = "0.3.1" +name = "config" +version = "0.15.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" +checksum = "b30fa8254caad766fc03cb0ccae691e14bf3bd72bfff27f72802ce729551b3d6" +dependencies = [ + "pathdiff", + "serde_core", + "toml 0.9.12+spec-1.1.0", + "winnow", +] [[package]] -name = "core-foundation" -version = "0.9.4" +name = "console" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" dependencies = [ - "core-foundation-sys", + "encode_unicode", "libc", + "once_cell", + "windows-sys 0.59.0", ] [[package]] -name = "core-foundation-sys" -version = "0.8.7" +name = "const-oid" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] -name = "cpufeatures" -version = "0.2.15" +name = "const_format" +version = "0.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6" +checksum = "7faa7469a93a566e9ccc1c73fe783b4a65c274c5ace346038dca9c39fe0030ad" dependencies = [ - "libc", + "const_format_proc_macros", ] [[package]] -name = 
"crossbeam-channel" -version = "0.5.13" +name = "const_format_proc_macros" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" dependencies = [ - "crossbeam-utils", + "proc-macro2", + "quote", + "unicode-xid", ] [[package]] -name = "crossbeam-deque" -version = "0.8.5" +name = "constant_time_eq" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", -] +checksum = "3d52eff69cd5e647efe296129160853a42795992097e8af39800e1060caeea9b" [[package]] -name = "crossbeam-epoch" -version = "0.9.18" +name = "convert_case" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +checksum = "baaaa0ecca5b51987b9423ccdc971514dd8b0bb7b4060b983d3664dad3f1f89f" dependencies = [ - "crossbeam-utils", + "unicode-segmentation", ] [[package]] -name = "crossbeam-utils" -version = "0.8.20" +name = "convert_case" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" +dependencies = [ + "unicode-segmentation", +] [[package]] -name = "crunchy" -version = "0.2.2" +name = "cookie" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +checksum = "4ddef33a339a91ea89fb53151bd0a4689cfce27055c291dfa69945475d22c747" +dependencies = [ + "percent-encoding", + "time", + "version_check", +] [[package]] -name = "crypto-common" -version = "0.1.6" +name = 
"cookie-factory" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "9885fa71e26b8ab7855e2ec7cae6e9b380edff76cd052e07c683a0319d51b3a2" dependencies = [ - "generic-array", - "typenum", + "futures", ] [[package]] -name = "ctrlc" -version = "3.4.5" +name = "cookie_store" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" +checksum = "3fc4bff745c9b4c7fb1e97b25d13153da2bc7796260141df62378998d070207f" dependencies = [ - "nix", - "windows-sys 0.59.0", + "cookie", + "document-features", + "idna", + "log", + "publicsuffix", + "serde", + "serde_derive", + "serde_json", + "time", + "url", ] [[package]] -name = "curve25519-dalek" -version = "4.1.3" +name = "core-foundation" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "curve25519-dalek-derive", - "digest 0.10.7", - "fiat-crypto", - "rustc_version", - "serde", - "subtle", - "zeroize", + "core-foundation-sys", + "libc", ] [[package]] -name = "curve25519-dalek-derive" -version = "0.1.1" +name = "core-foundation-sys" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "core2" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "239fa3ae9b63c2dc74bd3fa852d4792b8b305ae64eeede946265b6af62f1fff3" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", + "memchr", ] [[package]] -name = "darling" 
-version = "0.20.10" +name = "core2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" dependencies = [ - "darling_core", - "darling_macro", + "memchr", ] [[package]] -name = "darling_core" -version = "0.20.10" +name = "cpufeatures" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim", - "syn 2.0.87", + "libc", ] [[package]] -name = "darling_macro" -version = "0.20.10" +name = "cpufeatures" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +checksum = "8b2a41393f66f16b0823bb79094d54ac5fbd34ab292ddafb9a0456ac9f87d201" dependencies = [ - "darling_core", - "quote", - "syn 2.0.87", + "libc", ] [[package]] -name = "der" -version = "0.7.9" +name = "crc32fast" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ - "const-oid", - "zeroize", + "cfg-if", ] [[package]] -name = "deranged" -version = "0.3.11" +name = "criterion" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "e1c047a62b0cc3e145fa84415a3191f628e980b194c2755aa12300a4e6cbd928" dependencies = [ - "powerfmt", + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "itertools 0.13.0", + "num-traits", + "oorandom", 
+ "plotters", + "rayon", + "regex", "serde", + "serde_json", + "tinytemplate", + "walkdir", ] [[package]] -name = "derivative" -version = "2.2.0" +name = "criterion-cycles-per-byte" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +checksum = "6f82e634fea1e2312dc41e6c0ca7444c5d6e7a1ccf3cf4b8de559831c3dcc271" dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", + "cfg-if", + "criterion", ] [[package]] -name = "destructure_traitobject" -version = "0.2.0" +name = "criterion-plot" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c877555693c14d2f84191cfd3ad8582790fc52b5e2274b40b59cf5f5cea25c7" +checksum = "9b1bcc0dc7dfae599d84ad0b1a55f80cde8af3725da8313b528da95ef783e338" +dependencies = [ + "cast", + "itertools 0.13.0", +] [[package]] -name = "digest" -version = "0.9.0" +name = "crossbeam-channel" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ - "generic-array", + "crossbeam-utils", ] [[package]] -name = "digest" -version = "0.10.7" +name = "crossbeam-deque" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ - "block-buffer 0.10.4", - "crypto-common", - "subtle", + "crossbeam-epoch", + "crossbeam-utils", ] [[package]] -name = "dirs" -version = "5.0.1" +name = "crossbeam-epoch" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +checksum = 
"5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "dirs-sys", + "crossbeam-utils", ] [[package]] -name = "dirs-sys" -version = "0.4.1" +name = "crossbeam-queue" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" dependencies = [ - "libc", - "option-ext", - "redox_users", - "windows-sys 0.48.0", + "crossbeam-utils", ] [[package]] -name = "displaydoc" -version = "0.2.5" +name = "crossbeam-utils" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", -] +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] -name = "document-features" -version = "0.2.10" +name = "crunchy" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6969eaabd2421f8a2775cfd2471a2b634372b4a25d41e3bd647b79912850a0" -dependencies = [ - "litrs", -] +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] -name = "dunce" -version = "1.0.5" +name = "crypto-bigint" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] [[package]] -name = "ed25519" -version = "2.2.3" +name = "crypto-common" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" 
dependencies = [ - "pkcs8", - "serde", - "signature", + "generic-array", + "typenum", ] [[package]] -name = "ed25519-zebra" -version = "4.0.3" +name = "crypto-common" +version = "0.2.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9" +checksum = "b0b8ce8218c97789f16356e7896b3714f26c2ee1079b79c0b7ae7064bb9089fa" dependencies = [ - "curve25519-dalek", - "ed25519", - "hashbrown 0.14.5", - "hex", - "rand_core 0.6.4", - "serde", - "sha2 0.10.8", - "zeroize", + "hybrid-array", ] [[package]] -name = "either" -version = "1.13.0" +name = "ctr" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher", +] [[package]] -name = "encoding_rs" -version = "0.8.35" +name = "curve25519-dalek" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", + "cpufeatures 0.2.17", + "curve25519-dalek-derive", + "digest 0.10.7", + "fiat-crypto", + "rustc_version", + "serde", + "subtle", + "zeroize", ] [[package]] -name = "enum_dispatch" -version = "0.3.13" +name = "curve25519-dalek-derive" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ - "once_cell", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", ] [[package]] -name = "env_logger" -version = "0.7.1" +name = "darling" +version = "0.14.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ - "log", - "regex", + "darling_core 0.14.4", + "darling_macro 0.14.4", ] [[package]] -name = "equihash" -version = "0.2.0" +name = "darling" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab579d7cf78477773b03e80bc2f89702ef02d7112c711d54ca93dcdce68533d5" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" dependencies = [ - "blake2b_simd", - "byteorder", + "darling_core 0.21.3", + "darling_macro 0.21.3", ] [[package]] -name = "equihash" -version = "0.2.0" -source = "git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08#121371a089f076a5ee2737809c792d905f5a4b3a" +name = "darling_core" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ - "blake2b_simd", - "byteorder", + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn 1.0.109", ] [[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - -[[package]] -name = "errno" -version = "0.3.9" +name = "darling_core" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" dependencies = [ - "libc", - "windows-sys 0.52.0", + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.11.1", + "syn 2.0.116", ] [[package]] -name = "eyre" -version = "0.6.12" +name = 
"darling_macro" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ - "indenter", - "once_cell", + "darling_core 0.14.4", + "quote", + "syn 1.0.109", ] [[package]] -name = "f4jumble" -version = "0.1.0" +name = "darling_macro" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a83e8d7fd0c526af4aad893b7c9fe41e2699ed8a776a6c74aecdeafe05afc75" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ - "blake2b_simd", + "darling_core 0.21.3", + "quote", + "syn 2.0.116", ] [[package]] -name = "f4jumble" -version = "0.1.0" -source = "git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08#121371a089f076a5ee2737809c792d905f5a4b3a" +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ - "blake2b_simd", + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", ] [[package]] -name = "fastrand" -version = "2.2.0" +name = "data-encoding" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" +checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" [[package]] -name = "ff" -version = "0.13.0" +name = "der" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ - "bitvec", - "rand_core 0.6.4", - 
"subtle", + "const-oid", + "pem-rfc7468", + "zeroize", ] [[package]] -name = "fiat-crypto" -version = "0.2.9" +name = "der-parser" +version = "10.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6" +dependencies = [ + "asn1-rs", + "cookie-factory", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", +] [[package]] -name = "fixed-hash" -version = "0.8.0" +name = "deranged" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ - "byteorder", - "rand 0.8.5", - "rustc-hex", - "static_assertions", + "powerfmt", + "serde", ] [[package]] -name = "fixedbitset" -version = "0.4.2" +name = "derive-deftly" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +checksum = "7d308ebe4b10924331bd079044b418da7b227d724d3e2408567a47ad7c3da2a0" +dependencies = [ + "derive-deftly-macros", + "heck", +] [[package]] -name = "fnv" -version = "1.0.7" +name = "derive-deftly-macros" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +checksum = "dd5f2b7218a51c827a11d22d1439b598121fac94bf9b99452e4afffe512d78c9" +dependencies = [ + "heck", + "indexmap 2.13.0", + "itertools 0.14.0", + "proc-macro-crate", + "proc-macro2", + "quote", + "sha3", + "strum", + "syn 2.0.116", + "void", +] [[package]] -name = "foreign-types" -version = "0.3.2" +name = "derive-getters" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +checksum = "74ef43543e701c01ad77d3a5922755c6a1d71b22d942cb8042be4994b380caff" dependencies = [ - "foreign-types-shared", + "proc-macro2", + "quote", + "syn 2.0.116", ] [[package]] -name = "foreign-types-shared" -version = "0.1.1" +name = "derive-new" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] [[package]] -name = "form_urlencoded" -version = "1.2.1" +name = "derive_builder_core_fork_arti" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "24c1b715c79be6328caa9a5e1a387a196ea503740f0722ec3dd8f67a9e72314d" dependencies = [ - "percent-encoding", + "darling 0.14.4", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "fpe" -version = "0.6.1" +name = "derive_builder_fork_arti" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c4b37de5ae15812a764c958297cfc50f5c010438f60c6ce75d11b802abd404" +checksum = "c3eae24d595f4d0ecc90a9a5a6d11c2bd8dafe2375ec4a1ec63250e5ade7d228" dependencies = [ - "cbc", - "cipher", - "libm", - "num-bigint", - "num-integer", - "num-traits", + "derive_builder_macro_fork_arti", ] [[package]] -name = "fs_extra" -version = "1.3.0" +name = "derive_builder_macro_fork_arti" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" +checksum = "69887769a2489cd946bf782eb2b1bb2cb7bc88551440c94a765d4f040c08ebf3" +dependencies = [ + "derive_builder_core_fork_arti", + "syn 1.0.109", +] [[package]] -name = "fuchsia-cprng" -version = "0.1.1" +name 
= "derive_more" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" +checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134" +dependencies = [ + "derive_more-impl", +] [[package]] -name = "funty" -version = "2.0.0" +name = "derive_more-impl" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" +checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" +dependencies = [ + "convert_case 0.10.0", + "proc-macro2", + "quote", + "rustc_version", + "syn 2.0.116", + "unicode-xid", +] [[package]] -name = "futures" -version = "0.3.31" +name = "digest" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", + "block-buffer 0.10.4", + "const-oid", + "crypto-common 0.1.7", + "subtle", ] [[package]] -name = "futures-channel" -version = "0.3.31" +name = "digest" +version = "0.11.0-pre.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "cf2e3d6615d99707295a9673e889bf363a04b2a466bd320c65a72536f7577379" dependencies = [ - "futures-core", - "futures-sink", + "block-buffer 0.11.0-rc.3", + "crypto-common 0.2.0-rc.1", + "subtle", ] [[package]] -name = "futures-core" -version = "0.3.31" +name = "directories" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = 
"16f5094c54661b38d03bd7e50df373292118db60b585c08a411c6d840017fe7d" +dependencies = [ + "dirs-sys", +] [[package]] -name = "futures-executor" -version = "0.3.31" +name = "dirs" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e" dependencies = [ - "futures-core", - "futures-task", - "futures-util", + "dirs-sys", ] [[package]] -name = "futures-io" -version = "0.3.31" +name = "dirs-sys" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.59.0", +] [[package]] -name = "futures-macro" -version = "0.3.31" +name = "displaydoc" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", ] [[package]] -name = "futures-sink" -version = "0.3.31" +name = "document-features" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "d4b8a88685455ed29a21542a33abd9cb6510b6b129abadabdcef0f4c55bc8f61" +dependencies = [ + "litrs", +] [[package]] -name = "futures-task" -version = "0.3.31" +name = "documented" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "ed6b3e31251e87acd1b74911aed84071c8364fc9087972748ade2f1094ccce34" +dependencies = [ + 
"documented-macros", + "phf 0.12.1", + "thiserror 2.0.18", +] [[package]] -name = "futures-util" -version = "0.3.31" +name = "documented-macros" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "1149cf7462e5e79e17a3c05fd5b1f9055092bbfa95e04c319395c3beacc9370f" dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", + "convert_case 0.8.0", + "itertools 0.14.0", + "optfield", + "proc-macro2", + "quote", + "strum", + "syn 2.0.116", ] [[package]] -name = "generic-array" -version = "0.14.7" +name = "downcast-rs" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] +checksum = "117240f60069e65410b3ae1bb213295bd828f707b5bec6596a1afc8793ce0cbc" [[package]] -name = "getrandom" -version = "0.1.16" +name = "dunce" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" [[package]] -name = "getrandom" -version = "0.2.15" +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + +[[package]] +name = "dynosaur" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "a12303417f378f29ba12cb12fc78a9df0d8e16ccb1ad94abf04d48d96bdda532" dependencies = [ - 
"cfg-if 1.0.0", - "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "dynosaur_derive", ] [[package]] -name = "getset" -version = "0.1.3" +name = "dynosaur_derive" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f636605b743120a8d32ed92fc27b6cde1a769f8f936c065151eb66f88ded513c" +checksum = "0b0713d5c1d52e774c5cd7bb8b043d7c0fc4f921abfb678556140bfbe6ab2364" dependencies = [ - "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", ] [[package]] -name = "gimli" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" - -[[package]] -name = "glob" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" - -[[package]] -name = "globset" -version = "0.4.15" +name = "ecdsa" +version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15f1ce686646e7f1e19bf7d5533fe443a45dbfb990e00629110797578b42fb19" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "aho-corasick", - "bstr", - "log", - "regex-automata", - "regex-syntax", + "der", + "digest 0.10.7", + "elliptic-curve", + "rfc6979", + "signature", + "spki", ] [[package]] -name = "group" -version = "0.13.0" +name = "ed25519" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "ff", - "memuse", - "rand_core 0.6.4", - "subtle", + "pkcs8", + "serde", + "signature", ] [[package]] -name = "h2" -version = "0.3.26" +name = "ed25519-dalek" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" dependencies = [ - "bytes 1.8.0", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.12", - "indexmap 2.6.0", - "slab", - "tokio", - "tokio-util 0.7.12", - "tracing", + "curve25519-dalek", + "ed25519", + "merlin", + "rand_core 0.6.4", + "serde", + "sha2 0.10.9", + "subtle", + "zeroize", ] [[package]] -name = "h2" -version = "0.4.6" +name = "ed25519-zebra" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +checksum = "0017d969298eec91e3db7a2985a8cab4df6341d86e6f3a6f5878b13fb7846bc9" dependencies = [ - "atomic-waker", - "bytes 1.8.0", - "fnv", - "futures-core", - "futures-sink", - "http 1.1.0", - "indexmap 2.6.0", - "slab", - "tokio", - "tokio-util 0.7.12", - "tracing", + "curve25519-dalek", + "ed25519", + "hashbrown 0.15.5", + "pkcs8", + "rand_core 0.6.4", + "serde", + "sha2 0.10.9", + "subtle", + "zeroize", ] [[package]] -name = "halo2_gadgets" -version = "0.3.0" +name = "educe" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126a150072b0c38c7b573fe3eaf0af944a7fed09e154071bf2436d3f016f7230" +checksum = "0f0042ff8246a363dbe77d2ceedb073339e85a804b9a47636c6e016a9a32c05f" dependencies = [ - "arrayvec", - "bitvec", - "ff", - "group", - "halo2_proofs", - "lazy_static", - "pasta_curves", - "rand 0.8.5", - "subtle", - "uint 0.9.5", + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "halo2_legacy_pdqsort" -version = "0.1.0" +name = "either" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47716fe1ae67969c5e0b2ef826f32db8c3be72be325e1aa3c1951d06b5575ec5" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] -name = 
"halo2_proofs" -version = "0.3.0" +name = "elliptic-curve" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b867a8d9bbb85fca76fff60652b5cd19b853a1c4d0665cb89bee68b18d2caf0" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "blake2b_simd", + "base16ct", + "crypto-bigint", + "digest 0.10.7", "ff", + "generic-array", "group", - "halo2_legacy_pdqsort", - "maybe-rayon", - "pasta_curves", + "pkcs8", "rand_core 0.6.4", - "tracing", + "sec1", + "subtle", + "zeroize", ] [[package]] -name = "hashbrown" -version = "0.12.3" +name = "encode_unicode" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" [[package]] -name = "hashbrown" -version = "0.14.5" +name = "enum-ordinalize" +version = "3.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +checksum = "1bf1fa3f06bbff1ea5b1a9c7b14aa992a39657db60a2759457328d7e058f49ee" dependencies = [ - "ahash", - "allocator-api2", + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.116", ] [[package]] -name = "hashbrown" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" - -[[package]] -name = "heck" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" - -[[package]] -name = "hermit-abi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" - -[[package]] -name = "hex" -version = "0.4.3" +name = "enum_dispatch" 
+version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" dependencies = [ - "serde", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.116", ] [[package]] -name = "hex-literal" -version = "0.4.1" +name = "env_home" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" +checksum = "c7f84e12ccf0a7ddc17a6c41c93326024c42920d7ee630d04950e6926645c0fe" [[package]] -name = "hmac" -version = "0.12.1" +name = "env_logger" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" dependencies = [ - "digest 0.10.7", + "log", + "regex", ] [[package]] -name = "home" -version = "0.5.9" +name = "equihash" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +checksum = "ca4f333d4ccc9d23c06593733673026efa71a332e028b00f12cf427b9677dce9" dependencies = [ - "windows-sys 0.52.0", + "blake2b_simd", + "core2 0.3.3", + "document-features", ] [[package]] -name = "http" -version = "0.2.12" +name = "equivalent" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" -dependencies = [ - "bytes 1.8.0", - "fnv", - "itoa", -] +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] -name = "http" -version = "1.1.0" +name = "errno" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ - "bytes 1.8.0", - "fnv", - "itoa", + "libc", + "windows-sys 0.59.0", ] [[package]] -name = "http-body" -version = "0.4.6" +name = "event-listener" +version = "5.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" dependencies = [ - "bytes 1.8.0", - "http 0.2.12", + "concurrent-queue", + "parking", "pin-project-lite", ] [[package]] -name = "http-body" -version = "1.0.1" +name = "eyre" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" dependencies = [ - "bytes 1.8.0", - "http 1.1.0", + "indenter", + "once_cell", ] [[package]] -name = "http-body-util" -version = "0.1.2" +name = "f4jumble" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "0d42773cb15447644d170be20231a3268600e0c4cea8987d013b93ac973d3cf7" dependencies = [ - "bytes 1.8.0", - "futures-util", - "http 1.1.0", - "http-body 1.0.1", - "pin-project-lite", + "blake2b_simd", ] [[package]] -name = "httparse" -version = "1.9.5" +name = "fallible-iterator" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" [[package]] -name = "httpdate" -version = "1.0.3" +name = "fallible-streaming-iterator" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] -name = "human_bytes" -version = "0.4.3" +name = "fastrand" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91f255a4535024abf7640cb288260811fc14794f62b063652ed349f9a6c2348e" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] -name = "humantime" -version = "2.1.0" +name = "ff" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - -[[package]] -name = "humantime-serde" -version = "1.1.1" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "bitvec", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "figment" +version = "0.10.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" dependencies = [ - "humantime", + "atomic 0.6.1", "serde", + "toml 0.8.23", + "uncased", + "version_check", ] [[package]] -name = "hyper" -version = "0.14.31" +name = "filetime" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" +dependencies = [ + "cfg-if", + "libc", + "libredox", +] + +[[package]] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" + 
+[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + +[[package]] +name = "flate2" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "fluid-let" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "749cff877dc1af878a0b31a41dd221a753634401ea0ef2f87b62d3171522485a" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fpe" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26c4b37de5ae15812a764c958297cfc50f5c010438f60c6ce75d11b802abd404" +dependencies = [ + "cbc", + "cipher", + "libm", + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "fs-mistrust" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "a7a157b06319bb4868718fd20177a0d3373d465e429d89cd0ee493d9f5918902" +dependencies = [ + "derive_builder_fork_arti", + "dirs", + "libc", + "pwd-grp", + "serde", + "thiserror 2.0.18", + "walkdir", +] + +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + +[[package]] +name = "fslock" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04412b8935272e3a9bae6f48c7bfff74c2911f60525404edfdd28e49884c3bfb" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" dependencies = [ - "bytes 1.8.0", "futures-channel", "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", "futures-util", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", ] [[package]] -name = "hyper" -version = "1.5.0" +name = "futures-channel" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" dependencies = [ - "bytes 1.8.0", - "futures-channel", + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" + +[[package]] +name = "futures-executor" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" +dependencies = [ + "futures-core", + "futures-task", "futures-util", - "h2 0.4.6", - "http 1.1.0", - "http-body 1.0.1", - "httparse", - "httpdate", - "itoa", +] + +[[package]] +name = "futures-io" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" + +[[package]] +name = "futures-macro" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.116", +] + +[[package]] +name = "futures-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" +dependencies = [ + "futures-io", + "rustls 0.23.37", + "rustls-pki-types", +] + +[[package]] +name = "futures-sink" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" + +[[package]] +name = "futures-task" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" + +[[package]] +name = "futures-util" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", "pin-project-lite", - "smallvec", - "tokio", - 
"want", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasip2", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "rand_core 0.10.0", + "wasip2", + "wasip3", +] + +[[package]] +name = "getset" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.116", ] [[package]] -name = "hyper-rustls" -version = "0.24.2" +name = "gimli" +version = "0.32.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" 
+ +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + +[[package]] +name = "glob-match" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9985c9503b412198aa4197559e9a318524ebc4519c229bfa05a535828c950b9d" + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "memuse", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "h2" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap 2.13.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "half" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" +dependencies = [ + "cfg-if", + "crunchy", + "zerocopy", +] + +[[package]] +name = "halo2_gadgets" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73a5e510d58a07d8ed238a5a8a436fe6c2c79e1bb2611f62688bc65007b4e6e7" +dependencies = [ + "arrayvec", + "bitvec", + "ff", + "group", + "halo2_poseidon", + "halo2_proofs", + "lazy_static", + "pasta_curves", + "rand 0.8.5", + "sinsemilla", + "subtle", + "uint 0.9.5", +] + +[[package]] +name = "halo2_legacy_pdqsort" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47716fe1ae67969c5e0b2ef826f32db8c3be72be325e1aa3c1951d06b5575ec5" + +[[package]] +name = "halo2_poseidon" +version = "0.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa3da60b81f02f9b33ebc6252d766f843291fb4d2247a07ae73d20b791fc56f" +dependencies = [ + "bitvec", + "ff", + "group", + "pasta_curves", +] + +[[package]] +name = "halo2_proofs" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05713f117155643ce10975e0bee44a274bcda2f4bb5ef29a999ad67c1fa8d4d3" +dependencies = [ + "blake2b_simd", + "ff", + "group", + "halo2_legacy_pdqsort", + "indexmap 1.9.3", + "maybe-rayon", + "pasta_curves", + "rand_core 0.6.4", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.8", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "hashlink" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" +dependencies = [ + "hashbrown 0.15.5", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] + +[[package]] +name = "hex-conservative" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fda06d18ac606267c40c04e41b9947729bf8b9efe74bd4e82b61a5f26a510b9f" +dependencies = [ + "arrayvec", +] + +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "hkdf" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" +dependencies = [ + "hmac 0.12.1", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "hmac" +version = "0.13.0-pre.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4b1fb14e4df79f9406b434b60acef9f45c26c50062cccf1346c6103b8c47d58" +dependencies = [ + "digest 0.11.0-pre.9", +] + +[[package]] +name = "home" +version = "0.5.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "hostname-validator" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f558a64ac9af88b5ba400d99b579451af0d39c6d360980045b91aac966d705e2" + +[[package]] +name = "http" 
+version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "human_bytes" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91f255a4535024abf7640cb288260811fc14794f62b063652ed349f9a6c2348e" + +[[package]] +name = "humantime" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" + +[[package]] +name = "humantime-serde" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" +dependencies = [ + "humantime", + "serde", +] + +[[package]] +name = "hybrid-array" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2d35805454dc9f8662a98d6d61886ffe26bd465f5960e0e55345c70d5c0d2a9" 
+dependencies = [ + "typenum", +] + +[[package]] +name = "hyper" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "log", + "rustls 0.23.37", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots 1.0.6", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" +dependencies = [ + "base64", + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core 0.62.2", +] + 
+[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = "icu_properties" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-codec" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d40b9d5e17727407e55028eafc22b2dc68781786e6d7eb8a21103f5058e3a14" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.116", +] + +[[package]] +name = "incrementalmerkletree" 
+version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30821f91f0fa8660edca547918dc59812893b497d07c1144f326f07fdd94aba9" +dependencies = [ + "either", +] + +[[package]] +name = "indenter" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "964de6e86d545b246d84badc0fef527924ace5134f30641c203ef52ba83f58d5" + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "indexmap" +version = "2.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" +dependencies = [ + "equivalent", + "hashbrown 0.16.1", + "serde", + "serde_core", +] + +[[package]] +name = "inotify" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f37dccff2791ab604f9babef0ba14fbe0be30bd368dc541e2b08d07c8aa908f3" +dependencies = [ + "bitflags 2.11.0", + "inotify-sys", + "libc", +] + +[[package]] +name = "inotify-sys" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb" +dependencies = [ + "libc", +] + +[[package]] +name = "inout" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" +dependencies = [ + "generic-array", +] + +[[package]] +name = "insta" +version = "1.46.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e82db8c87c7f1ccecb34ce0c24399b8a73081427f3c7c50a5d597925356115e4" +dependencies = [ + "console", + "once_cell", + "similar", + "tempfile", +] + +[[package]] +name = "integration-tests" 
+version = "0.2.0" +dependencies = [ + "anyhow", + "core2 0.4.0", + "futures", + "hex", + "prost", + "serde_json", + "tempfile", + "tokio", + "tower 0.4.13", + "zaino-common", + "zaino-fetch", + "zaino-proto", + "zaino-state", + "zaino-testutils", + "zainod", + "zcash_local_net", + "zebra-chain", + "zebra-rpc", + "zebra-state", + "zingo_test_vectors", + "zip32", +] + +[[package]] +name = "inventory" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc61209c082fbeb19919bee74b176221b27223e27b65d781eb91af24eb1fb46e" +dependencies = [ + "rustversion", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = 
"1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.4", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "json" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" + +[[package]] +name = "jsonrpsee" +version = "0.24.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e281ae70cc3b98dac15fced3366a880949e65fc66e345ce857a5682d152f3e62" +dependencies = [ + "jsonrpsee-core", + "jsonrpsee-proc-macros", + "jsonrpsee-server", + "jsonrpsee-types", + "tokio", + "tracing", +] + +[[package]] +name = "jsonrpsee-core" +version = "0.24.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "348ee569eaed52926b5e740aae20863762b16596476e943c9e415a6479021622" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "jsonrpsee-types", + "parking_lot", + "rand 0.8.5", + "rustc-hash 2.1.1", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", + "tracing", +] + +[[package]] +name = "jsonrpsee-proc-macros" +version = "0.24.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7398cddf5013cca4702862a2692b66c48a3bd6cf6ec681a47453c93d63cf8de5" +dependencies = [ + "heck", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.116", +] + +[[package]] +name = 
"jsonrpsee-server" +version = "0.24.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21429bcdda37dcf2d43b68621b994adede0e28061f816b038b0f18c70c143d51" +dependencies = [ + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "jsonrpsee-core", + "jsonrpsee-types", + "pin-project", + "route-recognizer", + "serde", + "serde_json", + "soketto", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tokio-util", + "tower 0.4.13", + "tracing", +] + +[[package]] +name = "jsonrpsee-types" +version = "0.24.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0f05e0028e55b15dbd2107163b3c744cd3bb4474f193f95d9708acbf5677e44" +dependencies = [ + "http", + "serde", + "serde_json", + "thiserror 1.0.69", +] + +[[package]] +name = "jubjub" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8499f7a74008aafbecb2a2e608a3e13e4dd3e84df198b604451efe93f2de6e61" +dependencies = [ + "bitvec", + "bls12_381", + "ff", + "group", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "keccak" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" +dependencies = [ + "cpufeatures 0.2.17", +] + +[[package]] +name = "known-folders" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d463f34ca3c400fde3a054da0e0b8c6ffa21e4590922f3e18281bb5eeef4cbdc" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "kqueue" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac30106d7dce88daf4a3fcb4879ea939476d5074a9b7ddd0fb97fa4bed5596a" +dependencies = [ + "kqueue-sys", + "libc", +] + +[[package]] +name = "kqueue-sys" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b" +dependencies = [ + "bitflags 1.3.2", + "libc", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin", +] + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + +[[package]] +name = "libc" +version = "0.2.182" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" + +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link 0.2.1", +] + +[[package]] +name = "liblzma" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73c36d08cad03a3fbe2c4e7bb3a9e84c57e4ee4135ed0b065cade3d98480c648" +dependencies = [ + "liblzma-sys", +] + +[[package]] +name = "liblzma-sys" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f2db66f3268487b5033077f266da6777d057949b8f93c8ad82e441df25e6186" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + +[[package]] +name = "libm" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" + +[[package]] +name = "libredox" +version = "0.1.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" +dependencies = [ + "bitflags 2.11.0", + "libc", + "redox_syscall 0.7.1", +] + +[[package]] +name = "librocksdb-sys" +version = "0.16.0+8.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce3d60bc059831dc1c83903fb45c103f75db65c5a7bf22272764d9cc683e348c" +dependencies = [ + "bindgen 0.69.5", + "bzip2-sys", + "cc", + "glob", + "libc", + "libz-sys", + "lz4-sys", +] + +[[package]] +name = "libsqlite3-sys" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libzcash_script" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f8ce05b56f3cbc65ec7d0908adb308ed91281e022f61c8c3a0c9388b5380b17" +dependencies = [ + "bindgen 0.72.1", + "cc", + "thiserror 2.0.18", + "tracing", + "zcash_script", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] +name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + +[[package]] +name = "litrs" 
+version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d3d7f243d5c5a8b9bb5d6dd2b1602c0cb0b9db1621bafc7ed66e35ff9fe092" + +[[package]] +name = "lmdb" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0908efb5d6496aa977d96f91413da2635a902e5e31dbef0bfb88986c248539" +dependencies = [ + "bitflags 1.3.2", + "libc", + "lmdb-sys", +] + +[[package]] +name = "lmdb-sys" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5b392838cfe8858e86fac37cf97a0e8c55cc60ba0a18365cadc33092f128ce9" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "lz4-sys" +version = "1.11.1+lz4-1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "matchit" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + 
+[[package]] +name = "maybe-rayon" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea1f30cedd69f0a2954655f7188c6a834246d2bcf1e315e2ac40c4b24dc9519" +dependencies = [ + "cfg-if", + "rayon", +] + +[[package]] +name = "memchr" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + +[[package]] +name = "memmap2" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "714098028fe011992e1c3962653c96b2d578c4b4bce9036e15ff220319b1e0e3" +dependencies = [ + "libc", +] + +[[package]] +name = "memuse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d97bbf43eb4f088f8ca469930cde17fa036207c9a5e02ccc5107c4e8b17c964" + +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + +[[package]] +name = "metrics" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d5312e9ba3771cfa961b585728215e3d972c950a3eed9252aa093d6301277e8" +dependencies = [ + "ahash 0.8.12", + "portable-atomic", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "minreq" +version = "2.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05015102dad0f7d61691ca347e9d9d9006685a64aefb3d79eecf62665de2153d" +dependencies = [ + "rustls 0.21.12", + "rustls-webpki 0.101.7", + "webpki-roots 0.25.4", +] + +[[package]] +name = "mio" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +dependencies = [ + "libc", + "log", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.61.2", +] + +[[package]] +name = "mset" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26c4d16a3d2b0e89ec6e7d509cf791545fcb48cbc8fc2fb2e96a492defda9140" + +[[package]] +name = "multimap" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" + +[[package]] +name = "nix" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" +dependencies = [ + "bitflags 2.11.0", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nonany" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6b8866ec53810a9a4b3d434a29801e78c707430a9ae11c2db4b8b62bb9675a0" + +[[package]] +name = "nonempty" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"549e471b99ccaf2f89101bec68f4d244457d5a95a9c3d0672e9564124397741d" + +[[package]] +name = "notify" +version = "8.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d3d07927151ff8575b7087f245456e549fea62edf0ec4e565a5ee50c8402bc3" +dependencies = [ + "bitflags 2.11.0", + "inotify", + "kqueue", + "libc", + "log", + "mio", + "notify-types", + "walkdir", + "windows-sys 0.60.2", +] + +[[package]] +name = "notify-types" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42b8cfee0e339a0337359f3c88165702ac6e600dc01c0cc9579a92d62b08477a" +dependencies = [ + "bitflags 2.11.0", +] + +[[package]] +name = "ntapi" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3b335231dfd352ffb0f8017f3b6027a4917f7df785ea2143d8af2adc66980ae" +dependencies = [ + "winapi", +] + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-bigint-dig" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e661dda6640fad38e827a6d4a310ff4763082116fe217f279885c97f511bb0b7" +dependencies = [ + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand 0.8.5", + "smallvec", + "zeroize", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "num_enum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" +dependencies = [ + "num_enum_derive", + "rustversion", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.116", +] + +[[package]] +name = "objc2-core-foundation" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a180dd8642fa45cdb7dd721cd4c11b1cadd4929ce112ebd8b9f5803cc79d536" +dependencies = [ + "bitflags 2.11.0", +] + +[[package]] +name = "objc2-io-kit" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33fafba39597d6dc1fb709123dfa8289d39406734be322956a69f0931c73bb15" +dependencies = [ + "libc", + "objc2-core-foundation", +] + 
+[[package]] +name = "object" +version = "0.37.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "once_cell_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" + +[[package]] +name = "oneshot-fused-workaround" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5480ab52bd005e9f14e3071d0227bfa204e16a496a719c58bfa013f880b41593" +dependencies = [ + "futures", +] + +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + +[[package]] +name = "opaque-debug" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" + +[[package]] +name = "openrpsee" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21f88ef83e8c454c2da7822db74ab2bbe3f03efac05bfb5dd0523afbdeb99799" +dependencies = [ + "documented", + "jsonrpsee", + "quote", + "schemars 1.2.1", + "serde", + "serde_json", + "syn 2.0.116", +] + +[[package]] +name = "openssl-probe" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" + +[[package]] +name = "optfield" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"969ccca8ffc4fb105bd131a228107d5c9dd89d9d627edf3295cbe979156f9712" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.116", +] + +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "orchard" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1ef66fcf99348242a20d582d7434da381a867df8dc155b3a980eca767c56137" +dependencies = [ + "aes", + "bitvec", + "blake2b_simd", + "core2 0.3.3", + "ff", + "fpe", + "getset", + "group", + "halo2_gadgets", + "halo2_poseidon", + "halo2_proofs", + "hex", + "incrementalmerkletree", + "lazy_static", + "memuse", + "nonempty", + "pasta_curves", + "rand 0.8.5", + "reddsa", + "serde", + "sinsemilla", + "subtle", + "tracing", + "visibility", + "zcash_note_encryption", + "zcash_spec", + "zip32", +] + +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + +[[package]] +name = "ordered-map" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ac8f4a4a06c811aa24b151dbb3fe19f687cb52e0d5cca0493671ed88f973970" +dependencies = [ + "quickcheck", + "quickcheck_macros", +] + +[[package]] +name = "os_str_bytes" +version = "6.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" +dependencies = [ + "memchr", +] + +[[package]] +name = "owo-colors" +version = "4.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52" + +[[package]] +name = "p256" +version = "0.13.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2 0.10.9", +] + +[[package]] +name = "p384" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe42f1670a52a47d448f14b6a5c61dd78fce51856e68edaa38f7ae3a46b8d6b6" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2 0.10.9", +] + +[[package]] +name = "p521" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc9e2161f1f215afdfce23677034ae137bbd45016a880c2eb3ba8eb95f085b2" +dependencies = [ + "base16ct", + "ecdsa", + "elliptic-curve", + "primeorder", + "rand_core 0.6.4", + "sha2 0.10.9", +] + +[[package]] +name = "pairing" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" +dependencies = [ + "group", +] + +[[package]] +name = "parity-scale-codec" +version = "3.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" +dependencies = [ + "arrayvec", + "bitvec", + "byte-slice-cast", + "const_format", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "rustversion", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.116", +] + +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + +[[package]] +name = "parking_lot" +version = "0.12.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ - "futures-util", - "http 0.2.12", - "hyper 0.14.31", - "rustls 0.21.12", - "tokio", - "tokio-rustls 0.24.1", + "lock_api", + "parking_lot_core", ] [[package]] -name = "hyper-rustls" -version = "0.27.3" +name = "parking_lot_core" +version = "0.9.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ - "futures-util", - "http 1.1.0", - "hyper 1.5.0", - "hyper-util", - "log", - "rustls 0.23.16", - "rustls-native-certs", - "rustls-pki-types", - "tokio", - "tokio-rustls 0.26.0", - "tower-service", + "cfg-if", + "libc", + "redox_syscall 0.5.18", + "smallvec", + "windows-link 0.2.1", ] [[package]] -name = "hyper-timeout" -version = "0.5.2" +name = "password-hash" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ - "hyper 1.5.0", - "hyper-util", - "pin-project-lite", - "tokio", - "tower-service", + "base64ct", + "rand_core 0.6.4", + "subtle", ] [[package]] -name = "hyper-tls" -version = "0.6.0" +name = "pasta_curves" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" dependencies = [ - "bytes 1.8.0", - "http-body-util", - "hyper 1.5.0", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", + "blake2b_simd", + "ff", + "group", + 
"lazy_static", + "rand 0.8.5", + "static_assertions", + "subtle", ] [[package]] -name = "hyper-util" -version = "0.1.10" +name = "paste" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" -dependencies = [ - "bytes 1.8.0", - "futures-channel", - "futures-util", - "http 1.1.0", - "http-body 1.0.1", - "hyper 1.5.0", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", -] +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] -name = "iana-time-zone" -version = "0.1.61" +name = "pathdiff" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" -dependencies = [ - "android_system_properties", - "core-foundation-sys", - "iana-time-zone-haiku", - "js-sys", - "wasm-bindgen", - "windows-core", -] +checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" [[package]] -name = "iana-time-zone-haiku" -version = "0.1.2" +name = "pbkdf2" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ - "cc", + "digest 0.10.7", + "password-hash", ] [[package]] -name = "icu_collections" -version = "1.5.0" +name = "pem-rfc7468" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" dependencies = [ - "displaydoc", - "yoke", - "zerofrom", - "zerovec", + "base64ct", ] [[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +name = "pepper-sync" +version = "0.2.0" +source = "git+https://github.com/zingolabs/zingolib.git?rev=14a69853a8bd2e473dee8a433004c4c06aaf5308#14a69853a8bd2e473dee8a433004c4c06aaf5308" dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", + "bip32", + "byteorder", + "crossbeam-channel", + "futures", + "incrementalmerkletree", + "json", + "jubjub", + "memuse", + "orchard", + "rayon", + "sapling-crypto", + "shardtree", + "simple-mermaid", + "subtle", + "thiserror 2.0.18", + "tokio", + "tonic", + "tracing", + "zcash_address", + "zcash_client_backend", + "zcash_encoding", + "zcash_keys", + "zcash_note_encryption", + "zcash_primitives", + "zcash_protocol", + "zcash_transparent", + "zingo-memo", + "zingo-netutils", + "zingo-status", + "zip32", ] [[package]] -name = "icu_locid_transform" -version = "1.5.0" +name = "percent-encoding" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] -name = "icu_locid_transform_data" -version = "1.5.0" +name = "petgraph" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" +checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455" +dependencies = [ + "fixedbitset", + "hashbrown 0.15.5", + "indexmap 2.13.0", +] [[package]] -name = "icu_normalizer" -version = "1.5.0" +name = "phf" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = 
"913273894cec178f401a31ec4b656318d95473527be05c0752cc41cdc32be8b7" dependencies = [ - "displaydoc", - "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "utf16_iter", - "utf8_iter", - "write16", - "zerovec", + "phf_macros 0.12.1", + "phf_shared 0.12.1", + "serde", ] [[package]] -name = "icu_normalizer_data" -version = "1.5.0" +name = "phf" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" +checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf" +dependencies = [ + "phf_macros 0.13.1", + "phf_shared 0.13.1", + "serde", +] [[package]] -name = "icu_properties" -version = "1.5.1" +name = "phf_codegen" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +checksum = "49aa7f9d80421bca176ca8dbfebe668cc7a2684708594ec9f3c0db0805d5d6e1" dependencies = [ - "displaydoc", - "icu_collections", - "icu_locid_transform", - "icu_properties_data", - "icu_provider", - "tinystr", - "zerovec", + "phf_generator 0.13.1", + "phf_shared 0.13.1", ] [[package]] -name = "icu_properties_data" -version = "1.5.0" +name = "phf_generator" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" +checksum = "2cbb1126afed61dd6368748dae63b1ee7dc480191c6262a3b4ff1e29d86a6c5b" +dependencies = [ + "fastrand", + "phf_shared 0.12.1", +] [[package]] -name = "icu_provider" -version = "1.5.0" +name = "phf_generator" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +checksum = "135ace3a761e564ec88c03a77317a7c6b80bb7f7135ef2544dbe054243b89737" dependencies = [ - "displaydoc", - "icu_locid", - 
"icu_provider_macros", - "stable_deref_trait", - "tinystr", - "writeable", - "yoke", - "zerofrom", - "zerovec", + "fastrand", + "phf_shared 0.13.1", ] [[package]] -name = "icu_provider_macros" -version = "1.5.0" +name = "phf_macros" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +checksum = "d713258393a82f091ead52047ca779d37e5766226d009de21696c4e667044368" dependencies = [ + "phf_generator 0.12.1", + "phf_shared 0.12.1", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", ] [[package]] -name = "ident_case" -version = "1.0.1" +name = "phf_macros" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" +checksum = "812f032b54b1e759ccd5f8b6677695d5268c588701effba24601f6932f8269ef" +dependencies = [ + "phf_generator 0.13.1", + "phf_shared 0.13.1", + "proc-macro2", + "quote", + "syn 2.0.116", +] [[package]] -name = "idna" -version = "1.0.3" +name = "phf_shared" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "06005508882fb681fd97892ecff4b7fd0fee13ef1aa569f8695dae7ab9099981" dependencies = [ - "idna_adapter", - "smallvec", - "utf8_iter", + "siphasher", ] [[package]] -name = "idna_adapter" -version = "1.2.0" +name = "phf_shared" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266" dependencies = [ - "icu_normalizer", - "icu_properties", + "siphasher", ] [[package]] -name = "impl-codec" -version = "0.6.0" +name = "pin-project" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ - "parity-scale-codec", + "pin-project-internal", ] [[package]] -name = "impl-trait-for-tuples" -version = "0.2.2" +name = "pin-project-internal" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.116", ] [[package]] -name = "incrementalmerkletree" -version = "0.6.0" +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkcs1" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75346da3bd8e3d8891d02508245ed2df34447ca6637e343829f8d08986e9cde2" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" dependencies = [ - "either", - "proptest", - "rand 0.8.5", - "rand_core 0.6.4", + "der", + "pkcs8", + "spki", ] [[package]] -name = "incrementalmerkletree" -version = "0.7.0" +name = "pkcs8" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d45063fbc4b0a37837f6bfe0445f269d13d730ad0aa3b5a7f74aa7bf27a0f4df" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "either", + "der", + "spki", ] [[package]] -name = "indenter" -version = "0.3.3" +name = "pkg-config" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] -name = "indexmap" -version = "1.9.3" +name = "plotters" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ - "autocfg", - "hashbrown 0.12.3", - "serde", + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", ] [[package]] -name = "indexmap" -version = "2.6.0" +name = "plotters-backend" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" -dependencies = [ - "equivalent", - "hashbrown 0.15.1", - "serde", -] +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] -name = "indoc" -version = "2.0.5" +name = "plotters-svg" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] [[package]] -name = "inout" -version = "0.1.3" +name = "poly1305" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ - "generic-array", + "cpufeatures 0.2.17", + "opaque-debug", + "universal-hash", ] [[package]] -name = "instant" -version = "0.1.13" +name = "portable-atomic" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" + +[[package]] +name = "portpicker" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +checksum = "be97d76faf1bfab666e1375477b23fde79eccf0276e9b63b92a39d676a889ba9" dependencies = [ - "cfg-if 1.0.0", + "rand 0.8.5", ] [[package]] -name = "integration-tests" -version = "0.0.0" +name = "postage" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af3fb618632874fb76937c2361a7f22afd393c982a2165595407edc75b06d3c1" dependencies = [ - "once_cell", - "tokio", - "tracing-subscriber", - "zaino-testutils", - "zcash_local_net", - "zingolib 0.2.0 (git+https://github.com/zingolabs/zingolib.git?tag=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2)", + "atomic 0.5.3", + "crossbeam-queue", + "futures", + "parking_lot", + "pin-project", + "static_assertions", + "thiserror 1.0.69", ] [[package]] -name = "iovec" +name = "potential_utf" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" dependencies = [ - "libc", + "zerovec", ] [[package]] -name = "ipnet" -version = "2.10.1" +name = "powerfmt" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] -name = "is_terminal_polyfill" -version = "1.70.1" +name = "ppv-lite86" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies 
= [ + "zerocopy", +] [[package]] -name = "itertools" -version = "0.12.1" +name = "prettyplease" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ - "either", + "proc-macro2", + "syn 2.0.116", ] [[package]] -name = "itertools" -version = "0.13.0" +name = "primeorder" +version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" dependencies = [ - "either", + "elliptic-curve", ] [[package]] -name = "itoa" -version = "1.0.11" +name = "primitive-types" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +dependencies = [ + "fixed-hash", + "impl-codec 0.6.0", + "uint 0.9.5", +] [[package]] -name = "jobserver" -version = "0.1.32" +name = "primitive-types" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "d15600a7d856470b7d278b3fe0e311fe28c2526348549f8ef2ff7db3299c87f5" dependencies = [ - "libc", + "fixed-hash", + "impl-codec 0.7.1", + "uint 0.10.0", ] [[package]] -name = "js-sys" -version = "0.3.72" +name = "priority-queue" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "93980406f12d9f8140ed5abe7155acb10bb1e69ea55c88960b9c2f117445ef96" dependencies = [ - "wasm-bindgen", + "equivalent", + "indexmap 2.13.0", + "serde", ] [[package]] -name = "json" 
-version = "0.12.4" +name = "proc-macro-crate" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" +dependencies = [ + "toml_edit 0.23.10+spec-1.0.0", +] [[package]] -name = "jsonrpc-core" -version = "18.0.0" +name = "proc-macro-error-attr2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" dependencies = [ - "futures", - "futures-executor", - "futures-util", - "log", - "serde", - "serde_derive", - "serde_json", + "proc-macro2", + "quote", ] [[package]] -name = "jsonrpc-derive" -version = "18.0.0" +name = "proc-macro-error2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" dependencies = [ - "proc-macro-crate 0.1.5", + "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.116", ] [[package]] -name = "jsonrpc-http-server" -version = "18.0.0" +name = "proc-macro2" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ - "futures", - "hyper 0.14.31", - "jsonrpc-core", - "jsonrpc-server-utils", - "log", - "net2", - "parking_lot 0.11.2", - "unicase", + "unicode-ident", ] [[package]] -name = "jsonrpc-server-utils" -version = "18.0.0" +name = "proptest" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" +checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" dependencies = [ - "bytes 1.8.0", - "futures", - "globset", - "jsonrpc-core", + "bit-set", + "bit-vec", + "bitflags 2.11.0", "lazy_static", - "log", - "tokio", - "tokio-stream", - "tokio-util 0.6.10", - "unicase", + "num-traits", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_xorshift", + "regex-syntax", + "rusty-fork", + "tempfile", + "unarray", ] [[package]] -name = "jubjub" -version = "0.10.0" +name = "proptest-derive" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8499f7a74008aafbecb2a2e608a3e13e4dd3e84df198b604451efe93f2de6e61" +checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ - "bitvec", - "bls12_381", - "ff", - "group", - "rand_core 0.6.4", - "subtle", + "proc-macro2", + "quote", + "syn 2.0.116", ] [[package]] -name = "known-folders" -version = "1.2.0" +name = "prost" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7d9a1740cc8b46e259a0eb787d79d855e79ff10b9855a5eba58868d5da7927c" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" dependencies = [ - "windows-sys 0.59.0", + "bytes", + "prost-derive", ] [[package]] -name = "lazy-regex" -version = "3.3.0" +name = "prost-build" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d8e41c97e6bc7ecb552016274b99fbb5d035e8de288c582d9b933af6677bfda" +checksum = "343d3bd7056eda839b03204e68deff7d1b13aba7af2b2fd16890697274262ee7" dependencies = [ - "lazy-regex-proc_macros", - "once_cell", + "heck", + "itertools 0.14.0", + "log", + "multimap", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "pulldown-cmark", + "pulldown-cmark-to-cmark", "regex", + "syn 2.0.116", + "tempfile", ] [[package]] -name = "lazy-regex-proc_macros" -version = "3.3.0" +name = 
"prost-derive" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76e1d8b05d672c53cb9c7b920bbba8783845ae4f0b076e02a3db1d02c81b4163" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ + "anyhow", + "itertools 0.14.0", "proc-macro2", "quote", - "regex", - "syn 2.0.87", + "syn 2.0.116", ] [[package]] -name = "lazy_static" -version = "1.5.0" +name = "prost-types" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +checksum = "8991c4cbdb8bc5b11f0b074ffe286c30e523de90fee5ba8132f1399f23cb3dd7" dependencies = [ - "spin 0.9.8", + "prost", ] [[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - -[[package]] -name = "libc" -version = "0.2.162" +name = "psl-types" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" +checksum = "33cb294fe86a74cbcf50d4445b37da762029549ebeea341421c7c70370f86cac" [[package]] -name = "libloading" -version = "0.8.5" +name = "ptr_meta" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" dependencies = [ - "cfg-if 1.0.0", - "windows-targets 0.52.6", + "ptr_meta_derive", ] [[package]] -name = "libm" -version = "0.2.11" +name = "ptr_meta_derive" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" +checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" +dependencies = [ + 
"proc-macro2", + "quote", + "syn 1.0.109", +] [[package]] -name = "libredox" -version = "0.1.3" +name = "publicsuffix" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +checksum = "6f42ea446cab60335f76979ec15e12619a2165b5ae2c12166bef27d283a9fadf" dependencies = [ - "bitflags 2.6.0", - "libc", + "idna", + "psl-types", ] [[package]] -name = "librocksdb-sys" -version = "0.16.0+8.10.0" +name = "pulldown-cmark" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce3d60bc059831dc1c83903fb45c103f75db65c5a7bf22272764d9cc683e348c" +checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" dependencies = [ - "bindgen 0.69.5", - "bzip2-sys", - "cc", - "glob", - "libc", - "libz-sys", - "lz4-sys", + "bitflags 2.11.0", + "memchr", + "unicase", ] [[package]] -name = "libz-sys" -version = "1.1.20" +name = "pulldown-cmark-to-cmark" +version = "22.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +checksum = "50793def1b900256624a709439404384204a5dc3a6ec580281bfaac35e882e90" dependencies = [ - "cc", - "pkg-config", - "vcpkg", + "pulldown-cmark", ] [[package]] -name = "linux-raw-sys" -version = "0.4.14" +name = "pwd-grp" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "0e2023f41b5fcb7c30eb5300a5733edfaa9e0e0d502d51b586f65633fd39e40c" +dependencies = [ + "derive-deftly", + "libc", + "paste", + "thiserror 2.0.18", +] [[package]] -name = "litemap" -version = "0.7.3" +name = "quick-error" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" +checksum = 
"a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] -name = "litrs" -version = "0.4.1" +name = "quickcheck" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4ce301924b7887e9d637144fdade93f9dfff9b60981d4ac161db09720d39aa5" +checksum = "a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f" +dependencies = [ + "env_logger", + "log", + "rand 0.7.3", + "rand_core 0.5.1", +] [[package]] -name = "lock_api" -version = "0.4.12" +name = "quickcheck_macros" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" dependencies = [ - "autocfg", - "scopeguard", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "log" -version = "0.4.22" +name = "quinn" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ - "serde", + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash 2.1.1", + "rustls 0.23.37", + "socket2", + "thiserror 2.0.18", + "tokio", + "tracing", + "web-time", ] [[package]] -name = "log-mdc" -version = "0.1.0" +name = "quinn-proto" +version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a94d21414c1f4a51209ad204c1776a3d0765002c76c6abcb602a6f09f1e881c7" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash 2.1.1", + "rustls 0.23.37", + "rustls-pki-types", + "slab", + "thiserror 2.0.18", + "tinyvec", + "tracing", + "web-time", +] [[package]] -name = "log4rs" -version = "1.3.0" 
+name = "quinn-udp" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0816135ae15bd0391cf284eab37e6e3ee0a6ee63d2ceeb659862bd8d0a984ca6" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" dependencies = [ - "anyhow", - "arc-swap", - "chrono", - "derivative", - "fnv", - "humantime", + "cfg_aliases", "libc", - "log", - "log-mdc", "once_cell", - "parking_lot 0.12.3", - "rand 0.8.5", - "serde", - "serde-value", - "serde_json", - "serde_yaml", - "thiserror", - "thread-id", - "typemap-ors", - "winapi", + "socket2", + "tracing", + "windows-sys 0.59.0", ] [[package]] -name = "lz4-sys" -version = "1.11.1+lz4-1.10.0" +name = "quote" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ - "cc", - "libc", + "proc-macro2", ] [[package]] -name = "matchit" -version = "0.7.3" +name = "r-efi" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] -name = "maybe-rayon" -version = "0.1.1" +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea1f30cedd69f0a2954655f7188c6a834246d2bcf1e315e2ac40c4b24dc9519" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "cfg-if 1.0.0", - "rayon", + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc", ] [[package]] -name = "memchr" -version = 
"2.7.4" +name = "rand" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] [[package]] -name = "memuse" -version = "0.2.1" +name = "rand" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2145869435ace5ea6ea3d35f59be559317ec9a0d04e1812d5f185a87b6d36f1a" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ - "nonempty", + "rand_chacha 0.9.0", + "rand_core 0.9.5", ] [[package]] -name = "metrics" -version = "0.24.0" +name = "rand" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae428771d17306715c5091d446327d1cfdedc82185c65ba8423ab404e45bf10" +checksum = "bc266eb313df6c5c09c1c7b1fbe2510961e5bcd3add930c1e31f7ed9da0feff8" dependencies = [ - "ahash", - "portable-atomic", + "chacha20 0.10.0", + "getrandom 0.4.1", + "rand_core 0.10.0", ] [[package]] -name = "mime" -version = "0.3.17" +name = "rand_chacha" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] [[package]] -name = "minimal-lexical" -version = "0.2.1" +name = "rand_chacha" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] [[package]] -name = "miniz_oxide" -version = "0.7.4" +name = "rand_chacha" +version = "0.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ - "adler", + "ppv-lite86", + "rand_core 0.9.5", ] [[package]] -name = "mio" -version = "1.0.2" +name = "rand_core" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "hermit-abi", - "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "getrandom 0.1.16", ] [[package]] -name = "mirai-annotations" -version = "1.12.0" +name = "rand_core" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.17", +] [[package]] -name = "mset" -version = "0.1.1" +name = "rand_core" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c4d16a3d2b0e89ec6e7d509cf791545fcb48cbc8fc2fb2e96a492defda9140" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" +dependencies = [ + "getrandom 0.3.4", +] [[package]] -name = "multimap" +name = "rand_core" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" +checksum = "0c8d0fd677905edcbeedbf2edb6494d676f0e98d54d5cf9bda0b061cb8fb8aba" [[package]] -name = "native-tls" -version = "0.2.12" +name = "rand_hc" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +checksum = 
"ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", + "rand_core 0.5.1", ] [[package]] -name = "net2" -version = "0.2.39" +name = "rand_jitter" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13b648036a2339d06de780866fbdfda0dde886de7b3af2ddeba8b14f4ee34ac" +checksum = "b16df48f071248e67b8fc5e866d9448d45c08ad8b672baaaf796e2f15e606ff0" dependencies = [ - "cfg-if 0.1.10", "libc", + "rand_core 0.9.5", "winapi", ] [[package]] -name = "nix" -version = "0.29.0" +name = "rand_xorshift" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "bitflags 2.6.0", - "cfg-if 1.0.0", - "cfg_aliases", - "libc", + "rand_core 0.6.4", ] [[package]] -name = "nom" -version = "7.1.3" +name = "rayon" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ - "memchr", - "minimal-lexical", + "either", + "rayon-core", ] [[package]] -name = "nonempty" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" - -[[package]] -name = "nu-ansi-term" -version = "0.46.0" +name = "rayon-core" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" dependencies = [ - "overload", - 
"winapi", + "crossbeam-deque", + "crossbeam-utils", ] [[package]] -name = "num-bigint" -version = "0.4.6" +name = "rdrand" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +checksum = "d92195228612ac8eed47adbc2ed0f04e513a4ccb98175b6f2bd04d963b533655" dependencies = [ - "num-integer", - "num-traits", + "rand_core 0.6.4", ] [[package]] -name = "num-conv" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" - -[[package]] -name = "num-integer" -version = "0.1.46" +name = "reddsa" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +checksum = "78a5191930e84973293aa5f532b513404460cd2216c1cfb76d08748c15b40b02" dependencies = [ - "num-traits", + "blake2b_simd", + "byteorder", + "group", + "hex", + "jubjub", + "pasta_curves", + "rand_core 0.6.4", + "serde", + "thiserror 1.0.69", + "zeroize", ] [[package]] -name = "num-traits" -version = "0.2.19" +name = "redjubjub" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +checksum = "89b0ac1bc6bb3696d2c6f52cff8fba57238b81da8c0214ee6cd146eb8fde364e" dependencies = [ - "autocfg", - "libm", + "rand_core 0.6.4", + "reddsa", + "serde", + "thiserror 1.0.69", + "zeroize", ] [[package]] -name = "num_cpus" -version = "1.16.0" +name = "redox_syscall" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "hermit-abi", - "libc", + "bitflags 2.11.0", ] [[package]] -name = "object" -version = 
"0.32.2" +name = "redox_syscall" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "35985aa610addc02e24fc232012c86fd11f14111180f902b67e2d5331f8ebf2b" dependencies = [ - "memchr", + "bitflags 2.11.0", ] [[package]] -name = "once_cell" -version = "1.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" - -[[package]] -name = "opaque-debug" -version = "0.3.1" +name = "redox_users" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" +dependencies = [ + "getrandom 0.2.17", + "libredox", + "thiserror 2.0.18", +] [[package]] -name = "openssl" -version = "0.10.68" +name = "ref-cast" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" dependencies = [ - "bitflags 2.6.0", - "cfg-if 1.0.0", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", + "ref-cast-impl", ] [[package]] -name = "openssl-macros" -version = "0.1.1" +name = "ref-cast-impl" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", ] [[package]] -name = "openssl-probe" -version = "0.1.5" +name = "regex" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] [[package]] -name = "openssl-sys" -version = "0.9.104" +name = "regex-automata" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", + "aho-corasick", + "memchr", + "regex-syntax", ] [[package]] -name = "option-ext" -version = "0.2.0" +name = "regex-syntax" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" [[package]] -name = "orchard" -version = "0.9.0" +name = "rend" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc7bde644aeb980be296cd908c6650894dc8541deb56f9f5294c52ed7ca568f" +checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c" dependencies = [ - "aes", - "bitvec", - "blake2b_simd", - "ff", - "fpe", - "group", - "halo2_gadgets", - "halo2_proofs", - "hex", - "incrementalmerkletree 0.6.0", - "lazy_static", - "memuse", - "nonempty", - "pasta_curves", - "rand 0.8.5", - "reddsa", - "serde", - "subtle", - "tracing", - "visibility", - "zcash_note_encryption", - "zcash_spec", - "zip32", + "bytecheck", ] [[package]] -name = "orchard" -version = "0.10.0" +name = "reqwest" +version = "0.12.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f18e997fa121de5c73e95cdc7e8512ae43b7de38904aeea5e5713cc48f3c0ba" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ - 
"aes", - "bitvec", - "blake2b_simd", - "ff", - "fpe", - "group", - "halo2_gadgets", - "halo2_proofs", - "hex", - "incrementalmerkletree 0.7.0", - "lazy_static", - "memuse", - "nonempty", - "pasta_curves", - "rand 0.8.5", - "reddsa", - "serde", - "subtle", - "tracing", - "visibility", - "zcash_note_encryption", - "zcash_spec", - "zip32", + "base64", + "bytes", + "cookie", + "cookie_store", + "futures-core", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "js-sys", + "log", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls 0.23.37", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tower 0.5.3", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots 1.0.6", ] [[package]] -name = "ordered-float" -version = "2.10.1" +name = "retry-error" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" -dependencies = [ - "num-traits", -] +checksum = "b295404fa4a9e1e63537ccbd4e4b6309d9688bd70608ddc16d3b8af0389a673a" [[package]] -name = "ordered-map" -version = "0.4.2" +name = "rfc6979" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac8f4a4a06c811aa24b151dbb3fe19f687cb52e0d5cca0493671ed88f973970" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" dependencies = [ - "quickcheck", - "quickcheck_macros", + "hmac 0.12.1", + "subtle", ] [[package]] -name = "overload" -version = "0.1.1" +name = "ring" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.17", + "libc", + 
"untrusted", + "windows-sys 0.52.0", +] [[package]] -name = "owo-colors" -version = "3.5.0" +name = "ripemd" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] [[package]] -name = "pairing" -version = "0.23.0" +name = "ripemd" +version = "0.2.0-pre.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" +checksum = "e48cf93482ea998ad1302c42739bc73ab3adc574890c373ec89710e219357579" dependencies = [ - "group", + "digest 0.11.0-pre.9", ] [[package]] -name = "parity-scale-codec" -version = "3.6.12" +name = "rkyv" +version = "0.7.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +checksum = "2297bf9c81a3f0dc96bc9521370b88f054168c29826a75e89c55ff196e7ed6a1" dependencies = [ - "arrayvec", "bitvec", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", + "bytecheck", + "bytes", + "hashbrown 0.12.3", + "ptr_meta", + "rend", + "rkyv_derive", + "seahash", + "tinyvec", + "uuid", ] [[package]] -name = "parity-scale-codec-derive" -version = "3.6.12" +name = "rkyv_derive" +version = "0.7.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +checksum = "84d7b42d4b8d06048d3ac8db0eb31bcb942cbeb709f0b5f2b2ebde398d3038f5" dependencies = [ - "proc-macro-crate 3.2.0", "proc-macro2", "quote", "syn 1.0.109", ] [[package]] -name = "parking_lot" -version = "0.11.2" +name = "rlimit" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" 
+checksum = "7043b63bd0cd1aaa628e476b80e6d4023a3b50eb32789f2728908107bd0c793a" dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", + "libc", ] [[package]] -name = "parking_lot" -version = "0.12.3" +name = "rocksdb" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "6bd13e55d6d7b8cd0ea569161127567cd587676c99f4472f779a0279aa60a7a7" dependencies = [ - "lock_api", - "parking_lot_core 0.9.10", + "libc", + "librocksdb-sys", ] [[package]] -name = "parking_lot_core" -version = "0.8.6" +name = "route-recognizer" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if 1.0.0", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", -] +checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" [[package]] -name = "parking_lot_core" +name = "rsa" version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "b8573f03f5883dcaebdfcf4725caa1ecb9c15b2ef50c43a07b816e06799bb12d" dependencies = [ - "cfg-if 1.0.0", - "libc", - "redox_syscall 0.5.7", - "smallvec", - "windows-targets 0.52.6", + "const-oid", + "digest 0.10.7", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core 0.6.4", + "sha2 0.10.9", + "signature", + "spki", + "subtle", + "zeroize", ] [[package]] -name = "password-hash" -version = "0.4.2" +name = "rusqlite" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" +checksum = "165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f" dependencies = [ - "base64ct", - "rand_core 0.6.4", - "subtle", + 
"bitflags 2.11.0", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", + "time", ] [[package]] -name = "pasta_curves" -version = "0.5.1" +name = "rust-embed" +version = "8.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" +checksum = "04113cb9355a377d83f06ef1f0a45b8ab8cd7d8b1288160717d66df5c7988d27" dependencies = [ - "blake2b_simd", - "ff", - "group", - "lazy_static", - "rand 0.8.5", - "static_assertions", - "subtle", + "rust-embed-impl", + "rust-embed-utils", + "walkdir", ] [[package]] -name = "paste" -version = "1.0.15" +name = "rust-embed-impl" +version = "8.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +checksum = "da0902e4c7c8e997159ab384e6d0fc91c221375f6894346ae107f47dd0f3ccaa" +dependencies = [ + "proc-macro2", + "quote", + "rust-embed-utils", + "syn 2.0.116", + "walkdir", +] [[package]] -name = "pbkdf2" -version = "0.11.0" +name = "rust-embed-utils" +version = "8.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" +checksum = "5bcdef0be6fe7f6fa333b1073c949729274b05f123a0ad7efcb8efd878e5c3b1" dependencies = [ - "digest 0.10.7", - "password-hash", + "sha2 0.10.9", + "walkdir", ] [[package]] -name = "percent-encoding" -version = "2.3.1" +name = "rust_decimal" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" +dependencies = [ + "arrayvec", + "borsh", + "bytes", + "num-traits", + "rand 0.8.5", + "rkyv", + "serde", + "serde_json", +] [[package]] -name = "petgraph" -version = "0.6.5" +name = "rustc-demangle" +version = 
"0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" -dependencies = [ - "fixedbitset", - "indexmap 2.6.0", -] +checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" [[package]] -name = "pin-project" -version = "1.1.7" +name = "rustc-hash" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" -dependencies = [ - "pin-project-internal", -] +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] -name = "pin-project-internal" -version = "1.1.7" +name = "rustc-hash" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", -] +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] -name = "pin-project-lite" -version = "0.2.15" +name = "rustc-hex" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" [[package]] -name = "pin-utils" -version = "0.1.0" +name = "rustc_version" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] [[package]] -name = "pkcs8" -version = "0.10.2" +name = "rusticata-macros" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +checksum = 
"faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" dependencies = [ - "der", - "spki", + "nom", ] [[package]] -name = "pkg-config" -version = "0.3.31" +name = "rustix" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags 2.11.0", + "errno", + "libc", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", +] [[package]] -name = "poly1305" -version = "0.8.0" +name = "rustix" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ - "cpufeatures", - "opaque-debug", - "universal-hash", + "bitflags 2.11.0", + "errno", + "libc", + "linux-raw-sys 0.11.0", + "windows-sys 0.59.0", ] [[package]] -name = "portable-atomic" -version = "1.9.0" +name = "rustls" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", +] [[package]] -name = "portpicker" -version = "0.1.1" +name = "rustls" +version = "0.23.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be97d76faf1bfab666e1375477b23fde79eccf0276e9b63b92a39d676a889ba9" +checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4" dependencies = [ - "rand 0.8.5", + "aws-lc-rs", + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki 0.103.9", + "subtle", + "zeroize", ] [[package]] -name = "powerfmt" -version = "0.2.0" +name = "rustls-native-certs" +version = "0.8.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework", +] [[package]] -name = "ppv-lite86" -version = "0.2.20" +name = "rustls-pki-types" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ - "zerocopy", + "web-time", + "zeroize", ] [[package]] -name = "prettyplease" -version = "0.2.25" +name = "rustls-webpki" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "proc-macro2", - "syn 2.0.87", + "ring", + "untrusted", ] [[package]] -name = "primitive-types" -version = "0.12.2" +name = "rustls-webpki" +version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ - "fixed-hash", - "impl-codec", - "uint 0.9.5", + "aws-lc-rs", + "ring", + "rustls-pki-types", + "untrusted", ] [[package]] -name = "proc-macro-crate" -version = "0.1.5" +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "rusty-fork" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" 
+checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2" dependencies = [ - "toml", + "fnv", + "quick-error", + "tempfile", + "wait-timeout", ] [[package]] -name = "proc-macro-crate" -version = "3.2.0" +name = "ryu" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" + +[[package]] +name = "safelog" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +checksum = "e75b0880210c750d9189aa2d1ef94075a5500ccd9e7e98ad868e017c17c4a4bc" dependencies = [ - "toml_edit", + "derive_more", + "educe", + "either", + "fluid-let", + "thiserror 2.0.18", ] [[package]] -name = "proc-macro-error-attr2" -version = "2.0.0" +name = "same-file" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ - "proc-macro2", - "quote", + "winapi-util", ] [[package]] -name = "proc-macro-error2" -version = "2.0.1" +name = "sanitize-filename" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +checksum = "bc984f4f9ceb736a7bb755c3e3bd17dc56370af2600c9780dcc48c66453da34d" dependencies = [ - "proc-macro-error-attr2", - "proc-macro2", - "quote", - "syn 2.0.87", + "regex", ] [[package]] -name = "proc-macro2" -version = "1.0.89" +name = "sapling-crypto" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" +checksum = "f9d3c081c83f1dc87403d9d71a06f52301c0aa9ea4c17da2a3435bbf493ffba4" dependencies = [ - "unicode-ident", + "aes", + "bellman", + 
"bitvec", + "blake2b_simd", + "blake2s_simd", + "bls12_381", + "core2 0.3.3", + "document-features", + "ff", + "fpe", + "getset", + "group", + "hex", + "incrementalmerkletree", + "jubjub", + "lazy_static", + "memuse", + "rand 0.8.5", + "rand_core 0.6.4", + "redjubjub", + "subtle", + "tracing", + "zcash_note_encryption", + "zcash_spec", + "zip32", ] [[package]] -name = "proptest" -version = "1.5.0" +name = "schannel" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "bit-set", - "bit-vec", - "bitflags 2.6.0", - "lazy_static", - "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_xorshift", - "regex-syntax", - "rusty-fork", - "tempfile", - "unarray", + "windows-sys 0.61.2", ] [[package]] -name = "prost" -version = "0.13.3" +name = "schemars" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" dependencies = [ - "bytes 1.8.0", - "prost-derive", + "dyn-clone", + "ref-cast", + "serde", + "serde_json", ] [[package]] -name = "prost-build" -version = "0.13.3" +name = "schemars" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" +checksum = "a2b42f36aa1cd011945615b92222f6bf73c599a102a300334cd7f8dbeec726cc" dependencies = [ - "bytes 1.8.0", - "heck", - "itertools 0.13.0", - "log", - "multimap", - "once_cell", - "petgraph", - "prettyplease", - "prost", - "prost-types", - "regex", - "syn 2.0.87", - "tempfile", + "dyn-clone", + "ref-cast", + "schemars_derive", + "serde", + "serde_json", ] [[package]] -name = "prost-derive" -version = "0.13.3" +name = 
"schemars_derive" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" +checksum = "7d115b50f4aaeea07e79c1912f645c7513d81715d0420f8bc77a18c6260b307f" dependencies = [ - "anyhow", - "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.87", + "serde_derive_internals", + "syn 2.0.116", ] [[package]] -name = "prost-types" -version = "0.13.3" +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sct" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "prost", + "ring", + "untrusted", ] [[package]] -name = "quick-error" -version = "1.2.3" +name = "seahash" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] -name = "quickcheck" -version = "0.9.2" +name = "sec1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "env_logger", - "log", - "rand 0.7.3", - "rand_core 0.5.1", + "base16ct", + "der", + "generic-array", + "pkcs8", + "subtle", + "zeroize", ] [[package]] -name = "quickcheck_macros" -version = "0.9.1" +name = "secp256k1" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" +checksum = 
"9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", + "secp256k1-sys 0.10.1", + "serde", ] [[package]] -name = "quote" -version = "1.0.37" +name = "secp256k1" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "2c3c81b43dc2d8877c216a3fccf76677ee1ebccd429566d3e67447290d0c42b2" dependencies = [ - "proc-macro2", + "bitcoin_hashes", + "rand 0.9.2", + "secp256k1-sys 0.11.0", ] [[package]] -name = "radium" -version = "0.7.0" +name = "secp256k1-sys" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" +dependencies = [ + "cc", +] [[package]] -name = "rand" -version = "0.4.6" +name = "secp256k1-sys" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" +checksum = "dcb913707158fadaf0d8702c2db0e857de66eb003ccfdda5924b5f5ac98efb38" dependencies = [ - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "rdrand", - "winapi", + "cc", ] [[package]] -name = "rand" -version = "0.7.3" +name = "secrecy" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", + "zeroize", ] [[package]] -name = "rand" -version = "0.8.5" +name = "security-framework" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" 
+checksum = "d17b898a6d6948c3a8ee4372c17cb384f90d2e6e912ef00895b14fd7ab54ec38" dependencies = [ + "bitflags 2.11.0", + "core-foundation", + "core-foundation-sys", "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", + "security-framework-sys", ] [[package]] -name = "rand_chacha" -version = "0.2.2" +name = "security-framework-sys" +version = "2.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +checksum = "321c8673b092a9a42605034a9879d73cb79101ed5fd117bc9a597b89b4e9e61a" dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "core-foundation-sys", + "libc", ] [[package]] -name = "rand_chacha" -version = "0.3.1" +name = "semver" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" dependencies = [ - "ppv-lite86", - "rand_core 0.6.4", + "serde", + "serde_core", ] [[package]] -name = "rand_core" -version = "0.3.1" +name = "serde" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ - "rand_core 0.4.2", + "serde_core", + "serde_derive", ] [[package]] -name = "rand_core" -version = "0.4.2" +name = "serde-big-array" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" +checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" +dependencies = [ + "serde", +] [[package]] -name = "rand_core" -version = "0.5.1" +name = "serde-value" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" dependencies = [ - "getrandom 0.1.16", + "ordered-float", + "serde", ] [[package]] -name = "rand_core" -version = "0.6.4" +name = "serde_core" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ - "getrandom 0.2.15", + "serde_derive", ] [[package]] -name = "rand_hc" -version = "0.2.0" +name = "serde_derive" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ - "rand_core 0.5.1", + "proc-macro2", + "quote", + "syn 2.0.116", ] [[package]] -name = "rand_xorshift" -version = "0.3.0" +name = "serde_derive_internals" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ - "rand_core 0.6.4", + "proc-macro2", + "quote", + "syn 2.0.116", ] [[package]] -name = "rayon" -version = "1.10.0" +name = "serde_ignored" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +checksum = "115dffd5f3853e06e746965a20dcbae6ee747ae30b543d91b0e089668bb07798" dependencies = [ - "either", - "rayon-core", + "serde", + "serde_core", ] [[package]] -name = "rayon-core" -version = "1.12.1" +name = "serde_json" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ - "crossbeam-deque", - "crossbeam-utils", + "indexmap 2.13.0", + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", ] [[package]] -name = "rdrand" -version = "0.4.0" +name = "serde_spanned" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" dependencies = [ - "rand_core 0.3.1", + "serde", ] [[package]] -name = "reddsa" -version = "0.5.1" +name = "serde_spanned" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78a5191930e84973293aa5f532b513404460cd2216c1cfb76d08748c15b40b02" +checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" dependencies = [ - "blake2b_simd", - "byteorder", - "group", - "hex", - "jubjub", - "pasta_curves", - "rand_core 0.6.4", - "serde", - "thiserror", - "zeroize", + "serde_core", ] [[package]] -name = "redjubjub" -version = "0.7.0" +name = "serde_urlencoded" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a60db2c3bc9c6fd1e8631fee75abc008841d27144be744951d6b9b75f9b569c" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ - "rand_core 0.6.4", - "reddsa", + "form_urlencoded", + "itoa", + "ryu", "serde", - "thiserror", - "zeroize", ] [[package]] -name = "redox_syscall" -version = "0.2.16" +name = "serde_with" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" dependencies = [ - "bitflags 1.3.2", + "base64", + "chrono", + "hex", + "indexmap 
1.9.3", + "indexmap 2.13.0", + "schemars 0.9.0", + "schemars 1.2.1", + "serde_core", + "serde_json", + "serde_with_macros", + "time", ] [[package]] -name = "redox_syscall" -version = "0.5.7" +name = "serde_with_macros" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" dependencies = [ - "bitflags 2.6.0", + "darling 0.21.3", + "proc-macro2", + "quote", + "syn 2.0.116", ] [[package]] -name = "redox_users" -version = "0.4.6" +name = "sha1" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ - "getrandom 0.2.15", - "libredox", - "thiserror", + "cfg-if", + "cpufeatures 0.2.17", + "digest 0.10.7", ] [[package]] -name = "regex" -version = "1.11.1" +name = "sha2" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ - "aho-corasick", - "memchr", - "regex-automata", - "regex-syntax", + "cfg-if", + "cpufeatures 0.2.17", + "digest 0.10.7", ] [[package]] -name = "regex-automata" -version = "0.4.9" +name = "sha2" +version = "0.11.0-pre.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "540c0893cce56cdbcfebcec191ec8e0f470dd1889b6e7a0b503e310a94a168f5" dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", + "cfg-if", + "cpufeatures 0.2.17", + "digest 0.11.0-pre.9", ] [[package]] -name = "regex-syntax" -version = "0.8.5" +name = "sha3" +version = "0.10.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] [[package]] -name = "remove_dir_all" -version = "0.5.3" +name = "sharded-slab" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ - "winapi", + "lazy_static", ] [[package]] -name = "reqwest" -version = "0.11.27" +name = "shardtree" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" +checksum = "637e95dcd06bc1bb3f86ed9db1e1832a70125f32daae071ef37dcb7701b7d4fe" dependencies = [ - "base64 0.21.7", - "bytes 1.8.0", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.31", - "hyper-rustls 0.24.2", - "ipnet", - "js-sys", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls 0.21.12", - "rustls-pemfile 1.0.4", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration 0.5.1", - "tokio", - "tokio-rustls 0.24.1", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "webpki-roots 0.25.4", - "winreg", + "bitflags 2.11.0", + "either", + "incrementalmerkletree", + "tracing", ] [[package]] -name = "reqwest" -version = "0.12.9" +name = "shellexpand" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +checksum = "8b1fdf65dd6331831494dd616b30351c38e96e45921a27745cf98490458b90bb" dependencies = [ - "base64 0.22.1", - "bytes 
1.8.0", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.4.6", - "http 1.1.0", - "http-body 1.0.1", - "http-body-util", - "hyper 1.5.0", - "hyper-rustls 0.27.3", - "hyper-tls", - "hyper-util", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls-pemfile 2.2.0", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 1.0.1", - "system-configuration 0.6.1", - "tokio", - "tokio-native-tls", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "windows-registry", + "bstr", + "dirs", + "os_str_bytes", ] [[package]] -name = "ring" -version = "0.16.20" +name = "shlex" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi", -] +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] -name = "ring" -version = "0.17.8" +name = "signal-hook-registry" +version = "1.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" dependencies = [ - "cc", - "cfg-if 1.0.0", - "getrandom 0.2.15", + "errno", "libc", - "spin 0.9.8", - "untrusted 0.9.0", - "windows-sys 0.52.0", ] [[package]] -name = "ripemd" -version = "0.1.3" +name = "signature" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", + "rand_core 0.6.4", ] [[package]] -name = "rlimit" -version = "0.10.2" +name = "simd-adler32" +version = "0.3.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7043b63bd0cd1aaa628e476b80e6d4023a3b50eb32789f2728908107bd0c793a" -dependencies = [ - "libc", -] +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" [[package]] -name = "rocksdb" -version = "0.22.0" +name = "simdutf8" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bd13e55d6d7b8cd0ea569161127567cd587676c99f4472f779a0279aa60a7a7" -dependencies = [ - "libc", - "librocksdb-sys", -] +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] -name = "rust-embed" -version = "6.8.1" +name = "similar" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a36224c3276f8c4ebc8c20f158eca7ca4359c8db89991c4925132aaaf6702661" -dependencies = [ - "rust-embed-impl", - "rust-embed-utils", - "walkdir", -] +checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" [[package]] -name = "rust-embed-impl" -version = "6.8.1" +name = "simple-mermaid" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b94b81e5b2c284684141a2fb9e2a31be90638caf040bf9afbc5a0416afe1ac" -dependencies = [ - "proc-macro2", - "quote", - "rust-embed-utils", - "syn 2.0.87", - "walkdir", -] +checksum = "589144a964b4b30fe3a83b4bb1a09e2475aac194ec832a046a23e75bddf9eb29" [[package]] -name = "rust-embed-utils" -version = "7.8.1" +name = "sinsemilla" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d38ff6bf570dc3bb7100fce9f7b60c33fa71d80e88da3f2580df4ff2bdded74" +checksum = "3d268ae0ea06faafe1662e9967cd4f9022014f5eeb798e0c302c876df8b7af9c" dependencies = [ - "sha2 0.10.8", - "walkdir", + "group", + "pasta_curves", + "subtle", ] [[package]] -name = "rustc-demangle" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" - -[[package]] -name = "rustc-hash" -version = "1.1.0" +name = "siphasher" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] -name = "rustc-hex" -version = "2.1.0" +name = "slab" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" [[package]] -name = "rustc_version" -version = "0.4.1" +name = "slotmap" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +checksum = "bdd58c3c93c3d278ca835519292445cb4b0d4dc59ccfdf7ceadaab3f8aeb4038" dependencies = [ - "semver", + "serde", + "version_check", ] [[package]] -name = "rustix" -version = "0.38.40" +name = "slotmap-careful" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" +checksum = "d866fb978c1cf6d71abde4dce1905369edd0d0028ff9bc55e2431b83df7a36e8" dependencies = [ - "bitflags 2.6.0", - "errno", - "libc", - "linux-raw-sys", - "windows-sys 0.52.0", + "paste", + "serde", + "slotmap", + "thiserror 2.0.18", + "void", ] [[package]] -name = "rustls" -version = "0.21.12" +name = "smallvec" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "log", - "ring 0.17.8", - "rustls-webpki 0.101.7", - "sct", -] +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] -name = "rustls" -version = 
"0.23.16" +name = "socket2" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" +checksum = "86f4aa3ad99f2088c990dfa82d367e19cb29268ed67c574d10d0a4bfe71f07e0" dependencies = [ - "aws-lc-rs", - "log", - "once_cell", - "ring 0.17.8", - "rustls-pki-types", - "rustls-webpki 0.102.8", - "subtle", - "zeroize", + "libc", + "windows-sys 0.60.2", ] [[package]] -name = "rustls-native-certs" -version = "0.8.0" +name = "soketto" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +checksum = "2e859df029d160cb88608f5d7df7fb4753fd20fdfb4de5644f3d8b8440841721" dependencies = [ - "openssl-probe", - "rustls-pemfile 2.2.0", - "rustls-pki-types", - "schannel", - "security-framework", + "base64", + "bytes", + "futures", + "http", + "httparse", + "log", + "rand 0.8.5", + "sha1", ] [[package]] -name = "rustls-pemfile" -version = "1.0.4" +name = "spandoc" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +checksum = "25ed5a886d0234ac48bea41d450e4253cdd0642656249d6454e74c023d0f8821" dependencies = [ - "base64 0.21.7", + "spandoc-attribute", + "tracing", + "tracing-futures", ] [[package]] -name = "rustls-pemfile" -version = "2.2.0" +name = "spandoc-attribute" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +checksum = "5bdfb59103e43a0f99a346b57860d50f2138a7008d08acd964e9ac0fef3ae9a5" dependencies = [ - "rustls-pki-types", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "rustls-pki-types" -version = "1.10.0" +name = "spin" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] -name = "rustls-webpki" -version = "0.101.7" +name = "spki" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", + "base64ct", + "der", ] [[package]] -name = "rustls-webpki" -version = "0.102.8" +name = "ssh-cipher" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +checksum = "caac132742f0d33c3af65bfcde7f6aa8f62f0e991d80db99149eb9d44708784f" dependencies = [ - "aws-lc-rs", - "ring 0.17.8", - "rustls-pki-types", - "untrusted 0.9.0", + "cipher", + "ssh-encoding", ] [[package]] -name = "rustversion" -version = "1.0.18" +name = "ssh-encoding" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" +checksum = "eb9242b9ef4108a78e8cd1a2c98e193ef372437f8c22be363075233321dd4a15" +dependencies = [ + "base64ct", + "pem-rfc7468", + "sha2 0.10.9", +] [[package]] -name = "rusty-fork" -version = "0.3.0" +name = "ssh-key" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +checksum = "3b86f5297f0f04d08cabaa0f6bff7cb6aec4d9c3b49d87990d63da9d9156a8c3" dependencies = [ - "fnv", - "quick-error", - "tempfile", - "wait-timeout", + "num-bigint-dig", + "p256", + "p384", + "p521", + "rand_core 0.6.4", + "rsa", + "sec1", + "sha2 0.10.9", + "signature", + "ssh-cipher", + "ssh-encoding", + "subtle", + "zeroize", ] [[package]] -name = "ryu" -version = "1.0.18" +name = 
"stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "static_assertions" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] -name = "same-file" -version = "1.0.6" +name = "strsim" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] -name = "sapling-crypto" -version = "0.2.0" +name = "strsim" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e379398fffad84e49f9a45a05635fc004f66086e65942dbf4eb95332c26d2a" -dependencies = [ - "aes", - "bellman", - "bitvec", - "blake2b_simd", - "blake2s_simd", - "bls12_381", - "byteorder", - "document-features", - "ff", - "fpe", - "group", - "hex", - "incrementalmerkletree 0.6.0", - "jubjub", - "lazy_static", - "memuse", - "rand 0.8.5", - "rand_core 0.6.4", - "redjubjub", - "subtle", - "tracing", - "zcash_note_encryption", - "zcash_spec", - "zip32", -] +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] -name = "sapling-crypto" -version = "0.3.0" +name = "strum" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfff8cfce16aeb38da50b8e2ed33c9018f30552beff2210c266662a021b17f38" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" dependencies = [ - "aes", - "bellman", - "bitvec", - "blake2b_simd", - "blake2s_simd", - "bls12_381", - "byteorder", - "document-features", - "ff", - "fpe", - 
"group", - "hex", - "incrementalmerkletree 0.7.0", - "jubjub", - "lazy_static", - "memuse", - "rand 0.8.5", - "rand_core 0.6.4", - "redjubjub", - "subtle", - "tracing", - "zcash_note_encryption", - "zcash_spec", - "zip32", + "strum_macros", ] [[package]] -name = "schannel" -version = "0.1.26" +name = "strum_macros" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ - "windows-sys 0.59.0", + "heck", + "proc-macro2", + "quote", + "syn 2.0.116", ] [[package]] -name = "scopeguard" -version = "1.2.0" +name = "subtle" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] -name = "sct" -version = "0.7.1" +name = "syn" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", + "proc-macro2", + "quote", + "unicode-ident", ] [[package]] -name = "secp256k1" -version = "0.27.0" +name = "syn" +version = "2.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" +checksum = "3df424c70518695237746f84cede799c9c58fcb37450d7b23716568cc8bc69cb" dependencies = [ - "secp256k1-sys", - "serde", + "proc-macro2", + "quote", + "unicode-ident", ] [[package]] -name = "secp256k1-sys" -version = "0.8.1" +name = "sync_wrapper" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" dependencies = [ - "cc", + "futures-core", ] [[package]] -name = "secrecy" -version = "0.8.0" +name = "synstructure" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ - "zeroize", + "proc-macro2", + "quote", + "syn 2.0.116", ] [[package]] -name = "security-framework" -version = "2.11.1" +name = "sysinfo" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +checksum = "252800745060e7b9ffb7b2badbd8b31cfa4aa2e61af879d0a3bf2a317c20217d" dependencies = [ - "bitflags 2.6.0", - "core-foundation", - "core-foundation-sys", "libc", - "security-framework-sys", + "memchr", + "ntapi", + "objc2-core-foundation", + "objc2-io-kit", + "windows", ] [[package]] -name = "security-framework-sys" -version = "2.12.1" +name = "tap" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" -dependencies = [ - "core-foundation-sys", - "libc", -] +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] -name = "semver" -version = "1.0.23" +name = "tempfile" +version = "3.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" +dependencies = [ + "fastrand", + "getrandom 0.4.1", + "once_cell", + "rustix 1.1.3", + "windows-sys 0.59.0", +] [[package]] -name = "serde" -version = "1.0.215" +name = "thiserror" +version = "1.0.69" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "serde_derive", + "thiserror-impl 1.0.69", ] [[package]] -name = "serde-big-array" -version = "0.5.1" +name = "thiserror" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "serde", + "thiserror-impl 2.0.18", ] [[package]] -name = "serde-value" -version = "0.7.0" +name = "thiserror-impl" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ - "ordered-float", - "serde", + "proc-macro2", + "quote", + "syn 2.0.116", ] [[package]] -name = "serde_derive" -version = "1.0.215" +name = "thiserror-impl" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", ] [[package]] -name = "serde_json" -version = "1.0.132" +name = "thread_local" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ - "indexmap 2.6.0", - "itoa", - "memchr", - "ryu", - "serde", + "cfg-if", ] [[package]] -name = "serde_urlencoded" -version = "0.7.1" +name = "time" +version = "0.3.37" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ - "form_urlencoded", + "deranged", "itoa", - "ryu", + "num-conv", + "powerfmt", "serde", + "time-core", + "time-macros", ] [[package]] -name = "serde_with" -version = "3.11.0" +name = "time-core" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" + +[[package]] +name = "time-macros" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ - "base64 0.22.1", - "chrono", - "hex", - "indexmap 1.9.3", - "indexmap 2.6.0", - "serde", - "serde_derive", - "serde_json", - "serde_with_macros", - "time", + "num-conv", + "time-core", ] [[package]] -name = "serde_with_macros" -version = "3.11.0" +name = "tinystr" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" dependencies = [ - "darling", - "proc-macro2", - "quote", - "syn 2.0.87", + "displaydoc", + "serde_core", + "zerovec", ] [[package]] -name = "serde_yaml" -version = "0.9.34+deprecated" +name = "tinytemplate" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" dependencies = [ - "indexmap 2.6.0", - "itoa", - "ryu", "serde", - "unsafe-libyaml", + "serde_json", ] [[package]] -name = "sha2" -version = "0.9.9" 
+name = "tinyvec" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", + "tinyvec_macros", ] [[package]] -name = "sha2" -version = "0.10.8" +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.7", + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "tracing", + "windows-sys 0.61.2", ] [[package]] -name = "sharded-slab" -version = "0.1.7" +name = "tokio-macros" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ - "lazy_static", + "proc-macro2", + "quote", + "syn 2.0.116", ] [[package]] -name = "shardtree" -version = "0.4.0" +name = "tokio-rustls" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78222845cd8bbe5eb95687407648ff17693a35de5e8abaa39a4681fb21e033f9" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "bitflags 2.6.0", - "either", - "incrementalmerkletree 0.6.0", - "tracing", + "rustls 0.23.37", + "tokio", ] [[package]] 
-name = "shlex" -version = "1.3.0" +name = "tokio-stream" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", + "tokio-util", +] [[package]] -name = "signal-hook-registry" -version = "1.4.2" +name = "tokio-util" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" dependencies = [ - "libc", + "bytes", + "futures-core", + "futures-io", + "futures-sink", + "pin-project-lite", + "tokio", ] [[package]] -name = "signature" -version = "2.2.0" +name = "toml" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ - "rand_core 0.6.4", + "serde", ] [[package]] -name = "slab" -version = "0.4.9" +name = "toml" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ - "autocfg", + "serde", + "serde_spanned 0.6.9", + "toml_datetime 0.6.11", + "toml_edit 0.22.27", ] [[package]] -name = "smallvec" -version = "1.13.2" +name = "toml" +version = "0.9.12+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" +dependencies = [ + "indexmap 2.13.0", + "serde_core", + 
"serde_spanned 1.0.4", + "toml_datetime 0.7.5+spec-1.1.0", + "toml_parser", + "toml_writer", + "winnow", +] [[package]] -name = "socket2" -version = "0.5.7" +name = "toml_datetime" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" dependencies = [ - "libc", - "windows-sys 0.52.0", + "serde", ] [[package]] -name = "spin" -version = "0.5.2" +name = "toml_datetime" +version = "0.7.5+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" +dependencies = [ + "serde_core", +] [[package]] -name = "spin" -version = "0.9.8" +name = "toml_edit" +version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap 2.13.0", + "serde", + "serde_spanned 0.6.9", + "toml_datetime 0.6.11", + "toml_write", + "winnow", +] [[package]] -name = "spki" -version = "0.7.3" +name = "toml_edit" +version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - "base64ct", - "der", + "indexmap 2.13.0", + "toml_datetime 0.7.5+spec-1.1.0", + "toml_parser", + "winnow", ] [[package]] -name = "stable_deref_trait" -version = "1.2.0" +name = "toml_parser" +version = "1.0.9+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" 
+checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" +dependencies = [ + "winnow", +] [[package]] -name = "static_assertions" -version = "1.1.0" +name = "toml_write" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] -name = "strsim" -version = "0.11.1" +name = "toml_writer" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" [[package]] -name = "subtle" -version = "2.5.0" +name = "tonic" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "7f32a6f80051a4111560201420c7885d0082ba9efe2ab61875c587bb6b18b9a0" +dependencies = [ + "async-trait", + "axum", + "base64", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "rustls-native-certs", + "socket2", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tokio-stream", + "tower 0.5.3", + "tower-layer", + "tower-service", + "tracing", + "webpki-roots 1.0.6", +] [[package]] -name = "syn" -version = "1.0.109" +name = "tonic-build" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +checksum = "ce6d8958ed3be404120ca43ffa0fb1e1fc7be214e96c8d33bd43a131b6eebc9e" dependencies = [ + "prettyplease", "proc-macro2", "quote", - "unicode-ident", + "syn 2.0.116", ] [[package]] -name = "syn" -version = "2.0.87" +name = "tonic-prost" +version = "0.14.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" +checksum = "9f86539c0089bfd09b1f8c0ab0239d80392af74c21bc9e0f15e1b4aca4c1647f" dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", + "bytes", + "prost", + "tonic", ] [[package]] -name = "sync_wrapper" -version = "0.1.2" +name = "tonic-prost-build" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +checksum = "65873ace111e90344b8973e94a1fc817c924473affff24629281f90daed1cd2e" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "prost-types", + "quote", + "syn 2.0.116", + "tempfile", + "tonic-build", +] [[package]] -name = "sync_wrapper" -version = "1.0.1" +name = "tonic-reflection" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "91e5f75c058c397bf4ecada0fe0a2299c49c252fcf484f4a2f47279b6fa7026b" dependencies = [ - "futures-core", + "prost", + "prost-types", + "tokio", + "tokio-stream", + "tonic", + "tonic-prost", ] [[package]] -name = "synstructure" -version = "0.13.1" +name = "tor-async-utils" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +checksum = "cad5e568ad4e025a68aa0395a146247609dd5b6d8c2141255f5e4f367e7fda8a" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", + "derive-deftly", + "educe", + "futures", + "oneshot-fused-workaround", + "pin-project", + "postage", + "thiserror 2.0.18", + "void", ] [[package]] -name = "system-configuration" -version = "0.5.1" +name = "tor-basic-utils" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +checksum = "30122645feee76f76ba1ad011b316a2b135d44a00c45ed9c14af58b32ad93b69" dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "system-configuration-sys 0.5.0", + "derive_more", + "hex", + "itertools 0.14.0", + "libc", + "paste", + "rand 0.9.2", + "rand_chacha 0.9.0", + "serde", + "slab", + "smallvec", + "thiserror 2.0.18", ] [[package]] -name = "system-configuration" -version = "0.6.1" +name = "tor-bytes" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +checksum = "6fc7fb465ba671ee1486d8bd1e0a8f546887c2ce034004c4c9b03a6227e1c381" dependencies = [ - "bitflags 2.6.0", - "core-foundation", - "system-configuration-sys 0.6.0", + "bytes", + "derive-deftly", + "digest 0.10.7", + "educe", + "getrandom 0.3.4", + "safelog", + "thiserror 2.0.18", + "tor-error", + "tor-llcrypto", + "zeroize", ] [[package]] -name = "system-configuration-sys" -version = "0.5.0" +name = "tor-cell" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +checksum = "79ba1b43f22fab2daee3e0c902f1455b3aed8e086b2d83d8c60b36523b173d25" dependencies = [ - "core-foundation-sys", - "libc", + "amplify", + "bitflags 2.11.0", + "bytes", + "caret", + "derive-deftly", + "derive_more", + "educe", + "itertools 0.14.0", + "paste", + "rand 0.9.2", + "smallvec", + "thiserror 2.0.18", + "tor-basic-utils", + "tor-bytes", + "tor-cert", + "tor-error", + "tor-linkspec", + "tor-llcrypto", + "tor-memquota", + "tor-protover", + "tor-units", + "void", ] [[package]] -name = "system-configuration-sys" -version = "0.6.0" +name = "tor-cert" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +checksum = 
"f5e63e2db09b6d6d3453f63d7d55796c9b10a7cd2bcc14e553666b1f3a84df66" dependencies = [ - "core-foundation-sys", - "libc", + "caret", + "derive_builder_fork_arti", + "derive_more", + "digest 0.10.7", + "thiserror 2.0.18", + "tor-bytes", + "tor-checkable", + "tor-llcrypto", ] [[package]] -name = "tap" -version = "1.0.1" +name = "tor-chanmgr" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +checksum = "cbd6924b1716b7d071221087e18eb911ff8331eca4bc2d896f2a03864ff67f2c" +dependencies = [ + "async-trait", + "caret", + "derive_builder_fork_arti", + "derive_more", + "educe", + "futures", + "oneshot-fused-workaround", + "postage", + "rand 0.9.2", + "safelog", + "serde", + "thiserror 2.0.18", + "tor-async-utils", + "tor-basic-utils", + "tor-cell", + "tor-config", + "tor-error", + "tor-keymgr", + "tor-linkspec", + "tor-llcrypto", + "tor-memquota", + "tor-netdir", + "tor-proto", + "tor-rtcompat", + "tor-socksproto", + "tor-units", + "tracing", + "void", +] [[package]] -name = "tempdir" -version = "0.3.7" +name = "tor-checkable" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" +checksum = "7c9839e9bb302f17447c350e290bb107084aca86c640882a91522f2059f6a686" dependencies = [ - "rand 0.4.6", - "remove_dir_all", + "humantime", + "signature", + "thiserror 2.0.18", + "tor-llcrypto", ] [[package]] -name = "tempfile" -version = "3.14.0" +name = "tor-circmgr" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +checksum = "ea86ed519745136c7d90bb42efe4786dc7aa7548b92d9091ec8237cd16b9c12f" dependencies = [ - "cfg-if 1.0.0", - "fastrand", + "amplify", + "async-trait", + "cfg-if", + "derive-deftly", + "derive_builder_fork_arti", + "derive_more", + 
"downcast-rs", + "dyn-clone", + "educe", + "futures", + "humantime-serde", + "itertools 0.14.0", "once_cell", - "rustix", - "windows-sys 0.59.0", + "oneshot-fused-workaround", + "pin-project", + "rand 0.9.2", + "retry-error", + "safelog", + "serde", + "thiserror 2.0.18", + "tor-async-utils", + "tor-basic-utils", + "tor-cell", + "tor-chanmgr", + "tor-config", + "tor-dircommon", + "tor-error", + "tor-guardmgr", + "tor-linkspec", + "tor-memquota", + "tor-netdir", + "tor-netdoc", + "tor-persist", + "tor-proto", + "tor-protover", + "tor-relay-selection", + "tor-rtcompat", + "tor-units", + "tracing", + "void", + "weak-table", ] [[package]] -name = "test-case" -version = "3.3.1" +name = "tor-config" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb2550dd13afcd286853192af8601920d959b14c401fcece38071d53bf0768a8" +checksum = "cb15df773842025010d885fbe862062ebaa342b799f9716273eaf733b92f2f45" dependencies = [ - "test-case-macros", + "amplify", + "cfg-if", + "derive-deftly", + "derive_builder_fork_arti", + "educe", + "either", + "figment", + "fs-mistrust", + "futures", + "itertools 0.14.0", + "notify", + "paste", + "postage", + "regex", + "serde", + "serde-value", + "serde_ignored", + "strum", + "thiserror 2.0.18", + "toml 0.9.12+spec-1.1.0", + "tor-basic-utils", + "tor-error", + "tor-rtcompat", + "tracing", + "void", ] [[package]] -name = "test-case-core" -version = "3.3.1" +name = "tor-config-path" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adcb7fd841cd518e279be3d5a3eb0636409487998a4aff22f3de87b81e88384f" +checksum = "c80d2784120508b5374a979cc0f6be0177ed870d176b0b31c94cf822200091dc" dependencies = [ - "cfg-if 1.0.0", - "proc-macro2", - "quote", - "syn 2.0.87", + "directories", + "serde", + "shellexpand", + "thiserror 2.0.18", + "tor-error", + "tor-general-addr", ] [[package]] -name = "test-case-macros" -version = "3.3.1" +name = "tor-consdiff" +version = "0.35.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" +checksum = "c1690438c1fc778fc7c89c132e529365b1430d6afe03aeecbc2508324807bf0b" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", - "test-case-core", + "digest 0.10.7", + "hex", + "thiserror 2.0.18", + "tor-llcrypto", ] [[package]] -name = "thiserror" -version = "1.0.69" +name = "tor-dirclient" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +checksum = "f5e730873fdc4b7f9545472c0d1cf0c43a7e89d6c996c234b6b548163010284c" dependencies = [ - "thiserror-impl", + "async-compression", + "base64ct", + "derive_more", + "futures", + "hex", + "http", + "httparse", + "httpdate", + "itertools 0.14.0", + "memchr", + "thiserror 2.0.18", + "tor-circmgr", + "tor-error", + "tor-linkspec", + "tor-llcrypto", + "tor-netdoc", + "tor-proto", + "tor-rtcompat", + "tracing", ] [[package]] -name = "thiserror-impl" -version = "1.0.69" +name = "tor-dircommon" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +checksum = "5b60043697f94ec228f4fb6d30834a037774f2f3c2cdb0bdb805248f46b5320e" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", + "base64ct", + "derive_builder_fork_arti", + "getset", + "humantime", + "humantime-serde", + "serde", + "tor-basic-utils", + "tor-checkable", + "tor-config", + "tor-linkspec", + "tor-llcrypto", + "tor-netdoc", + "tracing", ] [[package]] -name = "thread-id" -version = "4.2.2" +name = "tor-dirmgr" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfe8f25bbdd100db7e1d34acf7fd2dc59c4bf8f7483f505eaa7d4f12f76cc0ea" +checksum = "86f5e21a574acb35dd1a32960b10cb184db2e2ffbb4007abd3515951ce09d0f2" dependencies = [ - "libc", - "winapi", + 
"async-trait", + "base64ct", + "derive_builder_fork_arti", + "derive_more", + "digest 0.10.7", + "educe", + "event-listener", + "fs-mistrust", + "fslock", + "futures", + "hex", + "humantime", + "humantime-serde", + "itertools 0.14.0", + "memmap2", + "oneshot-fused-workaround", + "paste", + "postage", + "rand 0.9.2", + "rusqlite", + "safelog", + "scopeguard", + "serde", + "serde_json", + "signature", + "static_assertions", + "strum", + "thiserror 2.0.18", + "time", + "tor-async-utils", + "tor-basic-utils", + "tor-checkable", + "tor-circmgr", + "tor-config", + "tor-consdiff", + "tor-dirclient", + "tor-dircommon", + "tor-error", + "tor-guardmgr", + "tor-llcrypto", + "tor-netdir", + "tor-netdoc", + "tor-persist", + "tor-proto", + "tor-protover", + "tor-rtcompat", + "tracing", ] [[package]] -name = "thread_local" -version = "1.1.8" +name = "tor-error" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "63d766a5d11ddad7946cf8357ce7a1e948abdc3ad3ef06ed23f35af522dc089c" dependencies = [ - "cfg-if 1.0.0", - "once_cell", + "derive_more", + "futures", + "paste", + "retry-error", + "static_assertions", + "strum", + "thiserror 2.0.18", + "tracing", + "void", ] [[package]] -name = "time" -version = "0.3.36" +name = "tor-general-addr" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "c42cb5b5aec0584db2fba4a88c4e08fb09535ef61e4ef5674315a89e69ec31a2" dependencies = [ - "deranged", - "itoa", - "num-conv", - "powerfmt", - "serde", - "time-core", - "time-macros", + "derive_more", + "thiserror 2.0.18", + "void", ] [[package]] -name = "time-core" -version = "0.1.2" +name = "tor-guardmgr" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "0585a83a4c56b4f31f6fa2965e2f9c490c9f4d29fba2fedb5a9ee71009f793c0" +dependencies = [ + "amplify", + "base64ct", + "derive-deftly", + "derive_builder_fork_arti", + "derive_more", + "dyn-clone", + "educe", + "futures", + "humantime", + "humantime-serde", + "itertools 0.14.0", + "num_enum", + "oneshot-fused-workaround", + "pin-project", + "postage", + "rand 0.9.2", + "safelog", + "serde", + "strum", + "thiserror 2.0.18", + "tor-async-utils", + "tor-basic-utils", + "tor-config", + "tor-dircommon", + "tor-error", + "tor-linkspec", + "tor-llcrypto", + "tor-netdir", + "tor-netdoc", + "tor-persist", + "tor-proto", + "tor-relay-selection", + "tor-rtcompat", + "tor-units", + "tracing", +] [[package]] -name = "time-macros" -version = "0.2.18" +name = "tor-hscrypto" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "cf9ee6e0dbec9ba11c3d046181a42dd4759e108de38e2b5927689edbdc458a51" dependencies = [ - "num-conv", - "time-core", + "data-encoding", + "derive-deftly", + "derive_more", + "digest 0.10.7", + "hex", + "humantime", + "itertools 0.14.0", + "paste", + "rand 0.9.2", + "safelog", + "serde", + "signature", + "subtle", + "thiserror 2.0.18", + "tor-basic-utils", + "tor-bytes", + "tor-error", + "tor-key-forge", + "tor-llcrypto", + "tor-units", + "void", ] [[package]] -name = "tinystr" -version = "0.7.6" +name = "tor-key-forge" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "5aa30066b80ade55a1b88a82b5320dfc50d1724918ad614ded8ecb4820c32062" dependencies = [ - "displaydoc", - "zerovec", + "derive-deftly", + "derive_more", + "downcast-rs", + "paste", + "rand 0.9.2", + "rsa", + "signature", + "ssh-key", + "thiserror 2.0.18", + "tor-bytes", + "tor-cert", + 
"tor-checkable", + "tor-error", + "tor-llcrypto", ] [[package]] -name = "tinyvec" -version = "1.8.0" +name = "tor-keymgr" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "e331dede46246977ae6722888329a60ef446df437f1a13ad2addcdff840692cc" dependencies = [ - "tinyvec_macros", + "amplify", + "arrayvec", + "cfg-if", + "derive-deftly", + "derive_builder_fork_arti", + "derive_more", + "downcast-rs", + "dyn-clone", + "fs-mistrust", + "glob-match", + "humantime", + "inventory", + "itertools 0.14.0", + "rand 0.9.2", + "safelog", + "serde", + "signature", + "ssh-key", + "thiserror 2.0.18", + "tor-basic-utils", + "tor-bytes", + "tor-config", + "tor-config-path", + "tor-error", + "tor-hscrypto", + "tor-key-forge", + "tor-llcrypto", + "tor-persist", + "tracing", + "visibility", + "walkdir", + "zeroize", ] [[package]] -name = "tinyvec_macros" -version = "0.1.1" +name = "tor-linkspec" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" +checksum = "d9daa8b71777ecf02d317c200e96fd777d3668ddac4fc2fe3054216429b7917f" +dependencies = [ + "base64ct", + "by_address", + "caret", + "derive-deftly", + "derive_builder_fork_arti", + "derive_more", + "hex", + "itertools 0.14.0", + "safelog", + "serde", + "serde_with", + "strum", + "thiserror 2.0.18", + "tor-basic-utils", + "tor-bytes", + "tor-config", + "tor-llcrypto", + "tor-memquota", + "tor-protover", +] [[package]] -name = "tokio" -version = "1.41.1" +name = "tor-llcrypto" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" +checksum = "95cb3920ea326ba2bb7c2674293655d045a1112eb93cc8ddcbf948bb59307a97" dependencies = [ - "backtrace", - "bytes 1.8.0", - "libc", - "mio", - "parking_lot 0.12.3", - 
"pin-project-lite", - "signal-hook-registry", - "socket2", - "tokio-macros", - "tracing", - "windows-sys 0.52.0", + "aes", + "base64ct", + "ctr", + "curve25519-dalek", + "der-parser", + "derive-deftly", + "derive_more", + "digest 0.10.7", + "ed25519-dalek", + "educe", + "getrandom 0.3.4", + "hex", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_core 0.6.4", + "rand_core 0.9.5", + "rand_jitter", + "rdrand", + "rsa", + "safelog", + "serde", + "sha1", + "sha2 0.10.9", + "sha3", + "signature", + "subtle", + "thiserror 2.0.18", + "tor-error", + "tor-memquota", + "visibility", + "x25519-dalek", + "zeroize", ] [[package]] -name = "tokio-macros" -version = "2.4.0" +name = "tor-log-ratelim" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "845d65304be6a614198027c4b2d1b35aaf073335c26df619d17e5f4027f2657f" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", + "futures", + "humantime", + "thiserror 2.0.18", + "tor-error", + "tor-rtcompat", + "tracing", + "weak-table", ] [[package]] -name = "tokio-native-tls" -version = "0.3.1" +name = "tor-memquota" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +checksum = "ef375c3442a4ea74f0b6bf91a3eed660d55301b2e2f59b366aba4849b2321a6f" dependencies = [ - "native-tls", - "tokio", + "cfg-if", + "derive-deftly", + "derive_more", + "dyn-clone", + "educe", + "futures", + "itertools 0.14.0", + "paste", + "pin-project", + "serde", + "slotmap-careful", + "static_assertions", + "sysinfo", + "thiserror 2.0.18", + "tor-async-utils", + "tor-basic-utils", + "tor-config", + "tor-error", + "tor-log-ratelim", + "tor-rtcompat", + "tracing", + "void", ] [[package]] -name = "tokio-rustls" -version = "0.24.1" +name = "tor-netdir" +version = "0.35.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +checksum = "638b4e6507e3786488859d3c463fa73addbad4f788806c6972603727e527672e" dependencies = [ - "rustls 0.21.12", - "tokio", + "async-trait", + "bitflags 2.11.0", + "derive_more", + "futures", + "humantime", + "itertools 0.14.0", + "num_enum", + "rand 0.9.2", + "serde", + "strum", + "thiserror 2.0.18", + "tor-basic-utils", + "tor-error", + "tor-linkspec", + "tor-llcrypto", + "tor-netdoc", + "tor-protover", + "tor-units", + "tracing", + "typed-index-collections", ] [[package]] -name = "tokio-rustls" -version = "0.26.0" +name = "tor-netdoc" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +checksum = "1dbc32d89e7ea2e2799168d0c453061647a727e39fc66f52e1bcb4c38c8dc433" dependencies = [ - "rustls 0.23.16", - "rustls-pki-types", - "tokio", + "amplify", + "base64ct", + "bitflags 2.11.0", + "cipher", + "derive-deftly", + "derive_builder_fork_arti", + "derive_more", + "digest 0.10.7", + "educe", + "hex", + "humantime", + "itertools 0.14.0", + "memchr", + "paste", + "phf 0.13.1", + "serde", + "serde_with", + "signature", + "smallvec", + "strum", + "subtle", + "thiserror 2.0.18", + "time", + "tinystr", + "tor-basic-utils", + "tor-bytes", + "tor-cell", + "tor-cert", + "tor-checkable", + "tor-error", + "tor-llcrypto", + "tor-protover", + "void", + "weak-table", + "zeroize", ] [[package]] -name = "tokio-stream" -version = "0.1.16" +name = "tor-persist" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" +checksum = "59e41aea027686b05f21e0ad75aa2c0c9681a87f2f3130b6d6f7a7a8c06edd7b" dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", - "tokio-util 0.7.12", + "derive-deftly", + "derive_more", + "filetime", + 
"fs-mistrust", + "fslock", + "futures", + "itertools 0.14.0", + "oneshot-fused-workaround", + "paste", + "sanitize-filename", + "serde", + "serde_json", + "thiserror 2.0.18", + "time", + "tor-async-utils", + "tor-basic-utils", + "tor-error", + "tracing", + "void", ] [[package]] -name = "tokio-util" -version = "0.6.10" +name = "tor-proto" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" +checksum = "b95119789898b1b12e8f487745b70215e9f7d3df7c23325e4901ae65aec9703b" dependencies = [ - "bytes 1.8.0", - "futures-core", - "futures-sink", - "log", - "pin-project-lite", + "amplify", + "asynchronous-codec", + "bitvec", + "bytes", + "caret", + "cfg-if", + "cipher", + "coarsetime", + "criterion-cycles-per-byte", + "derive-deftly", + "derive_builder_fork_arti", + "derive_more", + "digest 0.10.7", + "educe", + "enum_dispatch", + "futures", + "futures-util", + "hkdf", + "hmac 0.12.1", + "itertools 0.14.0", + "nonany", + "oneshot-fused-workaround", + "pin-project", + "postage", + "rand 0.9.2", + "rand_core 0.9.5", + "safelog", + "slotmap-careful", + "smallvec", + "static_assertions", + "subtle", + "sync_wrapper", + "thiserror 2.0.18", "tokio", + "tokio-util", + "tor-async-utils", + "tor-basic-utils", + "tor-bytes", + "tor-cell", + "tor-cert", + "tor-checkable", + "tor-config", + "tor-error", + "tor-linkspec", + "tor-llcrypto", + "tor-log-ratelim", + "tor-memquota", + "tor-protover", + "tor-rtcompat", + "tor-rtmock", + "tor-units", + "tracing", + "typenum", + "visibility", + "void", + "zeroize", ] [[package]] -name = "tokio-util" -version = "0.7.12" +name = "tor-protover" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "484dc40a0ea58e8cc809ca2faf4df010327f7089ceafa6c8781a767260a34f6e" dependencies = [ - "bytes 1.8.0", - "futures-core", - 
"futures-sink", - "pin-project-lite", - "tokio", + "caret", + "paste", + "serde_with", + "thiserror 2.0.18", + "tor-bytes", ] [[package]] -name = "toml" -version = "0.5.11" +name = "tor-relay-selection" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +checksum = "54cc2b365bf5881b4380059e0636cc40e1fa18a1b3b050f78ce322c95139d467" dependencies = [ + "rand 0.9.2", "serde", + "tor-basic-utils", + "tor-linkspec", + "tor-netdir", + "tor-netdoc", ] [[package]] -name = "toml_datetime" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" - -[[package]] -name = "toml_edit" -version = "0.22.22" +name = "tor-rtcompat" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "591b0b0695e86c2958b8ab9c431f6fea17b544ef3ed3931bbfe96239fd5c9193" dependencies = [ - "indexmap 2.6.0", - "toml_datetime", - "winnow", + "async-trait", + "async_executors", + "asynchronous-codec", + "coarsetime", + "derive_more", + "dyn-clone", + "educe", + "futures", + "futures-rustls", + "hex", + "libc", + "paste", + "pin-project", + "rustls-pki-types", + "rustls-webpki 0.103.9", + "thiserror 2.0.18", + "tokio", + "tokio-util", + "tor-error", + "tor-general-addr", + "tracing", + "void", ] [[package]] -name = "tonic" -version = "0.12.3" +name = "tor-rtmock" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +checksum = "9cdbf415d79f7a4d2a502039645a39d8bf0ff8af715e588575ac812b2baa7a91" dependencies = [ - "async-stream", + "amplify", + "assert_matches", "async-trait", - "axum", - "base64 0.22.1", - "bytes 1.8.0", - "h2 0.4.6", - "http 1.1.0", - "http-body 
1.0.1", - "http-body-util", - "hyper 1.5.0", - "hyper-timeout", - "hyper-util", - "percent-encoding", + "derive-deftly", + "derive_more", + "educe", + "futures", + "humantime", + "itertools 0.14.0", + "oneshot-fused-workaround", "pin-project", - "prost", - "rustls-native-certs", - "rustls-pemfile 2.2.0", - "socket2", - "tokio", - "tokio-rustls 0.26.0", - "tokio-stream", - "tower 0.4.13", - "tower-layer", - "tower-service", + "priority-queue", + "slotmap-careful", + "strum", + "thiserror 2.0.18", + "tor-error", + "tor-general-addr", + "tor-rtcompat", "tracing", - "webpki-roots 0.26.6", + "tracing-test", + "void", ] [[package]] -name = "tonic-build" -version = "0.12.3" +name = "tor-socksproto" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" +checksum = "0dbb9b68d9cf8e07eeafbca91ac11b7d9c4be1e674cb59830edfbac153333e7f" dependencies = [ - "prettyplease", - "proc-macro2", - "prost-build", - "prost-types", - "quote", - "syn 2.0.87", + "amplify", + "caret", + "derive-deftly", + "educe", + "safelog", + "subtle", + "thiserror 2.0.18", + "tor-bytes", + "tor-error", +] + +[[package]] +name = "tor-units" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48139f001dd6f409325b7c190ebcea1033b27f09042543946ab7aa4ad286257b" +dependencies = [ + "derive-deftly", + "derive_more", + "serde", + "thiserror 2.0.18", + "tor-memquota", ] [[package]] @@ -4612,13 +7513,10 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", - "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand 0.8.5", - "slab", "tokio", - "tokio-util 0.7.12", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -4626,45 +7524,35 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.1" +version = "0.5.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", + "indexmap 2.13.0", "pin-project-lite", - "sync_wrapper 0.1.2", + "slab", + "sync_wrapper", + "tokio", + "tokio-util", "tower-layer", "tower-service", -] - -[[package]] -name = "tower-batch-control" -version = "0.2.41-beta.18" -source = "git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1#fef500a72840d4b7c89d68e14980eeda43869873" -dependencies = [ - "futures", - "futures-core", - "pin-project", - "rayon", - "tokio", - "tokio-util 0.7.12", - "tower 0.4.13", "tracing", - "tracing-futures", ] [[package]] name = "tower-batch-control" -version = "0.2.41-beta.18" -source = "git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371#4eb285de50848f1a4dcebd0fbe353e4f150fd371" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e6cf52578f98b4da47335c26c4f883f7993b1a9b9d2f5420eb8dbfd5dd19a28" dependencies = [ "futures", "futures-core", "pin-project", "rayon", "tokio", - "tokio-util 0.7.12", + "tokio-util", "tower 0.4.13", "tracing", "tracing-futures", @@ -4672,8 +7560,9 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.41-beta.18" -source = "git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1#fef500a72840d4b7c89d68e14980eeda43869873" +version = "0.2.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4434e19ee996ee5c6aa42f11463a355138452592e5c5b5b73b6f0f19534556af" dependencies = [ "futures-core", "pin-project", @@ -4682,14 +7571,21 @@ dependencies = [ ] [[package]] -name = "tower-fallback" -version = "0.2.41-beta.18" -source = "git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371#4eb285de50848f1a4dcebd0fbe353e4f150fd371" +name 
= "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ - "futures-core", - "pin-project", - "tower 0.4.13", - "tracing", + "bitflags 2.11.0", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower 0.5.3", + "tower-layer", + "tower-service", ] [[package]] @@ -4706,9 +7602,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "log", "pin-project-lite", @@ -4718,20 +7614,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", "valuable", @@ -4739,9 +7635,9 @@ dependencies = [ [[package]] name = "tracing-error" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" +checksum = "8b1581020d7a273442f5b45074a6a57d5757ad0a47dac0e9f0bd57b81936f3db" dependencies = [ "tracing", 
"tracing-subscriber", @@ -4770,18 +7666,55 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" dependencies = [ + "matchers", "nu-ansi-term", + "once_cell", + "regex-automata", "sharded-slab", "smallvec", "thread_local", + "time", + "tracing", "tracing-core", "tracing-log", ] +[[package]] +name = "tracing-test" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19a4c448db514d4f24c5ddb9f73f2ee71bfb24c526cf0c570ba142d1119e0051" +dependencies = [ + "tracing-core", + "tracing-subscriber", + "tracing-test-macro", +] + +[[package]] +name = "tracing-test-macro" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad06847b7afb65c7866a36664b75c40b895e318cea4f71299f013fb22965329d" +dependencies = [ + "quote", + "syn 2.0.116", +] + +[[package]] +name = "trait-variant" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.116", +] + [[package]] name = "try-lock" version = "0.2.5" @@ -4789,19 +7722,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] -name = "typemap-ors" -version = "1.0.0" +name = "typed-index-collections" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a68c24b707f02dd18f1e4ccceb9d49f2058c2fb86384ef9972592904d7a28867" +checksum = "898160f1dfd383b4e92e17f0512a7d62f3c51c44937b23b6ffc3a1614a8eaccd" dependencies = [ - "unsafe-any-ors", + "bincode 2.0.1", + "serde", ] [[package]] name = 
"typenum" -version = "1.17.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" [[package]] name = "uint" @@ -4833,80 +7767,81 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" +[[package]] +name = "uncased" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697" +dependencies = [ + "version_check", +] + [[package]] name = "unicase" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" +checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" [[package]] name = "unicode-normalization" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" +checksum = "5fd4f6878c9cb28d874b009da9e8d183b5abc80117c40bbd187a1fde336be6e8" dependencies = [ "tinyvec", ] [[package]] -name = "universal-hash" -version = "0.5.1" +name = "unicode-segmentation" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" -dependencies = [ - "crypto-common", - "subtle", -] +checksum = 
"f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] -name = "unsafe-any-ors" -version = "1.0.0" +name = "unicode-xid" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a303d30665362d9680d7d91d78b23f5f899504d4f08b3c4cf08d055d87c0ad" -dependencies = [ - "destructure_traitobject", -] +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] -name = "unsafe-libyaml" -version = "0.2.11" +name = "universal-hash" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" +dependencies = [ + "crypto-common 0.1.7", + "subtle", +] [[package]] name = "untrusted" -version = "0.7.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] -name = "untrusted" -version = "0.9.0" +name = "unty" +version = "0.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +checksum = "6d49784317cd0d1ee7ec5c716dd598ec5b4483ea832a2dced265471cc0f690ae" [[package]] name = "url" -version = "2.5.3" +version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", "percent-encoding", -] - -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + "serde", +] [[package]] 
name = "utf8_iter" @@ -4920,11 +7855,21 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "uuid" +version = "1.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b672338555252d43fd2240c714dc444b8c6fb0a5c5335e65a07bba7742735ddb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "valuable" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "vcpkg" @@ -4946,9 +7891,15 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", ] +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + [[package]] name = "wagyu-zcash-parameters" version = "0.2.0" @@ -5001,9 +7952,9 @@ checksum = "a7b6d5a78adc3e8f198e9cd730f219a695431467f7ec29dcfc63ade885feebe1" [[package]] name = "wait-timeout" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" dependencies = [ "libc", ] @@ -5035,9 +7986,27 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = 
"ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.2+wasi-0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen", +] [[package]] name = "wasite" @@ -5046,48 +8015,46 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] -name = "wasm-bindgen" -version = "0.2.95" +name = "wasix" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "1757e0d1f8456693c7e5c6c629bdb54884e032aa0bb53c155f6a39f94440d332" dependencies = [ - "cfg-if 1.0.0", - "once_cell", - "wasm-bindgen-macro", + "wasi 0.11.1+wasi-snapshot-preview1", ] [[package]] -name = "wasm-bindgen-backend" -version = "0.2.95" +name = "wasm-bindgen" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ - "bumpalo", - "log", + "cfg-if", "once_cell", - "proc-macro2", - "quote", - "syn 2.0.87", + "rustversion", + "wasm-bindgen-macro", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.45" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" +checksum = 
"70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", + "futures-util", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5095,50 +8062,84 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ + "bumpalo", "proc-macro2", "quote", - "syn 2.0.87", - "wasm-bindgen-backend", + "syn 2.0.116", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" +dependencies = [ + "unicode-ident", +] [[package]] -name = "web-sys" -version = "0.3.72" +name = "wasm-encoder" +version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" dependencies = [ - "js-sys", - "wasm-bindgen", + "leb128fmt", + "wasmparser", ] [[package]] -name = "webpki" -version = "0.21.4" +name = "wasm-metadata" +version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", + "anyhow", + "indexmap 2.13.0", + "wasm-encoder", + "wasmparser", ] [[package]] -name = "webpki-roots" -version = "0.21.1" +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags 2.11.0", + "hashbrown 0.15.5", + "indexmap 2.13.0", + "semver", +] + +[[package]] +name = "weak-table" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "323f4da9523e9a669e1eaf9c6e763892769b1d38c623913647bfdc1532fe4549" + +[[package]] +name = "web-sys" +version = "0.3.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ - "webpki", + "js-sys", + "wasm-bindgen", ] [[package]] @@ -5149,9 +8150,9 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "webpki-roots" -version = "0.26.6" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" +checksum = "22cfaf3c063993ff62e73cb4311efde4db1efb31ab78a3e5c457939ad5cc0bed" dependencies = [ "rustls-pki-types", ] @@ -5165,16 +8166,27 @@ dependencies = [ "either", "home", "once_cell", - "rustix", + "rustix 0.38.44", +] + +[[package]] +name = "which" +version = "8.0.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3fabb953106c3c8eea8306e4393700d7657561cb43122571b172bbfb7c7ba1d" +dependencies = [ + "env_home", + "rustix 1.1.3", + "winsafe", ] [[package]] name = "whoami" -version = "1.5.2" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" +checksum = "5d4a4db5077702ca3015d3d02d74974948aba2ad9e12ab7df718ee64ccd7e97d" dependencies = [ - "redox_syscall 0.5.7", + "libredox", "wasite", "web-sys", ] @@ -5197,9 +8209,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ "windows-sys 0.59.0", ] @@ -5210,52 +8222,143 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.61.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" +dependencies = [ + "windows-collections", + "windows-core 0.61.2", + "windows-future", + "windows-link 0.1.3", + "windows-numerics", +] + +[[package]] +name = "windows-collections" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" +dependencies = [ + "windows-core 0.61.2", +] + [[package]] name = "windows-core" -version = "0.52.0" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ - "windows-targets 0.52.6", + "windows-implement", + "windows-interface", + "windows-link 0.1.3", + "windows-result 0.3.4", + "windows-strings 0.4.2", +] + +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link 0.2.1", + "windows-result 0.4.1", + "windows-strings 0.5.1", +] + +[[package]] +name = "windows-future" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" +dependencies = [ + "windows-core 0.61.2", + "windows-link 0.1.3", + "windows-threading", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.116", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.116", ] [[package]] -name = "windows-registry" +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-numerics" version = "0.2.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" dependencies = [ - "windows-result", - "windows-strings", - "windows-targets 0.52.6", + "windows-core 0.61.2", + "windows-link 0.1.3", ] [[package]] name = "windows-result" -version = "0.2.0" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ - "windows-targets 0.52.6", + "windows-link 0.1.3", +] + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link 0.2.1", ] [[package]] name = "windows-strings" -version = "0.1.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ - "windows-result", - "windows-targets 0.52.6", + "windows-link 0.1.3", ] [[package]] -name = "windows-sys" -version = "0.48.0" +name = "windows-strings" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ - "windows-targets 0.48.5", + "windows-link 0.2.1", ] [[package]] @@ -5277,18 +8380,21 @@ dependencies = [ ] [[package]] -name = "windows-targets" -version = "0.48.5" +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 0.48.5", + "windows-link 0.2.1", ] [[package]] @@ -5300,7 +8406,7 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", + "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", @@ -5308,10 +8414,30 @@ dependencies = [ ] [[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link 0.2.1", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows-threading" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" +dependencies = [ + "windows-link 0.1.3", +] [[package]] name = "windows_aarch64_gnullvm" @@ -5320,10 +8446,10 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" +name = "windows_aarch64_gnullvm" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" @@ -5332,10 +8458,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] -name = "windows_i686_gnu" -version = "0.48.5" +name = "windows_aarch64_msvc" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" @@ -5343,6 +8469,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" @@ -5350,10 +8482,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] -name = "windows_i686_msvc" -version = "0.48.5" +name = "windows_i686_gnullvm" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +checksum = 
"fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" @@ -5362,10 +8494,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] -name = "windows_x86_64_gnu" -version = "0.48.5" +name = "windows_i686_msvc" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" @@ -5374,10 +8506,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" +name = "windows_x86_64_gnu" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" @@ -5386,10 +8518,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" +name = "windows_x86_64_gnullvm" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" @@ -5397,36 +8529,120 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + [[package]] name = "winnow" -version = "0.6.20" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" dependencies = [ "memchr", ] [[package]] -name = "winreg" -version = "0.50.0" +name = "winsafe" +version = "0.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" + +[[package]] +name = "wit-bindgen" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" dependencies = [ - "cfg-if 1.0.0", - "windows-sys 0.48.0", + "wit-bindgen-rust-macro", ] [[package]] -name = "write16" -version = "1.0.0" +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap 2.13.0", + "prettyplease", + "syn 2.0.116", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + 
"proc-macro2", + "quote", + "syn 2.0.116", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags 2.11.0", + "indexmap 2.13.0", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap 2.13.0", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] [[package]] name = "writeable" -version = "0.5.5" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" [[package]] name = "wyz" @@ -5457,11 +8673,10 @@ checksum = "213b7324336b53d2414b2db8537e56544d981803139155afa84f76eeebb7a546" [[package]] name = "yoke" -version = "0.7.4" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" dependencies = [ - "serde", "stable_deref_trait", "yoke-derive", "zerofrom", @@ -5469,141 +8684,228 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.4" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +checksum = 
"b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", "synstructure", ] +[[package]] +name = "zaino-common" +version = "0.2.0" +dependencies = [ + "serde", + "thiserror 1.0.69", + "zcash_local_net", + "zebra-chain", + "zingo_common_components", +] + [[package]] name = "zaino-fetch" -version = "0.0.0" +version = "0.2.0" dependencies = [ - "base64 0.22.1", + "base64", "byteorder", + "derive_more", "hex", - "http 1.1.0", - "indexmap 2.6.0", + "http", + "indexmap 2.13.0", + "jsonrpsee-types", "prost", - "reqwest 0.12.9", + "reqwest", "serde", "serde_json", - "sha2 0.10.8", - "thiserror", + "sha2 0.10.9", + "thiserror 1.0.69", "tokio", "tonic", + "tracing", "url", + "zaino-common", "zaino-proto", - "zcash_protocol 0.2.0", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", - "zebra-rpc 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", + "zaino-testvectors", + "zebra-chain", + "zebra-rpc", ] [[package]] name = "zaino-proto" -version = "0.0.0" +version = "0.2.0" dependencies = [ "prost", + "prost-build", "tonic", "tonic-build", - "which", + "tonic-prost", + "tonic-prost-build", + "which 4.4.2", + "zebra-chain", + "zebra-state", ] [[package]] name = "zaino-serve" -version = "0.0.0" +version = "0.2.0" +dependencies = [ + "futures", + "jsonrpsee", + "serde", + "thiserror 1.0.69", + "tokio", + "tonic", + "tower 0.4.13", + "tracing", + "whoami", + "zaino-fetch", + "zaino-proto", + "zaino-state", + "zebra-chain", + "zebra-rpc", +] + +[[package]] +name = "zaino-state" +version = "0.2.0" dependencies = [ - "async-stream", - "crossbeam-channel", + "arc-swap", + "async-trait", + "bitflags 2.11.0", + "blake2", + "bs58", + "cargo-lock", + "chrono", + "core2 0.4.0", + "dashmap", + "derive_more", "futures", "hex", - "http 1.1.0", - "lazy-regex", + 
"incrementalmerkletree", + "indexmap 2.13.0", + "lmdb", + "lmdb-sys", + "nonempty", + "once_cell", + "primitive-types 0.13.1", + "proptest", "prost", - "thiserror", + "rand 0.8.5", + "reqwest", + "sapling-crypto", + "serde", + "serde_json", + "sha2 0.10.9", + "simple-mermaid", + "tempfile", + "thiserror 1.0.69", "tokio", "tokio-stream", "tonic", + "tower 0.4.13", + "tracing", + "tracing-subscriber", "whoami", + "zaino-common", "zaino-fetch", "zaino-proto", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", - "zebra-rpc 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", -] - -[[package]] -name = "zaino-state" -version = "0.0.0" -dependencies = [ - "thiserror", - "tokio", + "zcash_address", + "zcash_keys", + "zcash_primitives", + "zcash_protocol", + "zcash_transparent", + "zebra-chain", + "zebra-rpc", + "zebra-state", ] [[package]] name = "zaino-testutils" -version = "0.0.0" +version = "0.2.0" dependencies = [ - "ctrlc", - "http 1.1.0", + "http", + "lazy_static", + "once_cell", "portpicker", + "proptest", "tempfile", "tokio", "tonic", - "zaino-fetch", + "tracing", + "tracing-subscriber", + "zaino-common", + "zaino-proto", + "zaino-serve", + "zaino-state", + "zaino-testvectors", "zainod", - "zingolib 0.2.0 (git+https://github.com/zingolabs/zingolib.git?tag=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2)", + "zcash_client_backend", + "zcash_local_net", + "zcash_protocol", + "zebra-chain", + "zebra-state", + "zingo-netutils", + "zingo_common_components", + "zingo_test_vectors", + "zingolib", + "zingolib_testutils", + "zip32", +] + +[[package]] +name = "zaino-testvectors" +version = "0.2.0" +dependencies = [ + "lazy_static", ] [[package]] name = "zainod" -version = "0.0.0" +version = "0.2.0" dependencies = [ "clap", - "ctrlc", - "http 1.1.0", + "config", + "http", "serde", - "thiserror", + "tempfile", + "thiserror 
1.0.69", "tokio", - "toml", + "toml 0.5.11", + "tracing", + "tracing-subscriber", + "zaino-common", "zaino-fetch", "zaino-serve", + "zaino-state", + "zebra-chain", + "zebra-state", ] [[package]] name = "zcash_address" -version = "0.4.0" -source = "git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08#121371a089f076a5ee2737809c792d905f5a4b3a" -dependencies = [ - "bech32", - "bs58", - "f4jumble 0.1.0 (git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08)", - "zcash_encoding 0.2.1 (git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08)", - "zcash_protocol 0.2.0", -] - -[[package]] -name = "zcash_address" -version = "0.6.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ff95eac82f71286a79c750e674550d64fb2b7aadaef7b89286b2917f645457d" +checksum = "ee4491dddd232de02df42481757054dc19c8bc51cf709cfec58feebfef7c3c9a" dependencies = [ "bech32", "bs58", - "f4jumble 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_encoding 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_protocol 0.4.0", + "core2 0.3.3", + "f4jumble", + "zcash_encoding", + "zcash_protocol", ] [[package]] name = "zcash_client_backend" -version = "0.13.0" -source = "git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08#121371a089f076a5ee2737809c792d905f5a4b3a" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5493217c813ba1f7ef4e6b6bf846f4e4cd57b6a070d679c9f15d2477e12d1464" dependencies = [ - "base64 0.21.7", + "arti-client", + "base64", "bech32", "bip32", "bls12_381", @@ -5611,53 +8913,65 @@ dependencies = [ "byteorder", "crossbeam-channel", "document-features", + 
"dynosaur", + "fs-mistrust", + "futures-util", + "getset", "group", "hex", + "http-body-util", + "hyper", "hyper-util", - "incrementalmerkletree 0.6.0", + "incrementalmerkletree", "memuse", - "nom", "nonempty", - "orchard 0.9.0", + "orchard", + "pasta_curves", "percent-encoding", "prost", + "rand 0.8.5", "rand_core 0.6.4", "rayon", - "sapling-crypto 0.2.0", + "rust_decimal", + "sapling-crypto", + "secp256k1 0.29.1", "secrecy", + "serde", + "serde_json", "shardtree", "subtle", "time", + "time-core", + "tokio", + "tokio-rustls", "tonic", - "tonic-build", + "tonic-prost", + "tonic-prost-build", + "tor-rtcompat", + "tower 0.5.3", "tracing", - "which", - "zcash_address 0.4.0", - "zcash_encoding 0.2.1 (git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08)", + "trait-variant", + "webpki-roots 1.0.6", + "which 8.0.0", + "zcash_address", + "zcash_encoding", "zcash_keys", "zcash_note_encryption", - "zcash_primitives 0.16.0", - "zcash_protocol 0.2.0", + "zcash_primitives", + "zcash_protocol", + "zcash_script", + "zcash_transparent", "zip32", "zip321", ] [[package]] name = "zcash_encoding" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052d8230202f0a018cd9b5d1b56b94cd25e18eccc2d8665073bcea8261ab87fc" -dependencies = [ - "byteorder", - "nonempty", -] - -[[package]] -name = "zcash_encoding" -version = "0.2.1" -source = "git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08#121371a089f076a5ee2737809c792d905f5a4b3a" +checksum = "bca38087e6524e5f51a5b0fb3fc18f36d7b84bf67b2056f494ca0c281590953d" dependencies = [ - "byteorder", + "core2 0.3.3", "nonempty", ] @@ -5669,13 +8983,14 @@ checksum = "2fde17bf53792f9c756b313730da14880257d7661b5bfc69d0571c3a7c11a76d" dependencies = [ "blake2b_simd", "byteorder", - "primitive-types", + "primitive-types 0.12.2", ] [[package]] 
name = "zcash_keys" -version = "0.3.0" -source = "git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08#121371a089f076a5ee2737809c792d905f5a4b3a" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c115531caa1b7ca5ccd82dc26dbe3ba44b7542e928a3f77cd04abbe3cde4a4f2" dependencies = [ "bech32", "bip32", @@ -5683,57 +8998,53 @@ dependencies = [ "bls12_381", "bs58", "byteorder", + "core2 0.3.3", "document-features", "group", "memuse", "nonempty", - "orchard 0.9.0", + "orchard", "rand_core 0.6.4", - "sapling-crypto 0.2.0", + "sapling-crypto", "secrecy", "subtle", "tracing", - "zcash_address 0.4.0", - "zcash_encoding 0.2.1 (git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08)", - "zcash_primitives 0.16.0", - "zcash_protocol 0.2.0", + "zcash_address", + "zcash_encoding", + "zcash_protocol", + "zcash_transparent", "zip32", ] [[package]] name = "zcash_local_net" -version = "0.1.0" -source = "git+https://github.com/zingolabs/zcash-local-net.git?branch=dev#f0c8324cebccba816fa82e95cab788db3871c8d5" +version = "0.4.0" +source = "git+https://github.com/zingolabs/infrastructure.git?rev=69d7a4b72ebe871d2c6bc6f18d93d5b4c9dbec9f#69d7a4b72ebe871d2c6bc6f18d93d5b4c9dbec9f" dependencies = [ "getset", "hex", - "http 1.1.0", "json", "portpicker", "serde_json", "tempfile", - "thiserror", + "thiserror 1.0.69", "tokio", - "tokio-stream", - "tonic", "tracing", - "zcash_client_backend", - "zcash_primitives 0.16.0", - "zcash_protocol 0.2.0", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", - "zebra-node-services 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", - "zebra-rpc 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", - "zingo-netutils 0.1.0 
(git+https://github.com/Oscar-Pepper/zingolib.git?branch=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2_with_output_ordering)", - "zingolib 0.2.0 (git+https://github.com/Oscar-Pepper/zingolib.git?branch=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2_with_output_ordering)", + "zcash_protocol", + "zebra-chain", + "zebra-node-services", + "zebra-rpc", + "zingo_common_components", + "zingo_test_vectors", ] [[package]] name = "zcash_note_encryption" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b4580cd6cee12e44421dac43169be8d23791650816bdb34e6ddfa70ac89c1c5" +checksum = "77efec759c3798b6e4d829fcc762070d9b229b0f13338c40bf993b7b609c2272" dependencies = [ - "chacha20", + "chacha20 0.9.1", "chacha20poly1305", "cipher", "rand_core 0.6.4", @@ -5742,108 +9053,52 @@ dependencies = [ [[package]] name = "zcash_primitives" -version = "0.16.0" -source = "git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08#121371a089f076a5ee2737809c792d905f5a4b3a" -dependencies = [ - "aes", - "bip32", - "blake2b_simd", - "bs58", - "byteorder", - "document-features", - "equihash 0.2.0 (git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08)", - "ff", - "fpe", - "group", - "hex", - "incrementalmerkletree 0.6.0", - "jubjub", - "memuse", - "nonempty", - "orchard 0.9.0", - "rand 0.8.5", - "rand_core 0.6.4", - "redjubjub", - "ripemd", - "sapling-crypto 0.2.0", - "secp256k1", - "sha2 0.10.8", - "subtle", - "tracing", - "zcash_address 0.4.0", - "zcash_encoding 0.2.1 (git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08)", - "zcash_note_encryption", - "zcash_protocol 0.2.0", - "zcash_spec", - "zip32", -] - -[[package]] -name = "zcash_primitives" -version = "0.19.0" +version = 
"0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ab47d526d7fd6f88b3a2854ad81b54757a80c2aeadd1d8b06f690556af9743c" +checksum = "3fd9ff256fb298a7e94a73c1adad6c7e0b4b194b902e777ee9f5f2e12c4c4776" dependencies = [ - "aes", "bip32", "blake2b_simd", + "block-buffer 0.11.0-rc.3", "bs58", - "byteorder", + "core2 0.3.3", + "crypto-common 0.2.0-rc.1", "document-features", - "equihash 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "equihash", "ff", "fpe", + "getset", "group", "hex", - "incrementalmerkletree 0.7.0", + "incrementalmerkletree", "jubjub", "memuse", "nonempty", - "orchard 0.10.0", + "orchard", "rand 0.8.5", "rand_core 0.6.4", "redjubjub", - "ripemd", - "sapling-crypto 0.3.0", - "secp256k1", - "sha2 0.10.8", + "ripemd 0.1.3", + "sapling-crypto", + "secp256k1 0.29.1", + "sha2 0.10.9", "subtle", "tracing", - "zcash_address 0.6.0", - "zcash_encoding 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_address", + "zcash_encoding", "zcash_note_encryption", - "zcash_protocol 0.4.0", + "zcash_protocol", + "zcash_script", "zcash_spec", + "zcash_transparent", "zip32", ] [[package]] name = "zcash_proofs" -version = "0.16.0" -source = "git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08#121371a089f076a5ee2737809c792d905f5a4b3a" -dependencies = [ - "bellman", - "blake2b_simd", - "bls12_381", - "document-features", - "group", - "home", - "jubjub", - "known-folders", - "lazy_static", - "rand_core 0.6.4", - "redjubjub", - "sapling-crypto 0.2.0", - "tracing", - "xdg", - "zcash_primitives 0.16.0", -] - -[[package]] -name = "zcash_proofs" -version = "0.19.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daba607872e60d91a09248d8e1ea3d6801c819fb80d67016d9de02d81323c10d" +checksum = "43a2c13bb673d542608a0e6502ac5494136e7ce4ce97e92dd239489b2523eed9" dependencies = [ "bellman", 
"blake2b_simd", @@ -5854,287 +9109,204 @@ dependencies = [ "jubjub", "known-folders", "lazy_static", + "minreq", "rand_core 0.6.4", "redjubjub", - "sapling-crypto 0.3.0", + "sapling-crypto", "tracing", + "wagyu-zcash-parameters", "xdg", - "zcash_primitives 0.19.0", -] - -[[package]] -name = "zcash_protocol" -version = "0.2.0" -source = "git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08#121371a089f076a5ee2737809c792d905f5a4b3a" -dependencies = [ - "document-features", - "memuse", + "zcash_primitives", ] [[package]] name = "zcash_protocol" -version = "0.4.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bc22b9155b2c7eb20105cd06de170d188c1bc86489b92aa3fda7b8da8d96acf" +checksum = "18b1a337bbc9a7d55ae35d31189f03507dbc7934e9a4bee5c1d5c47464860e48" dependencies = [ + "core2 0.3.3", "document-features", + "hex", "memuse", ] [[package]] name = "zcash_script" -version = "0.2.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2122a042c77d529d3c60b899e74705eda39ae96a8a992460caeb06afa76990a2" +checksum = "c6ef9d04e0434a80b62ad06c5a610557be358ef60a98afa5dbc8ecaf19ad72e7" dependencies = [ - "bindgen 0.70.1", - "cc", + "bip32", + "bitflags 2.11.0", + "bounded-vec", + "hex", + "ripemd 0.1.3", + "secp256k1 0.29.1", + "sha1", + "sha2 0.10.9", + "thiserror 2.0.18", ] [[package]] name = "zcash_spec" -version = "0.1.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cede95491c2191d3e278cab76e097a44b17fde8d6ca0d4e3a22cf4807b2d857" +checksum = "ded3f58b93486aa79b85acba1001f5298f27a46489859934954d262533ee2915" dependencies = [ "blake2b_simd", ] [[package]] -name = "zebra-chain" -version = "1.0.0-beta.42" -source = "git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1#fef500a72840d4b7c89d68e14980eeda43869873" +name = "zcash_transparent" +version = "0.6.3" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a9b7b4bc11d8bb20833d1b8ab6807f4dca941b381f1129e5bbd72a84e391991" dependencies = [ - "bitflags 2.6.0", - "bitflags-serde-legacy", - "bitvec", + "bip32", "blake2b_simd", - "blake2s_simd", - "bridgetree", "bs58", - "byteorder", - "chrono", - "dirs", - "ed25519-zebra", - "equihash 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "futures", - "group", - "halo2_proofs", + "core2 0.3.3", + "document-features", + "getset", "hex", - "humantime", - "incrementalmerkletree 0.7.0", - "itertools 0.13.0", - "jubjub", - "lazy_static", - "num-integer", - "orchard 0.10.0", - "primitive-types", - "rand_core 0.6.4", - "rayon", - "reddsa", - "redjubjub", - "ripemd", - "sapling-crypto 0.3.0", - "secp256k1", - "serde", - "serde-big-array", - "serde_json", - "serde_with", - "sha2 0.10.8", - "static_assertions", - "tempfile", - "thiserror", - "tokio", - "tracing", - "uint 0.10.0", - "x25519-dalek", - "zcash_address 0.6.0", - "zcash_encoding 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_history", - "zcash_note_encryption", - "zcash_primitives 0.19.0", - "zcash_protocol 0.4.0", + "nonempty", + "ripemd 0.1.3", + "secp256k1 0.29.1", + "sha2 0.10.9", + "subtle", + "zcash_address", + "zcash_encoding", + "zcash_protocol", + "zcash_script", + "zcash_spec", + "zip32", ] [[package]] name = "zebra-chain" -version = "1.0.0-beta.42" -source = "git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371#4eb285de50848f1a4dcebd0fbe353e4f150fd371" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fcb0012bac23be72f1827ae14bccb2028bc7c9e2020e887a5f92232a42973d0" dependencies = [ - "bitflags 2.6.0", + "bech32", + "bitflags 2.11.0", "bitflags-serde-legacy", "bitvec", "blake2b_simd", "blake2s_simd", - "bridgetree", + "bounded-vec", "bs58", "byteorder", "chrono", + "derive-getters", "dirs", "ed25519-zebra", - 
"equihash 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "equihash", "futures", "group", "halo2_proofs", "hex", "humantime", - "incrementalmerkletree 0.7.0", - "itertools 0.13.0", + "incrementalmerkletree", + "itertools 0.14.0", "jubjub", "lazy_static", "num-integer", - "orchard 0.10.0", - "primitive-types", + "orchard", + "primitive-types 0.12.2", + "proptest", + "proptest-derive", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_core 0.6.4", "rayon", "reddsa", "redjubjub", - "ripemd", - "sapling-crypto 0.3.0", - "secp256k1", + "ripemd 0.1.3", + "sapling-crypto", + "schemars 1.2.1", + "secp256k1 0.29.1", "serde", "serde-big-array", "serde_json", "serde_with", - "sha2 0.10.8", + "sha2 0.10.9", + "sinsemilla", "static_assertions", + "strum", "tempfile", - "thiserror", - "tokio", - "tracing", - "uint 0.10.0", - "x25519-dalek", - "zcash_address 0.6.0", - "zcash_encoding 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_history", - "zcash_note_encryption", - "zcash_primitives 0.19.0", - "zcash_protocol 0.4.0", -] - -[[package]] -name = "zebra-consensus" -version = "1.0.0-beta.42" -source = "git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1#fef500a72840d4b7c89d68e14980eeda43869873" -dependencies = [ - "bellman", - "blake2b_simd", - "bls12_381", - "chrono", - "futures", - "futures-util", - "halo2_proofs", - "jubjub", - "lazy_static", - "metrics", - "once_cell", - "orchard 0.10.0", - "rand 0.8.5", - "rayon", - "sapling-crypto 0.3.0", - "serde", - "thiserror", - "tokio", - "tower 0.4.13", - "tower-batch-control 0.2.41-beta.18 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", - "tower-fallback 0.2.41-beta.18 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", - "tracing", - "tracing-futures", - "wagyu-zcash-parameters", - "zcash_proofs 0.19.0", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", - "zebra-node-services 1.0.0-beta.42 
(git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", - "zebra-script 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", - "zebra-state 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", -] - -[[package]] -name = "zebra-consensus" -version = "1.0.0-beta.42" -source = "git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371#4eb285de50848f1a4dcebd0fbe353e4f150fd371" -dependencies = [ - "bellman", - "blake2b_simd", - "bls12_381", - "chrono", - "futures", - "futures-util", - "halo2_proofs", - "jubjub", - "lazy_static", - "metrics", - "once_cell", - "orchard 0.10.0", - "rand 0.8.5", - "rayon", - "sapling-crypto 0.3.0", - "serde", - "thiserror", + "thiserror 2.0.18", "tokio", - "tower 0.4.13", - "tower-batch-control 0.2.41-beta.18 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", - "tower-fallback 0.2.41-beta.18 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", - "tracing", - "tracing-futures", - "wagyu-zcash-parameters", - "zcash_proofs 0.19.0", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", - "zebra-node-services 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", - "zebra-script 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", - "zebra-state 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", + "tracing", + "uint 0.10.0", + "x25519-dalek", + "zcash_address", + "zcash_encoding", + "zcash_history", + "zcash_note_encryption", + "zcash_primitives", + "zcash_protocol", + "zcash_script", + "zcash_transparent", + "zebra-test", ] [[package]] -name = "zebra-network" -version = "1.0.0-beta.42" -source = 
"git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1#fef500a72840d4b7c89d68e14980eeda43869873" +name = "zebra-consensus" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26a2cded2bfdd3019e7f20ab705061c7013128db3dab0956587471ed95a61c11" dependencies = [ - "bitflags 2.6.0", - "byteorder", - "bytes 1.8.0", + "bellman", + "blake2b_simd", + "bls12_381", "chrono", - "dirs", + "derive-getters", "futures", - "hex", - "humantime-serde", - "indexmap 2.6.0", - "itertools 0.13.0", + "futures-util", + "halo2_proofs", + "jubjub", "lazy_static", "metrics", - "num-integer", - "ordered-map", - "pin-project", + "mset", + "once_cell", + "orchard", "rand 0.8.5", "rayon", - "regex", + "sapling-crypto", "serde", - "tempfile", - "thiserror", + "thiserror 2.0.18", "tokio", - "tokio-stream", - "tokio-util 0.7.12", "tower 0.4.13", + "tower-batch-control", + "tower-fallback", "tracing", - "tracing-error", "tracing-futures", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", + "zcash_proofs", + "zcash_protocol", + "zebra-chain", + "zebra-node-services", + "zebra-script", + "zebra-state", ] [[package]] name = "zebra-network" -version = "1.0.0-beta.42" -source = "git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371#4eb285de50848f1a4dcebd0fbe353e4f150fd371" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "456c7afee9c651c8db90970db9ab86d42a313016457271fdf3340ae0b6bc4b2d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.11.0", "byteorder", - "bytes 1.8.0", + "bytes", "chrono", "dirs", "futures", "hex", "humantime-serde", - "indexmap 2.6.0", - "itertools 0.13.0", + "indexmap 2.13.0", + "itertools 0.14.0", "lazy_static", "metrics", "num-integer", @@ -6143,143 +9315,122 @@ dependencies = [ "rand 0.8.5", "rayon", "regex", + "schemars 1.2.1", "serde", "tempfile", - "thiserror", + "thiserror 2.0.18", "tokio", 
"tokio-stream", - "tokio-util 0.7.12", + "tokio-util", "tower 0.4.13", "tracing", "tracing-error", "tracing-futures", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", -] - -[[package]] -name = "zebra-node-services" -version = "1.0.0-beta.42" -source = "git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1#fef500a72840d4b7c89d68e14980eeda43869873" -dependencies = [ - "color-eyre", - "jsonrpc-core", - "reqwest 0.11.27", - "serde", - "serde_json", - "tokio", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", + "zebra-chain", ] [[package]] name = "zebra-node-services" -version = "1.0.0-beta.42" -source = "git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371#4eb285de50848f1a4dcebd0fbe353e4f150fd371" -dependencies = [ - "color-eyre", - "jsonrpc-core", - "reqwest 0.11.27", - "serde", - "serde_json", - "tokio", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", -] - -[[package]] -name = "zebra-rpc" -version = "1.0.0-beta.42" -source = "git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1#fef500a72840d4b7c89d68e14980eeda43869873" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "608b55b6cd9ddecc73a7a20d801847e0461ea5178e17cac5734b1b0e6d21aa5f" dependencies = [ - "base64 0.22.1", - "chrono", "color-eyre", - "futures", - "hex", - "indexmap 2.6.0", - "jsonrpc-core", - "jsonrpc-derive", - "jsonrpc-http-server", - "nix", - "rand 0.8.5", + "jsonrpsee-types", + "reqwest", "serde", "serde_json", "tokio", "tower 0.4.13", - "tracing", - "zcash_address 0.6.0", - "zcash_primitives 0.19.0", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", - "zebra-consensus 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", - "zebra-network 
1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", - "zebra-node-services 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", - "zebra-script 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", - "zebra-state 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", + "zebra-chain", ] [[package]] name = "zebra-rpc" -version = "1.0.0-beta.42" -source = "git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371#4eb285de50848f1a4dcebd0fbe353e4f150fd371" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba8b0d44ea2372962f61552dce40b8261ad6c48dfecc50bea572f930b28a3b28" dependencies = [ - "base64 0.22.1", + "base64", "chrono", "color-eyre", + "derive-getters", + "derive-new", "futures", "hex", - "indexmap 2.6.0", - "jsonrpc-core", - "jsonrpc-derive", - "jsonrpc-http-server", + "http-body-util", + "hyper", + "indexmap 2.13.0", + "jsonrpsee", + "jsonrpsee-proc-macros", + "jsonrpsee-types", + "metrics", "nix", + "openrpsee", + "phf 0.12.1", + "prost", "rand 0.8.5", + "sapling-crypto", + "schemars 1.2.1", + "semver", "serde", "serde_json", + "serde_with", "tokio", + "tokio-stream", + "tonic", + "tonic-prost", + "tonic-prost-build", + "tonic-reflection", "tower 0.4.13", "tracing", - "zcash_primitives 0.19.0", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", - "zebra-consensus 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", - "zebra-network 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", - "zebra-node-services 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", - "zebra-script 1.0.0-beta.42 
(git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", - "zebra-state 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", -] - -[[package]] -name = "zebra-script" -version = "1.0.0-beta.42" -source = "git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1#fef500a72840d4b7c89d68e14980eeda43869873" -dependencies = [ - "thiserror", + "which 8.0.0", + "zcash_address", + "zcash_keys", + "zcash_primitives", + "zcash_protocol", "zcash_script", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", + "zcash_transparent", + "zebra-chain", + "zebra-consensus", + "zebra-network", + "zebra-node-services", + "zebra-script", + "zebra-state", ] [[package]] name = "zebra-script" -version = "1.0.0-beta.42" -source = "git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371#4eb285de50848f1a4dcebd0fbe353e4f150fd371" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86811347886796806663c5e66cfef8085ba5bbc04bb9f94adf5fcb54e3ef76c6" dependencies = [ - "thiserror", + "libzcash_script", + "thiserror 2.0.18", + "zcash_primitives", "zcash_script", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", + "zebra-chain", ] [[package]] name = "zebra-state" -version = "1.0.0-beta.42" -source = "git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1#fef500a72840d4b7c89d68e14980eeda43869873" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd379520336d750a75d22e0aaaf9f48d16f03b31db86fe2629993ce219071fe0" dependencies = [ - "bincode", + "bincode 1.3.3", "chrono", + "crossbeam-channel", + "derive-getters", + "derive-new", "dirs", "futures", "hex", "hex-literal", "human_bytes", "humantime-serde", - "indexmap 2.6.0", - "itertools 0.13.0", + "indexmap 
2.13.0", + "itertools 0.14.0", "lazy_static", "metrics", "mset", @@ -6287,116 +9438,125 @@ dependencies = [ "regex", "rlimit", "rocksdb", + "sapling-crypto", "semver", "serde", "tempfile", - "thiserror", + "thiserror 2.0.18", "tokio", "tower 0.4.13", "tracing", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?tag=v2.0.1)", + "zebra-chain", + "zebra-node-services", ] [[package]] -name = "zebra-state" -version = "1.0.0-beta.42" -source = "git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371#4eb285de50848f1a4dcebd0fbe353e4f150fd371" +name = "zebra-test" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd2b1e773cbe0e9377e6c73f7a45be4e33db3bd92738545e877131a054c60aec" dependencies = [ - "bincode", - "chrono", - "dirs", + "color-eyre", "futures", "hex", - "hex-literal", - "human_bytes", - "humantime-serde", - "indexmap 2.6.0", - "itertools 0.13.0", + "humantime", + "indexmap 2.13.0", + "insta", + "itertools 0.14.0", "lazy_static", - "metrics", - "mset", - "rayon", + "once_cell", + "owo-colors", + "proptest", + "rand 0.8.5", "regex", - "rlimit", - "rocksdb", - "semver", - "serde", - "tempfile", - "thiserror", + "spandoc", + "thiserror 2.0.18", "tokio", "tower 0.4.13", "tracing", - "zebra-chain 1.0.0-beta.42 (git+https://github.com/ZcashFoundation/zebra.git?rev=4eb285de50848f1a4dcebd0fbe353e4f150fd371)", + "tracing-error", + "tracing-subscriber", ] [[package]] name = "zerocopy" -version = "0.7.35" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" dependencies = [ - "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.35" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", ] [[package]] name = "zerofrom" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", "synstructure", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.116", +] + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", ] [[package]] name = "zerovec" -version = "0.10.4" +version = "0.11.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" dependencies = [ + "serde", "yoke", "zerofrom", "zerovec-derive", @@ -6404,289 +9564,174 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.3" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", -] - -[[package]] -name = "zingo-memo" -version = "0.1.0" -source = "git+https://github.com/zingolabs/zingolib.git?tag=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2#95e5b0d8f9d5ee0485c6141533da2f727aeafae2" -dependencies = [ - "zcash_address 0.4.0", - "zcash_client_backend", - "zcash_encoding 0.2.1 (git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08)", - "zcash_keys", - "zcash_primitives 0.16.0", + "syn 2.0.116", ] [[package]] name = "zingo-memo" version = "0.1.0" -source = "git+https://github.com/Oscar-Pepper/zingolib.git?branch=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2_with_output_ordering#58bf3afa55e63285063148e35deb7423535e8fd4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4152c6c9ac701ef82b82deca2b5db7bbf70583c04031f97423bf9f850d74e4a" dependencies = [ - "zcash_address 0.4.0", + "zcash_address", "zcash_client_backend", - "zcash_encoding 0.2.1 (git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08)", + "zcash_encoding", "zcash_keys", - "zcash_primitives 0.16.0", + "zcash_primitives", ] [[package]] name = "zingo-netutils" -version = "0.1.0" -source = 
"git+https://github.com/zingolabs/zingolib.git?tag=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2#95e5b0d8f9d5ee0485c6141533da2f727aeafae2" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d2ca11c717feb94a0032da1e6cffb2928ce2c34aca3b430e85cac02fd0ab425" dependencies = [ - "http 1.1.0", - "http-body 1.0.1", - "http-body-util", - "hyper 1.5.0", - "hyper-rustls 0.27.3", + "http", + "hyper", + "hyper-rustls", "hyper-util", - "prost", - "rustls-pemfile 1.0.4", - "thiserror", - "tokio-rustls 0.26.0", + "thiserror 1.0.69", + "tokio-rustls", "tonic", - "tower 0.4.13", - "webpki-roots 0.21.1", "zcash_client_backend", ] [[package]] -name = "zingo-netutils" -version = "0.1.0" -source = "git+https://github.com/Oscar-Pepper/zingolib.git?branch=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2_with_output_ordering#58bf3afa55e63285063148e35deb7423535e8fd4" +name = "zingo-price" +version = "0.0.1" +source = "git+https://github.com/zingolabs/zingolib.git?rev=14a69853a8bd2e473dee8a433004c4c06aaf5308#14a69853a8bd2e473dee8a433004c4c06aaf5308" dependencies = [ - "http 1.1.0", - "http-body 1.0.1", - "http-body-util", - "hyper 1.5.0", - "hyper-rustls 0.27.3", - "hyper-util", - "prost", - "rustls-pemfile 1.0.4", - "thiserror", - "tokio-rustls 0.26.0", - "tonic", - "tower 0.4.13", - "webpki-roots 0.21.1", + "byteorder", + "reqwest", + "rust_decimal", + "serde", + "serde_json", + "thiserror 2.0.18", "zcash_client_backend", + "zcash_encoding", ] [[package]] name = "zingo-status" -version = "0.1.0" -source = "git+https://github.com/zingolabs/zingolib.git?tag=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2#95e5b0d8f9d5ee0485c6141533da2f727aeafae2" -dependencies = [ - "zcash_primitives 0.16.0", -] - -[[package]] -name = "zingo-status" -version = "0.1.0" -source = 
"git+https://github.com/Oscar-Pepper/zingolib.git?branch=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2_with_output_ordering#58bf3afa55e63285063148e35deb7423535e8fd4" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b345e3911479cf21dddf6027c8fb78010b787044d42461d6864dcc42620f159" dependencies = [ - "zcash_primitives 0.16.0", + "byteorder", + "zcash_primitives", + "zcash_protocol", ] [[package]] -name = "zingo-sync" -version = "0.1.0" -source = "git+https://github.com/zingolabs/zingolib.git?tag=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2#95e5b0d8f9d5ee0485c6141533da2f727aeafae2" -dependencies = [ - "crossbeam-channel", - "futures", - "getset", - "incrementalmerkletree 0.6.0", - "memuse", - "orchard 0.9.0", - "rayon", - "sapling-crypto 0.2.0", - "shardtree", - "tokio", - "tonic", - "tracing", - "zcash_client_backend", - "zcash_keys", - "zcash_note_encryption", - "zcash_primitives 0.16.0", - "zingo-netutils 0.1.0 (git+https://github.com/zingolabs/zingolib.git?tag=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2)", -] +name = "zingo_common_components" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7ed7ebc771980a59ec5f208d5dcf40c010dce2b5d493164c9d2f7baa73e9284" [[package]] -name = "zingo-sync" -version = "0.1.0" -source = "git+https://github.com/Oscar-Pepper/zingolib.git?branch=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2_with_output_ordering#58bf3afa55e63285063148e35deb7423535e8fd4" +name = "zingo_test_vectors" +version = "0.0.1" +source = "git+https://github.com/zingolabs/infrastructure.git?rev=69d7a4b72ebe871d2c6bc6f18d93d5b4c9dbec9f#69d7a4b72ebe871d2c6bc6f18d93d5b4c9dbec9f" dependencies = [ - "crossbeam-channel", - "futures", - "getset", - "incrementalmerkletree 0.6.0", - "memuse", - "orchard 0.9.0", - "rayon", - "sapling-crypto 0.2.0", - "shardtree", - "tokio", - "tonic", - "tracing", - 
"zcash_client_backend", - "zcash_keys", - "zcash_note_encryption", - "zcash_primitives 0.16.0", - "zingo-netutils 0.1.0 (git+https://github.com/Oscar-Pepper/zingolib.git?branch=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2_with_output_ordering)", + "bip0039 0.12.0", ] [[package]] name = "zingolib" -version = "0.2.0" -source = "git+https://github.com/zingolabs/zingolib.git?tag=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2#95e5b0d8f9d5ee0485c6141533da2f727aeafae2" +version = "3.0.0" +source = "git+https://github.com/zingolabs/zingolib.git?rev=14a69853a8bd2e473dee8a433004c4c06aaf5308#14a69853a8bd2e473dee8a433004c4c06aaf5308" dependencies = [ "append-only-vec", - "base58", - "base64 0.13.1", - "bip0039", + "bech32", + "bip0039 0.14.0", "bip32", - "bls12_381", "bs58", - "build_utils 0.1.0 (git+https://github.com/zingolabs/zingolib.git?tag=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2)", "byteorder", - "bytes 0.4.12", + "bytes", "chrono", "dirs", - "enum_dispatch", - "ff", "futures", - "getset", - "group", "hex", - "http 1.1.0", - "incrementalmerkletree 0.6.0", - "indoc", + "http", + "hyper-rustls", + "hyper-util", + "incrementalmerkletree", "json", "jubjub", - "lazy_static", "log", - "log4rs", "nonempty", - "orchard 0.9.0", + "orchard", + "pepper-sync", "portpicker", - "proptest", "prost", "rand 0.8.5", - "reqwest 0.12.9", - "ring 0.17.8", + "ring", + "rusqlite", "rust-embed", - "sapling-crypto 0.2.0", - "secp256k1", + "rustls 0.23.37", + "sapling-crypto", + "secp256k1 0.31.1", "secrecy", "serde", "serde_json", - "sha2 0.9.9", "shardtree", - "subtle", - "tempdir", "tempfile", - "test-case", - "thiserror", + "thiserror 2.0.18", "tokio", + "tokio-rustls", "tonic", + "tower 0.5.3", + "tracing", "tracing-subscriber", - "zcash_address 0.4.0", + "webpki-roots 1.0.6", + "zcash_address", "zcash_client_backend", - "zcash_encoding 0.2.1 
(git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08)", + "zcash_encoding", "zcash_keys", - "zcash_note_encryption", - "zcash_primitives 0.16.0", - "zcash_proofs 0.16.0", - "zingo-memo 0.1.0 (git+https://github.com/zingolabs/zingolib.git?tag=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2)", - "zingo-netutils 0.1.0 (git+https://github.com/zingolabs/zingolib.git?tag=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2)", - "zingo-status 0.1.0 (git+https://github.com/zingolabs/zingolib.git?tag=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2)", - "zingo-sync 0.1.0 (git+https://github.com/zingolabs/zingolib.git?tag=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2)", + "zcash_primitives", + "zcash_proofs", + "zcash_protocol", + "zcash_transparent", + "zingo-memo", + "zingo-netutils", + "zingo-price", + "zingo-status", + "zingo_common_components", + "zingo_test_vectors", "zip32", ] [[package]] -name = "zingolib" -version = "0.2.0" -source = "git+https://github.com/Oscar-Pepper/zingolib.git?branch=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2_with_output_ordering#58bf3afa55e63285063148e35deb7423535e8fd4" +name = "zingolib_testutils" +version = "0.1.0" +source = "git+https://github.com/zingolabs/zingolib.git?rev=14a69853a8bd2e473dee8a433004c4c06aaf5308#14a69853a8bd2e473dee8a433004c4c06aaf5308" dependencies = [ - "append-only-vec", - "base58", - "base64 0.13.1", - "bip0039", - "bip32", - "bls12_381", - "bs58", - "build_utils 0.1.0 (git+https://github.com/Oscar-Pepper/zingolib.git?branch=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2_with_output_ordering)", - "byteorder", - "bytes 0.4.12", - "chrono", - "dirs", - "enum_dispatch", - "ff", - "futures", - "getset", - "group", - "hex", - "http 1.1.0", - "incrementalmerkletree 0.6.0", - "indoc", - "json", - "jubjub", - "lazy_static", - "log", - "log4rs", - "nonempty", - 
"orchard 0.9.0", + "bip0039 0.14.0", + "http", + "pepper-sync", "portpicker", - "proptest", - "prost", - "rand 0.8.5", - "reqwest 0.12.9", - "ring 0.17.8", - "rust-embed", - "sapling-crypto 0.2.0", - "secp256k1", - "secrecy", - "serde", - "serde_json", - "sha2 0.9.9", - "shardtree", - "subtle", - "tempdir", "tempfile", - "test-case", - "thiserror", - "tokio", - "tonic", - "tracing-subscriber", - "zcash_address 0.4.0", - "zcash_client_backend", - "zcash_encoding 0.2.1 (git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08)", - "zcash_keys", - "zcash_note_encryption", - "zcash_primitives 0.16.0", - "zcash_proofs 0.16.0", - "zingo-memo 0.1.0 (git+https://github.com/Oscar-Pepper/zingolib.git?branch=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2_with_output_ordering)", - "zingo-netutils 0.1.0 (git+https://github.com/Oscar-Pepper/zingolib.git?branch=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2_with_output_ordering)", - "zingo-status 0.1.0 (git+https://github.com/Oscar-Pepper/zingolib.git?branch=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2_with_output_ordering)", - "zingo-sync 0.1.0 (git+https://github.com/Oscar-Pepper/zingolib.git?branch=zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2_with_output_ordering)", + "zcash_local_net", + "zcash_protocol", + "zingo_common_components", + "zingo_test_vectors", + "zingolib", "zip32", ] [[package]] name = "zip32" -version = "0.1.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92022ac1e47c7b78f9cee29efac8a1a546e189506f3bb5ad46d525be7c519bf6" +checksum = "b64bf5186a8916f7a48f2a98ef599bf9c099e2458b36b819e393db1c0e768c4b" dependencies = [ + "bech32", "blake2b_simd", "memuse", "subtle", @@ -6695,12 +9740,47 @@ dependencies = [ [[package]] name = "zip321" -version = "0.1.0" -source = 
"git+https://github.com/zingolabs/librustzcash.git?tag=zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08#121371a089f076a5ee2737809c792d905f5a4b3a" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3090953750ce1d56aa213710765eb14997868f463c45dae115cf1ebe09fe39eb" dependencies = [ - "base64 0.21.7", + "base64", "nom", "percent-encoding", - "zcash_address 0.4.0", - "zcash_protocol 0.2.0", + "zcash_address", + "zcash_protocol", +] + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" + +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.16+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" +dependencies = [ + "cc", + "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index c635a2286..b5fdb59bd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,7 @@ # Workspace Members are in dependency order, do not change this without prior consideration. 
[workspace] members = [ + "zaino-common", "integration-tests", "zaino-testutils", "zainod", @@ -8,6 +9,7 @@ members = [ "zaino-state", "zaino-fetch", "zaino-proto", + "zaino-testvectors", ] # Use the edition 2021 dependency resolver in the workspace, to match the crates @@ -19,51 +21,135 @@ repository = "https://github.com/zingolabs" homepage = "https://www.zingolabs.org/" edition = "2021" license = "Apache-2.0" -version = "0.1.1" +version = "0.2.0" [workspace.dependencies] -# Zingolabs -zingolib = { git = "https://github.com/zingolabs/zingolib.git", tag = "zaino_dep_002_091024_95e5b0d8f9d5ee0485c6141533da2f727aeafae2", features = ["zaino-test"] } + +# Zingo + +zingo_common_components = "0.3.0" +zingo-netutils = "2.0.2" +zingolib = { git = "https://github.com/zingolabs/zingolib.git", rev = "14a69853a8bd2e473dee8a433004c4c06aaf5308", features = [ + "testutils", +] } +zingolib_testutils = { git = "https://github.com/zingolabs/zingolib.git", rev = "14a69853a8bd2e473dee8a433004c4c06aaf5308" } +zingo_test_vectors = { git = "https://github.com/zingolabs/infrastructure.git", rev = "69d7a4b72ebe871d2c6bc6f18d93d5b4c9dbec9f" } +zcash_local_net = { git = "https://github.com/zingolabs/infrastructure.git", rev = "69d7a4b72ebe871d2c6bc6f18d93d5b4c9dbec9f" } + # Librustzcash -zcash_protocol = { git = "https://github.com/zingolabs/librustzcash.git", tag = "zcash_client_sqlite-0.11.2_plus_zingolabs_changes-1-g7ad60b5d5-2-g121371a08" } +zcash_address = "0.10" +zcash_keys = "0.12" +zcash_protocol = "0.7.2" +zcash_primitives = "0.26.0" +zcash_transparent = "0.6.3" +zcash_client_backend = "0.21.0" # Zebra -zebra-chain = { git = "https://github.com/ZcashFoundation/zebra.git", rev = "4eb285de50848f1a4dcebd0fbe353e4f150fd371" } -zebra-rpc = { git = "https://github.com/ZcashFoundation/zebra.git", rev = "4eb285de50848f1a4dcebd0fbe353e4f150fd371" } +zebra-chain = "6.0.0" +zebra-state = "5.0.0" +zebra-rpc = "6.0.0" -# Zcash-Local-Net -zcash_local_net = { git = 
"https://github.com/zingolabs/zcash-local-net.git", branch = "dev", features = [ "test_fixtures" ] } - -# Miscellaneous +# Runtime tokio = { version = "1.38", features = ["full"] } -tonic = "0.12" -http = "1.1" -thiserror = "1.0" -async-stream = "0.3" -base64 = "0.22" -byteorder = "1.5" +tokio-stream = "0.1" + +# CLI clap = "4.0" + +# Tracing +tracing = "0.1" +tracing-subscriber = { version = "0.3.20", features = [ + "fmt", + "env-filter", + "time", +] } +tracing-futures = "0.2" + +# Network / RPC +http = "1.1" +url = "2.5" +reqwest = { version = "0.12", default-features = false, features = [ + "cookies", + "rustls-tls", +] } +tower = { version = "0.4", features = ["buffer", "util"] } +tonic = { version = "0.14" } +tonic-build = "0.14" +prost = "0.14" +serde = "1.0" +serde_json = "1.0" +jsonrpsee-core = "0.24" +jsonrpsee-types = "0.24" +jsonrpsee = { version = "0.24", features = ["server", "macros"] } +hyper = "1.6" + +# Hashmaps, channels, DBs +indexmap = "2.2.6" crossbeam-channel = "0.5" -ctrlc = "3.4" +dashmap = "6.1" +lmdb = "0.8" +lmdb-sys = "0.8" + +# Async +async-stream = "0.3" +async-trait = "0.1" futures = "0.3.30" -hex = "0.4.3" -indexmap = "2.2.6" + +# Utility +thiserror = "1.0" lazy-regex = "3.3" -once_cell = "1.20.2" -portpicker = "0.1" -prost = "0.13" -reqwest = "0.12" -serde = "1.0" -serde_json = "1.0" -sha2 = "0.10" -tempfile = "3.2" -tokio-stream = "0.1" -toml = "0.5" -tonic-build = "0.12" -tracing-subscriber = "0.3.15" -url = "2.5" +once_cell = "1.20" +ctrlc = "3.4" +chrono = "0.4" which = "4" whoami = "1.5" +core2 = "0.4" + +# Formats +base64 = "0.22" +byteorder = "1.5" +sha2 = "0.10" +blake2 = "0.10" +hex = "0.4" +toml = "0.5" +primitive-types = "0.13" +bs58 = "0.5" +bitflags = "2.9" + +# Test +portpicker = "0.1" +tempfile = "3.2" +zaino-fetch = { path = "zaino-fetch" } +zaino-proto = { path = "zaino-proto" } +zaino-serve = { path = "zaino-serve" } +zaino-state = { path = "zaino-state" } +zaino-common.path = "zaino-common" +zaino-testutils = { 
path = "zaino-testutils" } +zaino-testvectors = { path = "zaino-testvectors" } +zainod = { path = "zainod" } +config = { version = "0.15", default-features = false, features = ["toml"] } +nonempty = "0.11.0" +proptest = "~1.6" +zip32 = "0.2.1" + +# Patch for vulnerable dependency +slab = "0.4.11" +anyhow = "1.0" +arc-swap = "1.7.1" +cargo-lock = "10.1.0" +derive_more = "2.0.1" +lazy_static = "1.5.0" + +[patch.crates-io] +#zcash_client_backend = { git = "https://github.com/zcash/librustzcash", tag = "zcash_client_backend-0.21.0"} +#zcash_address = { git = "https://github.com/zcash/librustzcash", tag = "zcash_client_backend-0.21.0"} +#zcash_keys = { git = "https://github.com/zcash/librustzcash", tag = "zcash_client_backend-0.21.0"} +#zcash_primitives = { git = "https://github.com/zcash/librustzcash", tag = "zcash_client_backend-0.21.0"} +#zcash_protocol = { git = "https://github.com/zcash/librustzcash", tag = "zcash_client_backend-0.21.0"} +#zcash_transparent = { git = "https://github.com/zcash/librustzcash", tag = "zcash_client_backend-0.21.0"} +[profile.test] +opt-level = 3 +debug = true diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..3b2ccafde --- /dev/null +++ b/Dockerfile @@ -0,0 +1,92 @@ +# syntax=docker/dockerfile:1 + +############################ +# Global build args +############################ +ARG RUST_VERSION=1.86.0 +ARG UID=1000 +ARG GID=1000 +ARG USER=container_user +ARG HOME=/home/container_user + +############################ +# Builder +############################ +FROM rust:${RUST_VERSION}-bookworm AS builder +SHELL ["/bin/bash", "-euo", "pipefail", "-c"] +WORKDIR /app + +# Toggle to build without TLS feature if needed +ARG NO_TLS=false + +# Build deps incl. protoc for prost-build +RUN apt-get update && apt-get install -y --no-install-recommends \ + pkg-config clang cmake make libssl-dev ca-certificates \ + protobuf-compiler \ + && rm -rf /var/lib/apt/lists/* + +# Copy entire workspace (prevents missing members) +COPY . 
. + +# Efficient caches + install to a known prefix (/out) +# This avoids relying on target/release/ paths. +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/usr/local/cargo/git \ + --mount=type=cache,target=/app/target \ + if [ "${NO_TLS}" = "true" ]; then \ + cargo install --locked --path zainod --bin zainod --root /out --features no_tls_use_unencrypted_traffic; \ + else \ + cargo install --locked --path zainod --bin zainod --root /out; \ + fi + +############################ +# Runtime +############################ +FROM debian:bookworm-slim AS runtime +SHELL ["/bin/bash", "-euo", "pipefail", "-c"] + +ARG UID +ARG GID +ARG USER +ARG HOME + +# Runtime deps + setpriv for privilege dropping +RUN apt-get -qq update && \ + apt-get -qq install -y --no-install-recommends \ + ca-certificates libssl3 libgcc-s1 util-linux \ + && rm -rf /var/lib/apt/lists/* + +# Create non-root user (entrypoint will drop privileges to this user) +RUN addgroup --gid "${GID}" "${USER}" && \ + adduser --uid "${UID}" --gid "${GID}" --home "${HOME}" \ + --disabled-password --gecos "" "${USER}" + +# Make UID/GID available to entrypoint +ENV UID=${UID} GID=${GID} HOME=${HOME} + +WORKDIR ${HOME} + +# Create ergonomic mount points with symlinks to XDG defaults +# Users mount to /app/config and /app/data, zaino uses ~/.config/zaino and ~/.cache/zaino +RUN mkdir -p /app/config /app/data && \ + mkdir -p ${HOME}/.config ${HOME}/.cache && \ + ln -s /app/config ${HOME}/.config/zaino && \ + ln -s /app/data ${HOME}/.cache/zaino && \ + chown -R ${UID}:${GID} /app ${HOME}/.config ${HOME}/.cache + +# Copy binary and entrypoint +COPY --from=builder /out/bin/zainod /usr/local/bin/zainod +COPY entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh + +# Default ports +ARG ZAINO_GRPC_PORT=8137 +ARG ZAINO_JSON_RPC_PORT=8237 +EXPOSE ${ZAINO_GRPC_PORT} ${ZAINO_JSON_RPC_PORT} + +HEALTHCHECK --interval=30s --timeout=5s --start-period=15s --retries=3 \ + CMD /usr/local/bin/zainod 
--version >/dev/null 2>&1 || exit 1 + +# Start as root; entrypoint drops privileges after setting up directories +ENTRYPOINT ["/entrypoint.sh"] +CMD ["start"] diff --git a/Makefile.toml b/Makefile.toml new file mode 100644 index 000000000..e76055a94 --- /dev/null +++ b/Makefile.toml @@ -0,0 +1,605 @@ +extend = [ + { path = "makefiles/lints.toml" }, + { path = "makefiles/notify.toml" }, +] +env_files = [".env.testing-artifacts"] + +[config] +default_to_workspace = false + +[env] +IMAGE_NAME = "zingodevops/zaino-ci" +TEST_BINARIES_DIR = "/home/container_user/artifacts" + +[tasks.help] +description = "List available commands and usage notes" +script_runner = "bash" +extend = "base-script" +script.main = ''' +set -euo pipefail + +echo "" +echo "Zaino CI Image Tasks" +echo "---------------------" +echo "" +echo "Common usage:" +echo " makers container-test" +echo "" +echo "If you modify '.env.testing-artifacts', the test command will automatically:" +echo " - Recompute the image tag" +echo " - Build a new local Docker image if needed" +echo "" +echo "Available commands:" +echo "" +echo " container-test Run integration tests using the local image" +echo " container-test-save-failures Run tests, save failures to .failed-tests" +echo " container-test-retry-failures Rerun only the previously failed tests" +echo " build-image Build the Docker image with current artifact versions" +echo " push-image Push the image (used in CI, can be used manually)" +echo " compute-image-tag Compute the tag for the Docker image based on versions" +echo " get-docker-hash Get DOCKER_DIR_HASH value (hash for the image defining files)" +echo " ensure-image-exists Check if the required image exists locally, build if not" +echo " check-matching-zebras Verify Zebra versions match between Cargo.toml and .env" +echo " validate-test-targets Check if nextest targets match CI workflow matrix" +echo " update-test-targets Update CI workflow matrix to match nextest targets" +echo " validate-makefile-tasks 
Run minimal validation of all maker tasks" +echo " hello-rust Test rust-script functionality" +echo "" +echo "Lint commands:" +echo " lint Run all lints (fmt, clippy, doc). Use as a pre-commit hook." +echo " fmt Check formatting (cargo fmt --all -- --check)" +echo " clippy Run Clippy with -D warnings (--all-targets --all-features)" +echo " doc Build docs (no deps, all features, document private items) with RUSTDOCFLAGS='-D warnings'" +echo " toggle-hooks Toggle the git config for core.hooksPath to use .githooks/" +echo "" +echo "Environment:" +echo " Defined by: .env.testing-artifacts" +echo " Affects: RUST_VERSION, ZCASH_VERSION, ZEBRA_VERSION" +echo "" +echo "Build Context:" +echo " test_environment/ Directory containing the Docker build environment" +echo " ├── Dockerfile Dockerfile for CI/test container" +echo " └── entrypoint.sh Entrypoint script that sets up test binaries" +echo "" +echo "Helpers:" +echo " - utils/get-ci-image-tag.sh: computes the version-based image tag" +echo " - utils/helpers.sh: logging and helper functions" +echo "" +''' + +[tasks.base-script] +script.pre = ''' +source "./utils/helpers.sh" +TAG=$(./utils/get-ci-image-tag.sh) + +# Generic cleanup function for docker containers +docker_cleanup() { + # Prevent running cleanup twice + if [ "${CLEANUP_RAN:-0}" -eq 1 ]; then + return + fi + CLEANUP_RAN=1 + + # Check if we're cleaning up due to interruption + if [ "$?" -eq 130 ] || [ "$?" -eq 143 ]; then + echo "" + warn "Task '${CARGO_MAKE_CURRENT_TASK_NAME}' interrupted! Cleaning up..." + fi + + # Kill all child processes + local pids=$(jobs -pr) + if [ -n "$pids" ]; then + kill $pids 2>/dev/null || true + fi + + # Stop any docker containers started by this script + if [ -n "${CONTAINER_ID:-}" ]; then + info "Stopping Docker container..." 
+ docker stop "$CONTAINER_ID" >/dev/null 2>&1 || true + fi + + # Also stop by name if CONTAINER_NAME is set + if [ -n "${CONTAINER_NAME:-}" ] && [ -z "${CONTAINER_ID:-}" ]; then + info "Stopping Docker container ${CONTAINER_NAME}..." + docker stop "$CONTAINER_NAME" >/dev/null 2>&1 || true + fi +} + +# Set up cleanup trap +trap docker_cleanup EXIT INT TERM +''' +script.main = "err 'default main script. define a proper script to skip this one'" +script.post = "" + +# ------------------------------------------------------------------- + +[tasks.init-docker-volumes] +description = "Initialize Docker volume directories with proper permissions" +script = ''' +# Check if directories exist with wrong permissions +if [ -d "container-target" ] && [ ! -w "container-target" ]; then + echo "WARNING: container-target exists but is not writable by current user." + echo "You may need to run: sudo chown -R $(id -u):$(id -g) container-target" + echo "Or remove it with: sudo rm -rf container-target" +fi + +# Create directories if they don't exist +# Docker will respect the ownership if directories already exist +for dir in container-target docker_cargo/git docker_cargo/registry; do + if [ ! 
-d "$dir" ]; then + mkdir -p "$dir" + echo "Created directory: $dir" + fi +done + +# Set permissions to ensure they're accessible +chmod -R 755 container-target docker_cargo 2>/dev/null || true +''' + +# ------------------------------------------------------------------- + +[tasks.compute-image-tag] +description = "Compute image tag from version vars" +script = ''' +TAG=$(./utils/get-ci-image-tag.sh) +echo "CARGO_MAKE_IMAGE_TAG=$TAG" +export CARGO_MAKE_IMAGE_TAG=$TAG +''' +# ------------------------------------------------------------------- + +[tasks.get-docker-hash] +description = "Get the current DOCKER_DIR_HASH" +script_runner = "bash" +script = ''' +HASH=$(./utils/get-docker-hash.sh) +echo "DOCKER_DIR_HASH=$HASH" +''' + +[tasks.ensure-image-exists] +description = "Ensure the image exists locally or on Docker Hub before building" +# This task checks if the required Docker image exists locally or on Docker Hub. +# If not found, it triggers the build-image task to build from test_environment. +dependencies = ["init-docker-volumes"] +extend = "base-script" +script.main = ''' +if ! docker image inspect ${IMAGE_NAME}:${TAG} > /dev/null 2>&1; then + warn "Image not found locally. Attempting to pull from Docker Hub..." + if docker pull ${IMAGE_NAME}:${TAG}; then + info "Image ${IMAGE_NAME}:${TAG} pulled successfully." + else + warn "Image not found on Docker Hub. Building image..." + makers build-image + fi +else + info "Image ${IMAGE_NAME}:${TAG} already exists locally." +fi +''' + +# ------------------------------------------------------------------- + +[tasks.build-image] +description = "Build the Docker image for testing artifacts" +# Note: This task builds the Docker image from the test_environment directory, +# which contains the Dockerfile and entrypoint.sh for the CI/test environment. +# The build context is set to test_environment to keep paths simple. 
+script_runner = "bash" +extend = "base-script" +script.main = ''' +set -euo pipefail + +# Create target directory with correct ownership before Docker creates it as root +mkdir -p target + +TARGET=$(resolve_build_target "$ZCASH_VERSION" "$ZEBRA_VERSION") + +# For local builds, use the current user's UID/GID to avoid permission issues +# CI builds will use the default UID=1001 from the Dockerfile + +info "Building image" +info "Tag: ${TAG}" +info "Target: $TARGET" +info "Current directory: $(pwd)" +info "Files in utils/: $(ls -la utils/ | head -5)" + +cd test_environment && \ +docker build -f Dockerfile \ + --target "$TARGET" \ + --build-arg ZCASH_VERSION=$ZCASH_VERSION \ + --build-arg ZEBRA_VERSION=$ZEBRA_VERSION \ + --build-arg RUST_VERSION=$RUST_VERSION \ + --build-arg UID=$(id -u) \ + --build-arg GID=$(id -g) \ + -t ${IMAGE_NAME}:$TAG \ + ${@} \ + . +''' + +# ------------------------------------------------------------------- + +[tasks.push-image] +description = "Push image if running in CI" +# condition = { env_set = ["CI"] } +dependencies = ["ensure-image-exists"] +script_runner = "bash" +extend = "base-script" +script.main = ''' +set -euo pipefail + +info "Pushing image: ${IMAGE_NAME}:$TAG" + +docker push ${IMAGE_NAME}:$TAG +''' + +# ------------------------------------------------------------------- + +[tasks.container-test] +clear = true +description = "Run integration tests using the local image" +# This task runs tests inside the Docker container built from test_environment. +# The entrypoint.sh script in the container sets up test binaries (zcashd, zebrad, zcash-cli) +# by creating symlinks from /home/container_user/artifacts to the expected test_binaries/bins location. 
+dependencies = ["init-docker-volumes", "ensure-image-exists"] +script_runner = "bash" +extend = "base-script" +script.main = ''' +set -euo pipefail + +info "Running tests using:" +info "-- IMAGE = ${IMAGE_NAME}" +info "-- TAG = $TAG" +# info "-- TEST_BINARIES_DIR = ${TEST_BINARIES_DIR}" + +# Create directories with correct ownership before Docker creates them as root +mkdir -p container-target docker_cargo/git docker_cargo/registry + +# Set container name for cleanup +CONTAINER_NAME="zaino-testing" + +# Run docker in foreground with proper signal handling +docker run --rm \ + --init \ + --name "$CONTAINER_NAME" \ + -v "$PWD":/home/container_user/zaino \ + -v "$PWD/container-target":/home/container_user/zaino/target \ + -v "$PWD/docker_cargo/git":/home/container_user/.cargo/git \ + -v "$PWD/docker_cargo/registry":/home/container_user/.cargo/registry \ + -e "TEST_BINARIES_DIR=${TEST_BINARIES_DIR}" \ + -e "NEXTEST_EXPERIMENTAL_LIBTEST_JSON=1" \ + -e "CARGO_TARGET_DIR=/home/container_user/zaino/target" \ + -w /home/container_user/zaino \ + -u container_user \ + "${IMAGE_NAME}:$TAG" \ + cargo nextest run "${@}" & + +# Capture the background job PID +DOCKER_PID=$! 
+
+# Wait for the docker process
+wait $DOCKER_PID
+'''
+script.post = "makers notify"
+
+# -------------------------------------------------------------------
+
+[tasks.check-matching-zebras]
+description = "Check that zebra versions in .env.testing-artifacts match what's in Cargo.toml"
+extend = "base-script"
+script_runner = "bash"
+script.main = '''
+set -euo pipefail
+
+# source .env.testing-artifacts
+
+# Normalize Cargo.toml (stripping comments, whitespace)
+cargo_toml=$(sed 's/#.*//' Cargo.toml | tr -d '[:space:]')
+
+# Check Zebra rev
+zebra_revs=$(echo "$cargo_toml" | grep -o 'zebra-[a-z]*={[^}]*rev="[^"]*"' | grep -o 'rev="[^"]*"' | cut -d'"' -f2 | sort -u)
+
+if [[ $(echo "$zebra_revs" | wc -l) -ne 1 ]]; then
+  err "❌ Multiple Zebra revs detected in Cargo.toml:"
+  echo "$zebra_revs"
+  exit 1
+fi
+
+actual_rev="$zebra_revs"
+
+# Accept short SHA from env if it matches prefix of actual
+if [[ "$actual_rev" != "$ZEBRA_VERSION" && "${actual_rev:0:${#ZEBRA_VERSION}}" != "$ZEBRA_VERSION" ]]; then
+  err "❌ Mismatch for Zebra git rev: Cargo.toml has $actual_rev, but .env.testing-artifacts has $ZEBRA_VERSION"
+  exit 1
+fi
+
+info "✅ All versions match between Cargo.toml and .env.testing-artifacts"
+'''
+
+# -------------------------------------------------------------------
+
+[tasks.validate-makefile-tasks]
+description = "Validate all tasks work correctly with minimal execution"
+dependencies = ["init-docker-volumes"]
+script_runner = "@rust"
+script = '''
+use std::process::{Command, Stdio};
+use std::env;
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    println!("🔍 Starting validation of all Makefile tasks...");
+
+    // 1. Check version matching
+    println!("\nStep 1: Checking version consistency...");
+    run_makers_task("check-matching-zebras")?;
+
+    // 2. Compute the image tag
+    println!("\nStep 2: Computing image tag...");
+    run_makers_task("compute-image-tag")?;
+
+    // 3. Ensure image exists (will build if necessary)
+    println!("\nStep 3: Ensuring Docker image exists...");
+    run_makers_task("ensure-image-exists")?;
+
+    // 4. Get the computed tag
+    let tag = get_image_tag()?;
+    let image_name = env::var("IMAGE_NAME").unwrap_or_else(|_| "zingodevops/zaino-ci".to_string());
+    let working_dir = env::current_dir()?.to_string_lossy().to_string();
+
+    // 5. Run a single fast test to validate the full pipeline
+    println!("\nStep 4: Running minimal test to validate setup...");
+    println!("Using image: {}:{}", image_name, tag);
+
+    let status = Command::new("docker")
+        .args(&[
+            "run", "--rm",
+            "--init",
+            "--name", "zaino-validation-test",
+            "-v", &format!("{}:/home/container_user/zaino", working_dir),
+            "-v", &format!("{}/container-target:/home/container_user/zaino/target", working_dir),
+            "-v", &format!("{}/docker_cargo/git:/home/container_user/.cargo/git", working_dir),
+            "-v", &format!("{}/docker_cargo/registry:/home/container_user/.cargo/registry", working_dir),
+            "-e", "TEST_BINARIES_DIR=/home/container_user/zaino/test_binaries/bins",
+            "-e", "CARGO_TARGET_DIR=/home/container_user/zaino/target",
+            "-w", "/home/container_user/zaino",
+            "-u", "container_user",
+            &format!("{}:{}", image_name, tag),
+            "cargo", "test",
+            "--package", "zaino-testutils",
+            "--lib", "launch_testmanager::zcashd::basic",
+            "--", "--nocapture"
+        ])
+        .stdout(Stdio::inherit())
+        .stderr(Stdio::inherit())
+        .status()?;
+
+    if !status.success() {
+        eprintln!("❌ Validation failed!");
+        std::process::exit(1);
+    }
+
+    println!("\n✅ All tasks validated successfully!");
+    Ok(())
+}
+
+fn run_makers_task(task: &str) -> Result<(), Box<dyn std::error::Error>> {
+    println!("DEBUG: About to run makers {}", task);
+    let status = Command::new("makers")
+        .arg(task)
+        .stdout(Stdio::inherit())
+        .stderr(Stdio::inherit())
+        .status()?;
+
+    println!("DEBUG: makers {} completed with status: {:?}", task, status);
+    if !status.success() {
+        return Err(format!("Task '{}' failed", task).into());
+    }
+
+    Ok(())
+}
+
+fn get_image_tag() -> Result<String, Box<dyn std::error::Error>> {
+    println!("DEBUG: Getting image tag...");
+    // First try to get from environment
+    if let Ok(tag) = env::var("CARGO_MAKE_IMAGE_TAG") {
+        if !tag.is_empty() {
+            println!("DEBUG: Found tag in env: {}", tag);
+            return Ok(tag);
+        }
+    }
+
+    println!("DEBUG: Computing tag with script...");
+    // Otherwise compute it
+    let output = Command::new("./utils/get-ci-image-tag.sh")
+        .output()?;
+
+    if !output.status.success() {
+        return Err("Failed to compute image tag".into());
+    }
+
+    let tag = String::from_utf8(output.stdout)?.trim().to_string();
+    println!("DEBUG: Computed tag: {}", tag);
+    Ok(tag)
+}
+'''
+
+# -------------------------------------------------------------------
+
+[tasks.validate-test-targets]
+description = "Validate that nextest targets match CI workflow matrix"
+script_runner = "bash"
+extend = "base-script"
+script.main = '''
+set -euo pipefail
+
+info "🔍 Validating test targets between nextest and CI workflow..."
+
+# Extract nextest targets with non-empty testcases
+info "Extracting targets from nextest..."
+NEXTEST_TARGETS=$(mktemp)
+cargo nextest list --profile ci -T json-pretty | jq -r '.["rust-suites"] | to_entries[] | select(.value.testcases | length > 0) | .key' | sort > "$NEXTEST_TARGETS"
+
+# Extract CI matrix partition values
+info "Extracting targets from CI workflow..."
+CI_TARGETS=$(mktemp)
+yq '.jobs.test.strategy.matrix.partition[]' .github/workflows/ci.yml | sed 's/"//g' | sort > "$CI_TARGETS"
+
+# Compare the lists
+info "Comparing target lists..."
+
+MISSING_IN_CI=$(mktemp)
+EXTRA_IN_CI=$(mktemp)
+
+# Find targets in nextest but not in CI
+comm -23 "$NEXTEST_TARGETS" "$CI_TARGETS" > "$MISSING_IN_CI"
+
+# Find targets in CI but not in nextest (or with empty testcases)
+comm -13 "$NEXTEST_TARGETS" "$CI_TARGETS" > "$EXTRA_IN_CI"
+
+# Display results
+if [[ ! -s "$MISSING_IN_CI" && ! -s "$EXTRA_IN_CI" ]]; then
+  info "✅ All test targets are synchronized!"
+ echo "Nextest targets ($(wc -l < "$NEXTEST_TARGETS")):" + cat "$NEXTEST_TARGETS" | sed 's/^/ - /' +else + warn "❌ Test target synchronization issues found:" + + if [[ -s "$MISSING_IN_CI" ]]; then + echo "" + warn "📋 Targets with tests missing from CI matrix ($(wc -l < "$MISSING_IN_CI")):" + cat "$MISSING_IN_CI" | sed 's/^/ - /' + fi + + if [[ -s "$EXTRA_IN_CI" ]]; then + echo "" + warn "🗑️ Targets in CI matrix with no tests ($(wc -l < "$EXTRA_IN_CI")):" + cat "$EXTRA_IN_CI" | sed 's/^/ - /' + fi + + echo "" + info "💡 To automatically update the CI workflow, run:" + info " makers update-test-targets" +fi + +# Cleanup temp files +rm "$NEXTEST_TARGETS" "$CI_TARGETS" "$MISSING_IN_CI" "$EXTRA_IN_CI" +''' + +# ------------------------------------------------------------------- + +[tasks.update-test-targets] +description = "Update CI workflow matrix to match nextest targets" +script_runner = "bash" +extend = "base-script" +script.main = ''' +set -euo pipefail + +info "🔧 Updating CI workflow matrix to match nextest targets..." + +# Extract nextest targets with non-empty testcases +info "Extracting current nextest targets..." 
+NEXTEST_TARGETS=$(mktemp) +cargo nextest list --profile ci -T json-pretty | jq -r '.["rust-suites"] | to_entries[] | select(.value.testcases | length > 0) | .key' | sort > "$NEXTEST_TARGETS" + +echo "Found $(wc -l < "$NEXTEST_TARGETS") targets with tests:" +cat "$NEXTEST_TARGETS" | sed 's/^/ - /' + +# Update only the partition array using sed to preserve formatting +# First, create the new partition list in the exact format we need +NEW_PARTITION_LINES=$(mktemp) +while IFS= read -r target; do + echo " - \"${target}\"" +done < "$NEXTEST_TARGETS" > "$NEW_PARTITION_LINES" + +# Use sed to replace just the partition array section +# Find the partition: line and replace everything until the next non-indented item +sed -i '/^[[:space:]]*partition:/,/^[[:space:]]*[^[:space:]-]/{ + /^[[:space:]]*partition:/!{ + /^[[:space:]]*[^[:space:]-]/!d + } +}' .github/workflows/ci.yml + +# Now insert the new partition lines after the "partition:" line +sed -i "/^[[:space:]]*partition:$/r $NEW_PARTITION_LINES" .github/workflows/ci.yml + +rm "$NEW_PARTITION_LINES" + +info "✅ CI workflow updated successfully!" + +# Show what was changed using git diff +echo "" +info "Changes made:" +git diff --no-index /dev/null .github/workflows/ci.yml 2>/dev/null | grep "^[+-].*partition\|^[+-].*integration-tests\|^[+-].*zaino\|^[+-].*zainod" || git diff .github/workflows/ci.yml || echo "No changes detected" + +# Cleanup temp files +rm "$NEXTEST_TARGETS" +''' + +# ------------------------------------------------------------------- + +[tasks.container-test-save-failures] +description = "Run container-test and save failed test names for later retry" +script_runner = "bash" +extend = "base-script" +script.main = ''' +set -uo pipefail + +FAILURES_FILE=".failed-tests" + +info "Running container-test with failure tracking..." 
+ +# Run container-test with libtest-json, tee output for parsing +makers container-test --no-fail-fast --message-format libtest-json "${@}" 2>&1 | tee /tmp/nextest-output.json || true + +# Extract failed test names +grep '"event":"failed"' /tmp/nextest-output.json 2>/dev/null | \ + jq -r '.name // empty' | sort -u > "$FAILURES_FILE" + +FAIL_COUNT=$(wc -l < "$FAILURES_FILE" | tr -d ' ') + +if [[ "$FAIL_COUNT" -gt 0 ]]; then + warn "💾 Saved $FAIL_COUNT failed test(s) to $FAILURES_FILE" + cat "$FAILURES_FILE" + echo "" + info "Run 'makers container-test-retry-failures' to rerun them" +else + info "✅ All tests passed!" + rm -f "$FAILURES_FILE" +fi +''' +script.post = "makers notify" + +# ------------------------------------------------------------------- + +[tasks.container-test-retry-failures] +description = "Rerun only failed tests from previous container-test-save-failures" +script_runner = "bash" +extend = "base-script" +script.main = ''' +set -euo pipefail + +FAILURES_FILE=".failed-tests" + +if [[ ! -f "$FAILURES_FILE" ]]; then + err "No $FAILURES_FILE found. Run 'makers container-test-save-failures' first." + exit 1 +fi + +FAIL_COUNT=$(wc -l < "$FAILURES_FILE" | tr -d ' ') +if [[ "$FAIL_COUNT" -eq 0 ]]; then + info "No failed tests to retry!" + exit 0 +fi + +info "Retrying $FAIL_COUNT failed test(s)..." + +# Build filter from libtest-json format: "package::binary$testname" +# Output format: (package(P) & binary(B) & test(=T)) | ... +FILTER=$(while IFS= read -r line; do + pkg="${line%%::*}" + rest="${line#*::}" + bin="${rest%%\$*}" + test="${rest#*\$}" + echo "(package($pkg) & binary($bin) & test(=$test))" +done < "$FAILURES_FILE" | tr '\n' '|' | sed 's/|$//; s/|/ | /g') + +info "Filter: $FILTER" +makers container-test -E "$FILTER" "${@}" +''' +script.post = "makers notify" diff --git a/README.md b/README.md index e0fd97587..b43daad19 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,17 @@ # Zaino Zaino is an indexer for the Zcash blockchain implemented in Rust. 
-Zaino provides all necessary functionality for "light" clients (wallets and other applications that don't rely on the complete history of blockchain) and "full" clients / wallets and block explorers providing access both the finalized chain and the non-finalized best chain and mempool held by either a Zebra or Zcashd full validator. +Zaino provides all necessary functionality for "light" clients (wallets and other applications that don't rely on the complete history of blockchain) and "full" clients / wallets and block explorers providing access to both the finalized chain and the non-finalized best chain and mempool held by either a Zebra or Zcashd full validator. ### Motivations With the ongoing Zcashd deprecation project, there is a push to transition to a modern, Rust-based software stack for the Zcash ecosystem. By implementing Zaino in Rust, we aim to modernize the codebase, enhance performance and improve overall security. This work will build on the foundations laid down by [Librustzcash](https://github.com/zcash/librustzcash) and [Zebra](https://github.com/ZcashFoundation/zebra), helping to ensure that the Zcash infrastructure remains robust and maintainable for the future. -Due to current potential data leaks / security weaknesses highlighted in [revised-nym-for-zcash-network-level-privacy](https://forum.zcashcommunity.com/t/revised-nym-for-zcash-network-level-privacy/46688) and [wallet-threat-model](https://zcash.readthedocs.io/en/master/rtd_pages/wallet_threat_model.html), there is a need to use anonymous transport protocols (such as Nym or Tor) to obfuscate clients' identities from Zcash's indexing servers ([Lightwalletd](https://github.com/zcash/lightwalletd), [Zcashd](https://github.com/zcash/zcash), Zaino). 
As Nym has chosen Rust as their primary SDK ([Nym-SDK](https://github.com/nymtech/nym)), and Tor is currently implementing Rust support ([Arti](https://gitlab.torproject.org/tpo/core/arti)), Rust is a straight-forward and well-suited choice for this software. +Due to current potential data leaks / security weaknesses highlighted in [revised-nym-for-zcash-network-level-privacy](https://forum.zcashcommunity.com/t/revised-nym-for-zcash-network-level-privacy/46688) and [wallet-threat-model](https://zcash.readthedocs.io/en/master/rtd_pages/wallet_threat_model.html), there is a need to use anonymous transport protocols (such as Nym or Tor) to obfuscate clients' identities from Zcash's indexing servers ([Lightwalletd](https://github.com/zcash/lightwalletd), [Zcashd](https://github.com/zcash/zcash), Zaino). As Nym has chosen Rust as their primary SDK ([Nym-SDK](https://github.com/nymtech/nym)), and Tor is currently implementing Rust support ([Arti](https://gitlab.torproject.org/tpo/core/arti)), Rust is a straightforward and well-suited choice for this software. Zebra has been designed to allow direct read access to the finalized state and RPC access to the non-finalized state through its ReadStateService. Integrating directly with this service enables efficient access to chain data and allows new indices to be offered with minimal development. -Separation of validation and indexing functionality serves several purposes. First, by removing indexing functionality from the Validator (Zebra) will lead to a smaller and more maintainable codebase. Second, by moving all indexing functionality away from Zebra into Zaino will unify this paradigm and simplify Zcash's security model. Separating this concerns (consensus node and blockchain indexing) serves to create a clear trust boundary between the Indexer and Validator allowing the Indexer to take on this responsibility. 
Historically, this had been the case for "light" clients/wallets using [Lightwalletd](https://github.com/zcash/lightwalletd) as opposed to "full-node" client/wallets and block explorers that were directly served by the [Zcashd full node](https://github.com/zcash/zcash). +Separation of validation and indexing functionality serves several purposes. First, by removing indexing functionality from the Validator (Zebra) will lead to a smaller and more maintainable codebase. Second, by moving all indexing functionality away from Zebra into Zaino will unify this paradigm and simplify Zcash's security model. Separating these concerns (consensus node and blockchain indexing) serves to create a clear trust boundary between the Indexer and Validator allowing the Indexer to take on this responsibility. Historically, this had been the case for "light" clients/wallets using [Lightwalletd](https://github.com/zcash/lightwalletd) as opposed to "full-node" client/wallets and block explorers that were directly served by the [Zcashd full node](https://github.com/zcash/zcash). ### Goals @@ -29,18 +29,19 @@ Currently Zebra's `ReadStateService` only enables direct access to chain data (b ## Documentation - [Use Cases](./docs/use_cases.md): Holds instructions and example use cases. -- [Testing](./docs/testing.md): Hold intructions fo running tests. -- [Live Service System Architecture](./docs/live_system_architecture.pdf): Holds the Zcash system architecture diagram for the Zaino live service. -- [Library System Architecture](./docs/lib_system_architecture.pdf): Holds the Zcash system architecture diagram for the Zaino client library. -- [Internal Architecture](./docs/internal_architecture.pdf): Holds an internal Zaino system architecture diagram. -- [Internal Specification](./docs/internal_spec.md): Holds a specification for Zaino and its crates, detailing and their functionality, interfaces and dependencies. +- [Testing](./docs/testing.md): Holds instructions for running tests. 
+- [Live Service System Architecture](./docs/zaino_live_system_architecture.pdf): Holds the Zcash system architecture diagram for the Zaino live service. +- [Library System Architecture](./docs/zaino_lib_system_architecture.pdf): Holds the Zcash system architecture diagram for the Zaino client library. +- [ZainoD (Live Service) Internal Architecture](./docs/zaino_serve_architecture_v020.pdf): Holds an internal Zaino system architecture diagram. +- [Zaino-State (Library) Internal Architecture](./docs/zaino_state_architecture_v020.pdf): Holds an internal Zaino system architecture diagram. +- [Internal Specification](./docs/internal_spec.md): Holds a specification for Zaino and its crates, detailing their functionality, interfaces and dependencies. - [RPC API Spec](./docs/rpc_api.md): Holds a full specification of all of the RPC services served by Zaino. - [Cargo Docs](https://zingolabs.github.io/zaino/): Holds a full code specification for Zaino. ## Security Vulnerability Disclosure -If you believe you have discovered a security issue, please contact us at: - +If you believe you have discovered a security issue, and it is time sensitive, please contact us online on Matrix. See our [CONTRIBUTING.md document](./CONTRIBUTING.md) for contact points. 
+Otherwise you can send an email to: zingodisclosure@proton.me diff --git a/ci/copy-bins.sh b/ci/copy-bins.sh new file mode 100755 index 000000000..d70d2c6ab --- /dev/null +++ b/ci/copy-bins.sh @@ -0,0 +1,4 @@ +docker create --name my_zaino_container zingodevops/zaino-ci:latest +docker cp my_zaino_container:/usr/local/bin ./test_binaries/ +mv ./test_binaries/bin ./test_binaries/bins +docker rm my_zaino_container diff --git a/docker_cargo/.gitkeep b/docker_cargo/.gitkeep new file mode 100755 index 000000000..e69de29bb diff --git a/docker_cargo/git/.gitkeep b/docker_cargo/git/.gitkeep new file mode 100755 index 000000000..e69de29bb diff --git a/docker_cargo/registry/.gitkeep b/docker_cargo/registry/.gitkeep new file mode 100755 index 000000000..e69de29bb diff --git a/docs/Zaino-zcash-rpcs.pdf b/docs/Zaino-zcash-rpcs.pdf new file mode 100644 index 000000000..672dd5ca0 Binary files /dev/null and b/docs/Zaino-zcash-rpcs.pdf differ diff --git a/docs/docker.md b/docs/docker.md new file mode 100644 index 000000000..13adae542 --- /dev/null +++ b/docs/docker.md @@ -0,0 +1,136 @@ +# Docker Usage + +This document covers running Zaino using the official Docker image. + +## Overview + +The Docker image runs `zainod` - the Zaino indexer daemon. The image: + +- Uses `zainod` as the entrypoint with `start` as the default subcommand +- Runs as non-root user (`container_user`, UID 1000) after initial setup +- Handles volume permissions automatically for default paths + +For CLI usage details, see the CLI documentation or run `docker run --rm zaino --help`. + +## Configuration Options + +The container can be configured via: + +1. **Environment variables only** - Suitable for simple deployments, but sensitive fields (passwords, secrets, tokens, cookies, private keys) cannot be set via env vars for security reasons +2. **Config file + env vars** - Mount a config file for sensitive fields, override others with env vars +3. 
**Config file only** - Mount a complete config file + +For data persistence, volume mounts are recommended for the database/cache directory. + +## Deployment with Docker Compose + +The recommended way to run Zaino is with Docker Compose, typically alongside Zebra: + +```yaml +services: + zaino: + image: zaino:latest + ports: + - "8137:8137" # gRPC + - "8237:8237" # JSON-RPC (if enabled) + volumes: + - ./config:/app/config:ro + - zaino-data:/app/data + environment: + - ZAINO_VALIDATOR_SETTINGS__VALIDATOR_JSONRPC_LISTEN_ADDRESS=zebra:18232 + depends_on: + - zebra + + zebra: + image: zfnd/zebra:latest + volumes: + - zebra-data:/home/zebra/.cache/zebra + # ... zebra configuration + +volumes: + zaino-data: + zebra-data: +``` + +If Zebra runs on a different host/network, adjust `VALIDATOR_JSONRPC_LISTEN_ADDRESS` accordingly. + +## Initial Setup: Generating Configuration + +To generate a config file on your host for customization: + +```bash +mkdir -p ./config + +docker run --rm -v ./config:/app/config zaino generate-config + +# Config is now at ./config/zainod.toml - edit as needed +``` + +## Container Paths + +The container provides simple mount points: + +| Purpose | Mount Point | +|---------|-------------| +| Config | `/app/config` | +| Database | `/app/data` | + +These are symlinked internally to the XDG paths that Zaino expects. + +## Volume Permission Handling + +The entrypoint handles permissions automatically: + +1. Container starts as root +2. Creates directories and sets ownership to UID 1000 +3. Drops privileges and runs `zainod` + +This means you can mount volumes without pre-configuring ownership. 
+ +### Read-Only Config Mounts + +Config files can (and should) be mounted read-only: + +```yaml +volumes: + - ./config:/app/config:ro +``` + +## Configuration via Environment Variables + +Config values can be set via environment variables prefixed with `ZAINO_`, using `__` for nesting: + +```yaml +environment: + - ZAINO_NETWORK=Mainnet + - ZAINO_VALIDATOR_SETTINGS__VALIDATOR_JSONRPC_LISTEN_ADDRESS=zebra:18232 + - ZAINO_GRPC_SETTINGS__LISTEN_ADDRESS=0.0.0.0:8137 +``` + +### Sensitive Fields + +For security, the following fields **cannot** be set via environment variables and must use a config file: + +- `*_password` (e.g., `validator_password`) +- `*_secret` +- `*_token` +- `*_cookie` +- `*_private_key` + +If you attempt to set these via env vars, Zaino will error on startup. + +## Health Check + +The image includes a health check: + +```bash +docker inspect --format='{{.State.Health.Status}}' +``` + +## Local Testing + +Permission handling can be tested locally: + +```bash +./test_environment/test-docker-permissions.sh zaino:latest +``` diff --git a/docs/internal_architecture.pdf b/docs/internal_architecture.pdf deleted file mode 100644 index c227f3525..000000000 Binary files a/docs/internal_architecture.pdf and /dev/null differ diff --git a/docs/internal_spec.md b/docs/internal_spec.md index 9737cff72..7818ee138 100644 --- a/docs/internal_spec.md +++ b/docs/internal_spec.md @@ -1,25 +1,94 @@ # Zaino The Zaino repo consists of several crates that collectively provide an indexing service and APIs for the Zcash blockchain. The crates are modularized to separate concerns, enhance maintainability, and allow for flexible integration. 
-The main crates are: +### Crates - `Zainod` - `Zaino-Serve` - `Zaino-State` - `Zaino-Fetch` - `Zaino-Proto` + - `Zaino-Testutils` + - `Integration-tests` ### Workspace Dependencies - - `zcash_protocol` - - `zebra-chain` - - `zebra-rpc` - - `tokio` - - `tonic` - - `http` - - `thiserror` - +**Zingo Labs:** +- zingolib +- testvectors + +**Librustzcash:** +- zcash_client_backend +- zcash_protocol + + +**Zebra:** +- zebra-chain +- zebra-state +- zebra-rpc + + +**Zingo-infra-services:** +- zingo-infra-services + +**Runtime:** +- tokio +- tokio-stream + +**CLI:** +- clap + +**Tracing:** +- tracing +- tracing-subscriber +- tracing-futures + +**Network / RPC:** +- http +- url +- reqwest +- tower +- tonic +- tonic-build +- prost +- serde +- serde_json +- jsonrpsee-core +- jsonrpsee-types + +**Hashmaps, channels, DBs:** +- indexmap +- crossbeam-channel +- dashmap +- lmdb + +**Async:** +- async-stream +- async-trait +- futures + +**Utility:** +- thiserror +- lazy-regex +- once_cell +- ctrlc +- chrono +- which +- whoami + +**Formats:** +- base64 +- byteorder +- sha2 +- hex +- toml + +**Test:** +- portpicker +- tempfile + + Below is a detailed specification for each crate. -A full specification of the public functionality and RPC services available in Zaino is availabe in [Cargo Docs](https://zingolabs.github.io/zaino/index.html) and [RPC API Spec](./rpc_api.md). +A full specification of the public functionality and RPC services available in Zaino is available in [Cargo Docs](https://zingolabs.github.io/zaino/index.html) and [RPC API Spec](./rpc_api.md). ## ZainoD @@ -30,29 +99,11 @@ A full specification of the public functionality and RPC services available in Z - Parses command-line arguments and configuration files. - Initializes the gRPC server and internal caching systems using components from `zaino-serve` and `zaino-state` (backed by `zaino-fetch`). - Sets up logging and monitoring systems. - + - Runtime Management: - Manages the asynchronous runtime using `Tokio`. 
- Handles graceful shutdowns and restarts. -### Interfaces -- Executable Interface: - - Provides a CLI for configuring the service. (Currently it is only possible to set the conf file path.) - -- Configuration Files: - - Supports TOML files for complex configurations. - -### Dependencies - - `zaino-fetch` - - `zaino-serve` - - `tokio` - - `http` - - `thiserror` - - `serde` - - `ctrlc` - - `toml` - - `clap` - Full documentation for `ZainoD` can be found [here](https://zingolabs.github.io/zaino/zainod/index.html) and [here](https://zingolabs.github.io/zaino/zainodlib/index.html). @@ -62,8 +113,6 @@ Full documentation for `ZainoD` can be found [here](https://zingolabs.github.io/ ### Functionality - gRPC Server Implementation: - Utilizes `Tonic` to implement the gRPC server. - - Uses a `Director-Ingestor-Worker` model (see [Internal Architecture](./internal_architecture.pdf)) to allow the addition of Nym or Tor based `Ingestors`. - - Dynamically manages the internal Worker pool and Request queue and active Ingestors, handling errors and restarting services where necessary. - Hosts the `CompactTxStreamerServer` service for client interactions. - `CompactTxStreamerServer` Method Implementations: @@ -71,42 +120,17 @@ Full documentation for `ZainoD` can be found [here](https://zingolabs.github.io/ - Request Handling: - Validates and parses client requests. - - Communicates with `zaino-state` or `zaino-fetch` to retrieve data. + - Communicates with `zaino-state` to retrieve data. - Error Handling: - Maps internal errors to appropriate gRPC status codes. - Provides meaningful error messages to clients. -### Interfaces -- Public gRPC API: - - Defined in `zaino-proto` and exposed to clients. 
-
-- Internal Library:
-  - The `server::director` module provides the following gRPC server management functions: `ServerStatus::new`, `ServerStatus::load`, `Server::spawn`, `Server::serve`, `Server::check_for_shutdown`, `Server::shutdown`, `Server::status`, `Server::statustype`, `Server::statuses`, `Server::check_statuses`.
-
-### Dependencies
-  - `zaino-proto`
-  - `zaino-fetch`
-  - `zebra-chain`
-  - `zebra-rpc`
-  - `tokio`
-  - `tonic`
-  - `http`
-  - `thiserror`
-  - `prost`
-  - `hex`
-  - `tokio-stream`
-  - `futures`
-  - `async-stream`
-  - `crossbeam-channel`
-  - `lazy-regex`
-  - `whoami`
-
 Full documentation for `Zaino-Serve` can be found [here](https://zingolabs.github.io/zaino/zaino_serve/index.html).
 
 ## Zaino-State
 
-`Zaino-State` is a library that provides access to the mempool and blockchain data by interfacing directly with `Zebra`'s `ReadStateService`. It is designed for direct consumption by full node wallets and internal services. (Currently unimplemented.)
+`Zaino-State` is Zaino's chain fetch and transaction submission library, interfacing with zcash validators through a configurable backend. It is designed for direct consumption by full node wallets and internal services, enabling a simplified interface for Zcash clients.
 
 ### Functionality
 - Blockchain Data Access:
@@ -122,27 +146,17 @@ Full documentation for `Zaino-Serve` can be found [here](https://zingolabs.githu
   - Keeps track of the chain state in sync with Zebra.
   - Handles reorgs and updates to the best chain.
 
-Caching Mechanisms:
+- Caching Mechanisms:
   - Implements caching for frequently accessed data to improve performance.
 
-### Interfaces
-- Public Library API:
-  - Provides data retrieval and submission functions that directly correspond to the RPC services offered by `zaino-serve`.
-  - Provides asynchronous interfaces compatible with `Tokio`.
-
-- Event Streams:
-  - Offers highly concurrent, lock-free streams or channels to subscribe to blockchain events.
-
-### Dependencies
-  - `zebra-state`
-  - `tokio`
-  - `thiserror`
+- Configurable Backend:
+  - Implements a configurable backend service enabling clients to use a single interface for any validator set-up.
 
 Full documentation for `Zaino-State` can be found [here](https://zingolabs.github.io/zaino/zaino_state/index.html).
 
 ## Zaino-Fetch
 
-`Zaino-Fetch` is a library that provides access to the mempool and blockchain data using Zebra's RPC interface. It is primarily used as a backup and for backward compatibility with systems that rely on RPC communication such as `Zcashd`.
+`Zaino-Fetch` is a library that provides access to the mempool and blockchain data using Zcash's JsonRPC interface. It is primarily used as a backup and for backward compatibility with systems that rely on RPC communication such as `Zcashd`.
 
 ### Functionality
 - RPC Client Implementation:
@@ -153,6 +167,9 @@ Full documentation for `Zaino-State` can be found [here](https://zingolabs.githu
   - Fetches blocks, transactions, and mempool data via RPC.
   - Sends transactions to the network using the `sendrawtransaction` RPC method.
 
+- Block and Transaction Deserialisation logic:
+  - Provides Block and transaction deserialisation implementations.
+
 - Mempool and CompactFormat access:
   - Provides a simple mempool implementation for use in gRPC service implementations. (This is due to be refactored and possibly moved with the development of `Zaino-State`.)
   - Provides parse implementations for converting "full" blocks and transactions to "compact" blocks and transactions.
@@ -160,35 +177,6 @@ Full documentation for `Zaino-State` can be found [here](https://zingolabs.githu
 - Fallback Mechanism:
   - Acts as a backup when direct access via `zaino-state` is unavailable.
 
-### Interfaces
-- Internal API:
-  - The `jsonrpc::connector` module provides the following JSON-RPC client management functions: `new`, `uri`, `url`, `test_node_and_return_uri`.
- - The `jsonrpc::connector` module provides the following data retrieval and submission functions: `get_info`, `get_blockchain_info`, `get_address_balance`, `send_raw_transaction`, `get_block`, `get_raw_mempool`, `get_treestate`, `get_subtrees_by_index`, `get_raw_transaction`, `get_address_txids`, `get_address_utxos`. (This may be expanded to match the set of Zcash RPC's that Zaino is taking over from Zcashd.) - - The `chain::block` module provides the following block parsing and fetching functions: `get_block_from_node`, `get_nullifiers_from_node`, `FullBlock::parse_from_hex`, `FullBlock::to_compact`, FullBlock::header, FullBlock::transactions, FullBlock::Height, FullBlockHeader::version, FullBlockHeader::hash_prev_block, FullBlockHeader::hash_merkle_root, FullBlockHeader::time, FullBlockHeader::n_bits_bytes, FullBlockHeader::nonce, FullBlockHeader::solution, FullBlockHeader::cached_hash. - The `chain::transaction` module provides the following transaction parsing and fetching functions: `FullTransaction::f_overwintered`, `FullTransaction::version`, `FullTransaction::n_version_group_id`, `FullTransaction::consensus_branch_id`, `FullTransaction::transparent_inputs`, `FullTransaction::transparent_outputs`, `FullTransaction::shielded_spends`, `FullTransaction::shielded_outputs`, `FullTransaction::join_splits`, `FullTransaction::orchard_actions`, `FullTransaction::raw_bytes`, `FullTransaction::tx_id`, `FullTransaction::to_compact`. - - The `chain::mempool` module provides the following mempool management and fetching functions: `new`, `update`, `get_mempool_txids`, `get_filtered_mempool_txids`, `get_best_block_hash`. (This is due to be refactored and possibly moved with the development of `Zaino-State`.) - - Designed to be used by `zaino-serve` transparently. 
- -### Dependencies - - `zaino-proto` - - `zcash_protocol` - - `zebra-chain` - - `zebra-rpc` - - `tokio` - - `tonic` - - `http` - - `thiserror` - - `prost` - - `reqwest` - - `url` - - `serde_json` - - `serde` - - `hex` - - `indexmap` - - `base64` - - `byteorder` - - `sha2` - Full documentation for `Zaino-Fetch` can be found [here](https://zingolabs.github.io/zaino/zaino_fetch/index.html). @@ -204,17 +192,6 @@ Full documentation for `Zaino-Fetch` can be found [here](https://zingolabs.githu - Uses `prost` to generate Rust types from `.proto` files. - Generates client and server stubs for gRPC services. -### Interfaces -- Generated Code: - - Provides Rust modules that can be imported by other crates. - - Exposes types and traits required for implementing gRPC services. - -### Dependencies - - `tonic` - - `prost` - - `tonic-build` - - `which` - * We plan to eventually rely on `LibRustZcash`'s versions but hold our own here for development purposes. @@ -224,22 +201,7 @@ The `Zaino-Testutils` and `Integration-Tests` crates are dedicated to testing th - `Integration-Tests`: This crate houses integration tests that validate the interaction between different Zaino components and external services like `Zebra` and `Zingolib`. ### Test Modules -- `integrations`: Holds Wallet-to-Validator tests that test Zaino's functionality within the compete software stack. +- `wallet_to_validator`: Holds Wallet-to-Validator tests that test Zaino's functionality within the compete software stack. - `client_rpcs`: Holds RPC tests that test the functionality of the LightWallet gRPC services in Zaino and compares the outputs with the corresponding services in `Lightwalletd` to ensure compatibility. 
-### Dependencies - - `zaino-fetch` - - `zainod` - - `zingolib` - - `zaino-testutils` - - `zcash_local_net` - - `tokio` - - `tonic` - - `http` - - `ctrlc` - - `tempfile` - - `portpicker` - - `tracing-subscriber` - - `once_cell` - Full documentation for `Zaino-Testutils` can be found [here](https://zingolabs.github.io/zaino/zaino_testutils/index.html). diff --git a/docs/lib_system_architecture.pdf b/docs/lib_system_architecture.pdf deleted file mode 100644 index 767345ebb..000000000 Binary files a/docs/lib_system_architecture.pdf and /dev/null differ diff --git a/docs/live_system_architecture.pdf b/docs/live_system_architecture.pdf deleted file mode 100644 index 592d9174c..000000000 Binary files a/docs/live_system_architecture.pdf and /dev/null differ diff --git a/docs/rpc_api.md b/docs/rpc_api.md index 59f173d6c..1a731448f 100644 --- a/docs/rpc_api.md +++ b/docs/rpc_api.md @@ -1,9 +1,9 @@ # Zaino RPC APIs -Zaino's Final RPC API is currently unfinalised and will be completed in this [GitHub Issue](https://github.com/zingolabs/zaino/issues/69). 
+## Lightwallet gRPC Services +Zaino Currently Serves the following gRPC services as defined in the [LightWallet Protocol](https://github.com/zcash/lightwallet-protocol/blob/main/walletrpc/service.proto): +(gRPC service (function) arguments and return values are defined [here](https://github.com/zcash/lightwallet-protocol/blob/main/walletrpc/compact_formats.proto)) -## Lightwallet gRPC Services -Zaino Currently Serves the following gRPC services as defined in the [LightWallet Protocol](https://github.com/zcash/librustzcash/blob/main/zcash_client_backend/proto/service.proto): - GetLatestBlock (ChainSpec) returns (BlockID) - GetBlock (BlockID) returns (CompactBlock) - GetBlockNullifiers (BlockID) returns (CompactBlock) @@ -11,7 +11,7 @@ Zaino Currently Serves the following gRPC services as defined in the [LightWalle - GetBlockRangeNullifiers (BlockRange) returns (stream CompactBlock) - GetTransaction (TxFilter) returns (RawTransaction) - SendTransaction (RawTransaction) returns (SendResponse) - - GetTaddressTxids (TransparentAddressBlockFilter) returns (stream RawTransaction) + - GetTaddressTransactions (TransparentAddressBlockFilter) returns (stream RawTransaction) - GetTaddressBalance (AddressList) returns (Balance) - GetTaddressBalanceStream (stream Address) returns (Balance) (**MARKED FOR DEPRECATION**) - GetMempoolTx (Exclude) returns (stream CompactTx) @@ -22,41 +22,8 @@ Zaino Currently Serves the following gRPC services as defined in the [LightWalle - GetAddressUtxos (GetAddressUtxosArg) returns (GetAddressUtxosReplyList) - GetAddressUtxosStream (GetAddressUtxosArg) returns (stream GetAddressUtxosReply) - GetLightdInfo (Empty) returns (LightdInfo) - - Ping (Duration) returns (PingResponse) (**CURRENTLY UNIMPLEMENTED**) - ## Zcash RPC Services -Zaino has commited to taking over responsibility for serving the following [Zcash RPCs](https://zcash.github.io/rpc/) from Zcashd: - - [getaddressbalance](https://zcash.github.io/rpc/getaddressbalance.html) - - 
[getaddressdeltas](https://zcash.github.io/rpc/getaddressdeltas.html) - - [getaddressmempool](https://zcash.github.io/rpc/getaddressmempool.html) (**MARKED FOR DEPRECATION**) - - [getaddresstxids](https://zcash.github.io/rpc/getaddresstxids.html) - - [getaddressutxos](https://zcash.github.io/rpc/getaddressutxos.html) - - [getbestblockhash](https://zcash.github.io/rpc/getbestblockhash.html) (**LOW PRIORITY. MARKED FOR POSSIBLE DEPRECATION**) - - [getblock](https://zcash.github.io/rpc/getblock.html) - - [getblockchaininfo](https://zcash.github.io/rpc/getblockchaininfo.html) - - [getblockcount](https://zcash.github.io/rpc/getblockcount.html) - - [getblockdeltas](https://zcash.github.io/rpc/getblockdeltas.html) - - [getblockhash](https://zcash.github.io/rpc/getblockhash.html) - - [getblockhashes](https://zcash.github.io/rpc/getblockhashes.html) - - [getblockheader](https://zcash.github.io/rpc/getblockheader.html) - - [getchaintips](https://zcash.github.io/rpc/getchaintips.html) - - [getdifficulty](https://zcash.github.io/rpc/getdifficulty.html) - - [getmempoolinfo](https://zcash.github.io/rpc/getmempoolinfo.html) - - [getrawmempool](https://zcash.github.io/rpc/getrawmempool.html) - - [getspentinfo](https://zcash.github.io/rpc/getspentinfo.html) - - [gettxout](https://zcash.github.io/rpc/gettxout.html) - - [gettxoutproof](https://zcash.github.io/rpc/gettxoutproof.html) (**LOW PRIORITY. MARKED FOR POSSIBLE DEPRECATION**) - - [gettxoutsetinfo](https://zcash.github.io/rpc/gettxoutsetinfo.html) - - [verifytxoutproof](https://zcash.github.io/rpc/verifytxoutproof.html)(**LOW PRIORITY. 
MARKED FOR POSSIBLE DEPRECATION**) - - [z_gettreestate](https://zcash.github.io/rpc/z_gettreestate.html) - -Zaino will also provide wrapper functionality for the following RPCs in Zebra (to allow block explorers to fetch all data they require directly from / through Zaino): - - [getinfo](https://zcash.github.io/rpc/getinfo.html) - - [getmininginfo](https://zcash.github.io/rpc/getmininginfo.html) - - [getnetworksolps](https://zcash.github.io/rpc/getnetworksolps.html) - - [getnetworkinfo](https://zcash.github.io/rpc/getnetworkinfo.html) - - [getpeerinfo](https://zcash.github.io/rpc/getpeerinfo.html) - - [ping](https://zcash.github.io/rpc/ping.html) - +Zaino has also committed to taking over responsibility for serving all [Zcash RPC Services](https://zcash.github.io/rpc/) required by non-validator (miner) clients from Zcashd. +A full specification of the Zcash RPC services served by Zaino, and their current state of development, can be seen [here](./Zaino-zcash-rpcs.pdf). diff --git a/docs/testing.md b/docs/testing.md index dabddadcd..d9684c05f 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -4,25 +4,18 @@ 2) [Lightwalletd](https://github.com/zcash/lightwalletd.git) 3) [Zcashd, Zcash-Cli](https://github.com/zcash/zcash) -### Wallet to Node Tests -- To run tests: -1) Simlink or copy compiled `zcashd`, `zcash-cli`, `zebrad` and `lightwalletd` binaries to `$ zaino/test_binaries/bins/*` -2) Run `$ cargo nextest run --test integrations` +### Tests +1) Symlink or copy compiled `zebrad`, `zcashd` and `zcash-cli` binaries to `zaino/test_binaries/bins/*` +2) Add `zaino/test_binaries/bins` to `$PATH` or to `$TEST_BINARIES_DIR` +3) Run `cargo nextest run` -### Client RPC Tests -- To run client rpc tests: -1) Simlink or copy compiled `zebrad`, zcashd` and `zcash-cli` binaries to `$ zaino/test_binaries/bins/*` -2) Build release binary `cargo build --release` WARNING: these tests do not use the binary built by cargo nextest -3) Generate the chain cache `cargo nextest run 
generate_zcashd_chain_cache --run-ignored ignored-only` -4) Run `cargo nextest run --test client_rpcs` +The expected versions of these binaries is detailed in the file ``.env.testing-artifacts`. -- To run client rpc test `get_subtree_roots_sapling`: -1) sync Zebrad testnet to at least 2 sapling shards -2) copy the Zebrad testnet `state` cache to `zaino/integration-tests/chain_cache/get_subtree_roots_sapling` directory. -See the `get_subtree_roots_sapling` test fixture doc comments in zcash_local_net for more details. - -- To run client rpc test `get_subtree_roots_orchard`: -1) sync Zebrad mainnet to at least 2 orchard shards -2) copy the Zebrad mainnet `state` cache to `zaino/integration-tests/chain_cache/get_subtree_roots_orchard` directory. -See the `get_subtree_roots_orchard` test fixture doc comments in zcash_local_net for more details. +## Cargo Make +Another method to work with tests is using `cargo make`, a Rust task runner and build tool. +This can be installed by running `cargo install --force cargo-make` which will install cargo-make in your ~/.cargo/bin. +From that point you will have two executables available: `cargo-make` (invoked with `cargo make`) and `makers` which is invoked directly and not as a cargo plugin. +`cargo make help` +will print a help output. +`Makefile.toml` holds a configuration file. diff --git a/docs/updating_zebra_crates.md b/docs/updating_zebra_crates.md new file mode 100644 index 000000000..6c88b43f7 --- /dev/null +++ b/docs/updating_zebra_crates.md @@ -0,0 +1,61 @@ +# Updating Zebra crates ~best~ possible practices. + +Zaino depends on Zebra as its main validator. Zainod and Zebrad are +tightly coupled. Keeping up-to-date with latest Zebra crates is +a priority for Zaino maintainers. A lesser delta between the +zebra-crates Zaino depends on and their latest ensures that there +are no surprises when new versions of these crates are released. 
+ +When there's a spread between latest and supported version of the +Zebra crates we consider that a high priority _tech debt_. + +# How to approach updating Zebra crates + +Note: We expect readers of this document to be familiar with the [testing](./testing.md) +documentation. If you haven't done so, please familiarize yourself with that +document first. + +## Pre-condition: run all tests and establish a baseline +Run all tests on `dev` with `cargo nextest run --all-features` + +This baseline will tell you which tests are currently passing, failing +and their performance. This will help you identify regressions when +updating these or any other dependencies. + +## Update `.env.testing-artifacts` to the corresponding version of Zebra +Instructions on how to do this can be found in [testing](./testing.md) +documentation. + +## Finding out which crates depend on Zebra crates. +Find out which dependencies use `zebra-*` crates by running +`cargo tree` and spotting the usage of Zebra crates. + +## Always specify `all-features` when building + +Make sure you build and run the project with `all-features` in +order to catch any possible compile errors early. + +## Updating Zingo dependencies. +Zaino makes use of ZingoLabs tooling extensively. This means that +when updating a major dependency on Zaino, you may encounter "different versions of +crate {NAME} are being used" kind of errors. Use `cargo tree` to +find out the crate {NAME} usage and evaluate a highest common denominator +to update all the affected dependencies to that version. + +## Juggling transitive dependencies +### Tonic +Tonic is used in Zebra, Zaino, ZingoLib and Librustzcash. This one is +going to be a challenge. Prioritize what works with Zebra and then work +your way down the stack. Tonic can break the `.proto` files downstream; if +you notice that there are significant issues, consult with Zebra and +[Lightclient Protocol](https://github.com/zcash/lightwallet-protocol) maintainers.
+ +### Prost +Prost is used in conjunction with `tonic` to build gRPC .rs files from `.proto` files. +It is also used across many crates like `zaino-proto`, `zebra-rpc`, zingo `infrastructure` and `zaino-integration-tests`. Zaino can't build without reliably generating the files, so it's +important to figure this dependency graph out. + +## Updating Librustzcash dependencies. +Always try to stick with the latest tag you can find. However, given that Zebra uses Librustzcash +as well as ZingoLib, these may clash. The strategy here is to find the highest common denominator +for the two on a per-crate basis. diff --git a/docs/use_cases.md b/docs/use_cases.md index b7b18e69a..28f07c846 100644 --- a/docs/use_cases.md +++ b/docs/use_cases.md @@ -4,30 +4,52 @@ 2) [Zingolib](https://github.com/zingolabs/zingolib.git) [if running Zingo-Cli] ### Running ZainoD -- To run a Zaino server, backed locally by Zebrad first build Zaino: +- To run a Zaino server, backed locally by Zebrad, first build Zaino. + +Recently the newest GCC version on Arch has broken a build script in the `rocksdb` dependency. A workaround is: +`export CXXFLAGS="$CXXFLAGS -include cstdint"` + 1) Run `$ cargo build --release` 2) Add compiled binary held at `#PATH_TO/zaino/target/release/zainod` to PATH. -- Then to launch Zaino: [in seperate terminals]: +- Then to launch Zaino: [in separate terminals]: 3) Run `$ zebrad --config #PATH_TO_CONF/zebrad.toml start` -4) Run `$ zainod --config #PATH_TO_CONF/zindexer.toml` +4) Run `$ zainod start` (uses default config at `~/.config/zaino/zainod.toml`) + Or with explicit config: `$ zainod start -c #PATH_TO_CONF/zainod.toml` + + To generate a default config file: `$ zainod generate-config` + +NOTE: Unless the `no_db` option is set to true in the config file zaino will sync its internal `CompactBlock` cache with the validator it is connected to on launch.
This can be a very slow process the first time Zaino's DB is synced with a new chain and zaino will not be operable until the database is fully synced. If Zaino exits during this process the database is saved in its current state, enabling the chain to be synced in several stages. - To launch Zingo-Cli running through Zaino [from #PATH_TO/zingolib]: 5) Run `$ cargo run --release --package zingo-cli -- --chain "CHAIN_TYPE" --server "ZAINO_LISTEN_ADDR" --data-dir #PATH_TO_WALLET_DATA_DIR` -- Example Config files for running Zebra and Zaino on testnet are given in `zaino/zainod/*` +- Example Config files for running Zebra on testnet are given in `zaino/zainod/` -A system architecture diagram for this service can be seen at [Live Service System Architecture](./live_system_architecture.pdf). +A system architecture diagram for this service can be seen at [Live Service System Architecture](./zaino_live_system_architecture.pdf). # Local Library -**Currently Unimplemented, documentation will be added here as this functionality becomes available.** +Zaino-State serves as Zaino's chain fetch and transaction submission library. The intended endpoint for this lib is the `IndexerService` (and `IndexerServiceSubscriber`) held in `Zaino_state::indexer`. This generic endpoint enables zaino to add new backend options (Tonic, Darkside, Nym) to the IndexerService without changing the interface clients will see. -A system architecture diagram for this service can be seen at [Library System Architecture](./lib_system_architecture.pdf). +The use of a `Service` and `ServiceSubscriber` separates the core chainstate maintainer processes from fetch functionality, enabling zaino to serve a large number of concurrent clients efficiently. In the future we will also be adding a lightweight tonic backend option for clients that do not want to run any chainstate processes locally.
+Currently two `Service`s are being implemented, with plans for several more: +- FetchService: Zcash JsonRPC powered backend service enabling compatibility with a large number of validator options (zcashd, zebrad). +- StateService: Highly efficient chain fetch service tailored to run with ZebraD. + +Future Planned backend Services: +- TonicService: gRPC powered backend enabling lightclients and lightweight users to use Zaino's unified chain fetch and transaction submission services. +- DarksideService: Local test backend replacing functionality in lightwalletd. +- NymService: Nym powered backend enabling clients to obfuscate their identities from zcash servers. + +An example of how to spawn an `IndexerService` and create a `Subscriber` can be seen in `zainod::indexer::Indexer::spawn()`. + +A system architecture diagram for this service can be seen at [Library System Architecture](./zaino_lib_system_architecture.pdf). + +NOTE: Currently for the mempool to function the `IndexerService` cannot be dropped. An option to only keep the `Subscriber` in scope will be added with the addition of the gRPC backend (`TonicService`). # Remote Library **Currently Unimplemented, documentation will be added here as this functionality becomes available.** -A system architecture diagram for this service can be seen at [Library System Architecture](./lib_system_architecture.pdf). - +A system architecture diagram for this service can be seen at [Library System Architecture](./zaino_lib_system_architecture.pdf).
diff --git a/docs/zaino_lib_system_architecture.pdf b/docs/zaino_lib_system_architecture.pdf new file mode 100644 index 000000000..6a69d9cc7 Binary files /dev/null and b/docs/zaino_lib_system_architecture.pdf differ diff --git a/docs/zaino_live_system_architecture.pdf b/docs/zaino_live_system_architecture.pdf new file mode 100644 index 000000000..10b2197c0 Binary files /dev/null and b/docs/zaino_live_system_architecture.pdf differ diff --git a/docs/zaino_serve_architecture_v020.pdf b/docs/zaino_serve_architecture_v020.pdf new file mode 100644 index 000000000..9ac76ce7c Binary files /dev/null and b/docs/zaino_serve_architecture_v020.pdf differ diff --git a/docs/zaino_state_architecture_v020.pdf b/docs/zaino_state_architecture_v020.pdf new file mode 100644 index 000000000..d963656cb Binary files /dev/null and b/docs/zaino_state_architecture_v020.pdf differ diff --git a/entrypoint.sh b/entrypoint.sh new file mode 100644 index 000000000..bf19242ac --- /dev/null +++ b/entrypoint.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +# Entrypoint for running Zaino in Docker. +# +# This script handles privilege dropping and directory setup for default paths. +# Configuration is managed by config-rs using defaults, optional TOML, and +# environment variables prefixed with ZAINO_. +# +# NOTE: This script only handles directories specified via environment variables +# or the defaults below. If you configure custom paths in a TOML config file, +# you are responsible for ensuring those directories exist with appropriate +# permissions before starting the container. + +set -eo pipefail + +# Default writable paths. +# The Dockerfile creates symlinks: /app/config -> ~/.config/zaino, /app/data -> ~/.cache/zaino +# So we handle /app/* paths directly for Docker users. 
+# +# Database path (symlinked from ~/.cache/zaino) +: "${ZAINO_STORAGE__DATABASE__PATH:=/app/data}" +# +# Cookie dir (runtime, ephemeral) +: "${ZAINO_JSON_SERVER_SETTINGS__COOKIE_DIR:=${XDG_RUNTIME_DIR:-/tmp}/zaino}" +# +# Config directory (symlinked from ~/.config/zaino) +ZAINO_CONFIG_DIR="/app/config" + +# Drop privileges and execute command as non-root user +exec_as_user() { + user=$(id -u) + if [[ ${user} == '0' ]]; then + exec setpriv --reuid="${UID}" --regid="${GID}" --init-groups "$@" + else + exec "$@" + fi +} + +exit_error() { + echo "ERROR: $1" >&2 + exit 1 +} + +# Creates a directory if it doesn't exist and sets ownership to UID:GID. +# Gracefully handles read-only mounts by skipping chown if it fails. +create_owned_directory() { + local dir="$1" + [[ -z ${dir} ]] && return + + # Try to create directory; skip if read-only + if ! mkdir -p "${dir}" 2>/dev/null; then + echo "WARN: Cannot create ${dir} (read-only or permission denied), skipping" + return 0 + fi + + # Try to set ownership; skip if read-only + if ! 
chown -R "${UID}:${GID}" "${dir}" 2>/dev/null; then + echo "WARN: Cannot chown ${dir} (read-only?), skipping" + return 0 + fi + + # Set ownership on parent if it's not root or home + local parent_dir + parent_dir="$(dirname "${dir}")" + if [[ "${parent_dir}" != "/" && "${parent_dir}" != "${HOME}" ]]; then + chown "${UID}:${GID}" "${parent_dir}" 2>/dev/null || true + fi +} + +# Create and set ownership on writable directories +create_owned_directory "${ZAINO_STORAGE__DATABASE__PATH}" +create_owned_directory "${ZAINO_JSON_SERVER_SETTINGS__COOKIE_DIR}" +create_owned_directory "${ZAINO_CONFIG_DIR}" + +# Execute zainod with dropped privileges +exec_as_user zainod "$@" diff --git a/externals/.gitinclude b/externals/.gitinclude new file mode 100644 index 000000000..e69de29bb diff --git a/integration-tests/Cargo.toml b/integration-tests/Cargo.toml index 50d705981..07e2c3ca9 100644 --- a/integration-tests/Cargo.toml +++ b/integration-tests/Cargo.toml @@ -1,26 +1,55 @@ [package] name = "integration-tests" -description = "Crate containing Zaino 'Wallet-to-Node' tests." -edition = { workspace = true } +description = "Crate containing Zaino's integration-tests." +publish = false + authors = { workspace = true } -license = { workspace = true } repository = { workspace = true } +homepage = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +version = { workspace = true } -[dependencies] -# Test utility -zaino-testutils = { path = "../zaino-testutils" } +[features] +# **Experimental and alpha features** +# Exposes the **complete** set of experimental / alpha features currently implemented in Zaino. +experimental_features = ["transparent_address_history_experimental"] + +# Activates transparent address history capability in zaino +# +# NOTE: currently this is only implemented in the finalised state. 
+transparent_address_history_experimental = [ + "zaino-state/transparent_address_history_experimental", + "zainod/transparent_address_history_experimental", + "zaino-testutils/transparent_address_history_experimental" +] -# Test fixtures -zcash_local_net = { workspace = true, features = ["test_fixtures"] } +[dev-dependencies] +anyhow = { workspace = true } -# Lightclient -zingolib = { workspace = true } +# Test utility +zainod = { workspace = true } +zaino-testutils = { workspace = true } +zaino-fetch = { workspace = true } +zaino-proto.workspace = true +zaino-common.workspace = true +zaino-state = { workspace = true, features = ["test_dependencies"] } +zebra-chain.workspace = true +zebra-state.workspace = true +zebra-rpc.workspace = true +zip32 = {workspace = true} + +core2 = { workspace = true } +prost = { workspace = true } +serde_json = { workspace = true, features = ["preserve_order"] } +futures.workspace = true +tempfile.workspace = true +tower = { workspace = true, features = ["buffer", "util"] } +hex = { workspace = true } # Runtime tokio = { workspace = true } -# Logging -tracing-subscriber = { workspace = true } - -# Lazy -once_cell = { workspace = true } +# Zingo-infra +zcash_local_net = { workspace = true } +zingo_test_vectors = { workspace = true } diff --git a/integration-tests/src/lib.rs b/integration-tests/src/lib.rs new file mode 100644 index 000000000..13de7609a --- /dev/null +++ b/integration-tests/src/lib.rs @@ -0,0 +1 @@ +//! Helpers for integration-tests go here. 
diff --git a/integration-tests/tests/chain_cache.rs b/integration-tests/tests/chain_cache.rs new file mode 100644 index 000000000..eb4a6596f --- /dev/null +++ b/integration-tests/tests/chain_cache.rs @@ -0,0 +1,530 @@ +use zaino_common::network::ActivationHeights; +use zaino_fetch::jsonrpsee::connector::{test_node_and_return_url, JsonRpSeeConnector}; +use zaino_state::{ZcashIndexer, ZcashService}; +use zaino_testutils::{TestManager, ValidatorExt, ValidatorKind}; +use zainodlib::config::ZainodConfig; +use zainodlib::error::IndexerError; + +#[allow(deprecated)] +async fn create_test_manager_and_connector( + validator: &ValidatorKind, + activation_heights: Option, + chain_cache: Option, + enable_zaino: bool, + enable_clients: bool, +) -> (TestManager, JsonRpSeeConnector) +where + T: ValidatorExt, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, +{ + let test_manager = TestManager::::launch( + validator, + None, + activation_heights, + chain_cache, + enable_zaino, + false, + enable_clients, + ) + .await + .unwrap(); + + let json_service = JsonRpSeeConnector::new_with_basic_auth( + test_node_and_return_url( + &test_manager.full_node_rpc_listen_address.to_string(), + None, + Some("xxxxxx".to_string()), + Some("xxxxxx".to_string()), + ) + .await + .unwrap(), + "xxxxxx".to_string(), + "xxxxxx".to_string(), + ) + .unwrap(); + (test_manager, json_service) +} + +#[allow(deprecated)] +mod chain_query_interface { + + use std::{path::PathBuf, time::Duration}; + + use futures::TryStreamExt as _; + use tempfile::TempDir; + use zaino_common::{CacheConfig, DatabaseConfig, ServiceConfig, StorageConfig}; + use zaino_state::{ + chain_index::{ + source::ValidatorConnector, + types::{BestChainLocation, TransactionHash}, + NodeBackedChainIndex, NodeBackedChainIndexSubscriber, + }, + test_dependencies::{ + chain_index::{self, ChainIndex}, + BlockCacheConfig, + }, + Height, StateService, StateServiceConfig, 
ZcashService, + }; + use zcash_local_net::validator::{zcashd::Zcashd, zebrad::Zebrad}; + use zebra_chain::{ + parameters::{NetworkKind, testnet::{ConfiguredActivationHeights, RegtestParameters}}, + serialization::{ZcashDeserialize, ZcashDeserializeInto}, + }; + + use super::*; + + #[allow(deprecated)] + async fn create_test_manager_and_chain_index( + validator: &ValidatorKind, + chain_cache: Option, + enable_zaino: bool, + enable_clients: bool, + ) -> ( + TestManager, + JsonRpSeeConnector, + Option, + NodeBackedChainIndex, + NodeBackedChainIndexSubscriber, + ) + where + C: ValidatorExt, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, + { + let (test_manager, json_service) = create_test_manager_and_connector::( + validator, + None, + chain_cache.clone(), + enable_zaino, + enable_clients, + ) + .await; + + match validator { + ValidatorKind::Zebrad => { + let state_chain_cache_dir = match chain_cache { + Some(dir) => dir, + None => test_manager.data_dir.clone(), + }; + let network = match test_manager.network { + NetworkKind::Regtest => { + let local_net_activation_heights = test_manager.local_net.get_activation_heights().await; + + zebra_chain::parameters::Network::new_regtest(RegtestParameters::from( + ConfiguredActivationHeights { + before_overwinter: local_net_activation_heights.overwinter(), + overwinter: local_net_activation_heights.overwinter(), + sapling: local_net_activation_heights.sapling(), + blossom: local_net_activation_heights.blossom(), + heartwood: local_net_activation_heights.heartwood(), + canopy: local_net_activation_heights.canopy(), + nu5: local_net_activation_heights.nu5(), + nu6: local_net_activation_heights.nu6(), + nu6_1: local_net_activation_heights.nu6_1(), + nu7: local_net_activation_heights.nu7(), + } + )) + } + + NetworkKind::Testnet => zebra_chain::parameters::Network::new_default_testnet(), + NetworkKind::Mainnet => 
zebra_chain::parameters::Network::Mainnet, + }; + // FIXME: when state service is integrated into chain index this initialization must change + let state_service = StateService::spawn(StateServiceConfig::new( + zebra_state::Config { + cache_dir: state_chain_cache_dir, + ephemeral: false, + delete_old_database: true, + debug_stop_at_height: None, + debug_validity_check_interval: None, + // todo: does this matter? + should_backup_non_finalized_state: true, + debug_skip_non_finalized_state_backup_task: false, + }, + test_manager.full_node_rpc_listen_address.to_string(), + test_manager.full_node_grpc_listen_address, + false, + None, + None, + None, + ServiceConfig::default(), + StorageConfig { + cache: CacheConfig::default(), + database: DatabaseConfig { + path: test_manager.data_dir.as_path().to_path_buf().join("zaino"), + ..Default::default() + }, + }, + network.into(), + )) + .await + .unwrap(); + let temp_dir: TempDir = tempfile::tempdir().unwrap(); + let db_path: PathBuf = temp_dir.path().to_path_buf(); + let config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path, + ..Default::default() + }, + ..Default::default() + }, + db_version: 1, + network: zaino_common::Network::Regtest(ActivationHeights::from( + test_manager.local_net.get_activation_heights().await, + )), + }; + let chain_index = NodeBackedChainIndex::new( + ValidatorConnector::State(chain_index::source::State { + read_state_service: state_service.read_state_service().clone(), + mempool_fetcher: json_service.clone(), + network: config.network, + }), + config, + ) + .await + .unwrap(); + let index_reader = chain_index.subscriber(); + tokio::time::sleep(Duration::from_secs(3)).await; + + ( + test_manager, + json_service, + Some(state_service), + chain_index, + index_reader, + ) + } + ValidatorKind::Zcashd => { + let temp_dir: TempDir = tempfile::tempdir().unwrap(); + let db_path: PathBuf = temp_dir.path().to_path_buf(); + let config = BlockCacheConfig { + storage: 
StorageConfig { + database: DatabaseConfig { + path: db_path, + ..Default::default() + }, + ..Default::default() + }, + db_version: 1, + network: zaino_common::Network::Regtest( + test_manager.local_net.get_activation_heights().await.into(), + ), + }; + let chain_index = NodeBackedChainIndex::new( + ValidatorConnector::Fetch(json_service.clone()), + config, + ) + .await + .unwrap(); + let index_reader = chain_index.subscriber(); + tokio::time::sleep(Duration::from_secs(3)).await; + + (test_manager, json_service, None, chain_index, index_reader) + } + } + } + + #[ignore = "prone to timeouts and hangs, to be fixed in chain index integration"] + #[tokio::test(flavor = "multi_thread")] + async fn get_block_range_zebrad() { + get_block_range::(&ValidatorKind::Zebrad).await + } + + #[ignore = "prone to timeouts and hangs, to be fixed in chain index integration"] + #[tokio::test(flavor = "multi_thread")] + async fn get_block_range_zcashd() { + get_block_range::(&ValidatorKind::Zcashd).await + } + + async fn get_block_range(validator: &ValidatorKind) + where + C: ValidatorExt, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, + { + let (test_manager, _json_service, _option_state_service, _chain_index, indexer) = + create_test_manager_and_chain_index::(validator, None, false, false).await; + + test_manager + .generate_blocks_and_poll_chain_index(5, &indexer) + .await; + let snapshot = indexer.snapshot_nonfinalized_state(); + assert_eq!(snapshot.as_ref().blocks.len(), 8); + let range = indexer + .get_block_range(&snapshot, Height::try_from(0).unwrap(), None) + .unwrap() + .try_collect::>() + .await + .unwrap(); + for block in range { + let block = block + .zcash_deserialize_into::() + .unwrap(); + assert_eq!( + block.hash().0, + snapshot + .heights_to_hashes + .get( + &chain_index::types::Height::try_from(block.coinbase_height().unwrap()) + .unwrap() + ) + .unwrap() + .0 + ); + } + } + + #[ignore 
= "prone to timeouts and hangs, to be fixed in chain index integration"] + #[tokio::test(flavor = "multi_thread")] + async fn find_fork_point_zebrad() { + find_fork_point::(&ValidatorKind::Zebrad).await + } + + #[ignore = "prone to timeouts and hangs, to be fixed in chain index integration"] + #[tokio::test(flavor = "multi_thread")] + async fn find_fork_point_zcashd() { + find_fork_point::(&ValidatorKind::Zcashd).await + } + + async fn find_fork_point(validator: &ValidatorKind) + where + C: ValidatorExt, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, + { + let (test_manager, _json_service, _option_state_service, _chain_index, indexer) = + create_test_manager_and_chain_index::(validator, None, false, false).await; + + test_manager + .generate_blocks_and_poll_chain_index(5, &indexer) + .await; + let snapshot = indexer.snapshot_nonfinalized_state(); + assert_eq!(snapshot.as_ref().blocks.len(), 8); + for block_hash in snapshot.heights_to_hashes.values() { + // As all blocks are currently on the main chain, + // this should be the block provided + assert_eq!( + block_hash, + &indexer + .find_fork_point(&snapshot, block_hash) + .await + .unwrap() + .unwrap() + .0 + ) + } + } + + #[ignore = "prone to timeouts and hangs, to be fixed in chain index integration"] + #[tokio::test(flavor = "multi_thread")] + async fn get_raw_transaction_zebrad() { + get_raw_transaction::(&ValidatorKind::Zebrad).await + } + + #[ignore = "prone to timeouts and hangs, to be fixed in chain index integration"] + #[tokio::test(flavor = "multi_thread")] + async fn get_raw_transaction_zcashd() { + get_raw_transaction::(&ValidatorKind::Zcashd).await + } + + async fn get_raw_transaction(validator: &ValidatorKind) + where + C: ValidatorExt, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, + { + let (test_manager, _json_service, 
_option_state_service, _chain_index, indexer) = + create_test_manager_and_chain_index::(validator, None, false, false).await; + + test_manager + .generate_blocks_and_poll_chain_index(5, &indexer) + .await; + let snapshot = indexer.snapshot_nonfinalized_state(); + assert_eq!(snapshot.as_ref().blocks.len(), 8); + for (txid, height) in snapshot.blocks.values().flat_map(|block| { + block + .transactions() + .iter() + .map(|txdata| (txdata.txid().0, block.height())) + }) { + let (raw_transaction, branch_id) = indexer + .get_raw_transaction(&snapshot, &TransactionHash(txid)) + .await + .unwrap() + .unwrap(); + let zebra_txn = + zebra_chain::transaction::Transaction::zcash_deserialize(&raw_transaction[..]) + .unwrap(); + + assert_eq!( + branch_id, + if height == chain_index::types::GENESIS_HEIGHT { + None + } else if height == Height::try_from(1).unwrap() { + zebra_chain::parameters::NetworkUpgrade::Canopy + .branch_id() + .map(u32::from) + } else { + zebra_chain::parameters::NetworkUpgrade::Nu6 + .branch_id() + .map(u32::from) + } + ); + + let correct_txid = zebra_txn.hash().0; + + assert_eq!(txid, correct_txid); + } + } + + #[ignore = "prone to timeouts and hangs, to be fixed in chain index integration"] + #[tokio::test(flavor = "multi_thread")] + async fn get_transaction_status_zebrad() { + get_transaction_status::(&ValidatorKind::Zebrad).await + } + + #[ignore = "prone to timeouts and hangs, to be fixed in chain index integration"] + #[tokio::test(flavor = "multi_thread")] + async fn get_transaction_status_zcashd() { + get_transaction_status::(&ValidatorKind::Zcashd).await + } + + async fn get_transaction_status(validator: &ValidatorKind) + where + C: ValidatorExt, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, + { + let (test_manager, _json_service, _option_state_service, _chain_index, indexer) = + create_test_manager_and_chain_index::(validator, None, false, false).await; + let 
snapshot = indexer.snapshot_nonfinalized_state(); + assert_eq!(snapshot.as_ref().blocks.len(), 3); + + test_manager + .generate_blocks_and_poll_chain_index(5, &indexer) + .await; + let snapshot = indexer.snapshot_nonfinalized_state(); + assert_eq!(snapshot.as_ref().blocks.len(), 8); + for (txid, height, block_hash) in snapshot.blocks.values().flat_map(|block| { + block + .transactions() + .iter() + .map(|txdata| (txdata.txid().0, block.height(), block.hash())) + }) { + let (transaction_status_best_chain, transaction_status_nonbest_chain) = indexer + .get_transaction_status(&snapshot, &TransactionHash(txid)) + .await + .unwrap(); + assert_eq!( + transaction_status_best_chain.unwrap(), + BestChainLocation::Block(*block_hash, height) + ); + assert!(transaction_status_nonbest_chain.is_empty()); + } + } + + #[ignore = "prone to timeouts and hangs, to be fixed in chain index integration"] + #[tokio::test(flavor = "multi_thread")] + async fn sync_large_chain_zebrad() { + sync_large_chain::(&ValidatorKind::Zebrad).await + } + + #[ignore = "prone to timeouts and hangs, to be fixed in chain index integration"] + #[tokio::test(flavor = "multi_thread")] + async fn sync_large_chain_zcashd() { + sync_large_chain::(&ValidatorKind::Zcashd).await + } + + async fn sync_large_chain(validator: &ValidatorKind) + where + C: ValidatorExt, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, + { + let (test_manager, json_service, option_state_service, _chain_index, indexer) = + create_test_manager_and_chain_index::(validator, None, false, false).await; + + test_manager + .generate_blocks_and_poll_chain_index(5, &indexer) + .await; + if let Some(state_service) = option_state_service.as_ref() { + test_manager + .generate_blocks_and_poll_indexer(0, state_service.get_subscriber().inner_ref()) + .await + } + { + let chain_height = + Height::try_from(json_service.get_blockchain_info().await.unwrap().blocks.0) + 
.unwrap(); + let indexer_height = indexer.snapshot_nonfinalized_state().best_tip.height; + assert_eq!(chain_height, indexer_height); + } + + test_manager + .generate_blocks_and_poll_chain_index(150, &indexer) + .await; + if let Some(state_service) = option_state_service.as_ref() { + test_manager + .generate_blocks_and_poll_indexer(0, state_service.get_subscriber().inner_ref()) + .await; + } + + tokio::time::sleep(std::time::Duration::from_millis(5000)).await; + + let snapshot = indexer.snapshot_nonfinalized_state(); + let chain_height = json_service.get_blockchain_info().await.unwrap().blocks.0; + let indexer_height = snapshot.best_tip.height; + assert_eq!(Height::try_from(chain_height).unwrap(), indexer_height); + + let finalised_start = Height::try_from(chain_height - 150).unwrap(); + let finalised_tip = Height::try_from(chain_height - 100).unwrap(); + let end = Height::try_from(chain_height - 50).unwrap(); + + let finalized_blocks = indexer + .get_block_range(&snapshot, finalised_start, Some(finalised_tip)) + .unwrap() + .try_collect::>() + .await + .unwrap(); + for block in finalized_blocks { + block + .zcash_deserialize_into::() + .unwrap(); + } + + let non_finalised_blocks = indexer + .get_block_range(&snapshot, finalised_tip, Some(end)) + .unwrap() + .try_collect::>() + .await + .unwrap(); + for block in non_finalised_blocks { + block + .zcash_deserialize_into::() + .unwrap(); + } + } +} diff --git a/integration-tests/tests/client_rpcs.rs b/integration-tests/tests/client_rpcs.rs deleted file mode 100644 index a16748665..000000000 --- a/integration-tests/tests/client_rpcs.rs +++ /dev/null @@ -1,513 +0,0 @@ -//! Tests Zainod release binary against the `zcash_local_net` client RPC test fixtures. -//! -//! Ensure the release binary is up-to-date with `cargo build --release` before running this test-suite. -//! -//! See `Testing` section of README.md for more details. 
- -use std::path::PathBuf; - -use once_cell::sync::Lazy; -use zcash_local_net::network::Network; - -static ZCASHD_BIN: Lazy> = Lazy::new(|| { - let mut workspace_root_path = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()); - workspace_root_path.pop(); - Some(workspace_root_path.join("test_binaries/bins/zcashd")) -}); -static ZCASH_CLI_BIN: Lazy> = Lazy::new(|| { - let mut workspace_root_path = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()); - workspace_root_path.pop(); - Some(workspace_root_path.join("test_binaries/bins/zcash-cli")) -}); -static ZEBRAD_BIN: Lazy> = Lazy::new(|| { - let mut workspace_root_path = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()); - workspace_root_path.pop(); - Some(workspace_root_path.join("test_binaries/bins/zebrad")) -}); -static LIGHTWALLETD_BIN: Lazy> = Lazy::new(|| { - let mut workspace_root_path = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()); - workspace_root_path.pop(); - Some(workspace_root_path.join("test_binaries/bins/lightwalletd")) -}); -static ZAINOD_BIN: Lazy> = Lazy::new(|| { - let mut workspace_root_path = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()); - workspace_root_path.pop(); - Some(workspace_root_path.join("target/release/zainod")) -}); - -#[ignore = "not a test. 
generates chain cache for client_rpc tests."] -#[tokio::test] -async fn generate_zcashd_chain_cache() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::generate_zcashd_chain_cache( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_lightd_info() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_lightd_info( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_latest_block() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_latest_block( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_block() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_block( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_block_out_of_bounds() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_block_out_of_bounds( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_block_nullifiers() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_block_nullifiers( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_block_range_nullifiers() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_block_range_nullifiers( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_block_range_nullifiers_reverse() { - tracing_subscriber::fmt().init(); - - 
zcash_local_net::test_fixtures::get_block_range_nullifiers_reverse( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_block_range_lower() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_block_range_lower( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_block_range_upper() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_block_range_upper( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_block_range_reverse() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_block_range_reverse( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_block_range_out_of_bounds() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_block_range_out_of_bounds( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_transaction() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_transaction( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[ignore = "incomplete"] -#[tokio::test] -async fn send_transaction() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::send_transaction( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_taddress_txids_all() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_taddress_txids_all( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - 
ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_taddress_txids_lower() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_taddress_txids_lower( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_taddress_txids_upper() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_taddress_txids_upper( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_taddress_balance() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_taddress_balance( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_taddress_balance_stream() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_taddress_balance_stream( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_mempool_tx() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_mempool_tx( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_mempool_stream_zingolib_mempool_monitor() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_mempool_stream_zingolib_mempool_monitor( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_mempool_stream() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_mempool_stream( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn 
get_tree_state_by_height() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_tree_state_by_height( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_tree_state_by_hash() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_tree_state_by_hash( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_tree_state_out_of_bounds() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_tree_state_out_of_bounds( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_latest_tree_state() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_latest_tree_state( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -/// This test requires Zebrad testnet to be already synced to at least 2 sapling shards with the cache at -/// `zaino/chain_cache/get_subtree_roots_sapling` -/// -/// See doc comments of test_fixture for more details. -#[tokio::test] -async fn get_subtree_roots_sapling() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_subtree_roots_sapling( - ZEBRAD_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - Network::Testnet, - ) - .await; -} - -/// This test requires Zebrad testnet to be already synced to at least 2 orchard shards with the cache at -/// `zaino/chain_cache/get_subtree_roots_orchard` -/// -/// See doc comments of test_fixture for more details. 
-#[tokio::test] -async fn get_subtree_roots_orchard() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_subtree_roots_orchard( - ZEBRAD_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - Network::Mainnet, - ) - .await; -} - -#[tokio::test] -async fn get_address_utxos_all() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_address_utxos_all( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_address_utxos_lower() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_address_utxos_lower( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_address_utxos_upper() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_address_utxos_upper( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_address_utxos_out_of_bounds() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_address_utxos_out_of_bounds( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_address_utxos_stream_all() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_address_utxos_stream_all( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_address_utxos_stream_lower() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_address_utxos_stream_lower( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_address_utxos_stream_upper() { - 
tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_address_utxos_stream_upper( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} - -#[tokio::test] -async fn get_address_utxos_stream_out_of_bounds() { - tracing_subscriber::fmt().init(); - - zcash_local_net::test_fixtures::get_address_utxos_stream_out_of_bounds( - ZCASHD_BIN.clone(), - ZCASH_CLI_BIN.clone(), - ZAINOD_BIN.clone(), - LIGHTWALLETD_BIN.clone(), - ) - .await; -} diff --git a/integration-tests/tests/fetch_service.rs b/integration-tests/tests/fetch_service.rs new file mode 100644 index 000000000..48ea4651c --- /dev/null +++ b/integration-tests/tests/fetch_service.rs @@ -0,0 +1,2612 @@ +//! These tests compare the output of `FetchService` with the output of `JsonRpcConnector`. + +use futures::StreamExt as _; +use hex::ToHex as _; +use zaino_fetch::jsonrpsee::connector::{test_node_and_return_url, JsonRpSeeConnector}; +use zaino_proto::proto::compact_formats::CompactBlock; +use zaino_proto::proto::service::{ + AddressList, BlockId, BlockRange, GetAddressUtxosArg, GetMempoolTxRequest, GetSubtreeRootsArg, + PoolType, TransparentAddressBlockFilter, TxFilter, +}; +use zaino_state::ChainIndex; +use zaino_state::FetchServiceSubscriber; +#[allow(deprecated)] +use zaino_state::{FetchService, LightWalletIndexer, Status, StatusType, ZcashIndexer}; +use zaino_testutils::{TestManager, ValidatorExt, ValidatorKind}; +use zebra_chain::parameters::subsidy::ParameterSubsidy as _; +use zebra_chain::subtree::NoteCommitmentSubtreeIndex; +use zebra_rpc::client::ValidateAddressResponse; +use zebra_rpc::methods::{ + GetAddressBalanceRequest, GetAddressTxIdsRequest, GetBlock, GetBlockHash, +}; +use zip32::AccountId; + +#[allow(deprecated)] +async fn create_test_manager_and_fetch_service( + validator: &ValidatorKind, + chain_cache: Option, + enable_clients: bool, +) -> (TestManager, FetchServiceSubscriber) { + let mut test_manager = 
TestManager::::launch( + validator, + None, + None, + chain_cache, + true, + false, + enable_clients, + ) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + (test_manager, fetch_service_subscriber) +} + +async fn launch_fetch_service( + validator: &ValidatorKind, + chain_cache: Option, +) { + let (mut test_manager, fetch_service_subscriber) = + create_test_manager_and_fetch_service::(validator, chain_cache, false).await; + assert_eq!(fetch_service_subscriber.status(), StatusType::Ready); + dbg!(fetch_service_subscriber.data.clone()); + dbg!(fetch_service_subscriber.get_info().await.unwrap()); + dbg!(fetch_service_subscriber + .get_blockchain_info() + .await + .unwrap() + .blocks()); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_address_balance(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_address = clients.get_recipient_address("transparent").await; + + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + dbg!(clients + .faucet + .account_balance(AccountId::ZERO) + .await + .unwrap()); + dbg!(clients.faucet.transaction_summaries(false).await.unwrap()); + + zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_address.as_str(), 250_000, None)], + ) + 
.await + .unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + clients.recipient.sync_and_await().await.unwrap(); + let recipient_balance = clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap(); + + let fetch_service_balance = fetch_service_subscriber + .z_get_address_balance(GetAddressBalanceRequest::new(vec![recipient_address])) + .await + .unwrap(); + + dbg!(recipient_balance.clone()); + dbg!(fetch_service_balance); + + assert_eq!( + recipient_balance + .confirmed_transparent_balance + .unwrap() + .into_u64(), + 250_000, + ); + assert_eq!( + recipient_balance + .confirmed_transparent_balance + .unwrap() + .into_u64(), + fetch_service_balance.balance(), + ); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_block_raw(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + dbg!(fetch_service_subscriber + .z_get_block("1".to_string(), Some(0)) + .await + .unwrap()); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_block_object(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + dbg!(fetch_service_subscriber + .z_get_block("1".to_string(), Some(1)) + .await + .unwrap()); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_raw_mempool(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + 
.expect("Clients are not initialized"); + + let json_service = JsonRpSeeConnector::new_with_basic_auth( + test_node_and_return_url( + &test_manager.full_node_rpc_listen_address.to_string(), + None, + Some("xxxxxx".to_string()), + Some("xxxxxx".to_string()), + ) + .await + .unwrap(), + "xxxxxx".to_string(), + "xxxxxx".to_string(), + ) + .unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua: String = clients.get_recipient_address("unified").await; + let recipient_taddr: String = clients.get_recipient_address("transparent").await; + zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_taddr, 250_000, None)], + ) + .await + .unwrap(); + zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_ua, 250_000, None)], + ) + .await + .unwrap(); + + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + + let mut fetch_service_mempool = fetch_service_subscriber.get_raw_mempool().await.unwrap(); + let mut json_service_mempool = json_service.get_raw_mempool().await.unwrap().transactions; + + dbg!(&fetch_service_mempool); + dbg!(&json_service_mempool); + json_service_mempool.sort(); + fetch_service_mempool.sort(); + assert_eq!(json_service_mempool, fetch_service_mempool); + + 
test_manager.close().await; +} + +// `getmempoolinfo` computed from local Broadcast state for all validators +#[allow(deprecated)] +pub async fn test_get_mempool_info(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + + // Zebra cannot mine directly to Orchard in this setup, so shield funds first. + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + } + + let recipient_unified_address = clients.get_recipient_address("unified").await; + let recipient_transparent_address = clients.get_recipient_address("transparent").await; + + zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_transparent_address, 250_000, None)], + ) + .await + .unwrap(); + + zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_unified_address, 250_000, None)], + ) + .await + .unwrap(); + + // Allow the broadcaster and subscribers to observe new transactions. + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + + // Internal method now used for all validators. 
+ let info = fetch_service_subscriber.get_mempool_info().await.unwrap(); + + // Derive expected values directly from the current mempool contents. + + let keys = fetch_service_subscriber + .indexer + .get_mempool_txids() + .await + .unwrap(); + + let values = fetch_service_subscriber + .indexer + .get_mempool_transactions(Vec::new()) + .await + .unwrap(); + + // Size + assert_eq!(info.size, values.len() as u64); + assert!(info.size >= 1); + + // Bytes: sum of SerializedTransaction lengths + let expected_bytes: u64 = values.iter().map(|entry| entry.len() as u64).sum(); + + // Key heap bytes: sum of txid String capacities + let expected_key_heap_bytes: u64 = keys + .iter() + .map(|key| key.encode_hex::().capacity() as u64) + .sum(); + + let expected_usage = expected_bytes.saturating_add(expected_key_heap_bytes); + + assert!(info.bytes > 0); + assert_eq!(info.bytes, expected_bytes); + + assert!(info.usage >= info.bytes); + assert_eq!(info.usage, expected_usage); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_z_get_treestate(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + // TODO: investigate why 101 blocks are needed instead of the previous 100 blocks (chain index integration related?) 
+ test_manager + .generate_blocks_and_poll_indexer(101, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = clients.get_recipient_address("unified").await; + zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_ua, 250_000, None)], + ) + .await + .unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + dbg!(fetch_service_subscriber + .z_get_treestate("2".to_string()) + .await + .unwrap()); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_z_get_subtrees_by_index(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = clients.get_recipient_address("unified").await; + zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_ua, 250_000, None)], + ) + .await + .unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + 
+ dbg!(fetch_service_subscriber + .z_get_subtrees_by_index("orchard".to_string(), NoteCommitmentSubtreeIndex(0), None) + .await + .unwrap()); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_raw_transaction(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = clients.get_recipient_address("unified").await; + let tx = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_ua, 250_000, None)], + ) + .await + .unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + dbg!(fetch_service_subscriber + .get_raw_transaction(tx.first().to_string(), Some(1)) + .await + .unwrap()); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_address_tx_ids(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + + 
clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let tx = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + let chain_height: u32 = fetch_service_subscriber + .indexer + .snapshot_nonfinalized_state() + .best_tip + .height + .into(); + dbg!(&chain_height); + + let fetch_service_txids = fetch_service_subscriber + .get_address_tx_ids(GetAddressTxIdsRequest::new( + vec![recipient_taddr], + Some(chain_height - 2), + None, + )) + .await + .unwrap(); + + dbg!(&tx); + dbg!(&fetch_service_txids); + assert_eq!(tx.first().to_string(), fetch_service_txids[0]); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_address_utxos(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + 
.generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let txid_1 = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + + let fetch_service_utxos = fetch_service_subscriber + .z_get_address_utxos(GetAddressBalanceRequest::new(vec![recipient_taddr])) + .await + .unwrap(); + let (_, fetch_service_txid, ..) = fetch_service_utxos[0].into_parts(); + + dbg!(&txid_1); + dbg!(&fetch_service_utxos); + assert_eq!(txid_1.first().to_string(), fetch_service_txid.to_string()); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_latest_block(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + let json_service = JsonRpSeeConnector::new_with_basic_auth( + test_node_and_return_url( + &test_manager.full_node_rpc_listen_address.to_string(), + None, + Some("xxxxxx".to_string()), + Some("xxxxxx".to_string()), + ) + .await + .unwrap(), + "xxxxxx".to_string(), + "xxxxxx".to_string(), + ) + .unwrap(); + + let fetch_service_get_latest_block = + dbg!(fetch_service_subscriber.get_latest_block().await.unwrap()); + + let json_service_blockchain_info = json_service.get_blockchain_info().await.unwrap(); + + let json_service_get_latest_block = dbg!(BlockId { + height: json_service_blockchain_info.blocks.0 as u64, + hash: json_service_blockchain_info.best_block_hash.0.to_vec(), + }); + + assert_eq!(fetch_service_get_latest_block.height, 3); + assert_eq!( + 
fetch_service_get_latest_block, + json_service_get_latest_block + ); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn assert_fetch_service_difficulty_matches_rpc(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let fetch_service_get_difficulty = fetch_service_subscriber.get_difficulty().await.unwrap(); + + let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( + test_node_and_return_url( + &test_manager.full_node_rpc_listen_address.to_string(), + None, + Some("xxxxxx".to_string()), + Some("xxxxxx".to_string()), + ) + .await + .unwrap(), + "xxxxxx".to_string(), + "xxxxxx".to_string(), + ) + .unwrap(); + + let rpc_difficulty_response = jsonrpc_client.get_difficulty().await.unwrap(); + assert_eq!(fetch_service_get_difficulty, rpc_difficulty_response.0); +} + +#[allow(deprecated)] +async fn assert_fetch_service_mininginfo_matches_rpc(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let fetch_service_mining_info = fetch_service_subscriber.get_mining_info().await.unwrap(); + + let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( + test_node_and_return_url( + &test_manager.full_node_rpc_listen_address.to_string(), + None, + Some("xxxxxx".to_string()), + Some("xxxxxx".to_string()), + ) + .await + .unwrap(), + "xxxxxx".to_string(), + "xxxxxx".to_string(), + ) + .unwrap(); + + let rpc_mining_info_response = jsonrpc_client.get_mining_info().await.unwrap(); + assert_eq!(fetch_service_mining_info, rpc_mining_info_response); +} + +#[allow(deprecated)] +async fn assert_fetch_service_peerinfo_matches_rpc(validator: &ValidatorKind) { + let mut test_manager = + 
TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let fetch_service_get_peer_info = fetch_service_subscriber.get_peer_info().await.unwrap(); + + let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( + test_node_and_return_url( + &test_manager.full_node_rpc_listen_address.to_string(), + None, + Some("xxxxxx".to_string()), + Some("xxxxxx".to_string()), + ) + .await + .unwrap(), + "xxxxxx".to_string(), + "xxxxxx".to_string(), + ) + .unwrap(); + + let rpc_peer_info_response = jsonrpc_client.get_peer_info().await.unwrap(); + + dbg!(&rpc_peer_info_response); + dbg!(&fetch_service_get_peer_info); + assert_eq!(fetch_service_get_peer_info, rpc_peer_info_response); +} + +#[allow(deprecated)] +async fn fetch_service_get_block_subsidy(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let first_halving_height = fetch_service_subscriber + .network() + .to_zebra_network() + .height_for_first_halving(); + let block_limit = match validator { + // Block generation is more expensive in zcashd, and 10 is sufficient + ValidatorKind::Zcashd => 10, + // To stay consistent with zcashd, ten successful examples. Any calls + // below the first halving height should fail. 
+ ValidatorKind::Zebrad => first_halving_height.0 + 10, + }; + + for i in 0..block_limit { + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + // Zebrad does not support the founders' reward block subsidy + if i < first_halving_height.0 && validator == &ValidatorKind::Zebrad { + assert!(fetch_service_subscriber.get_block_subsidy(i).await.is_err()); + continue; + } + let fetch_service_get_block_subsidy = + fetch_service_subscriber.get_block_subsidy(i).await.unwrap(); + + let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( + test_node_and_return_url( + &test_manager.full_node_rpc_listen_address.to_string(), + None, + Some("xxxxxx".to_string()), + Some("xxxxxx".to_string()), + ) + .await + .unwrap(), + "xxxxxx".to_string(), + "xxxxxx".to_string(), + ) + .unwrap(); + + let rpc_block_subsidy_response = jsonrpc_client.get_block_subsidy(i).await.unwrap(); + assert_eq!(fetch_service_get_block_subsidy, rpc_block_subsidy_response); + } +} + +#[allow(deprecated)] +async fn fetch_service_get_block(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let block_id = BlockId { + height: 1, + hash: Vec::new(), + }; + + let fetch_service_get_block = dbg!(fetch_service_subscriber + .get_block(block_id.clone()) + .await + .unwrap()); + + assert_eq!(fetch_service_get_block.height, block_id.height); + let block_id_by_hash = BlockId { + height: 0, + hash: fetch_service_get_block.hash.clone(), + }; + let fetch_service_get_block_by_hash = fetch_service_subscriber + .get_block(block_id_by_hash.clone()) + .await + .unwrap(); + assert_eq!(fetch_service_get_block_by_hash.hash, block_id_by_hash.hash); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_block_header(validator: &ValidatorKind) { + let mut test_manager = + 
TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + const BLOCK_LIMIT: u32 = 10; + + for i in 0..BLOCK_LIMIT { + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + let block = fetch_service_subscriber + .z_get_block(i.to_string(), Some(1)) + .await + .unwrap(); + + let block_hash = match block { + GetBlock::Object(block) => block.hash(), + GetBlock::Raw(_) => panic!("Expected block object"), + }; + + let fetch_service_get_block_header = fetch_service_subscriber + .get_block_header(block_hash.to_string(), false) + .await + .unwrap(); + + let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( + test_node_and_return_url( + &test_manager.full_node_rpc_listen_address.to_string(), + None, + Some("xxxxxx".to_string()), + Some("xxxxxx".to_string()), + ) + .await + .unwrap(), + "xxxxxx".to_string(), + "xxxxxx".to_string(), + ) + .unwrap(); + + let rpc_block_header_response = jsonrpc_client + .get_block_header(block_hash.to_string(), false) + .await + .unwrap(); + + let fetch_service_get_block_header_verbose = fetch_service_subscriber + .get_block_header(block_hash.to_string(), true) + .await + .unwrap(); + + let rpc_block_header_response_verbose = jsonrpc_client + .get_block_header(block_hash.to_string(), true) + .await + .unwrap(); + + assert_eq!(fetch_service_get_block_header, rpc_block_header_response); + assert_eq!( + fetch_service_get_block_header_verbose, + rpc_block_header_response_verbose + ); + } +} + +#[allow(deprecated)] +async fn fetch_service_get_best_blockhash(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(5, &fetch_service_subscriber) + .await; + + let 
inspected_block: GetBlock = fetch_service_subscriber + // Some(verbosity) : 1 for JSON Object, 2 for tx data as JSON instead of hex + .z_get_block("7".to_string(), Some(1)) + .await + .unwrap(); + + let ret = match inspected_block { + GetBlock::Object(obj) => Some(obj.hash()), + _ => None, + }; + + let fetch_service_get_best_blockhash: GetBlockHash = + dbg!(fetch_service_subscriber.get_best_blockhash().await.unwrap()); + + assert_eq!( + fetch_service_get_best_blockhash.hash(), + ret.expect("ret to be Some(GetBlockHash) not None") + ); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_block_count(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(5, &fetch_service_subscriber) + .await; + + let block_id = BlockId { + height: 7, + hash: Vec::new(), + }; + + let fetch_service_get_block_count = + dbg!(fetch_service_subscriber.get_block_count().await.unwrap()); + + assert_eq!(fetch_service_get_block_count.0 as u64, block_id.height); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_validate_address(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + // scriptpubkey: "76a914000000000000000000000000000000000000000088ac" + let expected_validation = ValidateAddressResponse::new( + true, + Some("tm9iMLAuYMzJ6jtFLcA7rzUmfreGuKvr7Ma".to_string()), + Some(false), + ); + let fetch_service_validate_address = fetch_service_subscriber + .validate_address("tm9iMLAuYMzJ6jtFLcA7rzUmfreGuKvr7Ma".to_string()) + .await + .unwrap(); + + assert_eq!(fetch_service_validate_address, expected_validation); + + // 
scriptpubkey: "a914000000000000000000000000000000000000000087" + let expected_validation_script = ValidateAddressResponse::new( + true, + Some("t26YoyZ1iPgiMEWL4zGUm74eVWfhyDMXzY2".to_string()), + Some(true), + ); + + let fetch_service_validate_address_script = fetch_service_subscriber + .validate_address("t26YoyZ1iPgiMEWL4zGUm74eVWfhyDMXzY2".to_string()) + .await + .unwrap(); + + assert_eq!( + fetch_service_validate_address_script, + expected_validation_script + ); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_block_nullifiers(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let block_id = BlockId { + height: 1, + hash: Vec::new(), + }; + + let fetch_service_get_block_nullifiers = dbg!(fetch_service_subscriber + .get_block_nullifiers(block_id.clone()) + .await + .unwrap()); + + assert_eq!(fetch_service_get_block_nullifiers.height, block_id.height); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_block_range(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(10, &fetch_service_subscriber) + .await; + + let block_range = BlockRange { + start: Some(BlockId { + height: 1, + hash: Vec::new(), + }), + end: Some(BlockId { + height: 10, + hash: Vec::new(), + }), + pool_types: vec![], + }; + + let fetch_service_stream = fetch_service_subscriber + .get_block_range(block_range.clone()) + .await + .unwrap(); + let fetch_service_compact_blocks: Vec<_> = fetch_service_stream.collect().await; + + let fetch_blocks: Vec<_> = fetch_service_compact_blocks + .into_iter() + 
.filter_map(|result| result.ok()) + .collect(); + + dbg!(fetch_blocks); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_block_range_returns_all_pools( + validator: &ValidatorKind, +) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + for _ in 1..4 { + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + } + } else { + // zcashd + test_manager + .generate_blocks_and_poll_indexer(14, &fetch_service_subscriber) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + } + + let recipient_transparent = clients.get_recipient_address("transparent").await; + let deshielding_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_transparent, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_sapling = clients.get_recipient_address("sapling").await; + let sapling_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_sapling, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_ua = clients.get_recipient_address("unified").await; + let orchard_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_ua, 250_000, None)], + ) + .await + .unwrap() + .head; + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + 
+ let start_height: u64 = if matches!(validator, ValidatorKind::Zebrad) {
+ 100
+ } else {
+ 1
+ };
+ let end_height: u64 = if matches!(validator, ValidatorKind::Zebrad) {
+ 106
+ } else {
+ 17
+ };
+
+ let fetch_service_get_block_range = fetch_service_subscriber
+ .get_block_range(BlockRange {
+ start: Some(BlockId {
+ height: start_height,
+ hash: vec![],
+ }),
+ end: Some(BlockId {
+ height: end_height,
+ hash: vec![],
+ }),
+ pool_types: vec![
+ PoolType::Transparent as i32,
+ PoolType::Sapling as i32,
+ PoolType::Orchard as i32,
+ ],
+ })
+ .await
+ .unwrap()
+ .map(Result::unwrap)
+ .collect::>()
+ .await;
+
+ let compact_block = fetch_service_get_block_range.last().unwrap();
+
+ assert_eq!(compact_block.height, end_height);
+
+ // Transparent txs are included in compact blocks when the transparent pool type is
+ // requested, so the expected transaction count is 4 (3 sent txs + coinbase)
+ let expected_transaction_count = 4;
+
+ // the compact block has the right number of transactions
+ assert_eq!(compact_block.vtx.len(), expected_transaction_count);
+
+ // transaction order is not guaranteed so it's necessary to look them up by TXID
+ let deshielding_tx = compact_block
+ .vtx
+ .iter()
+ .find(|tx| tx.txid == deshielding_txid.as_ref().to_vec())
+ .unwrap();
+
+ dbg!(deshielding_tx);
+
+ assert!(
+ !deshielding_tx.vout.is_empty(),
+ "transparent data should be present when the transparent pool type is specified in the request."
+ );
+
+ // transaction order is not guaranteed so it's necessary to look them up by TXID
+ let sapling_tx = compact_block
+ .vtx
+ .iter()
+ .find(|tx| tx.txid == sapling_txid.as_ref().to_vec())
+ .unwrap();
+
+ assert!(
+ !sapling_tx.outputs.is_empty(),
+ "sapling data should be present when all pool types are specified in the request." 
+ ); + + let orchard_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == orchard_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !orchard_tx.actions.is_empty(), + "orchard data should be present when all pool types are specified in the request." + ); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_block_range_no_pools_returns_sapling_orchard( + validator: &ValidatorKind, +) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + for _ in 1..4 { + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + } + } else { + // zcashd + test_manager + .generate_blocks_and_poll_indexer(14, &fetch_service_subscriber) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + } + + let recipient_transparent = clients.get_recipient_address("transparent").await; + let deshielding_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_transparent, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_sapling = clients.get_recipient_address("sapling").await; + let sapling_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_sapling, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_ua = clients.get_recipient_address("unified").await; + let orchard_txid = 
zaino_testutils::from_inputs::quick_send(
+ &mut clients.faucet,
+ vec![(&recipient_ua, 250_000, None)],
+ )
+ .await
+ .unwrap()
+ .head;
+
+ test_manager
+ .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber)
+ .await;
+
+ let start_height: u64 = if matches!(validator, ValidatorKind::Zebrad) {
+ 100
+ } else {
+ 10
+ };
+ let end_height: u64 = if matches!(validator, ValidatorKind::Zebrad) {
+ 106
+ } else {
+ 17
+ };
+
+ let fetch_service_get_block_range = fetch_service_subscriber
+ .get_block_range(BlockRange {
+ start: Some(BlockId {
+ height: start_height,
+ hash: vec![],
+ }),
+ end: Some(BlockId {
+ height: end_height,
+ hash: vec![],
+ }),
+ pool_types: vec![],
+ })
+ .await
+ .unwrap()
+ .map(Result::unwrap)
+ .collect::>()
+ .await;
+
+ let compact_block = fetch_service_get_block_range.last().unwrap();
+
+ assert_eq!(compact_block.height, end_height);
+
+ let expected_tx_count = if matches!(validator, ValidatorKind::Zebrad) {
+ 3
+ } else {
+ 4 // zcashd shields coinbase and tx count will be one more than zebra's
+ };
+ // the compact block has the expected number of transactions
+ assert_eq!(compact_block.vtx.len(), expected_tx_count);
+
+ // transaction order is not guaranteed so it's necessary to look them up by TXID
+ let deshielding_tx = compact_block
+ .vtx
+ .iter()
+ .find(|tx| tx.txid == deshielding_txid.as_ref().to_vec())
+ .unwrap();
+
+ assert!(
+ deshielding_tx.vout.is_empty(),
+ "transparent data should not be present when the transparent pool type is not specified in the request."
+ );
+
+ // transaction order is not guaranteed so it's necessary to look them up by TXID
+ let sapling_tx = compact_block
+ .vtx
+ .iter()
+ .find(|tx| tx.txid == sapling_txid.as_ref().to_vec())
+ .unwrap();
+
+ assert!(
+ !sapling_tx.outputs.is_empty(),
+ "sapling data should be present when default pool types are specified in the request." 
+ ); + + let orchard_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == orchard_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !orchard_tx.actions.is_empty(), + "orchard data should be present when default pool types are specified in the request." + ); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_block_range_nullifiers(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(10, &fetch_service_subscriber) + .await; + + let block_range = BlockRange { + start: Some(BlockId { + height: 1, + hash: Vec::new(), + }), + end: Some(BlockId { + height: 10, + hash: Vec::new(), + }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], + }; + + let fetch_service_stream = fetch_service_subscriber + .get_block_range_nullifiers(block_range.clone()) + .await + .unwrap(); + let fetch_service_compact_blocks: Vec<_> = fetch_service_stream.collect().await; + + let fetch_nullifiers: Vec = fetch_service_compact_blocks + .into_iter() + .filter_map(|result| result.ok()) + .collect(); + + dbg!(fetch_nullifiers); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_transaction_mined(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + 
clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = clients.get_recipient_address("unified").await; + let tx = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_ua, 250_000, None)], + ) + .await + .unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + let tx_filter = TxFilter { + block: None, + index: 0, + hash: tx.first().as_ref().to_vec(), + }; + + let fetch_service_get_transaction = dbg!(fetch_service_subscriber + .get_transaction(tx_filter.clone()) + .await + .unwrap()); + + dbg!(fetch_service_get_transaction); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_transaction_mempool(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = clients.get_recipient_address("unified").await; + let tx = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_ua, 250_000, None)], + ) + .await + .unwrap(); + + let tx_filter = TxFilter { + block: None, + index: 0, + 
hash: tx.first().as_ref().to_vec(), + }; + + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + + let fetch_service_get_transaction = dbg!(fetch_service_subscriber + .get_transaction(tx_filter.clone()) + .await + .unwrap()); + + dbg!(fetch_service_get_transaction); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_taddress_txids(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let tx = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_taddr, 250_000, None)], + ) + .await + .unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + let chain_height: u32 = fetch_service_subscriber + .indexer + .snapshot_nonfinalized_state() + .best_tip + .height + .into(); + dbg!(&chain_height); + + let block_filter = TransparentAddressBlockFilter { + address: recipient_taddr, + range: Some(BlockRange { + start: Some(BlockId { + height: (chain_height - 2) as u64, + hash: Vec::new(), + }), + end: Some(BlockId { + height: chain_height as u64, + hash: Vec::new(), + }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + 
PoolType::Orchard as i32, + ], + }), + }; + + let fetch_service_stream = fetch_service_subscriber + .get_taddress_txids(block_filter.clone()) + .await + .unwrap(); + let fetch_service_tx: Vec<_> = fetch_service_stream.collect().await; + + let fetch_tx: Vec<_> = fetch_service_tx + .into_iter() + .filter_map(|result| result.ok()) + .collect(); + + dbg!(tx); + dbg!(&fetch_tx); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_taddress_balance(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_taddr, 250_000, None)], + ) + .await + .unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + clients.recipient.sync_and_await().await.unwrap(); + let balance = clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap(); + + let address_list = AddressList { + addresses: vec![recipient_taddr], + }; + + let fetch_service_balance = fetch_service_subscriber + .get_taddress_balance(address_list.clone()) + .await + .unwrap(); + + dbg!(&fetch_service_balance); + assert_eq!( + 
fetch_service_balance.value_zat as u64, + balance.confirmed_transparent_balance.unwrap().into_u64() + ); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_mempool_tx(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = clients.get_recipient_address("unified").await; + let recipient_taddr = clients.get_recipient_address("transparent").await; + let tx_1 = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_taddr, 250_000, None)], + ) + .await + .unwrap(); + let tx_2 = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_ua, 250_000, None)], + ) + .await + .unwrap(); + + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + + let exclude_list_empty = GetMempoolTxRequest { + exclude_txid_suffixes: Vec::new(), + pool_types: Vec::new(), + }; + + let fetch_service_stream = fetch_service_subscriber + 
.get_mempool_tx(exclude_list_empty.clone()) + .await + .unwrap(); + let fetch_service_mempool_tx: Vec<_> = fetch_service_stream.collect().await; + + let fetch_mempool_tx: Vec<_> = fetch_service_mempool_tx + .into_iter() + .filter_map(|result| result.ok()) + .collect(); + + let mut sorted_fetch_mempool_tx = fetch_mempool_tx.clone(); + sorted_fetch_mempool_tx.sort_by_key(|tx| tx.txid.clone()); + + // Transaction IDs from quick_send are already in internal byte order, + // which matches what the mempool returns, so no reversal needed + let tx1_bytes = *tx_1.first().as_ref(); + let tx2_bytes = *tx_2.first().as_ref(); + + let mut sorted_txids = [tx1_bytes, tx2_bytes]; + sorted_txids.sort_by_key(|hash| *hash); + + assert_eq!(sorted_fetch_mempool_tx[0].txid, sorted_txids[0]); + assert_eq!(sorted_fetch_mempool_tx[1].txid, sorted_txids[1]); + assert_eq!(sorted_fetch_mempool_tx.len(), 2); + + let exclude_list = GetMempoolTxRequest { + exclude_txid_suffixes: vec![sorted_txids[0][8..].to_vec()], + pool_types: vec![], + }; + + let exclude_fetch_service_stream = fetch_service_subscriber + .get_mempool_tx(exclude_list.clone()) + .await + .unwrap(); + let exclude_fetch_service_mempool_tx: Vec<_> = exclude_fetch_service_stream.collect().await; + + let exclude_fetch_mempool_tx: Vec<_> = exclude_fetch_service_mempool_tx + .into_iter() + .filter_map(|result| result.ok()) + .collect(); + + let mut sorted_exclude_fetch_mempool_tx = exclude_fetch_mempool_tx.clone(); + sorted_exclude_fetch_mempool_tx.sort_by_key(|tx| tx.txid.clone()); + + assert_eq!(sorted_exclude_fetch_mempool_tx[0].txid, sorted_txids[1]); + assert_eq!(sorted_exclude_fetch_mempool_tx.len(), 1); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_mempool_stream(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = 
test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let fetch_service_subscriber_2 = fetch_service_subscriber.clone(); + let fetch_service_handle = tokio::spawn(async move { + let fetch_service_stream = fetch_service_subscriber_2 + .get_mempool_stream() + .await + .unwrap(); + let fetch_service_mempool_tx: Vec<_> = fetch_service_stream.collect().await; + fetch_service_mempool_tx + .into_iter() + .filter_map(|result| result.ok()) + .collect::>() + }); + + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + + let recipient_ua = clients.get_recipient_address("unified").await; + let recipient_taddr = clients.get_recipient_address("transparent").await; + zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_taddr, 250_000, None)], + ) + .await + .unwrap(); + zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_ua, 250_000, None)], + ) + .await + .unwrap(); + + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + let fetch_mempool_tx = 
fetch_service_handle.await.unwrap(); + + let mut sorted_fetch_mempool_tx = fetch_mempool_tx.clone(); + sorted_fetch_mempool_tx.sort_by_key(|tx| tx.data.clone()); + + dbg!(sorted_fetch_mempool_tx); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_tree_state(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let block_id = BlockId { + height: 1, + hash: Vec::new(), + }; + + let fetch_service_get_tree_state = dbg!(fetch_service_subscriber + .get_tree_state(block_id.clone()) + .await + .unwrap()); + + dbg!(fetch_service_get_tree_state); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_latest_tree_state(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + dbg!(fetch_service_subscriber + .get_latest_tree_state() + .await + .unwrap()); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_subtree_roots(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let subtree_roots_arg = GetSubtreeRootsArg { + start_index: 0, + shielded_protocol: 1, + max_entries: 0, + }; + + let fetch_service_stream = fetch_service_subscriber + .get_subtree_roots(subtree_roots_arg) + .await + .unwrap(); + let fetch_service_roots: Vec<_> = fetch_service_stream.collect().await; + + let fetch_roots: Vec<_> = fetch_service_roots + .into_iter() + .filter_map(|result| result.ok()) + .collect(); + + dbg!(fetch_roots); + + test_manager.close().await; +} + 
+#[allow(deprecated)] +async fn fetch_service_get_taddress_utxos(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let tx = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_taddr, 250_000, None)], + ) + .await + .unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + let utxos_arg = GetAddressUtxosArg { + addresses: vec![recipient_taddr], + start_height: 0, + max_entries: 0, + }; + + let fetch_service_get_taddress_utxos = fetch_service_subscriber + .get_address_utxos(utxos_arg.clone()) + .await + .unwrap(); + + dbg!(tx); + dbg!(&fetch_service_get_taddress_utxos); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_taddress_utxos_stream(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + 
clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_taddr, 250_000, None)], + ) + .await + .unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + let utxos_arg = GetAddressUtxosArg { + addresses: vec![recipient_taddr], + start_height: 0, + max_entries: 0, + }; + + let fetch_service_stream = fetch_service_subscriber + .get_address_utxos_stream(utxos_arg.clone()) + .await + .unwrap(); + let fetch_service_utxos: Vec<_> = fetch_service_stream.collect().await; + + let fetch_utxos: Vec<_> = fetch_service_utxos + .into_iter() + .filter_map(|result| result.ok()) + .collect(); + + dbg!(fetch_utxos); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_lightd_info(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + dbg!(fetch_service_subscriber.get_lightd_info().await.unwrap()); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn assert_fetch_service_getnetworksols_matches_rpc( + validator: &ValidatorKind, +) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let fetch_service_get_networksolps = fetch_service_subscriber + .get_network_sol_ps(None, None) + 
.await + .unwrap(); + + let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( + test_node_and_return_url( + &test_manager.full_node_rpc_listen_address.to_string(), + None, + Some("xxxxxx".to_string()), + Some("xxxxxx".to_string()), + ) + .await + .unwrap(), + "xxxxxx".to_string(), + "xxxxxx".to_string(), + ) + .unwrap(); + + let rpc_getnetworksolps_response = jsonrpc_client.get_network_sol_ps(None, None).await.unwrap(); + assert_eq!(fetch_service_get_networksolps, rpc_getnetworksolps_response); +} + +mod zcashd { + + use super::*; + use zcash_local_net::validator::zcashd::Zcashd; + + mod launch { + + use super::*; + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn regtest_no_cache() { + launch_fetch_service::(&ValidatorKind::Zcashd, None).await; + } + + #[tokio::test(flavor = "multi_thread")] + #[ignore = "We no longer use chain caches. See zcashd::launch::regtest_no_cache."] + pub(crate) async fn regtest_with_cache() { + launch_fetch_service::( + &ValidatorKind::Zcashd, + zaino_testutils::ZCASHD_CHAIN_CACHE_DIR.clone(), + ) + .await; + } + } + + mod validation { + + use super::*; + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn validate_address() { + fetch_service_validate_address::(&ValidatorKind::Zcashd).await; + } + } + + mod get { + + use super::*; + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn address_balance() { + fetch_service_get_address_balance::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_raw() { + fetch_service_get_block_raw::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_object() { + fetch_service_get_block_object::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn raw_mempool() { + fetch_service_get_raw_mempool::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn 
mempool_info() { + test_get_mempool_info::(&ValidatorKind::Zcashd).await; + } + + mod z { + + use super::*; + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn get_treestate() { + fetch_service_z_get_treestate::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn subtrees_by_index() { + fetch_service_z_get_subtrees_by_index::(&ValidatorKind::Zcashd).await; + } + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn raw_transaction() { + fetch_service_get_raw_transaction::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn address_tx_ids() { + fetch_service_get_address_tx_ids::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn address_utxos() { + fetch_service_get_address_utxos::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn latest_block() { + fetch_service_get_latest_block::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block() { + fetch_service_get_block::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_header() { + fetch_service_get_block_header::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn difficulty() { + assert_fetch_service_difficulty_matches_rpc::(&ValidatorKind::Zcashd).await; + } + + #[allow(deprecated)] + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_deltas() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zcashd, + None, + None, + None, + true, + false, + false, + ) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let current_block = fetch_service_subscriber.get_latest_block().await.unwrap(); + + let block_hash_bytes: [u8; 32] = 
current_block.hash.as_slice().try_into().unwrap(); + + let block_hash = zebra_chain::block::Hash::from(block_hash_bytes); + + // Note: we need an 'expected' block hash in order to query its deltas. + // Having a predictable or test vector chain is the way to go here. + let fetch_service_block_deltas = fetch_service_subscriber + .get_block_deltas(block_hash.to_string()) + .await + .unwrap(); + + let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( + test_node_and_return_url( + &test_manager.full_node_rpc_listen_address.to_string(), + None, + Some("xxxxxx".to_string()), + Some("xxxxxx".to_string()), + ) + .await + .unwrap(), + "xxxxxx".to_string(), + "xxxxxx".to_string(), + ) + .unwrap(); + + let rpc_block_deltas = jsonrpc_client + .get_block_deltas(block_hash.to_string()) + .await + .unwrap(); + + assert_eq!(fetch_service_block_deltas, rpc_block_deltas); + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn mining_info() { + assert_fetch_service_mininginfo_matches_rpc::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn peer_info() { + assert_fetch_service_peerinfo_matches_rpc::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_subsidy() { + fetch_service_get_block_subsidy::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn best_blockhash() { + fetch_service_get_best_blockhash::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_count() { + fetch_service_get_block_count::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_nullifiers() { + fetch_service_get_block_nullifiers::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range() { + fetch_service_get_block_range::(&ValidatorKind::Zcashd).await; + } + + 
#[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_no_pool_type_returns_sapling_orchard() { + fetch_service_get_block_range_no_pools_returns_sapling_orchard::( + &ValidatorKind::Zcashd, + ) + .await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_returns_all_pools_when_requested() { + fetch_service_get_block_range_returns_all_pools::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_nullifiers() { + fetch_service_get_block_range_nullifiers::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn transaction_mined() { + fetch_service_get_transaction_mined::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn transaction_mempool() { + fetch_service_get_transaction_mempool::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn taddress_txids() { + fetch_service_get_taddress_txids::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn taddress_balance() { + fetch_service_get_taddress_balance::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn mempool_tx() { + fetch_service_get_mempool_tx::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn mempool_stream() { + fetch_service_get_mempool_stream::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn tree_state() { + fetch_service_get_tree_state::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn latest_tree_state() { + fetch_service_get_latest_tree_state::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn subtree_roots() { + 
fetch_service_get_subtree_roots::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn taddress_utxos() { + fetch_service_get_taddress_utxos::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn taddress_utxos_stream() { + fetch_service_get_taddress_utxos_stream::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn lightd_info() { + fetch_service_get_lightd_info::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn get_network_sol_ps() { + assert_fetch_service_getnetworksols_matches_rpc::(&ValidatorKind::Zcashd).await; + } + } +} + +mod zebrad { + + use super::*; + use zcash_local_net::validator::zebrad::Zebrad; + + mod launch { + + use super::*; + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn regtest_no_cache() { + launch_fetch_service::(&ValidatorKind::Zebrad, None).await; + } + + #[tokio::test(flavor = "multi_thread")] + #[ignore = "We no longer use chain caches. 
See zebrad::launch::regtest_no_cache."] + pub(crate) async fn regtest_with_cache() { + launch_fetch_service::( + &ValidatorKind::Zebrad, + zaino_testutils::ZEBRAD_CHAIN_CACHE_DIR.clone(), + ) + .await; + } + } + + mod validation { + + use super::*; + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn validate_address() { + fetch_service_validate_address::(&ValidatorKind::Zebrad).await; + } + } + + mod get { + + use super::*; + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn address_balance() { + fetch_service_get_address_balance::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_raw() { + fetch_service_get_block_raw::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_object() { + fetch_service_get_block_object::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn raw_mempool() { + fetch_service_get_raw_mempool::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn mempool_info() { + test_get_mempool_info::(&ValidatorKind::Zebrad).await; + } + + mod z { + + use super::*; + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn treestate() { + fetch_service_z_get_treestate::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn subtrees_by_index() { + fetch_service_z_get_subtrees_by_index::(&ValidatorKind::Zebrad).await; + } + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn raw_transaction() { + fetch_service_get_raw_transaction::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn address_tx_ids() { + fetch_service_get_address_tx_ids::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn address_utxos() { + 
fetch_service_get_address_utxos::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn latest_block() { + fetch_service_get_latest_block::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block() { + fetch_service_get_block::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_returns_all_pools_when_requested() { + fetch_service_get_block_range_returns_all_pools::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_no_pool_type_returns_sapling_orchard() { + fetch_service_get_block_range_no_pools_returns_sapling_orchard::( + &ValidatorKind::Zebrad, + ) + .await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_header() { + fetch_service_get_block_header::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn difficulty() { + assert_fetch_service_difficulty_matches_rpc::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn mining_info() { + assert_fetch_service_mininginfo_matches_rpc::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn peer_info() { + assert_fetch_service_peerinfo_matches_rpc::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_subsidy() { + fetch_service_get_block_subsidy::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn best_blockhash() { + fetch_service_get_best_blockhash::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_count() { + fetch_service_get_block_count::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_nullifiers() { + 
fetch_service_get_block_nullifiers::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_nullifiers() { + fetch_service_get_block_range_nullifiers::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn transaction_mined() { + fetch_service_get_transaction_mined::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn transaction_mempool() { + fetch_service_get_transaction_mempool::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn taddress_txids() { + fetch_service_get_taddress_txids::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn taddress_balance() { + fetch_service_get_taddress_balance::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn mempool_tx() { + fetch_service_get_mempool_tx::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn mempool_stream() { + fetch_service_get_mempool_stream::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn tree_state() { + fetch_service_get_tree_state::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn latest_tree_state() { + fetch_service_get_latest_tree_state::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn subtree_roots() { + fetch_service_get_subtree_roots::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn taddress_utxos() { + fetch_service_get_taddress_utxos::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn taddress_utxos_stream() { + fetch_service_get_taddress_utxos_stream::(&ValidatorKind::Zebrad).await; + } + + 
#[tokio::test(flavor = "multi_thread")] + pub(crate) async fn lightd_info() { + fetch_service_get_lightd_info::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn get_network_sol_ps() { + assert_fetch_service_getnetworksols_matches_rpc::(&ValidatorKind::Zebrad).await; + } + } +} diff --git a/integration-tests/tests/integrations.rs b/integration-tests/tests/integrations.rs deleted file mode 100644 index cb8271db5..000000000 --- a/integration-tests/tests/integrations.rs +++ /dev/null @@ -1,316 +0,0 @@ -//! Integration tests for zingo-Indexer. -//! Currently uses ZCashD as ZebraD has not yet implemented Regtest Mode. - -#![forbid(unsafe_code)] - -use std::sync::{atomic::AtomicBool, Arc}; -use zaino_testutils::{drop_test_manager, zingo_lightclient::get_address, TestManager}; - -mod zingo_wallet_basic { - use zingolib::{lightclient::LightClient, testutils::lightclient::from_inputs}; - - use super::*; - - #[tokio::test] - async fn connect_to_node_get_info() { - let online = Arc::new(AtomicBool::new(true)); - let (test_manager, regtest_handler, _indexer_handler) = - TestManager::launch(online.clone()).await; - let zingo_client = test_manager.build_lightclient().await; - - let lightd_info = zingo_client.do_info().await; - println!("[TEST LOG] Lightd_info response:\n{:#?}.", lightd_info); - - drop_test_manager( - Some(test_manager.temp_conf_dir.path().to_path_buf()), - regtest_handler, - online, - ) - .await; - } - - #[tokio::test] - async fn send_to_orchard() { - let online = Arc::new(AtomicBool::new(true)); - let (test_manager, regtest_handler, _indexer_handler) = - TestManager::launch(online.clone()).await; - let zingo_client = test_manager.build_lightclient().await; - - test_manager.regtest_manager.generate_n_blocks(1).unwrap(); - zingo_client.do_sync(false).await.unwrap(); - - from_inputs::quick_send( - &zingo_client, - vec![(&get_address(&zingo_client, "unified").await, 250_000, None)], - ) - .await - .unwrap(); - 
test_manager.regtest_manager.generate_n_blocks(1).unwrap(); - zingo_client.do_sync(false).await.unwrap(); - - let balance = zingo_client.do_balance().await; - println!("[TEST LOG] zingo_client balance: \n{:#?}.", balance); - assert_eq!(balance.orchard_balance.unwrap(), 1_875_000_000); - - drop_test_manager( - Some(test_manager.temp_conf_dir.path().to_path_buf()), - regtest_handler, - online, - ) - .await; - } - - #[tokio::test] - async fn send_to_sapling() { - let online = Arc::new(AtomicBool::new(true)); - let (test_manager, regtest_handler, _indexer_handler) = - TestManager::launch(online.clone()).await; - let zingo_client = test_manager.build_lightclient().await; - - test_manager.regtest_manager.generate_n_blocks(1).unwrap(); - zingo_client.do_sync(false).await.unwrap(); - from_inputs::quick_send( - &zingo_client, - vec![(&get_address(&zingo_client, "sapling").await, 250_000, None)], - ) - .await - .unwrap(); - test_manager.regtest_manager.generate_n_blocks(1).unwrap(); - zingo_client.do_sync(false).await.unwrap(); - - let balance = zingo_client.do_balance().await; - println!("[TEST LOG] zingo_client balance: \n{:#?}.", balance); - assert_eq!(balance.sapling_balance.unwrap(), 250_000); - - drop_test_manager( - Some(test_manager.temp_conf_dir.path().to_path_buf()), - regtest_handler, - online, - ) - .await; - } - - #[tokio::test] - async fn send_to_transparent() { - let online = Arc::new(AtomicBool::new(true)); - let (test_manager, regtest_handler, _indexer_handler) = - TestManager::launch(online.clone()).await; - let zingo_client = test_manager.build_lightclient().await; - - test_manager.regtest_manager.generate_n_blocks(1).unwrap(); - zingo_client.do_sync(false).await.unwrap(); - from_inputs::quick_send( - &zingo_client, - vec![( - &get_address(&zingo_client, "transparent").await, - 250_000, - None, - )], - ) - .await - .unwrap(); - test_manager.regtest_manager.generate_n_blocks(1).unwrap(); - zingo_client.do_sync(false).await.unwrap(); - - let balance = 
zingo_client.do_balance().await; - println!("[TEST LOG] zingo_client balance: \n{:#?}.", balance); - assert_eq!(balance.transparent_balance.unwrap(), 250_000); - - drop_test_manager( - Some(test_manager.temp_conf_dir.path().to_path_buf()), - regtest_handler, - online, - ) - .await; - } - - #[tokio::test] - async fn send_to_multiple() { - let online = Arc::new(AtomicBool::new(true)); - let (test_manager, regtest_handler, _indexer_handler) = - TestManager::launch(online.clone()).await; - let zingo_client = test_manager.build_lightclient().await; - - test_manager.regtest_manager.generate_n_blocks(2).unwrap(); - zingo_client.do_sync(false).await.unwrap(); - from_inputs::quick_send( - &zingo_client, - vec![(&get_address(&zingo_client, "unified").await, 250_000, None)], - ) - .await - .unwrap(); - from_inputs::quick_send( - &zingo_client, - vec![(&get_address(&zingo_client, "sapling").await, 250_000, None)], - ) - .await - .unwrap(); - from_inputs::quick_send( - &zingo_client, - vec![( - &get_address(&zingo_client, "transparent").await, - 250_000, - None, - )], - ) - .await - .unwrap(); - test_manager.regtest_manager.generate_n_blocks(1).unwrap(); - zingo_client.do_sync(false).await.unwrap(); - - let balance = zingo_client.do_balance().await; - println!("[TEST LOG] zingo_client balance: \n{:#?}.", balance); - assert_eq!(balance.orchard_balance.unwrap(), 2_499_500_000); - assert_eq!(balance.sapling_balance.unwrap(), 250_000); - assert_eq!(balance.transparent_balance.unwrap(), 250_000); - - drop_test_manager( - Some(test_manager.temp_conf_dir.path().to_path_buf()), - regtest_handler, - online, - ) - .await; - } - - #[tokio::test] - async fn shield_from_transparent() { - let online = Arc::new(AtomicBool::new(true)); - let (test_manager, regtest_handler, _indexer_handler) = - TestManager::launch(online.clone()).await; - let zingo_client = test_manager.build_lightclient().await; - - test_manager.regtest_manager.generate_n_blocks(1).unwrap(); - 
zingo_client.do_sync(false).await.unwrap(); - from_inputs::quick_send( - &zingo_client, - vec![( - &get_address(&zingo_client, "transparent").await, - 250_000, - None, - )], - ) - .await - .unwrap(); - test_manager.regtest_manager.generate_n_blocks(1).unwrap(); - zingo_client.do_sync(false).await.unwrap(); - - let balance = zingo_client.do_balance().await; - println!("[TEST LOG] zingo_client balance: \n{:#?}.", balance); - assert_eq!(balance.transparent_balance.unwrap(), 250_000); - - zingo_client.quick_shield().await.unwrap(); - test_manager.regtest_manager.generate_n_blocks(1).unwrap(); - zingo_client.do_sync(false).await.unwrap(); - - let balance = zingo_client.do_balance().await; - println!("[TEST LOG] zingo_client balance: \n{:#?}.", balance); - assert_eq!(balance.transparent_balance.unwrap(), 0); - assert_eq!(balance.orchard_balance.unwrap(), 2_500_000_000); - - drop_test_manager( - Some(test_manager.temp_conf_dir.path().to_path_buf()), - regtest_handler, - online, - ) - .await; - } - - #[tokio::test] - async fn sync_full_batch() { - let online = Arc::new(AtomicBool::new(true)); - let (test_manager, regtest_handler, _indexer_handler) = - TestManager::launch(online.clone()).await; - let zingo_client = test_manager.build_lightclient().await; - - test_manager.regtest_manager.generate_n_blocks(2).unwrap(); - zingo_client.do_sync(false).await.unwrap(); - - test_manager.regtest_manager.generate_n_blocks(5).unwrap(); - from_inputs::quick_send( - &zingo_client, - vec![(&get_address(&zingo_client, "unified").await, 250_000, None)], - ) - .await - .unwrap(); - test_manager.regtest_manager.generate_n_blocks(15).unwrap(); - from_inputs::quick_send( - &zingo_client, - vec![(&get_address(&zingo_client, "sapling").await, 250_000, None)], - ) - .await - .unwrap(); - - test_manager.regtest_manager.generate_n_blocks(15).unwrap(); - from_inputs::quick_send( - &zingo_client, - vec![( - &get_address(&zingo_client, "transparent").await, - 250_000, - None, - )], - ) - .await - 
.unwrap(); - test_manager.regtest_manager.generate_n_blocks(70).unwrap(); - - println!("[TEST LOG] syncing full batch."); - zingo_client.do_sync(false).await.unwrap(); - - let balance = zingo_client.do_balance().await; - println!("[TEST LOG] zingo_client balance: \n{:#?}.", balance); - assert_eq!(balance.orchard_balance.unwrap(), 67_499_500_000); - assert_eq!(balance.sapling_balance.unwrap(), 250_000); - assert_eq!(balance.transparent_balance.unwrap(), 250_000); - - drop_test_manager( - Some(test_manager.temp_conf_dir.path().to_path_buf()), - regtest_handler, - online, - ) - .await; - } - - #[tokio::test] - async fn monitor_unverified_mempool() { - let online = Arc::new(AtomicBool::new(true)); - let (test_manager, regtest_handler, _indexer_handler) = - TestManager::launch(online.clone()).await; - let zingo_client = Arc::new(test_manager.build_lightclient().await); - - test_manager.regtest_manager.generate_n_blocks(1).unwrap(); - zingo_client.do_sync(false).await.unwrap(); - from_inputs::quick_send( - &zingo_client, - vec![(&get_address(&zingo_client, "sapling").await, 250_000, None)], - ) - .await - .unwrap(); - from_inputs::quick_send( - &zingo_client, - vec![(&get_address(&zingo_client, "sapling").await, 250_000, None)], - ) - .await - .unwrap(); - - zingo_client.clear_state().await; - LightClient::start_mempool_monitor(zingo_client.clone()); - tokio::time::sleep(std::time::Duration::from_secs(5)).await; - - let balance = zingo_client.do_balance().await; - println!("[TEST LOG] zingo_client balance: \n{:#?}.", balance); - assert_eq!(balance.unverified_sapling_balance.unwrap(), 500_000); - - test_manager.regtest_manager.generate_n_blocks(1).unwrap(); - zingo_client.do_rescan().await.unwrap(); - let balance = zingo_client.do_balance().await; - println!("[TEST LOG] zingo_client balance: \n{:#?}.", balance); - assert_eq!(balance.verified_sapling_balance.unwrap(), 500_000); - - drop_test_manager( - Some(test_manager.temp_conf_dir.path().to_path_buf()), - 
regtest_handler, - online, - ) - .await; - } -} diff --git a/integration-tests/tests/json_server.rs b/integration-tests/tests/json_server.rs new file mode 100644 index 000000000..ea464e421 --- /dev/null +++ b/integration-tests/tests/json_server.rs @@ -0,0 +1,993 @@ +//! Tests that compare the output of both `zcashd` and `zainod` through `FetchService`. + +use zaino_common::network::ActivationHeights; +use zaino_common::{DatabaseConfig, ServiceConfig, StorageConfig}; + +#[allow(deprecated)] +use zaino_state::{ + ChainIndex, FetchService, FetchServiceConfig, FetchServiceSubscriber, ZcashIndexer, + ZcashService as _, +}; +use zaino_testutils::from_inputs; +use zaino_testutils::{TestManager, ValidatorKind}; +use zcash_local_net::logs::LogsToStdoutAndStderr as _; +use zcash_local_net::validator::zcashd::Zcashd; +use zcash_local_net::validator::Validator as _; +use zebra_chain::subtree::NoteCommitmentSubtreeIndex; +use zebra_rpc::client::GetAddressBalanceRequest; +use zebra_rpc::methods::{GetAddressTxIdsRequest, GetInfo}; + +#[allow(deprecated)] +async fn create_zcashd_test_manager_and_fetch_services( + clients: bool, +) -> ( + TestManager, + FetchService, + FetchServiceSubscriber, + FetchService, + FetchServiceSubscriber, +) { + println!("Launching test manager.."); + let test_manager = TestManager::::launch( + &ValidatorKind::Zcashd, + None, + None, + None, + true, + true, + clients, + ) + .await + .unwrap(); + + tokio::time::sleep(tokio::time::Duration::from_secs(3)).await; + + println!("Launching zcashd fetch service.."); + let zcashd_fetch_service = FetchService::spawn(FetchServiceConfig::new( + test_manager.full_node_rpc_listen_address.to_string(), + None, + None, + None, + ServiceConfig::default(), + StorageConfig { + database: DatabaseConfig { + path: test_manager + .local_net + .data_dir() + .path() + .to_path_buf() + .join("zaino"), + ..Default::default() + }, + ..Default::default() + }, + zaino_common::Network::Regtest(ActivationHeights::default()), + )) + 
.await + .unwrap(); + let zcashd_subscriber = zcashd_fetch_service.get_subscriber().inner(); + + tokio::time::sleep(tokio::time::Duration::from_secs(3)).await; + + println!("Launching zaino fetch service.."); + let zaino_fetch_service = FetchService::spawn(FetchServiceConfig::new( + test_manager.full_node_rpc_listen_address.to_string(), + test_manager.json_server_cookie_dir.clone(), + None, + None, + ServiceConfig::default(), + StorageConfig { + database: DatabaseConfig { + path: test_manager + .local_net + .data_dir() + .path() + .to_path_buf() + .join("zaino"), + ..Default::default() + }, + ..Default::default() + }, + zaino_common::Network::Regtest(ActivationHeights::default()), + )) + .await + .unwrap(); + let zaino_subscriber = zaino_fetch_service.get_subscriber().inner(); + + tokio::time::sleep(tokio::time::Duration::from_secs(3)).await; + + println!("Testmanager launch complete!"); + ( + test_manager, + zcashd_fetch_service, + zcashd_subscriber, + zaino_fetch_service, + zaino_subscriber, + ) +} + +#[allow(deprecated)] +async fn generate_blocks_and_poll_all_chain_indexes( + n: u32, + test_manager: &TestManager, + zaino_subscriber: FetchServiceSubscriber, + zcashd_subscriber: FetchServiceSubscriber, +) { + test_manager.generate_blocks_and_poll(n).await; + test_manager + .generate_blocks_and_poll_indexer(0, &zaino_subscriber) + .await; + test_manager + .generate_blocks_and_poll_indexer(0, &zcashd_subscriber) + .await; +} + +async fn launch_json_server_check_info() { + let (mut test_manager, _zcashd_service, zcashd_subscriber, _zaino_service, zaino_subscriber) = + create_zcashd_test_manager_and_fetch_services(false).await; + let zcashd_info = dbg!(zcashd_subscriber.get_info().await.unwrap()); + let zcashd_blockchain_info = dbg!(zcashd_subscriber.get_blockchain_info().await.unwrap()); + let zaino_info = dbg!(zaino_subscriber.get_info().await.unwrap()); + let zaino_blockchain_info = dbg!(zaino_subscriber.get_blockchain_info().await.unwrap()); + + // Clean timestamp 
from get_info + let ( + version, + build, + subversion, + protocol_version, + blocks, + connections, + proxy, + difficulty, + testnet, + pay_tx_fee, + relay_fee, + errors, + _, + ) = zcashd_info.into_parts(); + let cleaned_zcashd_info = GetInfo::new( + version, + build, + subversion, + protocol_version, + blocks, + connections, + proxy, + difficulty, + testnet, + pay_tx_fee, + relay_fee, + errors, + 0, + ); + + let ( + version, + build, + subversion, + protocol_version, + blocks, + connections, + proxy, + difficulty, + testnet, + pay_tx_fee, + relay_fee, + errors, + _, + ) = zaino_info.into_parts(); + let cleaned_zaino_info = GetInfo::new( + version, + build, + subversion, + protocol_version, + blocks, + connections, + proxy, + difficulty, + testnet, + pay_tx_fee, + relay_fee, + errors, + 0, + ); + + assert_eq!(cleaned_zcashd_info, cleaned_zaino_info); + + assert_eq!( + zcashd_blockchain_info.chain(), + zaino_blockchain_info.chain() + ); + assert_eq!( + zcashd_blockchain_info.blocks(), + zaino_blockchain_info.blocks() + ); + assert_eq!( + zcashd_blockchain_info.best_block_hash(), + zaino_blockchain_info.best_block_hash() + ); + assert_eq!( + zcashd_blockchain_info.estimated_height(), + zaino_blockchain_info.estimated_height() + ); + assert_eq!( + zcashd_blockchain_info.value_pools(), + zaino_blockchain_info.value_pools() + ); + assert_eq!( + zcashd_blockchain_info.upgrades(), + zaino_blockchain_info.upgrades() + ); + assert_eq!( + zcashd_blockchain_info.consensus(), + zaino_blockchain_info.consensus() + ); + + test_manager.close().await; +} + +async fn get_best_blockhash_inner() { + let (mut test_manager, _zcashd_service, zcashd_subscriber, _zaino_service, zaino_subscriber) = + create_zcashd_test_manager_and_fetch_services(false).await; + + let zcashd_bbh = dbg!(zcashd_subscriber.get_best_blockhash().await.unwrap()); + let zaino_bbh = dbg!(zaino_subscriber.get_best_blockhash().await.unwrap()); + + assert_eq!(zcashd_bbh, zaino_bbh); + + test_manager.close().await; 
+} + +async fn get_block_count_inner() { + let (mut test_manager, _zcashd_service, zcashd_subscriber, _zaino_service, zaino_subscriber) = + create_zcashd_test_manager_and_fetch_services(false).await; + + let zcashd_block_count = dbg!(zcashd_subscriber.get_block_count().await.unwrap()); + let zaino_block_count = dbg!(zaino_subscriber.get_block_count().await.unwrap()); + + assert_eq!(zcashd_block_count, zaino_block_count); + + test_manager.close().await; +} + +async fn validate_address_inner() { + let (mut test_manager, _zcashd_service, zcashd_subscriber, _zaino_service, zaino_subscriber) = + create_zcashd_test_manager_and_fetch_services(false).await; + + // Using a testnet transparent address + let address_string = "tmHMBeeYRuc2eVicLNfP15YLxbQsooCA6jb"; + + let address_with_script = "t3TAfQ9eYmXWGe3oPae1XKhdTxm8JvsnFRL"; + + let zcashd_valid = zcashd_subscriber + .validate_address(address_string.to_string()) + .await + .unwrap(); + + let zaino_valid = zaino_subscriber + .validate_address(address_string.to_string()) + .await + .unwrap(); + + assert_eq!(zcashd_valid, zaino_valid, "Address should be valid"); + + let zcashd_valid_script = zcashd_subscriber + .validate_address(address_with_script.to_string()) + .await + .unwrap(); + + let zaino_valid_script = zaino_subscriber + .validate_address(address_with_script.to_string()) + .await + .unwrap(); + + assert_eq!( + zcashd_valid_script, zaino_valid_script, + "Address should be valid" + ); + + test_manager.close().await; +} + +async fn z_get_address_balance_inner() { + let (mut test_manager, _zcashd_service, zcashd_subscriber, _zaino_service, zaino_subscriber) = + create_zcashd_test_manager_and_fetch_services(true).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + + clients.faucet.sync_and_await().await.unwrap(); + + from_inputs::quick_send( + &mut clients.faucet, + 
vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + zaino_subscriber.clone(), + zcashd_subscriber.clone(), + ) + .await; + + clients.recipient.sync_and_await().await.unwrap(); + let recipient_balance = clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap(); + + let zcashd_service_balance = zcashd_subscriber + .z_get_address_balance(GetAddressBalanceRequest::new(vec![recipient_taddr.clone()])) + .await + .unwrap(); + + let zaino_service_balance = zaino_subscriber + .z_get_address_balance(GetAddressBalanceRequest::new(vec![recipient_taddr])) + .await + .unwrap(); + + dbg!(&recipient_balance); + dbg!(&zcashd_service_balance); + dbg!(&zaino_service_balance); + + assert_eq!( + recipient_balance + .confirmed_transparent_balance + .unwrap() + .into_u64(), + 250_000, + ); + assert_eq!( + recipient_balance + .confirmed_transparent_balance + .unwrap() + .into_u64(), + zcashd_service_balance.balance(), + ); + assert_eq!(zcashd_service_balance, zaino_service_balance); + + test_manager.close().await; +} + +async fn z_get_block_inner() { + let (mut test_manager, _zcashd_service, zcashd_subscriber, _zaino_service, zaino_subscriber) = + create_zcashd_test_manager_and_fetch_services(false).await; + + let zcashd_block_raw = dbg!(zcashd_subscriber + .z_get_block("1".to_string(), Some(0)) + .await + .unwrap()); + + let zaino_block_raw = dbg!(zaino_subscriber + .z_get_block("1".to_string(), Some(0)) + .await + .unwrap()); + + assert_eq!(zcashd_block_raw, zaino_block_raw); + + let zcashd_block = dbg!(zcashd_subscriber + .z_get_block("1".to_string(), Some(1)) + .await + .unwrap()); + + let zaino_block = dbg!(zaino_subscriber + .z_get_block("1".to_string(), Some(1)) + .await + .unwrap()); + + assert_eq!(zcashd_block, zaino_block); + + let hash = match zcashd_block { + zebra_rpc::methods::GetBlock::Raw(_) => panic!("expected object"), + 
zebra_rpc::methods::GetBlock::Object(obj) => obj.hash().to_string(), + }; + let zaino_get_block_by_hash = zaino_subscriber + .z_get_block(hash.clone(), Some(1)) + .await + .unwrap(); + assert_eq!(zaino_get_block_by_hash, zaino_block); + + test_manager.close().await; +} + +async fn get_raw_mempool_inner() { + let (mut test_manager, _zcashd_service, zcashd_subscriber, _zaino_service, zaino_subscriber) = + create_zcashd_test_manager_and_fetch_services(true).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + zaino_subscriber.clone(), + zcashd_subscriber.clone(), + ) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + + let recipient_ua = &clients.get_recipient_address("unified").await; + let recipient_taddr = &clients.get_recipient_address("transparent").await; + from_inputs::quick_send(&mut clients.faucet, vec![(recipient_taddr, 250_000, None)]) + .await + .unwrap(); + from_inputs::quick_send(&mut clients.faucet, vec![(recipient_ua, 250_000, None)]) + .await + .unwrap(); + + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + + let mut zcashd_mempool = zcashd_subscriber.get_raw_mempool().await.unwrap(); + let mut zaino_mempool = zaino_subscriber.get_raw_mempool().await.unwrap(); + + dbg!(&zcashd_mempool); + zcashd_mempool.sort(); + + dbg!(&zaino_mempool); + zaino_mempool.sort(); + + assert_eq!(zcashd_mempool, zaino_mempool); + + test_manager.close().await; +} + +async fn get_mempool_info_inner() { + let (mut test_manager, _zcashd_service, zcashd_subscriber, _zaino_service, zaino_subscriber) = + create_zcashd_test_manager_and_fetch_services(true).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + zaino_subscriber.clone(), + zcashd_subscriber.clone(), + ) + .await; + + 
clients.faucet.sync_and_await().await.unwrap(); + + let recipient_ua = &clients.get_recipient_address("unified").await; + let recipient_taddr = &clients.get_recipient_address("transparent").await; + from_inputs::quick_send(&mut clients.faucet, vec![(recipient_taddr, 250_000, None)]) + .await + .unwrap(); + from_inputs::quick_send(&mut clients.faucet, vec![(recipient_ua, 250_000, None)]) + .await + .unwrap(); + + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + + let zcashd_subscriber_mempool_info = zcashd_subscriber.get_mempool_info().await.unwrap(); + let zaino_subscriber_mempool_info = zaino_subscriber.get_mempool_info().await.unwrap(); + + assert_eq!( + zcashd_subscriber_mempool_info, + zaino_subscriber_mempool_info + ); + + test_manager.close().await; +} + +async fn z_get_treestate_inner() { + let (mut test_manager, _zcashd_service, zcashd_subscriber, _zaino_service, zaino_subscriber) = + create_zcashd_test_manager_and_fetch_services(true).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + + let recipient_ua = &clients.get_recipient_address("unified").await; + from_inputs::quick_send(&mut clients.faucet, vec![(recipient_ua, 250_000, None)]) + .await + .unwrap(); + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + zaino_subscriber.clone(), + zcashd_subscriber.clone(), + ) + .await; + + let zcashd_treestate = dbg!(zcashd_subscriber + .z_get_treestate("2".to_string()) + .await + .unwrap()); + + let zaino_treestate = dbg!(zaino_subscriber + .z_get_treestate("2".to_string()) + .await + .unwrap()); + + assert_eq!(zcashd_treestate, zaino_treestate); + + test_manager.close().await; +} + +async fn z_get_subtrees_by_index_inner() { + let (mut test_manager, _zcashd_service, zcashd_subscriber, _zaino_service, zaino_subscriber) = + create_zcashd_test_manager_and_fetch_services(true).await; + + let mut clients = test_manager + 
.clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + + let recipient_ua = &clients.get_recipient_address("unified").await; + from_inputs::quick_send(&mut clients.faucet, vec![(recipient_ua, 250_000, None)]) + .await + .unwrap(); + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + zaino_subscriber.clone(), + zcashd_subscriber.clone(), + ) + .await; + + let zcashd_subtrees = dbg!(zcashd_subscriber + .z_get_subtrees_by_index("orchard".to_string(), NoteCommitmentSubtreeIndex(0), None) + .await + .unwrap()); + + let zaino_subtrees = dbg!(zaino_subscriber + .z_get_subtrees_by_index("orchard".to_string(), NoteCommitmentSubtreeIndex(0), None) + .await + .unwrap()); + + assert_eq!(zcashd_subtrees, zaino_subtrees); + + test_manager.close().await; +} + +async fn get_raw_transaction_inner() { + let (mut test_manager, _zcashd_service, zcashd_subscriber, _zaino_service, zaino_subscriber) = + create_zcashd_test_manager_and_fetch_services(true).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + + let recipient_ua = &clients.get_recipient_address("unified").await; + let tx = from_inputs::quick_send(&mut clients.faucet, vec![(recipient_ua, 250_000, None)]) + .await + .unwrap(); + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + zaino_subscriber.clone(), + zcashd_subscriber.clone(), + ) + .await; + + test_manager.local_net.print_stdout(); + + let zcashd_transaction = dbg!(zcashd_subscriber + .get_raw_transaction(tx.first().to_string(), Some(1)) + .await + .unwrap()); + + let zaino_transaction = dbg!(zaino_subscriber + .get_raw_transaction(tx.first().to_string(), Some(1)) + .await + .unwrap()); + + assert_eq!(zcashd_transaction, zaino_transaction); + + test_manager.close().await; +} + +async fn get_address_tx_ids_inner() { + let (mut test_manager, _zcashd_service, 
zcashd_subscriber, _zaino_service, zaino_subscriber) = + create_zcashd_test_manager_and_fetch_services(true).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + + clients.faucet.sync_and_await().await.unwrap(); + + let tx = from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + zaino_subscriber.clone(), + zcashd_subscriber.clone(), + ) + .await; + + let chain_height = zcashd_subscriber + .indexer + .snapshot_nonfinalized_state() + .best_tip + .height + .into(); + dbg!(&chain_height); + + let zcashd_txids = zcashd_subscriber + .get_address_tx_ids(GetAddressTxIdsRequest::new( + vec![recipient_taddr.clone()], + Some(chain_height - 2), + Some(chain_height), + )) + .await + .unwrap(); + + let zaino_txids = zaino_subscriber + .get_address_tx_ids(GetAddressTxIdsRequest::new( + vec![recipient_taddr], + Some(chain_height - 2), + Some(chain_height), + )) + .await + .unwrap(); + + dbg!(&tx); + dbg!(&zcashd_txids); + assert_eq!(tx.first().to_string(), zcashd_txids[0]); + + dbg!(&zaino_txids); + assert_eq!(zcashd_txids, zaino_txids); + + test_manager.close().await; +} + +async fn z_get_address_utxos_inner() { + let (mut test_manager, _zcashd_service, zcashd_subscriber, _zaino_service, zaino_subscriber) = + create_zcashd_test_manager_and_fetch_services(true).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + + clients.faucet.sync_and_await().await.unwrap(); + + let txid_1 = from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + 
zaino_subscriber.clone(), + zcashd_subscriber.clone(), + ) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + + let zcashd_utxos = zcashd_subscriber + .z_get_address_utxos(GetAddressBalanceRequest::new(vec![recipient_taddr.clone()])) + .await + .unwrap(); + let (_, zcashd_txid, ..) = zcashd_utxos[0].into_parts(); + + let zaino_utxos = zaino_subscriber + .z_get_address_utxos(GetAddressBalanceRequest::new(vec![recipient_taddr])) + .await + .unwrap(); + let (_, zaino_txid, ..) = zaino_utxos[0].into_parts(); + + dbg!(&txid_1); + dbg!(&zcashd_utxos); + assert_eq!(txid_1.first().to_string(), zcashd_txid.to_string()); + + dbg!(&zaino_utxos); + + assert_eq!(zcashd_txid.to_string(), zaino_txid.to_string()); + + test_manager.close().await; +} + +// TODO: This module should not be called `zcashd` +mod zcashd { + use super::*; + + pub(crate) mod zcash_indexer { + use zaino_state::LightWalletIndexer; + use zebra_rpc::methods::GetBlock; + + use super::*; + + #[tokio::test(flavor = "multi_thread")] + async fn check_info_no_cookie() { + launch_json_server_check_info().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn check_info_with_cookie() { + launch_json_server_check_info().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn z_get_address_balance() { + z_get_address_balance_inner().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_best_blockhash() { + get_best_blockhash_inner().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_block_count() { + get_block_count_inner().await; + } + + /// Checks that the difficulty is the same between zcashd and zaino. + /// + /// This tests generates blocks and checks that the difficulty is the same between zcashd and zaino + /// after each block is generated. 
+ #[tokio::test(flavor = "multi_thread")] + async fn get_difficulty() { + let ( + mut test_manager, + _zcashd_service, + zcashd_subscriber, + _zaino_service, + zaino_subscriber, + ) = create_zcashd_test_manager_and_fetch_services(false).await; + + const BLOCK_LIMIT: i32 = 10; + + for _ in 0..BLOCK_LIMIT { + let zcashd_difficulty = zcashd_subscriber.get_difficulty().await.unwrap(); + let zaino_difficulty = zaino_subscriber.get_difficulty().await.unwrap(); + + assert_eq!(zcashd_difficulty, zaino_difficulty); + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + zaino_subscriber.clone(), + zcashd_subscriber.clone(), + ) + .await; + } + + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_block_deltas() { + let ( + mut test_manager, + _zcashd_service, + zcashd_subscriber, + _zaino_service, + zaino_subscriber, + ) = create_zcashd_test_manager_and_fetch_services(false).await; + + const BLOCK_LIMIT: i32 = 10; + + for _ in 0..BLOCK_LIMIT { + let current_block = zcashd_subscriber.get_latest_block().await.unwrap(); + + let block_hash_bytes: [u8; 32] = current_block.hash.as_slice().try_into().unwrap(); + + let block_hash = zebra_chain::block::Hash::from(block_hash_bytes); + + let zcashd_deltas = zcashd_subscriber + .get_block_deltas(block_hash.to_string()) + .await + .unwrap(); + let zaino_deltas = zaino_subscriber + .get_block_deltas(block_hash.to_string()) + .await + .unwrap(); + + assert_eq!(zcashd_deltas, zaino_deltas); + + test_manager.local_net.generate_blocks(1).await.unwrap(); + } + + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_mining_info() { + let ( + mut test_manager, + _zcashd_service, + zcashd_subscriber, + _zaino_service, + zaino_subscriber, + ) = create_zcashd_test_manager_and_fetch_services(false).await; + + const BLOCK_LIMIT: i32 = 10; + + for _ in 0..BLOCK_LIMIT { + let zcashd_mining_info = zcashd_subscriber.get_mining_info().await.unwrap(); + let 
zaino_mining_info = zaino_subscriber.get_mining_info().await.unwrap(); + + assert_eq!(zcashd_mining_info, zaino_mining_info); + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + zaino_subscriber.clone(), + zcashd_subscriber.clone(), + ) + .await; + } + + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_peer_info() { + let ( + mut test_manager, + _zcashd_service, + zcashd_subscriber, + _zaino_service, + zaino_subscriber, + ) = create_zcashd_test_manager_and_fetch_services(false).await; + + let zcashd_peer_info = zcashd_subscriber.get_peer_info().await.unwrap(); + let zaino_peer_info = zaino_subscriber.get_peer_info().await.unwrap(); + + assert_eq!(zcashd_peer_info, zaino_peer_info); + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + zaino_subscriber.clone(), + zcashd_subscriber.clone(), + ) + .await; + + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_block_subsidy() { + let ( + mut test_manager, + _zcashd_service, + zcashd_subscriber, + _zaino_service, + zaino_subscriber, + ) = create_zcashd_test_manager_and_fetch_services(false).await; + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + zaino_subscriber.clone(), + zcashd_subscriber.clone(), + ) + .await; + + let zcashd_block_subsidy = zcashd_subscriber.get_block_subsidy(1).await.unwrap(); + let zaino_block_subsidy = zaino_subscriber.get_block_subsidy(1).await.unwrap(); + + assert_eq!(zcashd_block_subsidy, zaino_block_subsidy); + + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn validate_address() { + validate_address_inner().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn z_get_block() { + z_get_block_inner().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_block_header() { + let ( + test_manager, + _zcashd_service, + zcashd_subscriber, + _zaino_service, + zaino_subscriber, + ) = 
create_zcashd_test_manager_and_fetch_services(false).await; + + const BLOCK_LIMIT: u32 = 10; + + for i in 0..BLOCK_LIMIT { + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + zaino_subscriber.clone(), + zcashd_subscriber.clone(), + ) + .await; + + let block = zcashd_subscriber + .z_get_block(i.to_string(), Some(1)) + .await + .unwrap(); + + let block_hash = match block { + GetBlock::Object(block) => block.hash(), + GetBlock::Raw(_) => panic!("Expected block object"), + }; + + let zcashd_get_block_header = zcashd_subscriber + .get_block_header(block_hash.to_string(), false) + .await + .unwrap(); + + let zainod_block_header_response = zaino_subscriber + .get_block_header(block_hash.to_string(), false) + .await + .unwrap(); + assert_eq!(zcashd_get_block_header, zainod_block_header_response); + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_raw_mempool() { + get_raw_mempool_inner().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_mempool_info() { + get_mempool_info_inner().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn z_get_treestate() { + z_get_treestate_inner().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn z_get_subtrees_by_index() { + z_get_subtrees_by_index_inner().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_raw_transaction() { + get_raw_transaction_inner().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_address_tx_ids() { + get_address_tx_ids_inner().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn z_get_address_utxos() { + z_get_address_utxos_inner().await; + } + } +} diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs new file mode 100644 index 000000000..b15560001 --- /dev/null +++ b/integration-tests/tests/state_service.rs @@ -0,0 +1,2969 @@ +use futures::StreamExt; +use zaino_common::network::ActivationHeights; +use zaino_common::{DatabaseConfig, 
ServiceConfig, StorageConfig}; +use zaino_fetch::jsonrpsee::response::address_deltas::GetAddressDeltasParams; +use zaino_proto::proto::service::{BlockId, BlockRange, PoolType, TransparentAddressBlockFilter}; +use zaino_state::ChainIndex as _; +use zaino_state::{LightWalletService, ZcashService}; + +#[allow(deprecated)] +use zaino_state::{ + FetchService, FetchServiceConfig, FetchServiceSubscriber, LightWalletIndexer, StateService, + StateServiceConfig, StateServiceSubscriber, ZcashIndexer, +}; +use zaino_testutils::{from_inputs, ValidatorExt}; +use zaino_testutils::{TestManager, ValidatorKind, ZEBRAD_TESTNET_CACHE_DIR}; +use zainodlib::config::ZainodConfig; +use zainodlib::error::IndexerError; +use zcash_local_net::validator::{zebrad::Zebrad, Validator}; +use zebra_chain::parameters::NetworkKind; +use zebra_chain::subtree::NoteCommitmentSubtreeIndex; +use zebra_rpc::methods::{GetAddressBalanceRequest, GetAddressTxIdsRequest, GetInfo}; +use zip32::AccountId; + +#[allow(deprecated)] +// NOTE: the fetch and state services each have a seperate chain index to the instance of zaino connected to the lightclients and may be out of sync +// the test manager now includes a service subscriber but not both fetch *and* state which are necessary for these tests. +// syncronicity is ensured in the following tests by calling `generate_blocks_and_poll_all_chain_indexes`. 
+async fn create_test_manager_and_services( + validator: &ValidatorKind, + chain_cache: Option, + enable_zaino: bool, + enable_clients: bool, + network: Option, +) -> ( + TestManager, + FetchService, + FetchServiceSubscriber, + StateService, + StateServiceSubscriber, +) { + let test_manager = TestManager::::launch( + validator, + network, + None, + chain_cache.clone(), + enable_zaino, + false, + enable_clients, + ) + .await + .unwrap(); + + let network_type = match network { + Some(NetworkKind::Mainnet) => { + println!("Waiting for validator to spawn.."); + tokio::time::sleep(std::time::Duration::from_millis(5000)).await; + zaino_common::Network::Mainnet + } + Some(NetworkKind::Testnet) => { + println!("Waiting for validator to spawn.."); + tokio::time::sleep(std::time::Duration::from_millis(5000)).await; + zaino_common::Network::Testnet + } + _ => zaino_common::Network::Regtest({ + let activation_heights = test_manager.local_net.get_activation_heights().await; + ActivationHeights { + before_overwinter: activation_heights.overwinter(), + overwinter: activation_heights.overwinter(), + sapling: activation_heights.sapling(), + blossom: activation_heights.blossom(), + heartwood: activation_heights.heartwood(), + canopy: activation_heights.canopy(), + nu5: activation_heights.nu5(), + nu6: activation_heights.nu6(), + nu6_1: activation_heights.nu6_1(), + nu7: activation_heights.nu7(), + } + }), + }; + + test_manager.local_net.print_stdout(); + + let fetch_service = FetchService::spawn(FetchServiceConfig::new( + test_manager.full_node_rpc_listen_address.to_string(), + None, + None, + None, + ServiceConfig::default(), + StorageConfig { + database: DatabaseConfig { + path: test_manager + .local_net + .data_dir() + .path() + .to_path_buf() + .join("zaino"), + ..Default::default() + }, + ..Default::default() + }, + network_type, + )) + .await + .unwrap(); + + let fetch_subscriber = fetch_service.get_subscriber().inner(); + + let state_chain_cache_dir = match chain_cache { + 
Some(dir) => dir, + None => test_manager.data_dir.clone(), + }; + + let state_service = StateService::spawn(StateServiceConfig::new( + zebra_state::Config { + cache_dir: state_chain_cache_dir, + ephemeral: false, + delete_old_database: true, + debug_stop_at_height: None, + debug_validity_check_interval: None, + should_backup_non_finalized_state: false, + debug_skip_non_finalized_state_backup_task: false, + }, + test_manager.full_node_rpc_listen_address.to_string(), + test_manager.full_node_grpc_listen_address, + false, + None, + None, + None, + ServiceConfig::default(), + StorageConfig { + database: DatabaseConfig { + path: test_manager + .local_net + .data_dir() + .path() + .to_path_buf() + .join("zaino"), + ..Default::default() + }, + ..Default::default() + }, + network_type, + )) + .await + .unwrap(); + + let state_subscriber = state_service.get_subscriber().inner(); + + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + ( + test_manager, + fetch_service, + fetch_subscriber, + state_service, + state_subscriber, + ) +} + +#[allow(deprecated)] +async fn generate_blocks_and_poll_all_chain_indexes( + n: u32, + test_manager: &TestManager, + fetch_service_subscriber: FetchServiceSubscriber, + state_service_subscriber: StateServiceSubscriber, +) where + V: ValidatorExt, + Service: LightWalletService + Send + Sync + 'static, + Service::Config: TryFrom, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, +{ + test_manager.generate_blocks_and_poll(n).await; + test_manager + .generate_blocks_and_poll_indexer(0, &fetch_service_subscriber) + .await; + test_manager + .generate_blocks_and_poll_indexer(0, &state_service_subscriber) + .await; +} +async fn state_service_check_info( + validator: &ValidatorKind, + chain_cache: Option, + network: NetworkKind, +) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, chain_cache, false, 
false, Some(network)) + .await; + + if dbg!(network.to_string()) == *"Regtest" { + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + } + + let fetch_service_info = dbg!(fetch_service_subscriber.get_info().await.unwrap()); + let fetch_service_blockchain_info = dbg!(fetch_service_subscriber + .get_blockchain_info() + .await + .unwrap()); + + let state_service_info = dbg!(state_service_subscriber.get_info().await.unwrap()); + let state_service_blockchain_info = dbg!(state_service_subscriber + .get_blockchain_info() + .await + .unwrap()); + + // Clean timestamp from get_info + let ( + version, + build, + subversion, + protocol_version, + blocks, + connections, + proxy, + difficulty, + testnet, + pay_tx_fee, + relay_fee, + errors, + _, + ) = fetch_service_info.into_parts(); + let cleaned_fetch_info = GetInfo::new( + version, + build, + subversion, + protocol_version, + blocks, + connections, + proxy, + difficulty, + testnet, + pay_tx_fee, + relay_fee, + errors, + 0, + ); + + let ( + version, + build, + subversion, + protocol_version, + blocks, + connections, + proxy, + difficulty, + testnet, + pay_tx_fee, + relay_fee, + errors, + _, + ) = state_service_info.into_parts(); + let cleaned_state_info = GetInfo::new( + version, + build, + subversion, + protocol_version, + blocks, + connections, + proxy, + difficulty, + testnet, + pay_tx_fee, + relay_fee, + errors, + 0, + ); + + assert_eq!(cleaned_fetch_info, cleaned_state_info); + + assert_eq!( + fetch_service_blockchain_info.chain(), + state_service_blockchain_info.chain() + ); + assert_eq!( + fetch_service_blockchain_info.blocks(), + state_service_blockchain_info.blocks() + ); + assert_eq!( + fetch_service_blockchain_info.best_block_hash(), + state_service_blockchain_info.best_block_hash() + ); + assert_eq!( + fetch_service_blockchain_info.estimated_height(), + state_service_blockchain_info.estimated_height() + ); + // 
TODO: Fix this! (ignored due to [https://github.com/zingolabs/zaino/issues/235]). + // assert_eq!( + // fetch_service_blockchain_info.value_pools(), + // state_service_blockchain_info.value_pools() + // ); + assert_eq!( + fetch_service_blockchain_info.upgrades(), + state_service_blockchain_info.upgrades() + ); + assert_eq!( + fetch_service_blockchain_info.consensus(), + state_service_blockchain_info.consensus() + ); + + test_manager.close().await; +} + +async fn state_service_get_address_balance(validator: &ValidatorKind) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + clients.recipient.sync_and_await().await.unwrap(); + let recipient_balance = clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap(); + + let fetch_service_balance = fetch_service_subscriber + 
.z_get_address_balance(GetAddressBalanceRequest::new(vec![recipient_taddr.clone()])) + .await + .unwrap(); + + let state_service_balance = state_service_subscriber + .z_get_address_balance(GetAddressBalanceRequest::new(vec![recipient_taddr])) + .await + .unwrap(); + + dbg!(&recipient_balance); + dbg!(&fetch_service_balance); + dbg!(&state_service_balance); + + assert_eq!( + recipient_balance + .confirmed_transparent_balance + .unwrap() + .into_u64(), + 250_000, + ); + assert_eq!( + recipient_balance + .confirmed_transparent_balance + .unwrap() + .into_u64(), + fetch_service_balance.balance(), + ); + assert_eq!(fetch_service_balance, state_service_balance); + + test_manager.close().await; +} + +async fn state_service_get_address_balance_testnet() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + ZEBRAD_TESTNET_CACHE_DIR.clone(), + false, + false, + Some(NetworkKind::Testnet), + ) + .await; + + let address = "tmAkxrvJCN75Ty9YkiHccqc1hJmGZpggo6i"; + + let address_request = GetAddressBalanceRequest::new(vec![address.to_string()]); + + let fetch_service_balance = dbg!( + fetch_service_subscriber + .z_get_address_balance(address_request.clone()) + .await + ) + .unwrap(); + + let state_service_balance = dbg!( + state_service_subscriber + .z_get_address_balance(address_request) + .await + ) + .unwrap(); + + assert_eq!(fetch_service_balance, state_service_balance); + + test_manager.close().await; +} + +async fn state_service_get_block_raw( + validator: &ValidatorKind, + chain_cache: Option, + network: NetworkKind, +) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + validator, + chain_cache, + false, + false, + Some(network), + ) + .await; + + let height = match network { + NetworkKind::Regtest => "1".to_string(), + _ => 
"1000000".to_string(), + }; + + let fetch_service_block = dbg!(fetch_service_subscriber + .z_get_block(height.clone(), Some(0)) + .await + .unwrap()); + + let state_service_block = dbg!(state_service_subscriber + .z_get_block(height, Some(0)) + .await + .unwrap()); + + assert_eq!(fetch_service_block, state_service_block); + + test_manager.close().await; +} + +async fn state_service_get_block_object( + validator: &ValidatorKind, + chain_cache: Option, + network: NetworkKind, +) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + validator, + chain_cache, + false, + false, + Some(network), + ) + .await; + + let height = match network { + NetworkKind::Regtest => "1".to_string(), + _ => "1000000".to_string(), + }; + + let fetch_service_block = dbg!(fetch_service_subscriber + .z_get_block(height.clone(), Some(1)) + .await + .unwrap()); + + let state_service_block = dbg!(state_service_subscriber + .z_get_block(height, Some(1)) + .await + .unwrap()); + + assert_eq!(fetch_service_block, state_service_block); + + let hash = match fetch_service_block { + zebra_rpc::methods::GetBlock::Raw(_) => panic!("expected object"), + zebra_rpc::methods::GetBlock::Object(obj) => obj.hash().to_string(), + }; + let state_service_get_block_by_hash = state_service_subscriber + .z_get_block(hash.clone(), Some(1)) + .await + .unwrap(); + assert_eq!(state_service_get_block_by_hash, state_service_block); + + test_manager.close().await; +} + +async fn state_service_get_raw_mempool(validator: &ValidatorKind) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + 
fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = clients.get_recipient_address("unified").await; + let recipient_taddr = clients.get_recipient_address("transparent").await; + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_taddr, 250_000, None)]) + .await + .unwrap(); + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap(); + + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + + let mut fetch_service_mempool = fetch_service_subscriber.get_raw_mempool().await.unwrap(); + let mut state_service_mempool = state_service_subscriber.get_raw_mempool().await.unwrap(); + + dbg!(&fetch_service_mempool); + fetch_service_mempool.sort(); + + dbg!(&state_service_mempool); + state_service_mempool.sort(); + + assert_eq!(fetch_service_mempool, state_service_mempool); + + test_manager.close().await; +} + +async fn state_service_get_raw_mempool_testnet() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( 
+ &ValidatorKind::Zebrad, + ZEBRAD_TESTNET_CACHE_DIR.clone(), + false, + false, + Some(NetworkKind::Testnet), + ) + .await; + + let mut fetch_service_mempool = fetch_service_subscriber.get_raw_mempool().await.unwrap(); + let mut state_service_mempool = state_service_subscriber.get_raw_mempool().await.unwrap(); + + dbg!(&fetch_service_mempool); + fetch_service_mempool.sort(); + + dbg!(&state_service_mempool); + state_service_mempool.sort(); + + assert_eq!(fetch_service_mempool, state_service_mempool); + + test_manager.close().await; +} + +/// Tests whether that calls to `get_block_range` with the same block range are the same when +/// specifying the default `PoolType`s and passing and empty Vec to verify that the method falls +/// back to the default pools when these are not explicitly specified. +async fn state_service_get_block_range_returns_default_pools( + validator: &ValidatorKind, +) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = clients.get_recipient_address("unified").await; + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap(); + + 
generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let start_height: u64 = 100; + let end_height: u64 = 103; + + let default_pools_request = BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![], + }; + + let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range(default_pools_request.clone()) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + let explicit_default_pool_request = BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![PoolType::Sapling as i32, PoolType::Orchard as i32], + }; + + let fetch_service_get_block_range_specifying_pools = fetch_service_subscriber + .get_block_range(explicit_default_pool_request.clone()) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + assert_eq!( + fetch_service_get_block_range, + fetch_service_get_block_range_specifying_pools + ); + + let state_service_get_block_range_specifying_pools = state_service_subscriber + .get_block_range(explicit_default_pool_request) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + let state_service_get_block_range = state_service_subscriber + .get_block_range(default_pools_request) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + assert_eq!( + state_service_get_block_range, + state_service_get_block_range_specifying_pools + ); + + // check that the block range is the same between fetch service and state service + assert_eq!(fetch_service_get_block_range, state_service_get_block_range); + + let compact_block = state_service_get_block_range.last().unwrap(); + + assert_eq!(compact_block.height, end_height); + + // the compact block has 1 transactions + 
assert_eq!(compact_block.vtx.len(), 1); + + let shielded_tx = compact_block.vtx.first().unwrap(); + assert_eq!(shielded_tx.index, 1); + // tranparent data should not be present when no pool types are requested + assert_eq!( + shielded_tx.vin, + vec![], + "transparent data should not be present when no pool types are specified in the request." + ); + assert_eq!( + shielded_tx.vout, + vec![], + "transparent data should not be present when no pool types are specified in the request." + ); + test_manager.close().await; +} + +/// tests whether the `GetBlockRange` RPC returns all pools when requested +async fn state_service_get_block_range_returns_all_pools( + validator: &ValidatorKind, +) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + for _ in 1..4 { + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + } + }; + + let recipient_transparent = clients.get_recipient_address("transparent").await; + let deshielding_txid = from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_transparent, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_sapling = clients.get_recipient_address("sapling").await; + let sapling_txid = from_inputs::quick_send( + &mut clients.faucet, + 
vec![(&recipient_sapling, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_ua = clients.get_recipient_address("unified").await; + let orchard_txid = + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap() + .head; + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let start_height: u64 = 100; + let end_height: u64 = 106; + + let block_range = BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], + }; + + let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range(block_range.clone()) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + let state_service_get_block_range = state_service_subscriber + .get_block_range(block_range) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + // check that the block range is the same + assert_eq!(fetch_service_get_block_range, state_service_get_block_range); + + let compact_block = state_service_get_block_range.last().unwrap(); + + assert_eq!(compact_block.height, end_height); + + // the compact block has 4 transactions (3 sent + coinbase) + assert_eq!(compact_block.vtx.len(), 4); + + // transaction order is not guaranteed so it's necessary to look up for them by TXID + let deshielding_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == deshielding_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !deshielding_tx.vout.is_empty(), + "transparent data should be present when transaparent pool type is specified in the request." 
+ ); + + // transaction order is not guaranteed so it's necessary to look up for them by TXID + let sapling_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == sapling_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !sapling_tx.outputs.is_empty(), + "sapling data should be present when all pool types are specified in the request." + ); + + let orchard_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == orchard_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !orchard_tx.actions.is_empty(), + "orchard data should be present when all pool types are specified in the request." + ); + + test_manager.close().await; +} + +async fn state_service_z_get_treestate(validator: &ValidatorKind) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = clients.get_recipient_address("unified").await; + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap(); + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let fetch_service_treestate = dbg!(fetch_service_subscriber + 
.z_get_treestate("2".to_string()) + .await + .unwrap()); + + let state_service_treestate = dbg!(state_service_subscriber + .z_get_treestate("2".to_string()) + .await + .unwrap()); + + assert_eq!(fetch_service_treestate, state_service_treestate); + + test_manager.close().await; +} + +async fn state_service_z_get_treestate_testnet() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + ZEBRAD_TESTNET_CACHE_DIR.clone(), + false, + false, + Some(NetworkKind::Testnet), + ) + .await; + + let fetch_service_treestate = dbg!( + fetch_service_subscriber + .z_get_treestate("3000000".to_string()) + .await + ) + .unwrap(); + + let state_service_tx_treestate = dbg!( + state_service_subscriber + .z_get_treestate("3000000".to_string()) + .await + ) + .unwrap(); + + assert_eq!(fetch_service_treestate, state_service_tx_treestate); + + test_manager.close().await; +} + +async fn state_service_z_get_subtrees_by_index(validator: &ValidatorKind) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = 
clients.get_recipient_address("unified").await; + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap(); + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let fetch_service_subtrees = dbg!(fetch_service_subscriber + .z_get_subtrees_by_index("orchard".to_string(), NoteCommitmentSubtreeIndex(0), None) + .await + .unwrap()); + + let state_service_subtrees = dbg!(state_service_subscriber + .z_get_subtrees_by_index("orchard".to_string(), NoteCommitmentSubtreeIndex(0), None) + .await + .unwrap()); + + assert_eq!(fetch_service_subtrees, state_service_subtrees); + + test_manager.close().await; +} + +async fn state_service_z_get_subtrees_by_index_testnet() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + ZEBRAD_TESTNET_CACHE_DIR.clone(), + false, + false, + Some(NetworkKind::Testnet), + ) + .await; + + let fetch_service_sapling_subtrees = dbg!( + fetch_service_subscriber + .z_get_subtrees_by_index("sapling".to_string(), 0.into(), None) + .await + ) + .unwrap(); + + let state_service_sapling_subtrees = dbg!( + state_service_subscriber + .z_get_subtrees_by_index("sapling".to_string(), 0.into(), None) + .await + ) + .unwrap(); + + assert_eq!( + fetch_service_sapling_subtrees, + state_service_sapling_subtrees + ); + + let fetch_service_orchard_subtrees = dbg!( + fetch_service_subscriber + .z_get_subtrees_by_index("orchard".to_string(), 0.into(), None) + .await + ) + .unwrap(); + + let state_service_orchard_subtrees = dbg!( + state_service_subscriber + .z_get_subtrees_by_index("orchard".to_string(), 0.into(), None) + .await + ) + .unwrap(); + + assert_eq!( + fetch_service_orchard_subtrees, + state_service_orchard_subtrees + ); + + test_manager.close().await; +} + +use 
zcash_local_net::logs::LogsToStdoutAndStderr; +async fn state_service_get_raw_transaction( + validator: &ValidatorKind, +) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = clients.get_recipient_address("unified").await.to_string(); + let tx = from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap(); + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + test_manager.local_net.print_stdout(); + + let fetch_service_transaction = dbg!(fetch_service_subscriber + .get_raw_transaction(tx.first().to_string(), Some(1)) + .await + .unwrap()); + + let state_service_transaction = dbg!(state_service_subscriber + .get_raw_transaction(tx.first().to_string(), Some(1)) + .await + .unwrap()); + + assert_eq!(fetch_service_transaction, state_service_transaction); + + test_manager.close().await; +} + +async fn state_service_get_raw_transaction_testnet() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = 
create_test_manager_and_services::( + &ValidatorKind::Zebrad, + ZEBRAD_TESTNET_CACHE_DIR.clone(), + false, + false, + Some(NetworkKind::Testnet), + ) + .await; + + let txid = "abb0399df392130baa45644c421fab553670a2d0d399c4dd776a8f7862ec289d".to_string(); + + let fetch_service_transaction = dbg!( + fetch_service_subscriber + .get_raw_transaction(txid.clone(), None) + .await + ) + .unwrap(); + + let state_service_tx_transaction = dbg!( + state_service_subscriber + .get_raw_transaction(txid, None) + .await + ) + .unwrap(); + + assert_eq!(fetch_service_transaction, state_service_tx_transaction); + + test_manager.close().await; +} + +async fn state_service_get_address_transactions_regtest( + validator: &ValidatorKind, +) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager.local_net.generate_blocks(100).await.unwrap(); + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + clients.faucet.sync_and_await().await.unwrap(); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager.local_net.generate_blocks(1).await.unwrap(); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + clients.faucet.sync_and_await().await.unwrap(); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + }; + + let tx = from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + test_manager.local_net.generate_blocks(1).await.unwrap(); + 
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + + let chain_height: u32 = fetch_service_subscriber + .indexer + .snapshot_nonfinalized_state() + .best_tip + .height + .into(); + dbg!(&chain_height); + + let state_service_txids = state_service_subscriber + .get_taddress_transactions(TransparentAddressBlockFilter { + address: recipient_taddr, + range: Some(BlockRange { + start: Some(BlockId { + height: (chain_height - 2) as u64, + hash: vec![], + }), + end: Some(BlockId { + height: chain_height as u64, + hash: vec![], + }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], + }), + }) + .await + .unwrap(); + + dbg!(&tx); + + dbg!(&state_service_txids); + assert!(state_service_txids.count().await > 0); + + test_manager.close().await; +} +async fn state_service_get_address_tx_ids(validator: &ValidatorKind) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let tx = from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + 
generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let chain_height = fetch_service_subscriber + .indexer + .snapshot_nonfinalized_state() + .best_tip + .height + .into(); + + dbg!(&chain_height); + + let fetch_service_txids = fetch_service_subscriber + .get_address_tx_ids(GetAddressTxIdsRequest::new( + vec![recipient_taddr.clone()], + Some(chain_height - 2), + Some(chain_height), + )) + .await + .unwrap(); + + let state_service_txids = state_service_subscriber + .get_address_tx_ids(GetAddressTxIdsRequest::new( + vec![recipient_taddr], + Some(chain_height - 2), + Some(chain_height), + )) + .await + .unwrap(); + + dbg!(&tx); + dbg!(&fetch_service_txids); + assert_eq!(tx.first().to_string(), fetch_service_txids[0]); + + dbg!(&state_service_txids); + assert_eq!(fetch_service_txids, state_service_txids); + + test_manager.close().await; +} + +async fn state_service_get_address_tx_ids_testnet() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + ZEBRAD_TESTNET_CACHE_DIR.clone(), + false, + false, + Some(NetworkKind::Testnet), + ) + .await; + + let address = "tmAkxrvJCN75Ty9YkiHccqc1hJmGZpggo6i"; + + let address_request = + GetAddressTxIdsRequest::new(vec![address.to_string()], Some(2000000), Some(3000000)); + + let fetch_service_tx_ids = dbg!( + fetch_service_subscriber + .get_address_tx_ids(address_request.clone()) + .await + ) + .unwrap(); + + let state_service_tx_ids = dbg!( + state_service_subscriber + .get_address_tx_ids(address_request) + .await + ) + .unwrap(); + + assert_eq!(fetch_service_tx_ids, state_service_tx_ids); + + test_manager.close().await; +} + +async fn state_service_get_address_utxos(validator: &ValidatorKind) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + 
state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let txid_1 = from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + + let fetch_service_utxos = fetch_service_subscriber + .z_get_address_utxos(GetAddressBalanceRequest::new(vec![recipient_taddr.clone()])) + .await + .unwrap(); + let (_, fetch_service_txid, ..) = fetch_service_utxos[0].into_parts(); + + let state_service_utxos = state_service_subscriber + .z_get_address_utxos(GetAddressBalanceRequest::new(vec![recipient_taddr])) + .await + .unwrap(); + let (_, state_service_txid, ..) 
= state_service_utxos[0].into_parts(); + + dbg!(&txid_1); + dbg!(&fetch_service_utxos); + assert_eq!(txid_1.first().to_string(), fetch_service_txid.to_string()); + + dbg!(&state_service_utxos); + + assert_eq!( + fetch_service_txid.to_string(), + state_service_txid.to_string() + ); + + test_manager.close().await; +} + +async fn state_service_get_address_utxos_testnet() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + ZEBRAD_TESTNET_CACHE_DIR.clone(), + false, + false, + Some(NetworkKind::Testnet), + ) + .await; + + let address = "tmAkxrvJCN75Ty9YkiHccqc1hJmGZpggo6i"; + + let address_request = GetAddressBalanceRequest::new(vec![address.to_string()]); + + let fetch_service_utxos = dbg!( + fetch_service_subscriber + .z_get_address_utxos(address_request.clone()) + .await + ) + .unwrap(); + + let state_service_tx_utxos = dbg!( + state_service_subscriber + .z_get_address_utxos(address_request) + .await + ) + .unwrap(); + + assert_eq!(fetch_service_utxos, state_service_tx_utxos); + + test_manager.close().await; +} + +async fn state_service_get_address_deltas_testnet() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + ZEBRAD_TESTNET_CACHE_DIR.clone(), + false, + false, + Some(NetworkKind::Testnet), + ) + .await; + + let address = "tmAkxrvJCN75Ty9YkiHccqc1hJmGZpggo6i"; + + // Test simple response + let simple_request = + GetAddressDeltasParams::new_filtered(vec![address.to_string()], 2000000, 3000000, false); + + let fetch_service_simple_deltas = dbg!( + fetch_service_subscriber + .get_address_deltas(simple_request.clone()) + .await + ) + .unwrap(); + + let state_service_simple_deltas = dbg!( + state_service_subscriber + .get_address_deltas(simple_request) + .await + ) + .unwrap(); + + 
assert_eq!(fetch_service_simple_deltas, state_service_simple_deltas); + + // Test response with chain info + let chain_info_params = + GetAddressDeltasParams::new_filtered(vec![address.to_string()], 2000000, 3000000, true); + + let fetch_service_chain_info_deltas = dbg!( + fetch_service_subscriber + .get_address_deltas(chain_info_params.clone()) + .await + ) + .unwrap(); + + let state_service_chain_info_deltas = dbg!( + state_service_subscriber + .get_address_deltas(chain_info_params) + .await + ) + .unwrap(); + + assert_eq!( + fetch_service_chain_info_deltas, + state_service_chain_info_deltas + ); + + test_manager.close().await; +} + +mod zebra { + + use super::*; + + pub(crate) mod check_info { + + use super::*; + use zaino_testutils::ZEBRAD_CHAIN_CACHE_DIR; + use zcash_local_net::validator::zebrad::Zebrad; + + #[tokio::test(flavor = "multi_thread")] + async fn regtest_no_cache() { + state_service_check_info::(&ValidatorKind::Zebrad, None, NetworkKind::Regtest) + .await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn state_service_chaintip_update_subscriber() { + let ( + test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + false, + Some(NetworkKind::Regtest), + ) + .await; + let mut chaintip_subscriber = state_service_subscriber.chaintip_update_subscriber(); + for _ in 0..5 { + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + assert_eq!( + chaintip_subscriber.next_tip_hash().await.unwrap().0, + <[u8; 32]>::try_from( + state_service_subscriber + .get_latest_block() + .await + .unwrap() + .hash + ) + .unwrap() + ) + } + } + + #[tokio::test(flavor = "multi_thread")] + #[ignore = "We no longer use chain caches. 
See zcashd::check_info::regtest_no_cache."] + async fn regtest_with_cache() { + state_service_check_info::( + &ValidatorKind::Zebrad, + ZEBRAD_CHAIN_CACHE_DIR.clone(), + NetworkKind::Regtest, + ) + .await; + } + + #[ignore = "requires fully synced testnet."] + #[tokio::test(flavor = "multi_thread")] + async fn testnet() { + state_service_check_info::( + &ValidatorKind::Zebrad, + ZEBRAD_TESTNET_CACHE_DIR.clone(), + NetworkKind::Testnet, + ) + .await; + } + } + + pub(crate) mod get { + + use super::*; + use zaino_fetch::jsonrpsee::response::address_deltas::GetAddressDeltasResponse; + use zcash_local_net::validator::zebrad::Zebrad; + + #[tokio::test(flavor = "multi_thread")] + async fn address_utxos() { + state_service_get_address_utxos::(&ValidatorKind::Zebrad).await; + } + + #[ignore = "requires fully synced testnet."] + #[tokio::test(flavor = "multi_thread")] + async fn address_utxos_testnet() { + state_service_get_address_utxos_testnet().await; + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn taddress_transactions_regtest() { + state_service_get_address_transactions_regtest::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn address_tx_ids_regtest() { + state_service_get_address_tx_ids::(&ValidatorKind::Zebrad).await; + } + + #[ignore = "requires fully synced testnet."] + #[tokio::test(flavor = "multi_thread")] + async fn address_tx_ids_testnet() { + state_service_get_address_tx_ids_testnet().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn raw_transaction_regtest() { + state_service_get_raw_transaction::(&ValidatorKind::Zebrad).await; + } + + #[ignore = "requires fully synced testnet."] + #[tokio::test(flavor = "multi_thread")] + async fn raw_transaction_testnet() { + state_service_get_raw_transaction_testnet().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn best_blockhash() { + let ( + test_manager, + _fetch_service, + 
fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + false, + Some(NetworkKind::Regtest), + ) + .await; + generate_blocks_and_poll_all_chain_indexes( + 2, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let fetch_service_bbh = + dbg!(fetch_service_subscriber.get_best_blockhash().await.unwrap()); + let state_service_bbh = + dbg!(state_service_subscriber.get_best_blockhash().await.unwrap()); + assert_eq!(fetch_service_bbh, state_service_bbh); + } + + #[tokio::test(flavor = "multi_thread")] + async fn block_count() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + false, + Some(NetworkKind::Regtest), + ) + .await; + generate_blocks_and_poll_all_chain_indexes( + 2, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let fetch_service_block_count = + dbg!(fetch_service_subscriber.get_block_count().await.unwrap()); + let state_service_block_count = + dbg!(state_service_subscriber.get_block_count().await.unwrap()); + assert_eq!(fetch_service_block_count, state_service_block_count); + + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn mining_info() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + false, + false, + Some(NetworkKind::Regtest), + ) + .await; + + let initial_fetch_service_mining_info = + fetch_service_subscriber.get_mining_info().await.unwrap(); + let initial_state_service_mining_info = + state_service_subscriber.get_mining_info().await.unwrap(); + assert_eq!( + initial_fetch_service_mining_info, + 
initial_state_service_mining_info + ); + + test_manager.local_net.generate_blocks(2).await.unwrap(); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + let final_fetch_service_mining_info = + fetch_service_subscriber.get_mining_info().await.unwrap(); + let final_state_service_mining_info = + state_service_subscriber.get_mining_info().await.unwrap(); + + assert_eq!( + final_fetch_service_mining_info, + final_state_service_mining_info + ); + + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn difficulty() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + false, + Some(NetworkKind::Regtest), + ) + .await; + + let initial_fetch_service_difficulty = + fetch_service_subscriber.get_difficulty().await.unwrap(); + let initial_state_service_difficulty = + state_service_subscriber.get_difficulty().await.unwrap(); + assert_eq!( + initial_fetch_service_difficulty, + initial_state_service_difficulty + ); + + generate_blocks_and_poll_all_chain_indexes( + 2, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let final_fetch_service_difficulty = + fetch_service_subscriber.get_difficulty().await.unwrap(); + let final_state_service_difficulty = + state_service_subscriber.get_difficulty().await.unwrap(); + assert_eq!( + final_fetch_service_difficulty, + final_state_service_difficulty + ); + + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_network_sol_ps() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + false, + Some(NetworkKind::Regtest), + ) + .await; + + generate_blocks_and_poll_all_chain_indexes( + 2, + 
&test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let initial_fetch_service_get_network_sol_ps = fetch_service_subscriber + .get_network_sol_ps(None, None) + .await + .unwrap(); + let initial_state_service_get_network_sol_ps = state_service_subscriber + .get_network_sol_ps(None, None) + .await + .unwrap(); + assert_eq!( + initial_fetch_service_get_network_sol_ps, + initial_state_service_get_network_sol_ps + ); + + test_manager.close().await; + } + + /// A proper test would boot up multiple nodes at the same time, and ask each node + /// for information about its peers. In the current state, this test does nothing. + #[tokio::test(flavor = "multi_thread")] + async fn peer_info() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + false, + Some(NetworkKind::Regtest), + ) + .await; + + generate_blocks_and_poll_all_chain_indexes( + 2, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let fetch_service_peer_info = fetch_service_subscriber.get_peer_info().await.unwrap(); + let state_service_peer_info = state_service_subscriber.get_peer_info().await.unwrap(); + assert_eq!(fetch_service_peer_info, state_service_peer_info); + + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn block_subsidy_fails_before_first_halving() { + let ( + test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + false, + Some(NetworkKind::Regtest), + ) + .await; + + const BLOCK_LIMIT: u32 = 10; + + for i in 0..BLOCK_LIMIT { + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + 
.await; + let fetch_service_block_subsidy = + fetch_service_subscriber.get_block_subsidy(i).await; + + let state_service_block_subsidy = + state_service_subscriber.get_block_subsidy(i).await; + assert!(fetch_service_block_subsidy.is_err()); + assert!(state_service_block_subsidy.is_err()); + } + } + + mod z { + use zcash_local_net::validator::zebrad::Zebrad; + + use super::*; + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn get_block_range_default_request_returns_no_t_data_regtest() { + state_service_get_block_range_returns_default_pools::( + &ValidatorKind::Zebrad, + ) + .await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn get_block_range_default_request_returns_all_pools_regtest() { + state_service_get_block_range_returns_all_pools::(&ValidatorKind::Zebrad) + .await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn subtrees_by_index_regtest() { + state_service_z_get_subtrees_by_index::(&ValidatorKind::Zebrad).await; + } + + #[ignore = "requires fully synced testnet."] + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn subtrees_by_index_testnet() { + state_service_z_get_subtrees_by_index_testnet().await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn treestate_regtest() { + state_service_z_get_treestate::(&ValidatorKind::Zebrad).await; + } + + #[ignore = "requires fully synced testnet."] + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn treestate_testnet() { + state_service_z_get_treestate_testnet().await; + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn raw_mempool_regtest() { + state_service_get_raw_mempool::(&ValidatorKind::Zebrad).await; + } + + /// `getmempoolinfo` computed from local Broadcast state + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + async fn get_mempool_info() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = 
create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + true, + None, + ) + .await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + + clients.faucet.sync_and_await().await.unwrap(); + + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + + from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + + // Let the broadcaster/subscribers observe the new tx + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + + // Call the internal mempool info method + let info = state_service_subscriber.get_mempool_info().await.unwrap(); + + // Derive expected values directly from the current mempool contents + let entries = state_service_subscriber.mempool.get_mempool().await; + + assert_eq!(entries.len() as u64, info.size); + assert!(info.size >= 1); + + let expected_bytes: u64 = entries + .iter() + .map(|(_, v)| v.serialized_tx.as_ref().as_ref().len() as u64) + .sum(); + + let expected_key_heap_bytes: u64 = + entries.iter().map(|(k, _)| k.txid.capacity() as u64).sum(); + + let expected_usage = expected_bytes.saturating_add(expected_key_heap_bytes); + + assert!(info.bytes > 0); + assert_eq!(info.bytes, expected_bytes); + + assert!(info.usage >= info.bytes); + assert_eq!(info.usage, expected_usage); + + // Optional: when exactly one tx, its serialized length must equal `bytes` + if info.size == 1 { + let (_, mem_value) = entries[0].clone(); 
+ assert_eq!( + mem_value.serialized_tx.as_ref().as_ref().len() as u64, + expected_bytes + ); + } + + test_manager.close().await; + } + + #[ignore = "requires fully synced testnet."] + #[tokio::test(flavor = "multi_thread")] + async fn raw_mempool_testnet() { + state_service_get_raw_mempool_testnet().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn block_object_regtest() { + state_service_get_block_object(&ValidatorKind::Zebrad, None, NetworkKind::Regtest) + .await; + } + + #[ignore = "requires fully synced testnet."] + #[tokio::test(flavor = "multi_thread")] + async fn block_object_testnet() { + state_service_get_block_object( + &ValidatorKind::Zebrad, + ZEBRAD_TESTNET_CACHE_DIR.clone(), + NetworkKind::Testnet, + ) + .await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn block_raw_regtest() { + state_service_get_block_raw(&ValidatorKind::Zebrad, None, NetworkKind::Regtest).await; + } + + #[ignore = "requires fully synced testnet."] + #[tokio::test(flavor = "multi_thread")] + async fn block_raw_testnet() { + state_service_get_block_raw( + &ValidatorKind::Zebrad, + ZEBRAD_TESTNET_CACHE_DIR.clone(), + NetworkKind::Testnet, + ) + .await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn address_balance_regtest() { + state_service_get_address_balance::(&ValidatorKind::Zebrad).await; + } + + #[ignore = "requires fully synced testnet."] + #[tokio::test(flavor = "multi_thread")] + async fn address_balance_testnet() { + state_service_get_address_balance_testnet().await; + } + + #[ignore = "requires fully synced testnet."] + #[tokio::test] + async fn address_deltas_testnet() { + state_service_get_address_deltas_testnet().await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn address_deltas() { + address_deltas::main().await; + } + + mod address_deltas; + } + + pub(crate) mod lightwallet_indexer { + use futures::StreamExt as _; + use zaino_proto::proto::{ + service::{ + AddressList, BlockId, BlockRange, GetAddressUtxosArg, 
GetSubtreeRootsArg, PoolType, + TxFilter, + }, + utils::pool_types_into_i32_vec, + }; + use zebra_rpc::methods::{GetAddressTxIdsRequest, GetBlock}; + + use super::*; + #[tokio::test(flavor = "multi_thread")] + async fn get_latest_block() { + let ( + test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + false, + Some(NetworkKind::Regtest), + ) + .await; + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let fetch_service_block = + dbg!(fetch_service_subscriber.get_latest_block().await.unwrap()); + let state_service_block = + dbg!(state_service_subscriber.get_latest_block().await.unwrap()); + assert_eq!(fetch_service_block, state_service_block); + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_block() { + let ( + test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + false, + Some(NetworkKind::Regtest), + ) + .await; + generate_blocks_and_poll_all_chain_indexes( + 2, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let second_block_by_height = BlockId { + height: 2, + hash: vec![], + }; + let fetch_service_block_by_height = fetch_service_subscriber + .get_block(second_block_by_height.clone()) + .await + .unwrap(); + let state_service_block_by_height = dbg!(state_service_subscriber + .get_block(second_block_by_height) + .await + .unwrap()); + assert_eq!(fetch_service_block_by_height, state_service_block_by_height); + + let hash = fetch_service_block_by_height.hash; + let second_block_by_hash = BlockId { height: 0, hash }; + let fetch_service_block_by_hash = dbg!(fetch_service_subscriber + 
.get_block(second_block_by_hash.clone()) + .await + .unwrap()); + let state_service_block_by_hash = dbg!(state_service_subscriber + .get_block(second_block_by_hash) + .await + .unwrap()); + assert_eq!(fetch_service_block_by_hash, state_service_block_by_hash); + assert_eq!(state_service_block_by_hash, state_service_block_by_height) + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_block_header() { + let ( + test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + false, + Some(NetworkKind::Regtest), + ) + .await; + + const BLOCK_LIMIT: u32 = 10; + + for i in 0..BLOCK_LIMIT { + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let block = fetch_service_subscriber + .z_get_block(i.to_string(), Some(1)) + .await + .unwrap(); + + let block_hash = match block { + GetBlock::Object(block) => block.hash(), + GetBlock::Raw(_) => panic!("Expected block object"), + }; + + let fetch_service_get_block_header = fetch_service_subscriber + .get_block_header(block_hash.to_string(), false) + .await + .unwrap(); + + let state_service_block_header_response = state_service_subscriber + .get_block_header(block_hash.to_string(), false) + .await + .unwrap(); + assert_eq!( + fetch_service_get_block_header, + state_service_block_header_response + ); + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_tree_state() { + let ( + test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + false, + Some(NetworkKind::Regtest), + ) + .await; + generate_blocks_and_poll_all_chain_indexes( + 2, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let 
second_treestate_by_height = BlockId { + height: 2, + hash: vec![], + }; + let fetch_service_treestate_by_height = dbg!(fetch_service_subscriber + .get_tree_state(second_treestate_by_height.clone()) + .await + .unwrap()); + let state_service_treestate_by_height = dbg!(state_service_subscriber + .get_tree_state(second_treestate_by_height) + .await + .unwrap()); + assert_eq!( + fetch_service_treestate_by_height, + state_service_treestate_by_height + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_subtree_roots() { + let ( + test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + false, + Some(NetworkKind::Regtest), + ) + .await; + generate_blocks_and_poll_all_chain_indexes( + 5, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let sapling_subtree_roots_request = GetSubtreeRootsArg { + start_index: 2, + shielded_protocol: 0, + max_entries: 0, + }; + let fetch_service_sapling_subtree_roots = fetch_service_subscriber + .get_subtree_roots(sapling_subtree_roots_request) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + let state_service_sapling_subtree_roots = state_service_subscriber + .get_subtree_roots(sapling_subtree_roots_request) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + assert_eq!( + fetch_service_sapling_subtree_roots, + state_service_sapling_subtree_roots + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_latest_tree_state() { + let ( + test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + false, + Some(NetworkKind::Regtest), + ) + .await; + generate_blocks_and_poll_all_chain_indexes( + 2, + &test_manager, + fetch_service_subscriber.clone(), + 
state_service_subscriber.clone(), + ) + .await; + + let fetch_service_treestate = fetch_service_subscriber + .get_latest_tree_state() + .await + .unwrap(); + let state_service_treestate = dbg!(state_service_subscriber + .get_latest_tree_state() + .await + .unwrap()); + assert_eq!(fetch_service_treestate, state_service_treestate); + } + + async fn get_block_range_helper(nullifiers_only: bool) { + let ( + test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + false, + Some(NetworkKind::Regtest), + ) + .await; + generate_blocks_and_poll_all_chain_indexes( + 6, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let start = Some(BlockId { + height: 2, + hash: vec![], + }); + let end = Some(BlockId { + height: 5, + hash: vec![], + }); + let request = BlockRange { + start, + end, + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], + }; + if nullifiers_only { + let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range_nullifiers(request.clone()) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + let state_service_get_block_range = state_service_subscriber + .get_block_range_nullifiers(request) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + assert_eq!(fetch_service_get_block_range, state_service_get_block_range); + } else { + let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range(request.clone()) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + let state_service_get_block_range = state_service_subscriber + .get_block_range(request) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + assert_eq!(fetch_service_get_block_range, state_service_get_block_range); + } + } + + #[tokio::test(flavor = 
"multi_thread")] + async fn get_block_range_full() { + get_block_range_helper(false).await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_block_range_nullifiers() { + get_block_range_helper(true).await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_transaction() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + true, + Some(NetworkKind::Regtest), + ) + .await; + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + + generate_blocks_and_poll_all_chain_indexes( + 2, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let block = BlockId { + height: 103, + hash: vec![], + }; + let state_service_block_by_height = state_service_subscriber + .get_block(block.clone()) + .await + .unwrap(); + let coinbase_tx = state_service_block_by_height.vtx.first().unwrap(); + let hash = coinbase_tx.txid.clone(); + let request = TxFilter { + block: None, + index: 0, + hash, + }; + let fetch_service_raw_transaction = fetch_service_subscriber + .get_transaction(request.clone()) + .await + .unwrap(); + let state_service_raw_transaction = state_service_subscriber + .get_transaction(request) + .await + .unwrap(); + assert_eq!(fetch_service_raw_transaction, state_service_raw_transaction); + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_taddress_txids() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + 
&ValidatorKind::Zebrad, + None, + true, + true, + Some(NetworkKind::Regtest), + ) + .await; + + let clients = test_manager.clients.take().unwrap(); + let taddr = clients.get_faucet_address("transparent").await; + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let state_service_taddress_txids = state_service_subscriber + .get_address_tx_ids(GetAddressTxIdsRequest::new( + vec![taddr.clone()], + Some(2), + Some(5), + )) + .await + .unwrap(); + dbg!(&state_service_taddress_txids); + let fetch_service_taddress_txids = fetch_service_subscriber + .get_address_tx_ids(GetAddressTxIdsRequest::new(vec![taddr], Some(2), Some(5))) + .await + .unwrap(); + dbg!(&fetch_service_taddress_txids); + assert_eq!(fetch_service_taddress_txids, state_service_taddress_txids); + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_address_utxos_stream() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + true, + Some(NetworkKind::Regtest), + ) + .await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let taddr = clients.get_faucet_address("transparent").await; + generate_blocks_and_poll_all_chain_indexes( + 5, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + let request = GetAddressUtxosArg { + addresses: vec![taddr], + start_height: 2, + max_entries: 3, + }; + let state_service_address_utxos_streamed = state_service_subscriber + .get_address_utxos_stream(request.clone()) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + let fetch_service_address_utxos_streamed = fetch_service_subscriber + .get_address_utxos_stream(request) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + 
assert_eq!( + fetch_service_address_utxos_streamed, + state_service_address_utxos_streamed + ); + clients.faucet.sync_and_await().await.unwrap(); + assert_eq!( + fetch_service_address_utxos_streamed.first().unwrap().txid, + clients + .faucet + .transaction_summaries(false) + .await + .unwrap() + .txids()[1] + .as_ref() + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_address_utxos() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + true, + Some(NetworkKind::Regtest), + ) + .await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let taddr = clients.get_faucet_address("transparent").await; + generate_blocks_and_poll_all_chain_indexes( + 5, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + let request = GetAddressUtxosArg { + addresses: vec![taddr], + start_height: 2, + max_entries: 3, + }; + let state_service_address_utxos = state_service_subscriber + .get_address_utxos(request.clone()) + .await + .unwrap(); + let fetch_service_address_utxos = fetch_service_subscriber + .get_address_utxos(request) + .await + .unwrap(); + assert_eq!(fetch_service_address_utxos, state_service_address_utxos); + clients.faucet.sync_and_await().await.unwrap(); + assert_eq!( + fetch_service_address_utxos + .address_utxos + .first() + .unwrap() + .txid, + clients + .faucet + .transaction_summaries(false) + .await + .unwrap() + .txids()[1] + .as_ref() + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_taddress_balance() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + true, + Some(NetworkKind::Regtest), + ) + .await; + + let clients = 
test_manager.clients.take().unwrap(); + let taddr = clients.get_faucet_address("transparent").await; + generate_blocks_and_poll_all_chain_indexes( + 5, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let state_service_taddress_balance = state_service_subscriber + .get_taddress_balance(AddressList { + addresses: vec![taddr.clone()], + }) + .await + .unwrap(); + let fetch_service_taddress_balance = fetch_service_subscriber + .get_taddress_balance(AddressList { + addresses: vec![taddr], + }) + .await + .unwrap(); + assert_eq!( + fetch_service_taddress_balance, + state_service_taddress_balance + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_transparent_data_from_compact_block_when_requested() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + true, + Some(NetworkKind::Regtest), + ) + .await; + + let clients = test_manager.clients.take().unwrap(); + let taddr = clients.get_faucet_address("transparent").await; + generate_blocks_and_poll_all_chain_indexes( + 5, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let state_service_taddress_balance = state_service_subscriber + .get_taddress_balance(AddressList { + addresses: vec![taddr.clone()], + }) + .await + .unwrap(); + let fetch_service_taddress_balance = fetch_service_subscriber + .get_taddress_balance(AddressList { + addresses: vec![taddr], + }) + .await + .unwrap(); + assert_eq!( + fetch_service_taddress_balance, + state_service_taddress_balance + ); + + let chain_height = state_service_subscriber + .get_latest_block() + .await + .unwrap() + .height; + + // NOTE / TODO: Zaino can not currently serve non standard script types in compact blocks, + // because of this it does not return the script pub key for the coinbase transaction 
of the + // genesis block. We should decide whether / how to fix this. + // + // For this reason this test currently does not fetch the genesis block. + // + // Issue: https://github.com/zingolabs/zaino/issues/818 + // + // To see bug update start height of get_block_range to 0. + let compact_block_range = state_service_subscriber + .get_block_range(BlockRange { + start: Some(BlockId { + height: 1, + hash: Vec::new(), + }), + end: Some(BlockId { + height: chain_height, + hash: Vec::new(), + }), + pool_types: pool_types_into_i32_vec( + [PoolType::Transparent, PoolType::Sapling, PoolType::Orchard].to_vec(), + ), + }) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + for cb in compact_block_range.into_iter() { + for tx in cb.vtx { + dbg!(&tx); + // script pub key of this transaction is not empty + assert!(!tx.vout.first().unwrap().script_pub_key.is_empty()); + } + } + } + } +} diff --git a/integration-tests/tests/test_vectors.rs b/integration-tests/tests/test_vectors.rs new file mode 100644 index 000000000..2d82e91b2 --- /dev/null +++ b/integration-tests/tests/test_vectors.rs @@ -0,0 +1,847 @@ +//! Holds code used to build test vector data for unit tests. These tests should not be run by default or in CI. 
+ +use anyhow::Context; +use core2::io::{self, Read, Write}; +use futures::TryFutureExt as _; +use std::fs; +use std::fs::File; +use std::io::BufReader; +use std::io::BufWriter; +use std::path::Path; +use std::sync::Arc; +use tower::{Service, ServiceExt as _}; +use zaino_fetch::chain::transaction::FullTransaction; +use zaino_fetch::chain::utils::ParseFromSlice; +use zaino_state::read_u32_le; +use zaino_state::read_u64_le; +use zaino_state::write_u32_le; +use zaino_state::write_u64_le; +use zaino_state::CompactSize; +#[allow(deprecated)] +use zaino_state::StateService; +use zaino_state::ZcashIndexer; +use zaino_state::{ChainWork, IndexedBlock}; +use zaino_testutils::from_inputs; +use zaino_testutils::test_vectors::transactions::get_test_vectors; +use zaino_testutils::{TestManager, ValidatorKind}; +use zcash_local_net::validator::zebrad::Zebrad; +use zebra_chain::serialization::{ZcashDeserialize, ZcashSerialize}; +use zebra_rpc::methods::GetAddressUtxos; +use zebra_rpc::methods::{GetAddressBalanceRequest, GetAddressTxIdsRequest, GetBlockTransaction}; +use zebra_state::HashOrHeight; +use zebra_state::{ReadRequest, ReadResponse}; + +macro_rules! expected_read_response { + ($response:ident, $expected_variant:ident) => { + match $response { + ReadResponse::$expected_variant(inner) => inner, + unexpected => { + unreachable!("Unexpected response from state service: {unexpected:?}") + } + } + }; +} + +#[tokio::test(flavor = "multi_thread")] +#[ignore = "Not a test! 
Used to build test vector data for zaino_state::chain_index unit tests."] +#[allow(deprecated)] +async fn create_200_block_regtest_chain_vectors() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + None, + true, + false, + true, + ) + .await + .unwrap(); + + let state_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + let faucet_taddr = clients.get_faucet_address("transparent").await; + let faucet_saddr = clients.get_faucet_address("sapling").await; + let faucet_uaddr = clients.get_faucet_address("unified").await; + + let recipient_taddr = clients.get_recipient_address("transparent").await; + let recipient_saddr = clients.get_recipient_address("sapling").await; + let recipient_uaddr = clients.get_recipient_address("unified").await; + + clients.faucet.sync_and_await().await.unwrap(); + + // *** Mine 100 blocks to finalise first block reward *** + test_manager + .generate_blocks_and_poll_indexer(100, &state_service_subscriber) + .await; + + // *** Build 100 block chain holding transparent, sapling, and orchard transactions *** + // sync wallets + clients.faucet.sync_and_await().await.unwrap(); + + // create transactions + clients + .faucet + .quick_shield(zip32::AccountId::ZERO) + .await + .unwrap(); + + // Generate block + test_manager + .generate_blocks_and_poll_indexer(1, &state_service_subscriber) + .await; + + // sync wallets + clients.faucet.sync_and_await().await.unwrap(); + + // create transactions + clients + .faucet + .quick_shield(zip32::AccountId::ZERO) + .await + .unwrap(); + from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_uaddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + + // Generate block + test_manager + .generate_blocks_and_poll_indexer(1, &state_service_subscriber) + .await; + + // sync wallets + clients.faucet.sync_and_await().await.unwrap(); + 
clients.recipient.sync_and_await().await.unwrap(); + + // create transactions + clients + .faucet + .quick_shield(zip32::AccountId::ZERO) + .await + .unwrap(); + + from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_uaddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + + from_inputs::quick_send( + &mut clients.recipient, + vec![(faucet_taddr.as_str(), 200_000, None)], + ) + .await + .unwrap(); + + // Generate block + test_manager + .generate_blocks_and_poll_indexer(1, &state_service_subscriber) + .await; + + // sync wallets + clients.faucet.sync_and_await().await.unwrap(); + clients.recipient.sync_and_await().await.unwrap(); + + // create transactions + clients + .faucet + .quick_shield(zip32::AccountId::ZERO) + .await + .unwrap(); + clients + .recipient + .quick_shield(zip32::AccountId::ZERO) + .await + .unwrap(); + + from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_uaddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + + from_inputs::quick_send( + &mut clients.recipient, + vec![(faucet_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + + // Generate block + test_manager + .generate_blocks_and_poll_indexer(1, &state_service_subscriber) + .await; + + for _i in 0..48 { + // sync wallets + clients.faucet.sync_and_await().await.unwrap(); + clients.recipient.sync_and_await().await.unwrap(); + + tokio::time::sleep(std::time::Duration::from_millis(2000)).await; + let chain_height = dbg!(state_service_subscriber.chain_height().await.unwrap()); + if chain_height.0 >= 200 { + break; + } + + // create transactions + clients + .faucet + .quick_shield(zip32::AccountId::ZERO) + .await + .unwrap(); + clients + .recipient + .quick_shield(zip32::AccountId::ZERO) + .await + 
.unwrap(); + + from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_uaddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + + from_inputs::quick_send( + &mut clients.recipient, + vec![(faucet_taddr.as_str(), 200_000, None)], + ) + .await + .unwrap(); + from_inputs::quick_send( + &mut clients.recipient, + vec![(faucet_uaddr.as_str(), 200_000, None)], + ) + .await + .unwrap(); + + // Generate block + test_manager + .generate_blocks_and_poll_indexer(1, &state_service_subscriber) + .await; + + // sync wallets + clients.faucet.sync_and_await().await.unwrap(); + clients.recipient.sync_and_await().await.unwrap(); + + tokio::time::sleep(std::time::Duration::from_millis(2000)).await; + let chain_height = dbg!(state_service_subscriber.chain_height().await.unwrap()); + if chain_height.0 >= 200 { + break; + } + + // create transactions + clients + .faucet + .quick_shield(zip32::AccountId::ZERO) + .await + .unwrap(); + clients + .recipient + .quick_shield(zip32::AccountId::ZERO) + .await + .unwrap(); + + from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_saddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_uaddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + + from_inputs::quick_send( + &mut clients.recipient, + vec![(faucet_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + from_inputs::quick_send( + &mut clients.recipient, + vec![(faucet_saddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + + // Generate block + test_manager + .generate_blocks_and_poll_indexer(1, &state_service_subscriber) + .await; + } + tokio::time::sleep(std::time::Duration::from_millis(10000)).await; + + // *** Fetch chain 
data *** + let chain_height = dbg!(state_service_subscriber.chain_height().await.unwrap()); + + //fetch and build block data + let block_data = { + let mut data = Vec::new(); + let mut parent_chain_work = ChainWork::from_u256(0.into()); + let mut parent_block_sapling_tree_size: u32 = 0; + let mut parent_block_orchard_tree_size: u32 = 0; + + for height in 0..=chain_height.0 { + let (chain_block, zebra_block, block_roots, block_treestate) = { + // Fetch block data + let (_hash, tx, _trees) = state_service_subscriber + .z_get_block(height.to_string(), Some(1)) + .await + .and_then(|response| match response { + zebra_rpc::methods::GetBlock::Raw(_) => { + Err(zaino_state::StateServiceError::Custom( + "Found transaction of `Raw` type, expected only `Object` types." + .to_string(), + )) + } + zebra_rpc::methods::GetBlock::Object(block_obj) => Ok(( + block_obj.hash() , + block_obj.tx().iter() + .map(|item| { + match item { + GetBlockTransaction::Hash(h) => Ok(h.0.to_vec()), + GetBlockTransaction::Object(_) => Err( + zaino_state::StateServiceError::Custom( + "Found transaction of `Object` type, expected only `Hash` types." + .to_string(), + ), + ), + } + }) + .collect::, _>>() + .unwrap(), + (block_obj.trees().sapling(), block_obj.trees().orchard()), + )), + }) + .unwrap(); + + let block_data = state_service_subscriber + .z_get_block(height.to_string(), Some(0)) + .await + .and_then(|response| match response { + zebra_rpc::methods::GetBlock::Object { .. } => { + Err(zaino_state::StateServiceError::Custom( + "Found transaction of `Object` type, expected only `Raw` types." 
+ .to_string(), + )) + } + zebra_rpc::methods::GetBlock::Raw(block_hex) => Ok(block_hex), + }) + .unwrap(); + + let mut state = state_service_subscriber.read_state_service.clone(); + let (sapling_root, orchard_root) = { + let (sapling_tree_response, orchard_tree_response) = futures::future::join( + state.clone().call(zebra_state::ReadRequest::SaplingTree( + HashOrHeight::Height(zebra_chain::block::Height(height)), + )), + state.clone().call(zebra_state::ReadRequest::OrchardTree( + HashOrHeight::Height(zebra_chain::block::Height(height)), + )), + ) + .await; + let (sapling_tree, orchard_tree) = match ( + //TODO: Better readstateservice error handling + sapling_tree_response.unwrap(), + orchard_tree_response.unwrap(), + ) { + ( + zebra_state::ReadResponse::SaplingTree(saptree), + zebra_state::ReadResponse::OrchardTree(orctree), + ) => (saptree, orctree), + (_, _) => panic!("Bad response"), + }; + + ( + sapling_tree + .as_deref() + .map(|tree| (tree.root(), tree.count())) + .unwrap(), + orchard_tree + .as_deref() + .map(|tree| (tree.root(), tree.count())) + .unwrap(), + ) + }; + + let sapling_treestate = match zebra_chain::parameters::NetworkUpgrade::Sapling + .activation_height(&state_service_subscriber.network().to_zebra_network()) + { + Some(activation_height) if height >= activation_height.0 => Some( + state + .ready() + .and_then(|service| { + service.call(ReadRequest::SaplingTree(HashOrHeight::Height( + zebra_chain::block::Height(height), + ))) + }) + .await + .unwrap(), + ), + _ => Some(zebra_state::ReadResponse::SaplingTree(Some(Arc::new( + zebra_chain::sapling::tree::NoteCommitmentTree::default(), + )))), + } + .and_then(|sap_response| { + expected_read_response!(sap_response, SaplingTree) + .map(|tree| tree.to_rpc_bytes()) + }) + .unwrap(); + let orchard_treestate = match zebra_chain::parameters::NetworkUpgrade::Nu5 + .activation_height(&state_service_subscriber.network().to_zebra_network()) + { + Some(activation_height) if height >= activation_height.0 => 
Some( + state + .ready() + .and_then(|service| { + service.call(ReadRequest::OrchardTree(HashOrHeight::Height( + zebra_chain::block::Height(height), + ))) + }) + .await + .unwrap(), + ), + _ => Some(zebra_state::ReadResponse::OrchardTree(Some(Arc::new( + zebra_chain::orchard::tree::NoteCommitmentTree::default(), + )))), + } + .and_then(|orch_response| { + expected_read_response!(orch_response, OrchardTree) + .map(|tree| tree.to_rpc_bytes()) + }) + .unwrap(); + + // Build block data + let full_block = zaino_fetch::chain::block::FullBlock::parse_from_hex( + block_data.as_ref(), + Some(display_txids_to_server(tx.clone())), + ) + .unwrap(); + + let chain_block = IndexedBlock::try_from(( + full_block.clone(), + parent_chain_work, + sapling_root.0.into(), + orchard_root.0.into(), + parent_block_sapling_tree_size, + parent_block_orchard_tree_size, + )) + .unwrap(); + + let zebra_block = + zebra_chain::block::Block::zcash_deserialize(block_data.as_ref()).unwrap(); + + let block_roots = ( + sapling_root.0, + chain_block.commitment_tree_data().sizes().sapling() as u64, + orchard_root.0, + chain_block.commitment_tree_data().sizes().orchard() as u64, + ); + + let block_treestate = (sapling_treestate, orchard_treestate); + + (chain_block, zebra_block, block_roots, block_treestate) + }; + + // Update parent block + parent_block_sapling_tree_size = chain_block.commitment_tree_data().sizes().sapling(); + parent_block_orchard_tree_size = chain_block.commitment_tree_data().sizes().orchard(); + parent_chain_work = *chain_block.index().chainwork(); + + data.push((height, zebra_block, block_roots, block_treestate)); + } + data + }; + + // Fetch and build wallet addr transparent data + let faucet_data = { + let faucet_txids = state_service_subscriber + .get_address_tx_ids(GetAddressTxIdsRequest::new( + vec![faucet_taddr.clone()], + Some(0), + Some(chain_height.0), + )) + .await + .unwrap(); + + let faucet_utxos = state_service_subscriber + 
.z_get_address_utxos(GetAddressBalanceRequest::new(vec![faucet_taddr.clone()])) + .await + .unwrap(); + + let faucet_balance = state_service_subscriber + .z_get_address_balance(GetAddressBalanceRequest::new(vec![faucet_taddr.clone()])) + .await + .unwrap() + .balance(); + + (faucet_txids, faucet_utxos, faucet_balance) + }; + + // fetch recipient addr transparent data + let recipient_data = { + let recipient_txids = state_service_subscriber + .get_address_tx_ids(GetAddressTxIdsRequest::new( + vec![recipient_taddr.clone()], + Some(0), + Some(chain_height.0), + )) + .await + .unwrap(); + + let recipient_utxos = state_service_subscriber + .z_get_address_utxos(GetAddressBalanceRequest::new(vec![recipient_taddr.clone()])) + .await + .unwrap(); + + let recipient_balance = state_service_subscriber + .z_get_address_balance(GetAddressBalanceRequest::new(vec![recipient_taddr.clone()])) + .await + .unwrap() + .balance(); + + (recipient_txids, recipient_utxos, recipient_balance) + }; + + // *** Save chain vectors to disk *** + + let vec_dir = Path::new(env!("CARGO_MANIFEST_DIR")) + .join("tests") + .join("vectors_tmp"); + if vec_dir.exists() { + fs::remove_dir_all(&vec_dir).unwrap(); + } + + write_vectors_to_file(&vec_dir, &block_data, &faucet_data, &recipient_data).unwrap(); + + // *** Read data from files to validate write format. 
+ + let (re_blocks, re_faucet, re_recipient) = read_vectors_from_file(&vec_dir).unwrap(); + + for ((h_orig, zebra_orig, roots_orig, trees_orig), (h_new, zebra_new, roots_new, trees_new)) in + block_data.iter().zip(re_blocks.iter()) + { + assert_eq!(h_orig, h_new, "height mismatch at block {h_orig}"); + assert_eq!( + zebra_orig, zebra_new, + "zebra_chain::block::Block serialisation mismatch at height {h_orig}" + ); + assert_eq!( + roots_orig, roots_new, + "block root serialisation mismatch at height {h_orig}" + ); + assert_eq!( + trees_orig, trees_new, + "block treestate serialisation mismatch at height {h_orig}" + ); + } + + assert_eq!(faucet_data, re_faucet, "faucet tuple mismatch"); + assert_eq!(recipient_data, re_recipient, "recipient tuple mismatch"); +} + +/// Test-only helper: takes big-endian hex‐encoded txids (`Vec>`) +/// and returns them as little-endian raw-byte vectors. +fn display_txids_to_server(txids: Vec>) -> Vec> { + txids + .into_iter() + .map(|mut t| { + t.reverse(); + t + }) + .collect() +} + +#[allow(clippy::type_complexity)] +pub fn write_vectors_to_file>( + base_dir: P, + block_data: &[( + u32, + zebra_chain::block::Block, + ( + zebra_chain::sapling::tree::Root, + u64, + zebra_chain::orchard::tree::Root, + u64, + ), + (Vec, Vec), + )], + faucet_data: &(Vec, Vec, u64), + recipient_data: &(Vec, Vec, u64), +) -> io::Result<()> { + let base = base_dir.as_ref(); + fs::create_dir_all(base)?; + + // zcash_blocks.dat + let mut zb_out = BufWriter::new(File::create(base.join("zcash_blocks.dat"))?); + for (h, zcash_block, _roots, _treestate) in block_data { + write_u32_le(&mut zb_out, *h)?; + let mut bytes = Vec::new(); + zcash_block.zcash_serialize(&mut bytes)?; + CompactSize::write(&mut zb_out, bytes.len())?; + zb_out.write_all(&bytes)?; + } + + // tree_roots.dat + let mut tr_out = BufWriter::new(File::create(base.join("tree_roots.dat"))?); + for (h, _blocks, (sapling_root, sapling_size, orchard_root, orchard_size), _treestate) in + block_data + { + 
write_u32_le(&mut tr_out, *h)?; + tr_out.write_all(&<[u8; 32]>::from(*sapling_root))?; + write_u64_le(&mut tr_out, *sapling_size)?; + tr_out.write_all(&<[u8; 32]>::from(*orchard_root))?; + write_u64_le(&mut tr_out, *orchard_size)?; + } + + // tree_states.dat + let mut ts_out = BufWriter::new(File::create(base.join("tree_states.dat"))?); + for (h, _blocks, _roots, (sapling_treestate, orchard_treestate)) in block_data { + write_u32_le(&mut ts_out, *h)?; + // Write length-prefixed treestate bytes (variable length) + CompactSize::write(&mut ts_out, sapling_treestate.len())?; + ts_out.write_all(sapling_treestate)?; + CompactSize::write(&mut ts_out, orchard_treestate.len())?; + ts_out.write_all(orchard_treestate)?; + } + + // faucet_data.json + serde_json::to_writer_pretty(File::create(base.join("faucet_data.json"))?, faucet_data)?; + + // recipient_data.json + serde_json::to_writer_pretty( + File::create(base.join("recipient_data.json"))?, + recipient_data, + )?; + + Ok(()) +} + +#[allow(clippy::type_complexity)] +pub fn read_vectors_from_file>( + base_dir: P, +) -> io::Result<( + Vec<( + u32, + zebra_chain::block::Block, + ( + zebra_chain::sapling::tree::Root, + u64, + zebra_chain::orchard::tree::Root, + u64, + ), + (Vec, Vec), + )>, + (Vec, Vec, u64), + (Vec, Vec, u64), +)> { + let base = base_dir.as_ref(); + + // zebra_blocks.dat + let mut zebra_blocks = Vec::<(u32, zebra_chain::block::Block)>::new(); + { + let mut r = BufReader::new(File::open(base.join("zcash_blocks.dat"))?); + loop { + let height = match read_u32_le(&mut r) { + Ok(h) => h, + Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => break, + Err(e) => return Err(e), + }; + + let len: usize = CompactSize::read_t(&mut r)?; + let mut buf = vec![0u8; len]; + r.read_exact(&mut buf)?; + + let zcash_block = zebra_chain::block::Block::zcash_deserialize(&*buf) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + zebra_blocks.push((height, zcash_block)); + } + } + + // tree_roots.dat + let mut 
blocks_and_roots = Vec::with_capacity(zebra_blocks.len()); + { + let mut r = BufReader::new(File::open(base.join("tree_roots.dat"))?); + for (height, zebra_block) in zebra_blocks { + let h2 = read_u32_le(&mut r)?; + if height != h2 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "height mismatch in tree_roots.dat", + )); + } + let mut sapling_bytes = [0u8; 32]; + r.read_exact(&mut sapling_bytes)?; + let sapling_root = zebra_chain::sapling::tree::Root::try_from(sapling_bytes) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + let sapling_size = read_u64_le(&mut r)?; + + let mut orchard_bytes = [0u8; 32]; + r.read_exact(&mut orchard_bytes)?; + let orchard_root = zebra_chain::orchard::tree::Root::try_from(orchard_bytes) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + let orchard_size = read_u64_le(&mut r)?; + + blocks_and_roots.push(( + height, + zebra_block, + (sapling_root, sapling_size, orchard_root, orchard_size), + )); + } + } + + // tree_states.dat + let mut full_data = Vec::with_capacity(blocks_and_roots.len()); + { + let mut r = BufReader::new(File::open(base.join("tree_states.dat"))?); + for (height, zebra_block, roots) in blocks_and_roots { + let h2 = read_u32_le(&mut r)?; + if height != h2 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "height mismatch in tree_states.dat", + )); + } + + let sapling_len: usize = CompactSize::read_t(&mut r)?; + let mut sapling_state = vec![0u8; sapling_len]; + r.read_exact(&mut sapling_state)?; + + let orchard_len: usize = CompactSize::read_t(&mut r)?; + let mut orchard_state = vec![0u8; orchard_len]; + r.read_exact(&mut orchard_state)?; + + full_data.push((height, zebra_block, roots, (sapling_state, orchard_state))); + } + } + + // faucet_data.json + let faucet = serde_json::from_reader(File::open(base.join("faucet_data.json"))?) 
+ .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + // recipient_data.json + let recipient = serde_json::from_reader(File::open(base.join("recipient_data.json"))?) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + Ok((full_data, faucet, recipient)) +} + +#[tokio::test(flavor = "multi_thread")] +async fn pre_v4_txs_parsing() -> anyhow::Result<()> { + let test_vectors = get_test_vectors(); + + for (i, test_vector) in test_vectors.iter().filter(|v| v.version < 4).enumerate() { + let description = test_vector.description; + let version = test_vector.version; + let raw_tx = test_vector.tx.clone(); + let txid = test_vector.txid; + // todo!: add an 'is_coinbase' method to the transaction struct to check thid + let _is_coinbase = test_vector.is_coinbase; + let has_sapling = test_vector.has_sapling; + let has_orchard = test_vector.has_orchard; + let transparent_inputs = test_vector.transparent_inputs; + let transparent_outputs = test_vector.transparent_outputs; + + let deserialized_tx = + FullTransaction::parse_from_slice(&raw_tx, Some(vec![txid.to_vec()]), None) + .with_context(|| { + format!("Failed to deserialize transaction with description: {description:?}") + })?; + + let tx = deserialized_tx.1; + + assert_eq!( + tx.version(), + version, + "Version mismatch for transaction #{i} ({description})" + ); + assert_eq!( + tx.tx_id(), + txid, + "TXID mismatch for transaction #{i} ({description})" + ); + // Check Sapling spends (v4+ transactions) + if version >= 4 { + assert_eq!( + !tx.shielded_spends().is_empty(), + has_sapling != 0, + "Sapling spends mismatch for transaction #{i} ({description})" + ); + } else { + // v1-v3 transactions should not have Sapling spends + assert!( + tx.shielded_spends().is_empty(), + "Transaction #{i} ({description}) version {version} should not have Sapling spends" + ); + } + + // Check Orchard actions (v5+ transactions) + if version >= 5 { + assert_eq!( + !tx.orchard_actions().is_empty(), + has_orchard != 0, + 
"Orchard actions mismatch for transaction #{i} ({description})" + ); + } else { + // v1-v4 transactions should not have Orchard actions + assert!( + tx.orchard_actions().is_empty(), + "Transaction #{i} ({description}) version {version} should not have Orchard actions" + ); + } + assert_eq!( + !tx.transparent_inputs().is_empty(), + transparent_inputs > 0, + "Transparent inputs presence mismatch for transaction #{i} ({description})" + ); + assert_eq!( + !tx.transparent_outputs().is_empty(), + transparent_outputs > 0, + "Transparent outputs presence mismatch for transaction #{i} ({description})" + ); + + // dbg!(tx); + } + Ok(()) +} diff --git a/integration-tests/tests/wallet_to_validator.rs b/integration-tests/tests/wallet_to_validator.rs new file mode 100644 index 000000000..615203162 --- /dev/null +++ b/integration-tests/tests/wallet_to_validator.rs @@ -0,0 +1,717 @@ +//! Holds wallet-to-validator tests for Zaino. + +#![forbid(unsafe_code)] + +use zaino_fetch::jsonrpsee::connector::test_node_and_return_url; +use zaino_state::LightWalletService; +use zaino_state::ZcashIndexer; +use zaino_state::ZcashService; +use zaino_testutils::from_inputs; +use zaino_testutils::TestManager; +use zaino_testutils::ValidatorExt; +use zaino_testutils::ValidatorKind; +use zainodlib::config::ZainodConfig; +use zainodlib::error::IndexerError; +use zip32::AccountId; + +async fn connect_to_node_get_info_for_validator(validator: &ValidatorKind) +where + V: ValidatorExt, + Service: LightWalletService + Send + Sync + 'static, + Service::Config: TryFrom, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, +{ + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + let clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.do_info().await; + clients.recipient.do_info().await; + + test_manager.close().await; +} + +async fn send_to_orchard(validator: &ValidatorKind) +where + 
V: ValidatorExt, + Service: LightWalletService + Send + Sync + 'static, + Service::Config: TryFrom, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, +{ + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager.generate_blocks_and_poll(100).await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager.generate_blocks_and_poll(1).await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = clients.get_recipient_address("unified").await.to_string(); + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap(); + test_manager.generate_blocks_and_poll(1).await; + clients.recipient.sync_and_await().await.unwrap(); + + assert_eq!( + clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .total_orchard_balance + .unwrap() + .into_u64(), + 250_000 + ); + + test_manager.close().await; +} + +async fn send_to_sapling(validator: &ValidatorKind) +where + V: ValidatorExt, + Service: LightWalletService + Send + Sync + 'static, + Service::Config: TryFrom, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, +{ + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager.generate_blocks_and_poll(100).await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + 
test_manager.generate_blocks_and_poll(1).await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_zaddr = clients.get_recipient_address("sapling").await; + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_zaddr, 250_000, None)]) + .await + .unwrap(); + test_manager.generate_blocks_and_poll(1).await; + clients.recipient.sync_and_await().await.unwrap(); + + assert_eq!( + clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .total_sapling_balance + .unwrap() + .into_u64(), + 250_000 + ); + + test_manager.close().await; +} + +async fn send_to_transparent(validator: &ValidatorKind) +where + V: ValidatorExt, + Service: LightWalletService + Send + Sync + 'static, + Service::Config: TryFrom, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, +{ + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager.generate_blocks_and_poll(100).await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager.generate_blocks_and_poll(1).await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_taddr = clients.get_recipient_address("transparent").await; + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_taddr, 250_000, None)]) + .await + .unwrap(); + + test_manager.generate_blocks_and_poll(1).await; + + let fetch_service = zaino_fetch::jsonrpsee::connector::JsonRpSeeConnector::new_with_basic_auth( + test_node_and_return_url( + &test_manager.full_node_rpc_listen_address.to_string(), + None, + Some("xxxxxx".to_string()), + Some("xxxxxx".to_string()), + ) + .await + .unwrap(), + "xxxxxx".to_string(), + "xxxxxx".to_string(), + ) + 
.unwrap(); + + println!("\n\nFetching Chain Height!\n"); + + let height = dbg!(fetch_service.get_blockchain_info().await.unwrap().blocks.0); + + println!("\n\nFetching Tx From Unfinalized Chain!\n"); + + let unfinalised_transactions = fetch_service + .get_address_txids( + vec![clients.get_recipient_address("transparent").await], + height, + height, + ) + .await + .unwrap(); + + dbg!(unfinalised_transactions.clone()); + test_manager.generate_blocks_and_poll(99).await; + + println!("\n\nFetching Tx From Finalized Chain!\n"); + + let finalised_transactions = fetch_service + .get_address_txids( + vec![clients.get_recipient_address("transparent").await], + height, + height, + ) + .await + .unwrap(); + + dbg!(finalised_transactions.clone()); + + clients.recipient.sync_and_await().await.unwrap(); + + assert_eq!( + clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .confirmed_transparent_balance + .unwrap() + .into_u64(), + 250_000 + ); + + assert_eq!(unfinalised_transactions, finalised_transactions); + // test_manager.local_net.print_stdout(); + + test_manager.close().await; +} + +async fn send_to_all(validator: &ValidatorKind) +where + V: ValidatorExt, + Service: LightWalletService + Send + Sync + 'static, + Service::Config: TryFrom, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, +{ + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + test_manager.generate_blocks_and_poll(2).await; + clients.faucet.sync_and_await().await.unwrap(); + + // "Create" 3 orchard notes in faucet. 
+ if matches!(validator, ValidatorKind::Zebrad) { + test_manager.generate_blocks_and_poll(100).await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager.generate_blocks_and_poll(100).await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager.generate_blocks_and_poll(100).await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager.generate_blocks_and_poll(1).await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = clients.get_recipient_address("unified").await; + let recipient_zaddr = clients.get_recipient_address("sapling").await; + let recipient_taddr = clients.get_recipient_address("transparent").await; + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap(); + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_zaddr, 250_000, None)]) + .await + .unwrap(); + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_taddr, 250_000, None)]) + .await + .unwrap(); + test_manager.generate_blocks_and_poll(100).await; + clients.recipient.sync_and_await().await.unwrap(); + + assert_eq!( + clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .total_orchard_balance + .unwrap() + .into_u64(), + 250_000 + ); + assert_eq!( + clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .total_sapling_balance + .unwrap() + .into_u64(), + 250_000 + ); + assert_eq!( + clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .confirmed_transparent_balance + .unwrap() + .into_u64(), + 250_000 + ); + + test_manager.close().await; +} + +async fn shield_for_validator(validator: &ValidatorKind) +where + V: ValidatorExt, + Service: LightWalletService + Send + Sync + 'static, + 
Service::Config: TryFrom, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, +{ + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager.generate_blocks_and_poll(100).await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager.generate_blocks_and_poll(1).await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_taddr = clients.get_recipient_address("transparent").await; + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_taddr, 250_000, None)]) + .await + .unwrap(); + test_manager.generate_blocks_and_poll(100).await; + clients.recipient.sync_and_await().await.unwrap(); + + assert_eq!( + clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .confirmed_transparent_balance + .unwrap() + .into_u64(), + 250_000 + ); + + clients + .recipient + .quick_shield(AccountId::ZERO) + .await + .unwrap(); + test_manager.generate_blocks_and_poll(1).await; + clients.recipient.sync_and_await().await.unwrap(); + + assert_eq!( + clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .total_orchard_balance + .unwrap() + .into_u64(), + 235_000 + ); + + test_manager.close().await; +} + +async fn monitor_unverified_mempool_for_validator(validator: &ValidatorKind) +where + V: ValidatorExt, + Service: LightWalletService + Send + Sync + 'static, + Service::Config: TryFrom, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, +{ + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + let mut clients = test_manager + .clients + .take() + .expect("Clients are not 
initialized"); + + test_manager.generate_blocks_and_poll(1).await; + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager.generate_blocks_and_poll(100).await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager.generate_blocks_and_poll(100).await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager.generate_blocks_and_poll(1).await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let txid_1 = from_inputs::quick_send( + &mut clients.faucet, + vec![( + &zaino_testutils::get_base_address_macro!(&mut clients.recipient, "unified"), + 250_000, + None, + )], + ) + .await + .unwrap(); + let txid_2 = from_inputs::quick_send( + &mut clients.faucet, + vec![( + &zaino_testutils::get_base_address_macro!(&mut clients.recipient, "sapling"), + 250_000, + None, + )], + ) + .await + .unwrap(); + + println!("\n\nStarting Mempool!\n"); + clients.recipient.wallet.write().await.clear_all(); + clients.recipient.sync_and_await().await.unwrap(); + + // test_manager.local_net.print_stdout(); + + let fetch_service = zaino_fetch::jsonrpsee::connector::JsonRpSeeConnector::new_with_basic_auth( + test_node_and_return_url( + &test_manager.full_node_rpc_listen_address.to_string(), + None, + Some("xxxxxx".to_string()), + Some("xxxxxx".to_string()), + ) + .await + .unwrap(), + "xxxxxx".to_string(), + "xxxxxx".to_string(), + ) + .unwrap(); + + println!("\n\nFetching Raw Mempool!\n"); + let mempool_txids = fetch_service.get_raw_mempool().await.unwrap(); + dbg!(txid_1); + dbg!(txid_2); + dbg!(mempool_txids.clone()); + + println!("\n\nFetching Mempool Tx 1!\n"); + let _transaction_1 = dbg!( + fetch_service + .get_raw_transaction(mempool_txids.transactions[0].clone(), Some(1)) + .await + ); + + println!("\n\nFetching Mempool Tx 2!\n"); + let _transaction_2 = dbg!( + fetch_service + 
.get_raw_transaction(mempool_txids.transactions[1].clone(), Some(1)) + .await + ); + + assert_eq!( + clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .unconfirmed_orchard_balance + .unwrap() + .into_u64(), + 250_000 + ); + assert_eq!( + clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .unconfirmed_sapling_balance + .unwrap() + .into_u64(), + 250_000 + ); + + test_manager.generate_blocks_and_poll(1).await; + + println!("\n\nFetching Mined Tx 1!\n"); + let _transaction_1 = dbg!( + fetch_service + .get_raw_transaction(mempool_txids.transactions[0].clone(), Some(1)) + .await + ); + + println!("\n\nFetching Mined Tx 2!\n"); + let _transaction_2 = dbg!( + fetch_service + .get_raw_transaction(mempool_txids.transactions[1].clone(), Some(1)) + .await + ); + + clients.recipient.sync_and_await().await.unwrap(); + + assert_eq!( + clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .confirmed_orchard_balance + .unwrap() + .into_u64(), + 250_000 + ); + assert_eq!( + clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .confirmed_orchard_balance + .unwrap() + .into_u64(), + 250_000 + ); + + test_manager.close().await; +} + +mod zcashd { + #[allow(deprecated)] + use zaino_state::FetchService; + use zcash_local_net::validator::zcashd::Zcashd; + + use super::*; + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + async fn connect_to_node_get_info() { + connect_to_node_get_info_for_validator::(&ValidatorKind::Zcashd) + .await; + } + + mod sent_to { + use super::*; + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn orchard() { + send_to_orchard::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn sapling() { + send_to_sapling::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + 
#[allow(deprecated)] + pub(crate) async fn transparent() { + send_to_transparent::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn all() { + send_to_all::(&ValidatorKind::Zcashd).await; + } + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + async fn shield() { + shield_for_validator::(&ValidatorKind::Zcashd).await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + async fn monitor_unverified_mempool() { + monitor_unverified_mempool_for_validator::(&ValidatorKind::Zcashd) + .await; + } +} + +mod zebrad { + use super::*; + + mod fetch_service { + use zcash_local_net::validator::zebrad::Zebrad; + + use super::*; + #[allow(deprecated)] + use zaino_state::FetchService; + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + async fn connect_to_node_get_info() { + connect_to_node_get_info_for_validator::(&ValidatorKind::Zebrad) + .await; + } + mod send_to { + use super::*; + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn sapling() { + send_to_sapling::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn orchard() { + send_to_orchard::(&ValidatorKind::Zebrad).await; + } + + /// Bug documented in https://github.com/zingolabs/zaino/issues/145. + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn transparent() { + send_to_transparent::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn all() { + send_to_all::(&ValidatorKind::Zebrad).await; + } + } + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + async fn shield() { + shield_for_validator::(&ValidatorKind::Zebrad).await; + } + /// Bug documented in https://github.com/zingolabs/zaino/issues/144. 
+ #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + async fn monitor_unverified_mempool() { + monitor_unverified_mempool_for_validator::( + &ValidatorKind::Zebrad, + ) + .await; + } + } + + mod state_service { + use zcash_local_net::validator::zebrad::Zebrad; + + use super::*; + #[allow(deprecated)] + use zaino_state::StateService; + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + async fn connect_to_node_get_info() { + connect_to_node_get_info_for_validator::(&ValidatorKind::Zebrad) + .await; + } + mod send_to { + use super::*; + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn sapling() { + send_to_sapling::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn orchard() { + send_to_orchard::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn transparent() { + send_to_transparent::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn all() { + send_to_all::(&ValidatorKind::Zebrad).await; + } + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + async fn shield() { + shield_for_validator::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + async fn monitor_unverified_mempool() { + monitor_unverified_mempool_for_validator::( + &ValidatorKind::Zebrad, + ) + .await; + } + } +} diff --git a/integration-tests/tests/zebra/get/address_deltas.rs b/integration-tests/tests/zebra/get/address_deltas.rs new file mode 100644 index 000000000..e97c8abb3 --- /dev/null +++ b/integration-tests/tests/zebra/get/address_deltas.rs @@ -0,0 +1,284 @@ +//! Integration tests for the `getaddressdeltas` RPC method. +//! +//! # Test Chain Scenario +//! +//! The test chain is constructed as follows: +//! +//! 1. 
**Blocks 1-100**: Initial block generation (via `generate_blocks(100)`) +//! 2. **Block 101**: Faucet shields its transparent funds +//! 3. **Block 102**: Faucet sends 250,000 zatoshis to recipient's transparent address +//! 4. **Final state**: Chain height = 102 (approximately, may vary slightly) +//! +//! # Test Constants +//! +//! ## EXPECTED_TX_HEIGHT = 102 +//! The block height where the test transaction (250k zatoshis to recipient) is expected to land. +//! +//! **Invariant**: Must be >= 102 based on setup (100 initial blocks + 1 shield + 1 transaction). +//! +//! ## EXPECTED_CHAIN_TIP = 104 +//! The expected final chain height after all setup operations complete. +//! +//! **Invariants**: +//! - Must be >= EXPECTED_TX_HEIGHT +//! - Small buffer above EXPECTED_TX_HEIGHT to account for any additional blocks +//! - Used as `end` parameter in range queries to ensure we capture the test transaction +//! +//! ## HEIGHT_BEYOND_TIP = 200 +//! A height value intentionally beyond the actual chain tip, used to test height clamping behavior. +//! +//! **Invariant**: Must be > EXPECTED_CHAIN_TIP to properly test that the implementation +//! clamps the requested end height to the actual chain tip. +//! +//! ## NON_EXISTENT_ADDRESS +//! A valid testnet transparent address that is guaranteed to have no deltas in this test chain. +//! Used to verify that queries for non-existent addresses return empty results gracefully. +//! +//! # Modifying Parameters +//! +//! If you need to modify the chain setup: +//! - Changing block generation count requires updating EXPECTED_TX_HEIGHT accordingly +//! - EXPECTED_CHAIN_TIP should always be slightly above the actual final height +//! - HEIGHT_BEYOND_TIP must remain larger than EXPECTED_CHAIN_TIP +//! 
- Test assertions reference these constants, so they'll automatically adjust + +use super::*; + +// Test constants (see module documentation above for details) +const EXPECTED_TX_HEIGHT: u32 = 102; +const EXPECTED_CHAIN_TIP: u32 = 104; +const HEIGHT_BEYOND_TIP: u32 = 200; +const NON_EXISTENT_ADDRESS: &str = "tmVqEASZxBNKFTbmASZikGa5fPLkd68iJyx"; + +#[allow(deprecated)] // StateService +async fn setup_chain( + test_manager: &mut TestManager, +) -> (String, String) { + let state_service_subscriber = test_manager.service_subscriber.clone().unwrap(); + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + let faucet_taddr = clients.get_faucet_address("transparent").await; + + clients.faucet.sync_and_await().await.unwrap(); + + // Generate blocks and perform transaction + test_manager + .generate_blocks_and_poll_indexer(100, &state_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &state_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + + from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &state_service_subscriber) + .await; + + clients.recipient.sync_and_await().await.unwrap(); + + (recipient_taddr, faucet_taddr) +} + +#[allow(deprecated)] +async fn test_simple_query(subscriber: &StateServiceSubscriber, recipient_taddr: &str) { + let params = GetAddressDeltasParams::Address(recipient_taddr.to_string()); + let response = subscriber.get_address_deltas(params).await.unwrap(); + + if let GetAddressDeltasResponse::Simple(address_deltas) = response { + assert!(!address_deltas.is_empty(), "Expected at least one delta"); + let recipient_delta = address_deltas 
+ .iter() + .find(|d| d.height >= EXPECTED_TX_HEIGHT) + .expect("Should find recipient transaction delta"); + assert!( + recipient_delta.height >= EXPECTED_TX_HEIGHT, + "Transaction should be at expected height" + ); + assert_eq!(recipient_delta.index, 0, "Expected output index 0"); + } else { + panic!("Expected Simple variant"); + } +} + +#[allow(deprecated)] +async fn test_filtered_start_zero( + subscriber: &StateServiceSubscriber, + recipient_taddr: &str, + faucet_taddr: &str, +) { + let start_height = 0; + let end_height = EXPECTED_CHAIN_TIP; + + let params = GetAddressDeltasParams::Filtered { + addresses: vec![recipient_taddr.to_string(), faucet_taddr.to_string()], + start: start_height, + end: end_height, + chain_info: true, + }; + let response = subscriber.get_address_deltas(params).await.unwrap(); + + if let GetAddressDeltasResponse::Simple(address_deltas) = response { + assert!( + !address_deltas.is_empty(), + "Expected deltas for both addresses" + ); + assert!( + address_deltas.len() >= 2, + "Expected deltas from multiple addresses" + ); + } else { + panic!("Expected Simple variant"); + } +} + +#[allow(deprecated)] +async fn test_with_chaininfo( + subscriber: &StateServiceSubscriber, + recipient_taddr: &str, + faucet_taddr: &str, +) { + let start_height = 1; + let end_height = EXPECTED_CHAIN_TIP; + + let params = GetAddressDeltasParams::Filtered { + addresses: vec![recipient_taddr.to_string(), faucet_taddr.to_string()], + start: start_height, + end: end_height, + chain_info: true, + }; + let response = subscriber.get_address_deltas(params).await.unwrap(); + + if let GetAddressDeltasResponse::WithChainInfo { deltas, start, end } = response { + assert!(!deltas.is_empty(), "Expected deltas with chain info"); + assert_eq!( + start.height, start_height, + "Start block should match request" + ); + assert_eq!(end.height, end_height, "End block should match request"); + assert!( + start.height < end.height, + "Start height should be less than end height" + ); + } 
else { + panic!("Expected WithChainInfo variant"); + } +} + +#[allow(deprecated)] +async fn test_height_clamping( + subscriber: &StateServiceSubscriber, + recipient_taddr: &str, + faucet_taddr: &str, +) { + let start_height = 1; + let end_height = HEIGHT_BEYOND_TIP; + + let params = GetAddressDeltasParams::Filtered { + addresses: vec![recipient_taddr.to_string(), faucet_taddr.to_string()], + start: start_height, + end: end_height, + chain_info: true, + }; + let response = subscriber.get_address_deltas(params).await.unwrap(); + + if let GetAddressDeltasResponse::WithChainInfo { deltas, start, end } = response { + assert!(!deltas.is_empty(), "Expected deltas with clamped range"); + assert_eq!(start.height, start_height, "Start should match request"); + assert!( + end.height < end_height, + "End height should be clamped below requested value" + ); + assert!( + end.height <= EXPECTED_CHAIN_TIP, + "End height should not exceed chain tip region" + ); + } else { + panic!("Expected WithChainInfo variant"); + } +} + +#[allow(deprecated)] +async fn test_non_existent_address(subscriber: &StateServiceSubscriber) { + let start_height = 1; + let end_height = HEIGHT_BEYOND_TIP; + + let params = GetAddressDeltasParams::Filtered { + addresses: vec![NON_EXISTENT_ADDRESS.to_string()], + start: start_height, + end: end_height, + chain_info: true, + }; + let response = subscriber.get_address_deltas(params).await.unwrap(); + + if let GetAddressDeltasResponse::WithChainInfo { deltas, start, end } = response { + assert!( + deltas.is_empty(), + "Non-existent address should have no deltas" + ); + assert_eq!( + start.height, start_height, + "Start height should match request" + ); + assert!(end.height > 0, "End height should be set"); + } else { + panic!("Expected WithChainInfo variant"); + } +} + +#[allow(deprecated)] +pub(super) async fn main() { + let ( + mut test_manager, + _fetch_service, + _fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = 
super::create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + true, + None, + ) + .await; + + let (recipient_taddr, faucet_taddr) = setup_chain(&mut test_manager).await; + + // ============================================================ + // Test 1: Simple address query (single address, no filters) + // ============================================================ + test_simple_query(&state_service_subscriber, &recipient_taddr).await; + + // ============================================================ + // Test 2: Filtered query with start=0 (should return Simple variant) + // ============================================================ + test_filtered_start_zero(&state_service_subscriber, &recipient_taddr, &faucet_taddr).await; + + // ============================================================ + // Test 3: Filtered query with start>0 and chain_info=true + // ============================================================ + test_with_chaininfo(&state_service_subscriber, &recipient_taddr, &faucet_taddr).await; + + // ============================================================ + // Test 4: Height clamping (end beyond chain tip) + // ============================================================ + test_height_clamping(&state_service_subscriber, &recipient_taddr, &faucet_taddr).await; + + // ============================================================ + // Test 5: Non-existent address (should return empty deltas) + // ============================================================ + test_non_existent_address(&state_service_subscriber).await; + + test_manager.close().await; +} diff --git a/makefiles/lints.toml b/makefiles/lints.toml new file mode 100644 index 000000000..87d9d9da0 --- /dev/null +++ b/makefiles/lints.toml @@ -0,0 +1,34 @@ +[tasks.fmt] +command = "cargo" +args = ["fmt", "--all", "--", "--check"] + +[tasks.clippy] +command = "cargo" +args = ["clippy", "--all-targets", "--all-features", "--", "-D", "warnings"] + +[tasks.doc] +env = { 
RUSTDOCFLAGS = "-D warnings" } +command = "cargo" +args = ["doc", "--no-deps", "--all-features", "--document-private-items"] + +[tasks.lint] +description = "Run all lints. Use as a pre-commit hook." +dependencies = ["fmt", "clippy", "doc"] + +[tasks.toggle-hooks] +description = "Toggles the git config for core.hooksPath" +script = [ + ''' + set -euo pipefail + + current_path=$(git config --local --get core.hooksPath || echo "unset") + new_path=".githooks/" + if [ "$current_path" = "$new_path" ]; then + echo "Disabling custom hooks path" + git config --local --unset core.hooksPath + else + echo "Enabling custom hooks path" + git config --local core.hooksPath $new_path + fi + ''' +] diff --git a/makefiles/notify.toml b/makefiles/notify.toml new file mode 100644 index 000000000..d85509f17 --- /dev/null +++ b/makefiles/notify.toml @@ -0,0 +1,17 @@ +[tasks.notify] +description = "Play a sound notification (used after long-running tasks)" +script_runner = "bash" +script = ''' +SOUNDS="/usr/share/sounds/freedesktop/stereo/complete.oga /usr/share/sounds/freedesktop/stereo/bell.oga" + +for player in paplay pw-play aplay; do + if command -v "$player" &>/dev/null; then + for sound in $SOUNDS; do + [[ -f "$sound" ]] && "$player" "$sound" 2>/dev/null && exit 0 + done + fi +done + +# Terminal bell as last resort +printf '\a' +''' diff --git a/rust-toolchain.toml b/rust-toolchain.toml index f05db1f3d..05dded7f8 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,6 +1,3 @@ [toolchain] -channel = "1.81.0" +channel = "1.92" components = ["rustfmt", "clippy"] - -[profile] -minimal = true diff --git a/test_binaries/bins/.gitinclude b/test_binaries/bins/.gitinclude new file mode 100644 index 000000000..e69de29bb diff --git a/test_environment/Dockerfile b/test_environment/Dockerfile new file mode 100644 index 000000000..60591534d --- /dev/null +++ b/test_environment/Dockerfile @@ -0,0 +1,176 @@ +# syntax=docker/dockerfile:1.4 +# Begin storing hash-object for this file + 
+######################## +# === GLOBAL ARGS === +######################## +ARG RUST_VERSION +ARG ZCASH_VERSION +ARG ZEBRA_VERSION +ARG UID=1000 +ARG GID=${UID} +ARG USER=container_user +ARG HOME="/home/container_user" +ARG CARGO_HOME="${HOME}/.cargo" +ARG CARGO_TARGET_DIR="${HOME}/target" + +######################## +# === DOWNLOADERS === +######################## + +FROM zfnd/zebra:${ZEBRA_VERSION} AS zebra-downloader +FROM electriccoinco/zcashd:v${ZCASH_VERSION} AS zcashd-downloader + +######################## +# === BUILDERS === +######################## + +FROM rust:${RUST_VERSION}-trixie AS zebra-builder + +ARG ZEBRA_VERSION +RUN apt-get update +RUN apt-get install -y git clang cmake pkg-config libssl-dev protobuf-compiler libstdc++6 +WORKDIR /zebra +RUN git clone https://github.com/ZcashFoundation/zebra . +RUN git checkout "${ZEBRA_VERSION}" +# Use persistent cache for cargo +# RUN --mount=type=cache,target=/usr/local/cargo/registry \ +# --mount=type=cache,target=/usr/local/cargo/git \ +# --mount=type=cache,target=/zebra/target \ +RUN cargo build --release --bin zebrad +RUN cp target/release/zebrad /tmp/zebrad + +FROM debian:trixie AS zcashd-builder + +ARG ZCASH_VERSION +RUN apt-get update && apt-get install -y \ + git build-essential pkg-config libssl-dev automake autoconf \ + libtool bsdmainutils curl + +WORKDIR /zcash +RUN git clone --depth 1 --branch "v${ZCASH_VERSION}" https://github.com/zcash/zcash . 
+RUN ./zcutil/fetch-params.sh && ./zcutil/build.sh -j$(nproc) +RUN cp src/zcashd src/zcash-cli /tmp/ + +######################## +# === BASE RUNTIME IMAGE === +######################## + +FROM rust:${RUST_VERSION}-trixie AS base + +ARG UID +ARG GID +ARG USER +ARG HOME +ARG CARGO_HOME +ARG CARGO_TARGET_DIR + +ENV CARGO_HOME=${CARGO_HOME} +ENV CARGO_TARGET_DIR=${CARGO_TARGET_DIR} +ENV CARGO_TERM_COLOR=always +ENV PROTOC=/usr/bin/protoc + + +LABEL maintainer="nachog00" +LABEL org.zaino.test-artifacts.versions.rust="${RUST_VERSION}" + +RUN apt-get update && apt-get install -y --no-install-recommends \ + libssl-dev \ + curl \ + libclang-dev \ + build-essential \ + cmake \ + libsnappy-dev \ + zlib1g-dev \ + libbz2-dev \ + liblz4-dev \ + libzstd-dev \ + protobuf-compiler \ + && rm -rf /var/lib/apt/lists/* + +# Create container_user +RUN groupadd --gid ${GID} ${USER} && \ + useradd --uid ${UID} --gid ${GID} --home-dir ${HOME} --create-home ${USER} + +# Install rustup (Rust toolchain manager) and set up the Rust toolchain +RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y +RUN rustup update stable + +# Create artifacts directory +RUN mkdir -p /home/container_user/artifacts + +# Copy and prepare entrypoint script +COPY entrypoint.sh /usr/local/bin/entrypoint.sh +RUN chmod +x /usr/local/bin/entrypoint.sh + +WORKDIR ${HOME}/zaino +RUN chown -R ${UID}:${GID} ${HOME} +USER ${USER} + +######################## +# === FINAL TARGETS === +######################## + +FROM base AS final-prebuilt +ARG ZCASH_VERSION +ARG ZEBRA_VERSION +LABEL org.zaino.test-artifacts.versions.zcashd="${ZCASH_VERSION}" +LABEL org.zaino.test-artifacts.versions.zebra="${ZEBRA_VERSION}" +USER root +COPY --from=zcashd-downloader /usr/bin/zcashd ${HOME}/artifacts/ +COPY --from=zcashd-downloader /usr/bin/zcash-cli ${HOME}/artifacts/ +COPY --from=zebra-downloader /usr/local/bin/zebrad ${HOME}/artifacts/ +RUN chmod +x ${HOME}/artifacts/zcashd ${HOME}/artifacts/zcash-cli 
${HOME}/artifacts/zebrad +RUN cargo install cargo-nextest --locked --verbose --root /usr/local && \ + cargo install rust-script --locked --verbose --root /usr/local + +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] +CMD ["/bin/bash"] + +FROM base AS final-zcashd-source +ARG ZCASH_VERSION +ARG ZEBRA_VERSION +LABEL org.zaino.test-artifacts.versions.zcashd="${ZCASH_VERSION}" +LABEL org.zaino.test-artifacts.versions.zebra="${ZEBRA_VERSION}" +USER root +COPY --from=zcashd-builder /tmp/zcashd ${HOME}/artifacts/ +COPY --from=zcashd-builder /tmp/zcash-cli ${HOME}/artifacts/ +COPY --from=zebra-downloader /usr/local/bin/zebrad ${HOME}/artifacts/ +RUN chmod +x ${HOME}/artifacts/zcashd ${HOME}/artifacts/zcash-cli ${HOME}/artifacts/zebrad +RUN cargo install cargo-nextest --locked --verbose --root /usr/local && \ + cargo install rust-script --locked --verbose --root /usr/local + +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] +CMD ["/bin/bash"] + +FROM base AS final-zebrad-source +ARG ZCASH_VERSION +ARG ZEBRA_VERSION +LABEL org.zaino.test-artifacts.versions.zcashd="${ZCASH_VERSION}" +LABEL org.zaino.test-artifacts.versions.zebra="${ZEBRA_VERSION}" +USER root +COPY --from=zcashd-downloader /usr/bin/zcashd ${HOME}/artifacts/ +COPY --from=zcashd-downloader /usr/bin/zcash-cli ${HOME}/artifacts/ +COPY --from=zebra-builder /tmp/zebrad ${HOME}/artifacts/ +RUN chmod +x ${HOME}/artifacts/zcashd ${HOME}/artifacts/zcash-cli ${HOME}/artifacts/zebrad +RUN cargo install cargo-nextest --locked --verbose --root /usr/local && \ + cargo install rust-script --locked --verbose --root /usr/local + +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] +CMD ["/bin/bash"] + +FROM base AS final-all-source +ARG ZCASH_VERSION +ARG ZEBRA_VERSION +LABEL org.zaino.test-artifacts.versions.zcashd="${ZCASH_VERSION}" +LABEL org.zaino.test-artifacts.versions.zebra="${ZEBRA_VERSION}" +USER root +COPY --from=zcashd-builder /tmp/zcashd ${HOME}/artifacts/ +COPY --from=zcashd-builder /tmp/zcash-cli ${HOME}/artifacts/ +COPY 
--from=zebra-builder /tmp/zebrad ${HOME}/artifacts/ +RUN chmod +x ${HOME}/artifacts/zcashd ${HOME}/artifacts/zcash-cli ${HOME}/artifacts/zebrad +RUN cargo install cargo-nextest --locked --verbose --root /usr/local && \ + cargo install rust-script --locked --verbose --root /usr/local + +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] +CMD ["/bin/bash"] diff --git a/test_environment/entrypoint.sh b/test_environment/entrypoint.sh new file mode 100755 index 000000000..27c1f3c82 --- /dev/null +++ b/test_environment/entrypoint.sh @@ -0,0 +1,79 @@ +#!/bin/bash +set -e + +# Usage: link_to_binaries.sh [REPO_ROOT] [ZCASHD_PATH] [ZEBRAD_PATH] [ZCASH_CLI_PATH] +# +# Arguments: +# REPO_ROOT - Repository root directory (default: /home/container_user/zaino) +# ZCASHD_PATH - Path to zcashd binary (default: /home/container_user/artifacts/zcashd) +# ZEBRAD_PATH - Path to zebrad binary (default: /home/container_user/artifacts/zebrad) +# ZCASH_CLI_PATH - Path to zcash-cli binary (default: /home/container_user/artifacts/zcash-cli) + +# Check if this script is being called with arguments that are actually a command to execute +# If the first argument looks like a command (doesn't start with / or .), then skip the setup +if [ $# -gt 0 ] && [[ ! "$1" =~ ^[/\.] 
]]; then + # This is a command, not a path argument - skip setup and execute + exec "$@" +fi + +# Use provided arguments or defaults +REPO_ROOT="${1:-/home/container_user/zaino}" +ZCASHD_PATH="${2:-/home/container_user/artifacts/zcashd}" +ZEBRAD_PATH="${3:-/home/container_user/artifacts/zebrad}" +ZCASH_CLI_PATH="${4:-/home/container_user/artifacts/zcash-cli}" + + +# Try to create necessary directories for cargo if they don't exist +# This helps with permission issues in CI environments +mkdir -p "${HOME}/.cargo" "${HOME}/target" 2>/dev/null || true + +# If running in GitHub Actions, try to ensure workspace is usable +if [ -n "${GITHUB_WORKSPACE}" ] && [ -d "${GITHUB_WORKSPACE}" ]; then + # Try to create .cargo directories if possible + mkdir -p "${GITHUB_WORKSPACE}/.cargo" 2>/dev/null || true + mkdir -p "${GITHUB_WORKSPACE}/target" 2>/dev/null || true +fi + +# Check if test_binaries/bins directory exists and create symlinks if binaries are missing +BINS_DIR="${REPO_ROOT}/test_binaries/bins" + +# Create the bins directory if it doesn't exist +if [ ! -d "$BINS_DIR" ]; then + echo "Creating $BINS_DIR directory..." + mkdir -p "$BINS_DIR" +fi + +echo "Checking for test binaries in $BINS_DIR..." + +# Check and create symlink for zcashd +if [ ! -f "$BINS_DIR/zcashd" ] && [ ! -L "$BINS_DIR/zcashd" ]; then + echo "zcashd not found in $BINS_DIR, creating symlink..." + ln -s "$ZCASHD_PATH" "$BINS_DIR/zcashd" + # Fix ownership to match the directory owner + if [ "$(id -u)" = "0" ]; then + chown --reference="$BINS_DIR" "$BINS_DIR/zcashd" 2>/dev/null || true + fi +fi + +# Check and create symlink for zebrad +if [ ! -f "$BINS_DIR/zebrad" ] && [ ! -L "$BINS_DIR/zebrad" ]; then + echo "zebrad not found in $BINS_DIR, creating symlink..." 
+ ln -s "$ZEBRAD_PATH" "$BINS_DIR/zebrad" + # Fix ownership to match the directory owner + if [ "$(id -u)" = "0" ]; then + chown --reference="$BINS_DIR" "$BINS_DIR/zebrad" 2>/dev/null || true + fi +fi + +# Check and create symlink for zcash-cli +if [ ! -f "$BINS_DIR/zcash-cli" ] && [ ! -L "$BINS_DIR/zcash-cli" ]; then + echo "zcash-cli not found in $BINS_DIR, creating symlink..." + ln -s "$ZCASH_CLI_PATH" "$BINS_DIR/zcash-cli" + # Fix ownership to match the directory owner + if [ "$(id -u)" = "0" ]; then + chown --reference="$BINS_DIR" "$BINS_DIR/zcash-cli" 2>/dev/null || true + fi +fi + +echo "Binary setup complete. Contents of $BINS_DIR:" +ls -la "$BINS_DIR" \ No newline at end of file diff --git a/test_environment/test-docker-permissions.sh b/test_environment/test-docker-permissions.sh new file mode 100755 index 000000000..23aeaa29c --- /dev/null +++ b/test_environment/test-docker-permissions.sh @@ -0,0 +1,132 @@ +#!/usr/bin/env bash + +# Local Docker permission tests for zaino image. +# These tests verify the entrypoint correctly handles volume mounts +# with various ownership scenarios. +# +# Usage: ./test-docker-permissions.sh [image-name] +# Default image: zaino:test-entrypoint + +set -uo pipefail + +IMAGE="${1:-zaino:test-entrypoint}" +TEST_DIR="/tmp/zaino-docker-tests-$$" +PASSED=0 +FAILED=0 + +cleanup() { + echo "Cleaning up ${TEST_DIR}..." 
+ rm -rf "${TEST_DIR}" +} +trap cleanup EXIT + +mkdir -p "${TEST_DIR}" + +pass() { + echo "✅ $1" + ((PASSED++)) +} + +fail() { + echo "❌ $1" + ((FAILED++)) +} + +run_test() { + local name="$1" + shift + echo "--- Testing: ${name} ---" + if "$@"; then + pass "${name}" + else + fail "${name}" + fi + echo +} + +# Basic smoke tests +run_test "help command" \ + docker run --rm "${IMAGE}" --help + +run_test "version command" \ + docker run --rm "${IMAGE}" --version + +run_test "generate-config command" \ + docker run --rm "${IMAGE}" generate-config + +run_test "start --help command" \ + docker run --rm "${IMAGE}" start --help + +# Volume mount tests - using /app paths +test_config_mount() { + local dir="${TEST_DIR}/config" + mkdir -p "${dir}" + docker run --rm -v "${dir}:/app/config" "${IMAGE}" generate-config + test -f "${dir}/zainod.toml" +} +run_test "config dir mount (/app/config)" test_config_mount + +test_data_mount() { + local dir="${TEST_DIR}/data" + mkdir -p "${dir}" + docker run --rm -v "${dir}:/app/data" "${IMAGE}" --version +} +run_test "data dir mount (/app/data)" test_data_mount + +# File ownership verification +test_file_ownership() { + local dir="${TEST_DIR}/ownership-test" + mkdir -p "${dir}" + docker run --rm -v "${dir}:/app/config" "${IMAGE}" generate-config + # File should be owned by UID 1000 (container_user) + local uid + uid=$(stat -c '%u' "${dir}/zainod.toml" 2>/dev/null || stat -f '%u' "${dir}/zainod.toml") + test "${uid}" = "1000" +} +run_test "files created with correct UID (1000)" test_file_ownership + +# Root-owned directory tests (requires sudo) +if command -v sudo &>/dev/null && sudo -n true 2>/dev/null; then + test_root_owned_mount() { + local dir="${TEST_DIR}/root-owned" + sudo mkdir -p "${dir}" + sudo chown root:root "${dir}" + docker run --rm -v "${dir}:/app/data" "${IMAGE}" --version + # Entrypoint should have chowned it + local uid + uid=$(stat -c '%u' "${dir}" 2>/dev/null || stat -f '%u' "${dir}") + test "${uid}" = "1000" + } + 
run_test "root-owned dir gets chowned" test_root_owned_mount + + test_root_owned_config_write() { + local dir="${TEST_DIR}/root-config" + sudo mkdir -p "${dir}" + sudo chown root:root "${dir}" + docker run --rm -v "${dir}:/app/config" "${IMAGE}" generate-config + test -f "${dir}/zainod.toml" + } + run_test "write to root-owned config dir" test_root_owned_config_write +else + echo "⚠️ Skipping root-owned tests (sudo not available or requires password)" +fi + +# Read-only config mount +test_readonly_config() { + local dir="${TEST_DIR}/ro-config" + mkdir -p "${dir}" + # First generate a config + docker run --rm -v "${dir}:/app/config" "${IMAGE}" generate-config + # Then mount it read-only and verify we can still run + docker run --rm -v "${dir}:/app/config:ro" "${IMAGE}" --version +} +run_test "read-only config mount" test_readonly_config + +# Summary +echo "=========================================" +echo "Results: ${PASSED} passed, ${FAILED} failed" +echo "=========================================" + +if [[ ${FAILED} -gt 0 ]]; then + exit 1 +fi diff --git a/utils/benchmark-testing.sh b/utils/benchmark-testing.sh new file mode 100755 index 000000000..e168abd3a --- /dev/null +++ b/utils/benchmark-testing.sh @@ -0,0 +1,20 @@ +#!/bin/bash +for i in {5..10} +do + echo "-----------" >> benchmark_for_tests.txt + echo "starting runs for thread count of*************************:" >> benchmark_for_tests.txt + echo $i >> benchmark_for_tests.txt + echo "*****************-----------************" >> benchmark_for_tests.txt + for x in {1..10} + do + echo "-----------" >> benchmark_for_tests.txt + echo "try:" >> benchmark_for_tests.txt + echo $x >> benchmark_for_tests.txt + cargo nextest run --test-threads=$i --cargo-quiet --cargo-quiet --failure-output final --status-level none --final-status-level slow --hide-progress-bar &>> benchmark_for_tests.txt + echo "-----------" >> benchmark_for_tests.txt + done + echo "-----------" >> benchmark_for_tests.txt + echo "ending runs for 
thread count of:" >> benchmark_for_tests.txt + echo $i >> benchmark_for_tests.txt + echo "-----------" >> benchmark_for_tests.txt +done diff --git a/utils/check_package.sh b/utils/check_package.sh new file mode 100755 index 000000000..9fa1d7bdb --- /dev/null +++ b/utils/check_package.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Check if package name is provided +if [ $# -eq 0 ]; then + echo "Usage: $0 " + echo "Example: $0 zaino-state" + exit 1 +fi + +PACKAGE_NAME="$1" + +# Run all cargo commands for the specified package +set -e # Exit on first error + +echo "Running checks for package: $PACKAGE_NAME" + +cargo check --package "$PACKAGE_NAME" && \ +cargo check --all-features --package "$PACKAGE_NAME" && \ +cargo check --tests --package "$PACKAGE_NAME" && \ +cargo check --tests --all-features --package "$PACKAGE_NAME" && \ +cargo fmt --package "$PACKAGE_NAME" && \ +cargo clippy --package "$PACKAGE_NAME" && \ +cargo nextest run --test-threads $(nproc) --package "$PACKAGE_NAME" diff --git a/utils/functions.sh b/utils/functions.sh new file mode 100644 index 000000000..063acfee0 --- /dev/null +++ b/utils/functions.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Shared utility functions for build scripts + +get_docker_hash() { + local git_root + git_root=$(git rev-parse --show-toplevel) + cd "$git_root" + git ls-tree HEAD test_environment | git hash-object --stdin | cut -c1-14 +} \ No newline at end of file diff --git a/utils/get-ci-image-tag.sh b/utils/get-ci-image-tag.sh new file mode 100755 index 000000000..850678271 --- /dev/null +++ b/utils/get-ci-image-tag.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Source shared utility functions +source "$(dirname "${BASH_SOURCE[0]}")/functions.sh" + +# Accepts env vars already loaded in the calling context +main() { + local docker_hash + docker_hash=$(get_docker_hash) + + local tag_vars="RUST_$RUST_VERSION-ZCASH_$ZCASH_VERSION-ZEBRA_$ZEBRA_VERSION-DOCKER_$docker_hash" + local tag + tag=$(echo "$tag_vars" | tr ' ' '\n' | 
sort | sha256sum | cut -c1-12) + # echo "VERSIONS: $tag_vars" + # echo "TAG: $tag" + echo "$tag_vars" +} + +main "$@" + diff --git a/utils/get-docker-hash.sh b/utils/get-docker-hash.sh new file mode 100755 index 000000000..fdfa33b5d --- /dev/null +++ b/utils/get-docker-hash.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Source shared utility functions +source "$(dirname "${BASH_SOURCE[0]}")/functions.sh" + +# Execute the function and output result +get_docker_hash + + diff --git a/utils/helpers.sh b/utils/helpers.sh new file mode 100755 index 000000000..b705c5422 --- /dev/null +++ b/utils/helpers.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# ------- HELPERS ------------ + +info() { + echo -e "\033[1;36m\033[1m>>> $1\033[0m" +} + +warn() { + echo -e "\033[1;33m\033[1m>>> $1\033[0m" +} + +err() { + echo -e "\033[1;31m\033[1m>>> $1\033[0m" +} + +is_tag() { + [[ "$1" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]] +} + +resolve_build_target() { + local zcash="$1" + local zebra="$2" + + if is_tag "$zcash" && is_tag "$zebra"; then + echo "final-prebuilt" + elif ! is_tag "$zcash" && is_tag "$zebra"; then + echo "final-zcashd-source" + elif is_tag "$zcash" && ! 
is_tag "$zebra"; then + echo "final-zebrad-source" + else + echo "final-all-source" + fi +} + diff --git a/utils/precommit-check.sh b/utils/precommit-check.sh new file mode 100755 index 000000000..2d834848c --- /dev/null +++ b/utils/precommit-check.sh @@ -0,0 +1 @@ +cargo check --all-features && cargo check --tests --all-features && cargo fmt && cargo clippy && ./utils/trailing-whitespace.sh reject diff --git a/utils/trailing-whitespace.sh b/utils/trailing-whitespace.sh new file mode 100755 index 000000000..812e8a779 --- /dev/null +++ b/utils/trailing-whitespace.sh @@ -0,0 +1,73 @@ +#!/bin/bash +set -efuo pipefail + +function main +{ + [ $# -eq 1 ] || usage-error 'expected a single arg' + + # cd to repo dir: + cd "$(git rev-parse --show-toplevel)" + + case "$1" in + fix) fix ;; + reject) reject ;; + *) usage-error "unknown command: $1" ;; + esac +} + +function fix +{ + process-well-known-text-files sed -i 's/ *$//' +} + +function reject +{ + local F="$(mktemp --tmpdir zingolib-trailing-whitespace.XXX)" + + process-well-known-text-files grep -E --with-filename ' +$' \ + | sed 's/$/\\n/' \ + | tee "$F" + + local NOISE="$(cat "$F" | wc -l)" + rm "$F" + + if [ "$NOISE" -eq 0 ] + then + echo 'No trailing whitespace detected.' + else + echo -e '\nRejecting trailing whitespace above.' + exit 1 + fi +} + +function process-well-known-text-files +{ + find . 
\ + \( -type d \ + \( \ + -name '.git' \ + -o -name 'target' \ + \) \ + -prune \ + \) \ + -o \( \ + -type f \ + \( \ + -name '*.rs' \ + -o -name '*.md' \ + -o -name '*.toml' \ + -o -name '*.yaml' \ + \) \ + -exec "$@" '{}' \; \ + \) +} + +function usage-error +{ + echo "usage error: $*" + echo + echo "usage: $0 ( fix | reject )" + exit 1 +} + +main "$@" diff --git a/zaino-common/Cargo.toml b/zaino-common/Cargo.toml new file mode 100644 index 000000000..b485b8e7b --- /dev/null +++ b/zaino-common/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "zaino-common" +description = "Common types and configurations shared across Zaino crates." +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +version = { workspace = true } + +[dependencies] +# Zebra +zebra-chain = { workspace = true } + +# Serialization +serde = { workspace = true, features = ["derive"] } + +# Error handling +thiserror = { workspace = true } + +# Zingo +zingo_common_components = { workspace = true } diff --git a/zaino-common/src/config.rs b/zaino-common/src/config.rs new file mode 100644 index 000000000..46fa5efae --- /dev/null +++ b/zaino-common/src/config.rs @@ -0,0 +1,4 @@ +pub mod network; +pub mod service; +pub mod storage; +pub mod validator; diff --git a/zaino-common/src/config/network.rs b/zaino-common/src/config/network.rs new file mode 100644 index 000000000..a935eabe9 --- /dev/null +++ b/zaino-common/src/config/network.rs @@ -0,0 +1,334 @@ +//! Network type for Zaino configuration. 
+ +use serde::{Deserialize, Serialize}; +use zebra_chain::parameters::testnet::ConfiguredActivationHeights; + +pub const ZEBRAD_DEFAULT_ACTIVATION_HEIGHTS: ActivationHeights = ActivationHeights { + overwinter: Some(1), + before_overwinter: Some(1), + sapling: Some(1), + blossom: Some(1), + heartwood: Some(1), + canopy: Some(1), + nu5: Some(2), + nu6: Some(2), + nu6_1: Some(1000), + nu7: None, +}; + +/// Network type for Zaino configuration. +#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Deserialize, serde::Serialize)] +#[serde(from = "NetworkSerde", into = "NetworkSerde")] +pub enum Network { + /// Mainnet network + Mainnet, + /// Testnet network + Testnet, + /// Regtest network (for local testing) + Regtest(ActivationHeights), +} + +/// Helper type for Network serialization/deserialization. +/// +/// This allows Network to serialize as simple strings ("Mainnet", "Testnet", "Regtest") +/// while the actual Network::Regtest variant carries activation heights internally. +#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Deserialize, serde::Serialize)] +enum NetworkSerde { + Mainnet, + Testnet, + Regtest, +} + +impl From for Network { + fn from(value: NetworkSerde) -> Self { + match value { + NetworkSerde::Mainnet => Network::Mainnet, + NetworkSerde::Testnet => Network::Testnet, + NetworkSerde::Regtest => Network::Regtest(ZEBRAD_DEFAULT_ACTIVATION_HEIGHTS), + } + } +} + +impl From for NetworkSerde { + fn from(value: Network) -> Self { + match value { + Network::Mainnet => NetworkSerde::Mainnet, + Network::Testnet => NetworkSerde::Testnet, + Network::Regtest(_) => NetworkSerde::Regtest, + } + } +} + +/// Configurable activation heights for Regtest and configured Testnets. 
+/// +/// We use our own type instead of the zebra type +/// as the zebra type is missing a number of useful +/// traits, notably Debug, PartialEq, and Eq +/// +/// This also allows us to define our own set +/// of defaults +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Copy)] +#[serde(rename_all = "PascalCase", deny_unknown_fields)] +pub struct ActivationHeights { + /// Activation height for `BeforeOverwinter` network upgrade. + pub before_overwinter: Option, + /// Activation height for `Overwinter` network upgrade. + pub overwinter: Option, + /// Activation height for `Sapling` network upgrade. + pub sapling: Option, + /// Activation height for `Blossom` network upgrade. + pub blossom: Option, + /// Activation height for `Heartwood` network upgrade. + pub heartwood: Option, + /// Activation height for `Canopy` network upgrade. + pub canopy: Option, + /// Activation height for `NU5` network upgrade. + #[serde(rename = "NU5")] + pub nu5: Option, + /// Activation height for `NU6` network upgrade. + #[serde(rename = "NU6")] + pub nu6: Option, + /// Activation height for `NU6.1` network upgrade. + /// see for info on NU6.1 + #[serde(rename = "NU6.1")] + pub nu6_1: Option, + /// Activation height for `NU7` network upgrade. 
+ #[serde(rename = "NU7")] + pub nu7: Option, +} + +impl Default for ActivationHeights { + fn default() -> Self { + ActivationHeights { + before_overwinter: Some(1), + overwinter: Some(1), + sapling: Some(1), + blossom: Some(1), + heartwood: Some(1), + canopy: Some(1), + nu5: Some(2), + nu6: Some(2), + nu6_1: Some(2), + nu7: None, + } + } +} + +impl Into for ActivationHeights { + fn into(self) -> zingo_common_components::protocol::ActivationHeights { + zingo_common_components::protocol::ActivationHeightsBuilder::new() + .set_overwinter(self.overwinter) + .set_sapling(self.sapling) + .set_blossom(self.blossom) + .set_heartwood(self.heartwood) + .set_canopy(self.canopy) + .set_nu5(self.nu5) + .set_nu6(self.nu6) + .set_nu6_1(self.nu6_1) + .set_nu7(self.nu7) + .build() + } +} + +impl From for ActivationHeights { + fn from( + ConfiguredActivationHeights { + before_overwinter, + overwinter, + sapling, + blossom, + heartwood, + canopy, + nu5, + nu6, + nu6_1, + nu7, + }: ConfiguredActivationHeights, + ) -> Self { + Self { + before_overwinter, + overwinter, + sapling, + blossom, + heartwood, + canopy, + nu5, + nu6, + nu6_1, + nu7, + } + } +} +impl From for ConfiguredActivationHeights { + fn from( + ActivationHeights { + before_overwinter, + overwinter, + sapling, + blossom, + heartwood, + canopy, + nu5, + nu6, + nu6_1, + nu7, + }: ActivationHeights, + ) -> Self { + Self { + before_overwinter, + overwinter, + sapling, + blossom, + heartwood, + canopy, + nu5, + nu6, + nu6_1, + nu7, + } + } +} + +impl From for ActivationHeights { + fn from( + activation_heights: zingo_common_components::protocol::ActivationHeights + ) -> Self { + ActivationHeights { + before_overwinter: activation_heights.overwinter(), + overwinter: activation_heights.overwinter(), + sapling: activation_heights.sapling(), + blossom: activation_heights.blossom(), + heartwood: activation_heights.heartwood(), + canopy: activation_heights.canopy(), + nu5: activation_heights.nu5(), + nu6: activation_heights.nu6(), 
+ nu6_1: activation_heights.nu6_1(), + nu7: activation_heights.nu7(), + } + } +} + +impl Network { + /// Convert to Zebra's network type for internal use (alias for to_zebra_default). + pub fn to_zebra_network(&self) -> zebra_chain::parameters::Network { + self.into() + } + + /// Get the standard regtest activation heights used by Zaino. + pub fn zaino_regtest_heights() -> ConfiguredActivationHeights { + ConfiguredActivationHeights { + before_overwinter: Some(1), + overwinter: Some(1), + sapling: Some(1), + blossom: Some(1), + heartwood: Some(1), + canopy: Some(1), + nu5: Some(1), + nu6: Some(1), + nu6_1: None, + nu7: None, + } + } + + /// Determines if we should wait for the server to fully sync. Used for testing + /// + /// - Mainnet/Testnet: Skip sync (false) because we don't want to sync real chains in tests + /// - Regtest: Enable sync (true) because regtest is local and fast to sync + pub fn wait_on_server_sync(&self) -> bool { + match self { + Network::Mainnet | Network::Testnet => false, // Real networks - don't try to sync the whole chain + Network::Regtest(_) => true, // Local network - safe and fast to sync + } + } + + pub fn from_network_kind_and_activation_heights( + network: &zebra_chain::parameters::NetworkKind, + activation_heights: &ActivationHeights, + ) -> Self { + match network { + zebra_chain::parameters::NetworkKind::Mainnet => Network::Mainnet, + zebra_chain::parameters::NetworkKind::Testnet => Network::Testnet, + zebra_chain::parameters::NetworkKind::Regtest => Network::Regtest(*activation_heights), + } + } +} + +impl From for Network { + fn from(value: zebra_chain::parameters::Network) -> Self { + match value { + zebra_chain::parameters::Network::Mainnet => Network::Mainnet, + zebra_chain::parameters::Network::Testnet(parameters) => { + if parameters.is_regtest() { + let mut activation_heights = ActivationHeights { + before_overwinter: None, + overwinter: None, + sapling: None, + blossom: None, + heartwood: None, + canopy: None, + nu5: 
None, + nu6: None, + nu6_1: None, + nu7: None, + }; + for (height, upgrade) in parameters.activation_heights().iter() { + match upgrade { + zebra_chain::parameters::NetworkUpgrade::Genesis => (), + zebra_chain::parameters::NetworkUpgrade::BeforeOverwinter => { + activation_heights.before_overwinter = Some(height.0) + } + zebra_chain::parameters::NetworkUpgrade::Overwinter => { + activation_heights.overwinter = Some(height.0) + } + zebra_chain::parameters::NetworkUpgrade::Sapling => { + activation_heights.sapling = Some(height.0) + } + zebra_chain::parameters::NetworkUpgrade::Blossom => { + activation_heights.blossom = Some(height.0) + } + zebra_chain::parameters::NetworkUpgrade::Heartwood => { + activation_heights.heartwood = Some(height.0) + } + zebra_chain::parameters::NetworkUpgrade::Canopy => { + activation_heights.canopy = Some(height.0) + } + zebra_chain::parameters::NetworkUpgrade::Nu5 => { + activation_heights.nu5 = Some(height.0) + } + zebra_chain::parameters::NetworkUpgrade::Nu6 => { + activation_heights.nu6 = Some(height.0) + } + zebra_chain::parameters::NetworkUpgrade::Nu6_1 => { + activation_heights.nu6_1 = Some(height.0) + } + zebra_chain::parameters::NetworkUpgrade::Nu7 => { + activation_heights.nu7 = Some(height.0) + } + } + } + Network::Regtest(activation_heights) + } else { + Network::Testnet + } + } + } + } +} + +impl From for zebra_chain::parameters::Network { + fn from(val: Network) -> Self { + match val { + Network::Regtest(activation_heights) => zebra_chain::parameters::Network::new_regtest( + Into::::into(activation_heights).into(), + ), + Network::Testnet => zebra_chain::parameters::Network::new_default_testnet(), + Network::Mainnet => zebra_chain::parameters::Network::Mainnet, + } + } +} + +impl From<&Network> for zebra_chain::parameters::Network { + fn from(val: &Network) -> Self { + (*val).into() + } +} diff --git a/zaino-common/src/config/service.rs b/zaino-common/src/config/service.rs new file mode 100644 index 000000000..2be271855 --- 
/dev/null +++ b/zaino-common/src/config/service.rs @@ -0,0 +1,22 @@ +//! Service-level configuration shared across Zaino services. + +/// Service-level configuration for timeouts and channels. +/// +/// This configuration is used by multiple Zaino services that need to configure +/// RPC timeouts and channel buffer sizes. +#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] +pub struct ServiceConfig { + /// Service RPC timeout in seconds + pub timeout: u32, + /// Service RPC maximum channel size + pub channel_size: u32, +} + +impl Default for ServiceConfig { + fn default() -> Self { + Self { + timeout: 30, + channel_size: 32, + } + } +} diff --git a/zaino-common/src/config/storage.rs b/zaino-common/src/config/storage.rs new file mode 100644 index 000000000..79f6446c3 --- /dev/null +++ b/zaino-common/src/config/storage.rs @@ -0,0 +1,92 @@ +//! Storage configuration types shared across Zaino services. + +use std::path::PathBuf; + +use crate::xdg::resolve_path_with_xdg_cache_defaults; + +/// Cache configuration for DashMaps. +/// +/// Used by the mempool and BlockCache non-finalized state (FetchService backend). +#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] +#[serde(default)] +pub struct CacheConfig { + /// Capacity of the DashMaps used for caching. + pub capacity: usize, + /// Power of 2 for number of shards (e.g., 4 means 16 shards). + /// + /// The actual shard count will be 2^shard_power. + /// Valid range is typically 0-8 (1 to 256 shards). + /// Must be greater than 0. + pub shard_power: u8, +} + +impl CacheConfig { + /// Get the actual number of shards (2^shard_power) + pub fn shard_count(&self) -> u32 { + // // 'a< Self { + Self { + capacity: 10000, // Default capacity + shard_power: 4, // Default to 16 shards + } + } +} + +/// Database size limit in gigabytes. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Deserialize, serde::Serialize)] +#[serde(transparent)] +pub struct DatabaseSize(pub usize); + +impl Default for DatabaseSize { + fn default() -> Self { + DatabaseSize(384) // Default to 384 GB + } +} + +impl DatabaseSize { + /// Convert to bytes. + pub fn to_byte_count(&self) -> usize { + self.0 * 1024 * 1024 * 1024 + } +} + +/// Database configuration. +/// +/// Configures the file path and size limits for persistent storage +/// used by Zaino services. +#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] +#[serde(default)] +pub struct DatabaseConfig { + /// Database file path. + pub path: PathBuf, + /// Database size limit. Defaults to 128 GB. + #[serde(default)] + pub size: DatabaseSize, +} + +impl Default for DatabaseConfig { + fn default() -> Self { + Self { + path: resolve_path_with_xdg_cache_defaults("zaino"), + size: DatabaseSize::default(), + } + } +} + +/// Storage configuration combining cache and database settings. +/// +/// This is used by services that need both in-memory caching and persistent storage. +#[derive(Debug, Clone, serde::Deserialize, serde::Serialize, Default)] +pub struct StorageConfig { + /// Cache configuration. Uses defaults if not specified in TOML. + #[serde(default)] + pub cache: CacheConfig, + /// Database configuration + pub database: DatabaseConfig, +} diff --git a/zaino-common/src/config/validator.rs b/zaino-common/src/config/validator.rs new file mode 100644 index 000000000..c8ffcbd2a --- /dev/null +++ b/zaino-common/src/config/validator.rs @@ -0,0 +1,44 @@ +//! Validator type for Zaino configuration. + +// use serde::{Deserialize, Serialize}; +// use zebra_chain::parameters::testnet::ConfiguredActivationHeights; +use std::path::PathBuf; + +/// Validator (full-node) connection settings. +/// +/// Configures how Zaino connects to the backing validator (Zebra or Zcashd). 
+#[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize, serde::Serialize)] +#[serde(default)] +pub struct ValidatorConfig { + /// Validator gRPC listen address (Zebra only). + /// + /// Must be a "private" address as defined in IETF RFC 1918 (IPv4) or RFC 4193 (IPv6). + /// Cookie or user/password authentication is recommended for non-localhost addresses. + pub validator_grpc_listen_address: Option, + /// Validator JSON-RPC listen address. + /// + /// Supports hostname:port or ip:port format. + /// Must be a "private" address as defined in IETF RFC 1918 (IPv4) or RFC 4193 (IPv6). + pub validator_jsonrpc_listen_address: String, + /// Path to the validator cookie file for cookie-based authentication. + /// + /// When set, enables cookie authentication instead of user/password. + pub validator_cookie_path: Option, + /// Validator RPC username for user/password authentication. + pub validator_user: Option, + /// Validator RPC password for user/password authentication. + pub validator_password: Option, +} + +/// Required by `#[serde(default)]` to fill missing fields when deserializing partial TOML configs. +impl Default for ValidatorConfig { + fn default() -> Self { + Self { + validator_grpc_listen_address: Some("127.0.0.1:18230".to_string()), + validator_jsonrpc_listen_address: "127.0.0.1:18232".to_string(), + validator_cookie_path: None, + validator_user: Some("xxxxxx".to_string()), + validator_password: Some("xxxxxx".to_string()), + } + } +} diff --git a/zaino-common/src/lib.rs b/zaino-common/src/lib.rs new file mode 100644 index 000000000..934ef27d5 --- /dev/null +++ b/zaino-common/src/lib.rs @@ -0,0 +1,26 @@ +//! Common types and configurations shared across Zaino crates. +//! +//! This crate provides shared configuration types, network abstractions, +//! and common utilities used across the Zaino blockchain indexer ecosystem. 
+ +pub mod config; +pub mod net; +pub mod probing; +pub mod status; +pub mod xdg; + +// Re-export network utilities +pub use net::{resolve_socket_addr, try_resolve_address, AddressResolution}; + +// Re-export commonly used config types at crate root for backward compatibility. +// This allows existing code using `use zaino_common::Network` to continue working. +pub use config::network::{ActivationHeights, Network, ZEBRAD_DEFAULT_ACTIVATION_HEIGHTS}; +pub use config::service::ServiceConfig; +pub use config::storage::{CacheConfig, DatabaseConfig, DatabaseSize, StorageConfig}; +pub use config::validator::ValidatorConfig; + +// Keep submodule access available for more specific imports if needed +pub use config::network; +pub use config::service; +pub use config::storage; +pub use config::validator; diff --git a/zaino-common/src/net.rs b/zaino-common/src/net.rs new file mode 100644 index 000000000..d53912b14 --- /dev/null +++ b/zaino-common/src/net.rs @@ -0,0 +1,329 @@ +//! Network utilities for Zaino. + +use std::net::{SocketAddr, ToSocketAddrs}; + +/// Result of attempting to resolve an address string. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum AddressResolution { + /// Successfully resolved to a socket address. + Resolved(SocketAddr), + /// Address appears to be a valid hostname:port format but DNS lookup failed. + /// This is acceptable for deferred resolution (e.g., Docker DNS). + UnresolvedHostname { + /// The original address string. + address: String, + /// The DNS error message. + error: String, + }, + /// Address format is invalid (missing port, garbage input, etc.). + /// This should always be treated as an error. + InvalidFormat { + /// The original address string. + address: String, + /// Description of what's wrong with the format. + reason: String, + }, +} + +impl AddressResolution { + /// Returns the resolved address if available. 
+ pub fn resolved(&self) -> Option { + match self { + AddressResolution::Resolved(addr) => Some(*addr), + _ => None, + } + } + + /// Returns true if the address was successfully resolved. + pub fn is_resolved(&self) -> bool { + matches!(self, AddressResolution::Resolved(_)) + } + + /// Returns true if the address has a valid format but couldn't be resolved. + /// This is acceptable for deferred resolution scenarios like Docker DNS. + pub fn is_unresolved_hostname(&self) -> bool { + matches!(self, AddressResolution::UnresolvedHostname { .. }) + } + + /// Returns true if the address format is invalid. + pub fn is_invalid_format(&self) -> bool { + matches!(self, AddressResolution::InvalidFormat { .. }) + } +} + +/// Validates that an address string has a valid format (host:port). +/// +/// This performs basic format validation without DNS lookup: +/// - Must contain exactly one `:` separator (or be IPv6 format `[...]:port`) +/// - Port must be a valid number +/// - Host part must not be empty +fn validate_address_format(address: &str) -> Result<(), String> { + let address = address.trim(); + + if address.is_empty() { + return Err("Address cannot be empty".to_string()); + } + + // Handle IPv6 format: [::1]:port + if address.starts_with('[') { + let Some(bracket_end) = address.find(']') else { + return Err("IPv6 address missing closing bracket".to_string()); + }; + + if bracket_end + 1 >= address.len() { + return Err("Missing port after IPv6 address".to_string()); + } + + let after_bracket = &address[bracket_end + 1..]; + if !after_bracket.starts_with(':') { + return Err("Expected ':' after IPv6 address bracket".to_string()); + } + + let port_str = &after_bracket[1..]; + port_str + .parse::() + .map_err(|_| format!("Invalid port number: '{port_str}'"))?; + + return Ok(()); + } + + // Handle IPv4/hostname format: host:port + let parts: Vec<&str> = address.rsplitn(2, ':').collect(); + if parts.len() != 2 { + return Err("Missing port (expected format: 
'host:port')".to_string()); + } + + let port_str = parts[0]; + let host = parts[1]; + + if host.is_empty() { + return Err("Host cannot be empty".to_string()); + } + + port_str + .parse::() + .map_err(|_| format!("Invalid port number: '{port_str}'"))?; + + Ok(()) +} + +/// Attempts to resolve an address string, returning detailed information about the result. +/// +/// This function distinguishes between: +/// - Successfully resolved addresses +/// - Valid hostname:port format that failed DNS lookup (acceptable for Docker DNS) +/// - Invalid address format (always an error) +/// +/// # Examples +/// +/// ``` +/// use zaino_common::net::{try_resolve_address, AddressResolution}; +/// +/// // IP:port format resolves immediately +/// let result = try_resolve_address("127.0.0.1:8080"); +/// assert!(result.is_resolved()); +/// +/// // Invalid format is detected +/// let result = try_resolve_address("no-port-here"); +/// assert!(result.is_invalid_format()); +/// ``` +pub fn try_resolve_address(address: &str) -> AddressResolution { + // First validate the format + if let Err(reason) = validate_address_format(address) { + return AddressResolution::InvalidFormat { + address: address.to_string(), + reason, + }; + } + + // Try parsing as SocketAddr first (handles ip:port format directly) + if let Ok(addr) = address.parse::() { + return AddressResolution::Resolved(addr); + } + + // Fall back to DNS resolution for hostname:port format + match address.to_socket_addrs() { + Ok(mut addrs) => { + let addrs_vec: Vec = addrs.by_ref().collect(); + + // Prefer IPv4 if available (more compatible, especially in Docker) + if let Some(ipv4_addr) = addrs_vec.iter().find(|addr| addr.is_ipv4()) { + AddressResolution::Resolved(*ipv4_addr) + } else if let Some(addr) = addrs_vec.into_iter().next() { + AddressResolution::Resolved(addr) + } else { + AddressResolution::UnresolvedHostname { + address: address.to_string(), + error: "DNS returned no addresses".to_string(), + } + } + } + Err(e) => 
AddressResolution::UnresolvedHostname { + address: address.to_string(), + error: e.to_string(), + }, + } +} + +/// Resolves an address string to a [`SocketAddr`]. +/// +/// Accepts both IP:port format (e.g., "127.0.0.1:8080") and hostname:port format +/// (e.g., "zebra:18232" for Docker DNS resolution). +/// +/// When both IPv4 and IPv6 addresses are available, IPv4 is preferred. +/// +/// # Examples +/// +/// ``` +/// use zaino_common::net::resolve_socket_addr; +/// +/// // IP:port format +/// let addr = resolve_socket_addr("127.0.0.1:8080").unwrap(); +/// assert_eq!(addr.port(), 8080); +/// +/// // Hostname resolution (localhost) +/// let addr = resolve_socket_addr("localhost:8080").unwrap(); +/// assert!(addr.ip().is_loopback()); +/// ``` +/// +/// # Errors +/// +/// Returns an error if: +/// - The address format is invalid (missing port, invalid IP, etc.) +/// - The hostname cannot be resolved (DNS lookup failure) +/// - No addresses are returned from resolution +pub fn resolve_socket_addr(address: &str) -> Result { + match try_resolve_address(address) { + AddressResolution::Resolved(addr) => Ok(addr), + AddressResolution::UnresolvedHostname { address, error } => Err(std::io::Error::new( + std::io::ErrorKind::NotFound, + format!("Cannot resolve hostname '{address}': {error}"), + )), + AddressResolution::InvalidFormat { address, reason } => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + format!("Invalid address format '{address}': {reason}"), + )), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::net::Ipv4Addr; + + // === Format validation tests (no DNS, always reliable) === + + #[test] + fn test_resolve_ipv4_address() { + let result = resolve_socket_addr("127.0.0.1:8080"); + assert!(result.is_ok()); + let addr = result.unwrap(); + assert_eq!(addr.ip().to_string(), "127.0.0.1"); + assert_eq!(addr.port(), 8080); + } + + #[test] + fn test_resolve_ipv4_any_address() { + let result = resolve_socket_addr("0.0.0.0:18232"); + 
assert!(result.is_ok()); + let addr = result.unwrap(); + assert_eq!(addr.ip(), Ipv4Addr::UNSPECIFIED); + assert_eq!(addr.port(), 18232); + } + + #[test] + fn test_resolve_ipv6_localhost() { + let result = resolve_socket_addr("[::1]:8080"); + assert!(result.is_ok()); + let addr = result.unwrap(); + assert!(addr.is_ipv6()); + assert_eq!(addr.port(), 8080); + } + + #[test] + fn test_resolve_missing_port() { + let result = try_resolve_address("127.0.0.1"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_empty_string() { + let result = try_resolve_address(""); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_invalid_port() { + let result = try_resolve_address("127.0.0.1:invalid"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_port_too_large() { + let result = try_resolve_address("127.0.0.1:99999"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_empty_host() { + let result = try_resolve_address(":8080"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_ipv6_missing_port() { + let result = try_resolve_address("[::1]"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_ipv6_missing_bracket() { + let result = try_resolve_address("[::1:8080"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_valid_hostname_format() { + // This hostname has valid format but won't resolve + let result = try_resolve_address("nonexistent-host.invalid:8080"); + // Should be unresolved hostname, not invalid format + assert!( + result.is_unresolved_hostname(), + "Expected UnresolvedHostname, got {:?}", + result + ); + } + + #[test] + fn test_docker_style_hostname_format() { + // Docker-style hostnames have valid format + let result = try_resolve_address("zebra:18232"); + // Can't resolve in unit tests, but format is valid + assert!( + result.is_unresolved_hostname(), + "Expected UnresolvedHostname for Docker-style hostname, got {:?}", + 
result + ); + } + + // === DNS-dependent tests (may be flaky in CI) === + + #[test] + #[ignore = "DNS-dependent: may be flaky in CI environments without reliable DNS"] + fn test_resolve_hostname_localhost() { + // "localhost" should resolve to 127.0.0.1 or ::1 + let result = resolve_socket_addr("localhost:8080"); + assert!(result.is_ok()); + let addr = result.unwrap(); + assert_eq!(addr.port(), 8080); + assert!(addr.ip().is_loopback()); + } + + #[test] + #[ignore = "DNS-dependent: behavior varies by system DNS configuration"] + fn test_resolve_invalid_hostname_dns() { + // This test verifies DNS lookup failure for truly invalid hostnames + let result = resolve_socket_addr("this-hostname-does-not-exist.invalid:8080"); + assert!(result.is_err()); + } +} diff --git a/zaino-common/src/probing.rs b/zaino-common/src/probing.rs new file mode 100644 index 000000000..d6825bc2f --- /dev/null +++ b/zaino-common/src/probing.rs @@ -0,0 +1,71 @@ +//! Service health and readiness probing traits. +//! +//! This module provides decoupled traits for health and readiness checks, +//! following the Kubernetes probe model: +//! +//! - [`Liveness`]: Is the component alive and functioning? +//! - [`Readiness`]: Is the component ready to serve requests? +//! - [`VitalsProbe`]: Combined trait for components supporting both probes. +//! +//! These traits are intentionally simple (returning `bool`) and decoupled +//! from any specific status type, allowing flexible implementation across +//! different components. +//! +//! # Example +//! +//! ``` +//! use zaino_common::probing::{Liveness, Readiness, VitalsProbe}; +//! +//! struct MyService { +//! connected: bool, +//! synced: bool, +//! } +//! +//! impl Liveness for MyService { +//! fn is_live(&self) -> bool { +//! self.connected +//! } +//! } +//! +//! impl Readiness for MyService { +//! fn is_ready(&self) -> bool { +//! self.connected && self.synced +//! } +//! } +//! +//! // VitalsProbe is automatically implemented via blanket impl +//! 
fn check_service(service: &impl VitalsProbe) { +//! println!("Live: {}, Ready: {}", service.is_live(), service.is_ready()); +//! } +//! ``` + +/// Liveness probe: Is this component alive and functioning? +/// +/// A component is considered "live" if it is not in a broken or +/// unrecoverable state. This corresponds to Kubernetes liveness probes. +/// +/// Failure to be live typically means the component should be restarted. +pub trait Liveness { + /// Returns `true` if the component is alive and functioning. + fn is_live(&self) -> bool; +} + +/// Readiness probe: Is this component ready to serve requests? +/// +/// A component is considered "ready" if it can accept and process +/// requests. This corresponds to Kubernetes readiness probes. +/// +/// A component may be live but not ready (e.g., still syncing). +pub trait Readiness { + /// Returns `true` if the component is ready to serve requests. + fn is_ready(&self) -> bool; +} + +/// Combined vitals probe for components supporting both liveness and readiness. +/// +/// This trait is automatically implemented for any type that implements +/// both [`Liveness`] and [`Readiness`]. +pub trait VitalsProbe: Liveness + Readiness {} + +// Blanket implementation: anything with Liveness + Readiness gets VitalsProbe +impl VitalsProbe for T {} diff --git a/zaino-common/src/status.rs b/zaino-common/src/status.rs new file mode 100644 index 000000000..e544c6b5f --- /dev/null +++ b/zaino-common/src/status.rs @@ -0,0 +1,146 @@ +//! Service status types and traits. +//! +//! This module provides: +//! - [`StatusType`]: An enum representing service operational states +//! - [`Status`]: A trait for types that can report their status +//! +//! Types implementing [`Status`] automatically gain [`Liveness`](crate::probing::Liveness) +//! and [`Readiness`](crate::probing::Readiness) implementations via blanket impls. + +use std::fmt; + +use crate::probing::{Liveness, Readiness}; + +/// Status of a service component. 
+/// +/// Represents the operational state of a component. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum StatusType { + /// Running initial startup routine. + Spawning = 0, + /// Back-end process is currently syncing. + Syncing = 1, + /// Process is ready. + Ready = 2, + /// Process is busy working. + Busy = 3, + /// Running shutdown routine. + Closing = 4, + /// Offline. + Offline = 5, + /// Non-critical errors. + RecoverableError = 6, + /// Critical errors. + CriticalError = 7, +} + +impl From for StatusType { + fn from(value: usize) -> Self { + match value { + 0 => StatusType::Spawning, + 1 => StatusType::Syncing, + 2 => StatusType::Ready, + 3 => StatusType::Busy, + 4 => StatusType::Closing, + 5 => StatusType::Offline, + 6 => StatusType::RecoverableError, + _ => StatusType::CriticalError, + } + } +} + +impl From for usize { + fn from(status: StatusType) -> Self { + status as usize + } +} + +impl fmt::Display for StatusType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let status_str = match self { + StatusType::Spawning => "Spawning", + StatusType::Syncing => "Syncing", + StatusType::Ready => "Ready", + StatusType::Busy => "Busy", + StatusType::Closing => "Closing", + StatusType::Offline => "Offline", + StatusType::RecoverableError => "RecoverableError", + StatusType::CriticalError => "CriticalError", + }; + write!(f, "{status_str}") + } +} + +impl StatusType { + /// Returns the corresponding status symbol for the StatusType. 
+ pub fn get_status_symbol(&self) -> String { + let (symbol, color_code) = match self { + // Yellow Statuses + StatusType::Syncing => ("\u{1F7E1}", "\x1b[33m"), + // Cyan Statuses + StatusType::Spawning | StatusType::Busy => ("\u{1F7E1}", "\x1b[36m"), + // Green Status + StatusType::Ready => ("\u{1F7E2}", "\x1b[32m"), + // Grey Statuses + StatusType::Closing | StatusType::Offline => ("\u{26AB}", "\x1b[90m"), + // Red Error Statuses + StatusType::RecoverableError | StatusType::CriticalError => ("\u{1F534}", "\x1b[31m"), + }; + + format!("{}{}{}", color_code, symbol, "\x1b[0m") + } + + /// Look at two statuses, and return the more 'severe' of the two. + pub fn combine(self, other: StatusType) -> StatusType { + match (self, other) { + // If either is Closing, return Closing. + (StatusType::Closing, _) | (_, StatusType::Closing) => StatusType::Closing, + // If either is Offline or CriticalError, return CriticalError. + (StatusType::Offline, _) + | (_, StatusType::Offline) + | (StatusType::CriticalError, _) + | (_, StatusType::CriticalError) => StatusType::CriticalError, + // If either is RecoverableError, return RecoverableError. + (StatusType::RecoverableError, _) | (_, StatusType::RecoverableError) => { + StatusType::RecoverableError + } + // If either is Spawning, return Spawning. + (StatusType::Spawning, _) | (_, StatusType::Spawning) => StatusType::Spawning, + // If either is Syncing, return Syncing. + (StatusType::Syncing, _) | (_, StatusType::Syncing) => StatusType::Syncing, + // Otherwise, return Ready. + _ => StatusType::Ready, + } + } + + /// Returns `true` if this status indicates the component is alive (liveness probe). + pub fn is_live(self) -> bool { + !matches!(self, StatusType::Offline | StatusType::CriticalError) + } + + /// Returns `true` if this status indicates the component is ready to serve (readiness probe). 
+ pub fn is_ready(self) -> bool { + matches!(self, StatusType::Ready | StatusType::Busy) + } +} + +/// Trait for types that can report their [`StatusType`]. +/// +/// Implementing this trait automatically provides [`Liveness`] and [`Readiness`] +/// implementations via blanket impls. +pub trait Status { + /// Returns the current status of this component. + fn status(&self) -> StatusType; +} + +impl Liveness for T { + fn is_live(&self) -> bool { + self.status().is_live() + } +} + +impl Readiness for T { + fn is_ready(&self) -> bool { + self.status().is_ready() + } +} diff --git a/zaino-common/src/xdg.rs b/zaino-common/src/xdg.rs new file mode 100644 index 000000000..4a530cecc --- /dev/null +++ b/zaino-common/src/xdg.rs @@ -0,0 +1,203 @@ +//! XDG Base Directory utilities for consistent path resolution. +//! +//! This module provides a centralized policy for resolving default paths +//! following the [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/latest/). +//! +//! # Resolution Policy +//! +//! Paths are resolved in the following order: +//! 1. Check the XDG environment variable (e.g., `XDG_CONFIG_HOME`) +//! 2. Fall back to `$HOME/{subdir}` (e.g., `$HOME/.config`) +//! 3. Fall back to `/tmp/zaino/{subdir}` if HOME is not set +//! +//! # Example +//! +//! ``` +//! use zaino_common::xdg::{resolve_path_with_xdg_cache_defaults, resolve_path_with_xdg_config_defaults}; +//! +//! // Resolves to $XDG_CONFIG_HOME/zaino/zainod.toml, or ~/.config/zaino/zainod.toml +//! let config_path = resolve_path_with_xdg_config_defaults("zaino/zainod.toml"); +//! +//! // Resolves to $XDG_CACHE_HOME/zaino, or ~/.cache/zaino +//! let cache_path = resolve_path_with_xdg_cache_defaults("zaino"); +//! ``` + +use std::path::PathBuf; + +/// XDG Base Directory categories. +/// +/// Each variant corresponds to an XDG environment variable and its +/// standard fallback location relative to `$HOME`. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum XdgDir { + /// `XDG_CONFIG_HOME` - User configuration files. + /// + /// Default: `$HOME/.config` + Config, + + /// `XDG_CACHE_HOME` - Non-essential cached data. + /// + /// Default: `$HOME/.cache` + Cache, + + /// `XDG_RUNTIME_DIR` - Runtime files (sockets, locks, cookies). + /// + /// Per XDG spec, there is no standard default if unset. + /// We fall back to `/tmp` for practical usability. + Runtime, + // /// `XDG_DATA_HOME` - User data files. + // /// + // /// Default: `$HOME/.local/share` + // Data, + + // /// `XDG_STATE_HOME` - Persistent state (logs, history). + // /// + // /// Default: `$HOME/.local/state` + // State, +} + +impl XdgDir { + /// Returns the environment variable name for this XDG directory. + pub fn env_var(&self) -> &'static str { + match self { + Self::Config => "XDG_CONFIG_HOME", + Self::Cache => "XDG_CACHE_HOME", + Self::Runtime => "XDG_RUNTIME_DIR", + } + } + + /// Returns the fallback subdirectory relative to `$HOME`. + /// + /// Note: `Runtime` returns `None` as XDG spec defines no $HOME fallback for it. + pub fn home_subdir(&self) -> Option<&'static str> { + match self { + Self::Config => Some(".config"), + Self::Cache => Some(".cache"), + Self::Runtime => None, + } + } +} + +/// Resolves a path using XDG Base Directory defaults. +/// +/// # Resolution Order +/// +/// For `Config` and `Cache`: +/// 1. If the XDG environment variable is set, uses that as the base +/// 2. Falls back to `$HOME/{xdg_subdir}/{subpath}` +/// 3. Falls back to `/tmp/zaino/{xdg_subdir}/{subpath}` if HOME is unset +/// +/// For `Runtime`: +/// 1. If `XDG_RUNTIME_DIR` is set, uses that as the base +/// 2. 
Falls back to `/tmp/{subpath}` (no $HOME fallback per XDG spec) +fn resolve_path_with_xdg_defaults(dir: XdgDir, subpath: &str) -> PathBuf { + // Try XDG environment variable first + if let Ok(xdg_base) = std::env::var(dir.env_var()) { + return PathBuf::from(xdg_base).join(subpath); + } + + // Runtime has no $HOME fallback per XDG spec + if dir == XdgDir::Runtime { + return PathBuf::from("/tmp").join(subpath); + } + + // Fall back to $HOME/{subdir} for Config and Cache + if let Ok(home) = std::env::var("HOME") { + if let Some(subdir) = dir.home_subdir() { + return PathBuf::from(home).join(subdir).join(subpath); + } + } + + // Final fallback to /tmp/zaino/{subdir} + PathBuf::from("/tmp") + .join("zaino") + .join(dir.home_subdir().unwrap_or("")) + .join(subpath) +} + +/// Resolves a path using `XDG_CONFIG_HOME` defaults. +/// +/// Convenience wrapper for [`resolve_path_with_xdg_defaults`] with [`XdgDir::Config`]. +/// +/// # Example +/// +/// ``` +/// use zaino_common::xdg::resolve_path_with_xdg_config_defaults; +/// +/// let path = resolve_path_with_xdg_config_defaults("zaino/zainod.toml"); +/// // Returns: $XDG_CONFIG_HOME/zaino/zainod.toml +/// // or: $HOME/.config/zaino/zainod.toml +/// // or: /tmp/zaino/.config/zaino/zainod.toml +/// ``` +pub fn resolve_path_with_xdg_config_defaults(subpath: &str) -> PathBuf { + resolve_path_with_xdg_defaults(XdgDir::Config, subpath) +} + +/// Resolves a path using `XDG_CACHE_HOME` defaults. +/// +/// Convenience wrapper for [`resolve_path_with_xdg_defaults`] with [`XdgDir::Cache`]. 
+/// +/// # Example +/// +/// ``` +/// use zaino_common::xdg::resolve_path_with_xdg_cache_defaults; +/// +/// let path = resolve_path_with_xdg_cache_defaults("zaino"); +/// // Returns: $XDG_CACHE_HOME/zaino +/// // or: $HOME/.cache/zaino +/// // or: /tmp/zaino/.cache/zaino +/// ``` +pub fn resolve_path_with_xdg_cache_defaults(subpath: &str) -> PathBuf { + resolve_path_with_xdg_defaults(XdgDir::Cache, subpath) +} + +/// Resolves a path using `XDG_RUNTIME_DIR` defaults. +/// +/// Convenience wrapper for [`resolve_path_with_xdg_defaults`] with [`XdgDir::Runtime`]. +/// +/// Note: Per XDG spec, `XDG_RUNTIME_DIR` has no `$HOME` fallback. If unset, +/// this falls back directly to `/tmp/{subpath}`. +/// +/// # Example +/// +/// ``` +/// use zaino_common::xdg::resolve_path_with_xdg_runtime_defaults; +/// +/// let path = resolve_path_with_xdg_runtime_defaults("zaino/.cookie"); +/// // Returns: $XDG_RUNTIME_DIR/zaino/.cookie +/// // or: /tmp/zaino/.cookie +/// ``` +pub fn resolve_path_with_xdg_runtime_defaults(subpath: &str) -> PathBuf { + resolve_path_with_xdg_defaults(XdgDir::Runtime, subpath) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_xdg_dir_env_vars() { + assert_eq!(XdgDir::Config.env_var(), "XDG_CONFIG_HOME"); + assert_eq!(XdgDir::Cache.env_var(), "XDG_CACHE_HOME"); + assert_eq!(XdgDir::Runtime.env_var(), "XDG_RUNTIME_DIR"); + } + + #[test] + fn test_xdg_dir_home_subdirs() { + assert_eq!(XdgDir::Config.home_subdir(), Some(".config")); + assert_eq!(XdgDir::Cache.home_subdir(), Some(".cache")); + assert_eq!(XdgDir::Runtime.home_subdir(), None); + } + + #[test] + fn test_resolved_paths_end_with_subpath() { + let config_path = resolve_path_with_xdg_config_defaults("zaino/zainod.toml"); + assert!(config_path.ends_with("zaino/zainod.toml")); + + let cache_path = resolve_path_with_xdg_cache_defaults("zaino"); + assert!(cache_path.ends_with("zaino")); + + let runtime_path = resolve_path_with_xdg_runtime_defaults("zaino/.cookie"); + 
assert!(runtime_path.ends_with("zaino/.cookie")); + } +} diff --git a/zaino-fetch/Cargo.toml b/zaino-fetch/Cargo.toml index 17e078057..d86190fde 100644 --- a/zaino-fetch/Cargo.toml +++ b/zaino-fetch/Cargo.toml @@ -1,24 +1,27 @@ [package] name = "zaino-fetch" description = "A mempool-fetching, chain-fetching and transaction submission service that uses zebra's RPC interface." -edition = { workspace = true } authors = { workspace = true } -license = { workspace = true } repository = { workspace = true } +homepage = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +version = { workspace = true } [dependencies] -zaino-proto = { path = "../zaino-proto" } - -# Librustzcash -zcash_protocol = { workspace = true } +zaino-common = { workspace = true } +zaino-proto = { workspace = true } # Zebra zebra-chain = { workspace = true } zebra-rpc = { workspace = true } +# Tracing +tracing = { workspace = true } + # Miscellaneous Workspace tokio = { workspace = true, features = ["full"] } -tonic = { workspace = true } +tonic = { workspace = true, features = ["tls-native-roots"] } http = { workspace = true } thiserror = { workspace = true } @@ -26,11 +29,15 @@ thiserror = { workspace = true } prost = { workspace = true } reqwest = { workspace = true } url = { workspace = true } -serde_json = { workspace = true, features = ["preserve_order"] } # The preserve_order feature in serde_jsonn is a dependency of jsonrpc-core +serde_json = { workspace = true, features = ["preserve_order"] } serde = { workspace = true, features = ["derive"] } hex = { workspace = true, features = ["serde"] } indexmap = { workspace = true, features = ["serde"] } base64 = { workspace = true } byteorder = { workspace = true } sha2 = { workspace = true } +jsonrpsee-types = { workspace = true } +derive_more = { workspace = true, features = ["from"] } +[dev-dependencies] +zaino-testvectors = { workspace = true } diff --git a/zaino-fetch/src/chain.rs b/zaino-fetch/src/chain.rs index 
3023830cb..04422890c 100644 --- a/zaino-fetch/src/chain.rs +++ b/zaino-fetch/src/chain.rs @@ -2,6 +2,5 @@ pub mod block; pub mod error; -pub mod mempool; pub mod transaction; pub mod utils; diff --git a/zaino-fetch/src/chain/block.rs b/zaino-fetch/src/chain/block.rs index 06c66feeb..2c8bd1e19 100644 --- a/zaino-fetch/src/chain/block.rs +++ b/zaino-fetch/src/chain/block.rs @@ -1,20 +1,15 @@ //! Block fetching and deserialization functionality. -use crate::{ - chain::{ - error::{BlockCacheError, ParseError}, - transaction::FullTransaction, - utils::{ - display_txids_to_server, read_bytes, read_i32, read_u32, read_zcash_script_i64, - CompactSize, ParseFromSlice, - }, - }, - jsonrpc::{connector::JsonRpcConnector, response::GetBlockResponse}, +use crate::chain::{ + error::ParseError, + transaction::FullTransaction, + utils::{read_bytes, read_i32, read_u32, read_zcash_script_i64, CompactSize, ParseFromSlice}, }; use sha2::{Digest, Sha256}; use std::io::Cursor; -use zaino_proto::proto::compact_formats::{ - ChainMetadata, CompactBlock, CompactOrchardAction, CompactTx, +use zaino_proto::proto::{ + compact_formats::{ChainMetadata, CompactBlock}, + utils::PoolTypeFilter, }; /// A block header, containing metadata about a block. @@ -34,7 +29,7 @@ struct BlockHeaderData { /// a free field. The only constraint is that it must be at least `4` when /// interpreted as an `i32`. /// - /// Size [bytes]: 4 + /// Size \[bytes\]: 4 version: i32, /// The hash of the previous block, used to create a chain of blocks back to @@ -43,7 +38,7 @@ struct BlockHeaderData { /// This ensures no previous block can be changed without also changing this /// block's header. /// - /// Size [bytes]: 32 + /// Size \[bytes\]: 32 hash_prev_block: Vec, /// The root of the Bitcoin-inherited transaction Merkle tree, binding the @@ -55,21 +50,21 @@ struct BlockHeaderData { /// transactions with the same Merkle root, although only one set will be /// valid. 
/// - /// Size [bytes]: 32 + /// Size \[bytes\]: 32 hash_merkle_root: Vec, - /// [Pre-Sapling] A reserved field which should be ignored. - /// [Sapling onward] The root LEBS2OSP_256(rt) of the Sapling note + /// \[Pre-Sapling\] A reserved field which should be ignored. + /// \[Sapling onward\] The root LEBS2OSP_256(rt) of the Sapling note /// commitment tree corresponding to the final Sapling treestate of this /// block. /// - /// Size [bytes]: 32 + /// Size \[bytes\]: 32 hash_final_sapling_root: Vec, /// The block timestamp is a Unix epoch time (UTC) when the miner /// started hashing the header (according to the miner). /// - /// Size [bytes]: 4 + /// Size \[bytes\]: 4 time: u32, /// An encoded version of the target threshold this block's header @@ -81,19 +76,19 @@ struct BlockHeaderData { /// /// [Bitcoin-nBits](https://bitcoin.org/en/developer-reference#target-nbits) /// - /// Size [bytes]: 4 + /// Size \[bytes\]: 4 n_bits_bytes: Vec, /// An arbitrary field that miners can change to modify the header /// hash in order to produce a hash less than or equal to the /// target threshold. /// - /// Size [bytes]: 32 + /// Size \[bytes\]: 32 nonce: Vec, /// The Equihash solution. /// - /// Size [bytes]: CompactLength + /// Size \[bytes\]: CompactLength solution: Vec, } @@ -224,6 +219,11 @@ impl FullBlockHeader { self.raw_block_header.hash_merkle_root.clone() } + /// Returns the final sapling root of the block. + pub fn final_sapling_root(&self) -> Vec { + self.raw_block_header.hash_final_sapling_root.clone() + } + /// Returns the time when the miner started hashing the header (according to the miner). pub fn time(&self) -> u32 { self.raw_block_header.time @@ -255,7 +255,7 @@ impl FullBlockHeader { pub struct FullBlock { /// The block header, containing block metadata. /// - /// Size [bytes]: 140+CompactLength + /// Size \[bytes\]: 140+CompactLength hdr: FullBlockHeader, /// The block transactions. 
@@ -329,7 +329,7 @@ impl ParseFromSlice for FullBlock { /// Genesis block special case. /// /// From LightWalletD: -/// see https://github.com/zcash/lightwalletd/issues/17#issuecomment-467110828. +/// see . const GENESIS_TARGET_DIFFICULTY: u32 = 520617983; impl FullBlock { @@ -348,6 +348,16 @@ impl FullBlock { self.height } + /// Returns the Orchard `authDataRoot` of the block, taken from the coinbase transaction's anchorOrchard field. + /// + /// If the coinbase transaction is v5 and includes an Orchard bundle, this is the root of the Orchard commitment tree + /// after applying all Orchard actions in the block. + /// + /// Returns `Some(Vec)` if present, else `None`. + pub fn auth_data_root(&self) -> Option> { + self.vtx.first().and_then(|tx| tx.anchor_orchard()) + } + /// Decodes a hex encoded zcash full block into a FullBlock struct. pub fn parse_from_hex(data: &[u8], txid: Option>>) -> Result { let (remaining_data, full_block) = Self::parse_from_slice(data, txid, None)?; @@ -355,27 +365,41 @@ impl FullBlock { return Err(ParseError::InvalidData(format!( "Error decoding full block - {} bytes of Remaining data. Compact Block Created: ({:?})", remaining_data.len(), - full_block.into_compact(0, 0) + full_block.into_compact_block(0, 0, PoolTypeFilter::includes_all()) ))); } Ok(full_block) } - /// Converts a zcash full block into a compact block. - pub fn into_compact( + /// Turns this Block into a Compact Block according to the Lightclient protocol [ZIP-307](https://zips.z.cash/zip-0307) + /// callers can choose which pools to include in this compact block by specifying a + /// `PoolTypeFilter` accordingly. 
+ pub fn into_compact_block( self, sapling_commitment_tree_size: u32, orchard_commitment_tree_size: u32, + pool_types: PoolTypeFilter, ) -> Result { let vtx = self .vtx .into_iter() .enumerate() .filter_map(|(index, tx)| { - if tx.has_shielded_elements() { - Some(tx.to_compact(index as u64)) - } else { - None + match tx.to_compact_tx(Some(index as u64), &pool_types) { + Ok(compact_tx) => { + // Omit transactions that have no elements in any requested pool type. + if !compact_tx.vin.is_empty() + || !compact_tx.vout.is_empty() + || !compact_tx.spends.is_empty() + || !compact_tx.outputs.is_empty() + || !compact_tx.actions.is_empty() + { + Some(Ok(compact_tx)) + } else { + None + } + } + Err(parse_error) => Some(Err(parse_error)), } }) .collect::, _>>()?; @@ -385,7 +409,7 @@ impl FullBlock { let header = Vec::new(); let compact_block = CompactBlock { - proto_version: 0, + proto_version: 4, height: self.height as u64, hash: self.hdr.cached_hash.clone(), prev_hash: self.hdr.raw_block_header.hash_prev_block.clone(), @@ -401,10 +425,25 @@ impl FullBlock { Ok(compact_block) } + #[deprecated] + /// Converts a zcash full block into a **legacy** compact block. + pub fn into_compact( + self, + sapling_commitment_tree_size: u32, + orchard_commitment_tree_size: u32, + ) -> Result { + self.into_compact_block( + sapling_commitment_tree_size, + orchard_commitment_tree_size, + PoolTypeFilter::default(), + ) + } + /// Extracts the block height from the coinbase transaction. fn get_block_height(transactions: &[FullTransaction]) -> Result { let transparent_inputs = transactions[0].transparent_inputs(); - let coinbase_script = transparent_inputs[0].as_slice(); + let (_, _, script_sig) = transparent_inputs[0].clone(); + let coinbase_script = script_sig.as_slice(); let mut cursor = Cursor::new(coinbase_script); @@ -422,107 +461,3 @@ impl FullBlock { Ok(height_num as i32) } } - -/// Returns a compact block. -/// -/// Retrieves a full block from zebrad/zcashd using 2 get_block calls. 
-/// This is because a get_block verbose = 1 call is require to fetch txids. -/// TODO: Save retrieved CompactBlock to the BlockCache. -/// TODO: Return more representative error type. -pub async fn get_block_from_node( - zebra_uri: &http::Uri, - height: &u32, -) -> Result { - let zebrad_client = JsonRpcConnector::new( - zebra_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await?; - let block_1 = zebrad_client.get_block(height.to_string(), Some(1)).await; - match block_1 { - Ok(GetBlockResponse::Object { - hash, - confirmations: _, - height: _, - time: _, - tx, - trees, - }) => { - let block_0 = zebrad_client.get_block(hash.0.to_string(), Some(0)).await; - match block_0 { - Ok(GetBlockResponse::Object { - hash: _, - confirmations: _, - height: _, - time: _, - tx: _, - trees: _, - }) => Err(BlockCacheError::ParseError(ParseError::InvalidData( - "Received object block type, this should not be possible here.".to_string(), - ))), - Ok(GetBlockResponse::Raw(block_hex)) => Ok(FullBlock::parse_from_hex( - block_hex.as_ref(), - Some(display_txids_to_server(tx)?), - )? - .into_compact( - u32::try_from(trees.sapling()).map_err(ParseError::from)?, - u32::try_from(trees.orchard()).map_err(ParseError::from)?, - )?), - Err(e) => Err(e.into()), - } - } - Ok(GetBlockResponse::Raw(_)) => Err(BlockCacheError::ParseError(ParseError::InvalidData( - "Received raw block type, this should not be possible here.".to_string(), - ))), - Err(e) => Err(e.into()), - } -} - -/// Returns a compact block holding only action nullifiers. -/// -/// Retrieves a full block from zebrad/zcashd using 2 get_block calls. -/// This is because a get_block verbose = 1 call is require to fetch txids. -/// -/// TODO / NOTE: This should be rewritten when the BlockCache is added. 
-pub async fn get_nullifiers_from_node( - zebra_uri: &http::Uri, - height: &u32, -) -> Result { - match get_block_from_node(zebra_uri, height).await { - Ok(block) => Ok(CompactBlock { - proto_version: block.proto_version, - height: block.height, - hash: block.hash, - prev_hash: block.prev_hash, - time: block.time, - header: block.header, - vtx: block - .vtx - .into_iter() - .map(|tx| CompactTx { - index: tx.index, - hash: tx.hash, - fee: tx.fee, - spends: tx.spends, - outputs: Vec::new(), - actions: tx - .actions - .into_iter() - .map(|action| CompactOrchardAction { - nullifier: action.nullifier, - cmx: Vec::new(), - ephemeral_key: Vec::new(), - ciphertext: Vec::new(), - }) - .collect(), - }) - .collect(), - chain_metadata: Some(ChainMetadata { - sapling_commitment_tree_size: 0, - orchard_commitment_tree_size: 0, - }), - }), - Err(e) => Err(e), - } -} diff --git a/zaino-fetch/src/chain/error.rs b/zaino-fetch/src/chain/error.rs index 7790ef9b6..2d1d6ac19 100644 --- a/zaino-fetch/src/chain/error.rs +++ b/zaino-fetch/src/chain/error.rs @@ -1,7 +1,5 @@ //! Hold error types for the BlockCache and related functionality. -use crate::jsonrpc::error::JsonRpcConnectorError; - /// Parser Error Type. #[derive(Debug, thiserror::Error)] pub enum ParseError { @@ -32,23 +30,34 @@ pub enum ParseError { /// Integer conversion error. #[error("Integer conversion error: {0}")] TryFromIntError(#[from] std::num::TryFromIntError), -} - -/// Parser Error Type. -#[derive(Debug, thiserror::Error)] -pub enum BlockCacheError { - /// Serialization and deserialization error. - #[error("Parser Error: {0}")] - ParseError(#[from] ParseError), - /// Errors from the JsonRPC client. - #[error("JsonRPC Connector Error: {0}")] - JsonRpcError(#[from] JsonRpcConnectorError), -} -/// Mempool Error struct. -#[derive(thiserror::Error, Debug)] -pub enum MempoolError { - /// Errors from the JsonRPC client. 
- #[error("JsonRPC Connector Error: {0}")] - JsonRpcError(#[from] JsonRpcConnectorError), + /// Unecpected read order for sequential binary data + #[error("Sequential binary data read: field {field} expected on position {expected_order} of transaction, read on {actual_order}")] + InvalidParseOrder { + /// the noncomplient field + field: &'static str, + /// TODO: What does this mean + expected_order: u8, + /// TODO: What does this mean + actual_order: u8, + }, + + /// Unexpected field size during parsing + #[error("Field {field} expected size {expected} bytes, but advanced {actual} bytes")] + UnexpectedFieldSize { + /// the noncomplient field + field: &'static str, + /// size (in bytes) the field should have been + expected: usize, + /// size (in bytes) the field actually was + actual: usize, + }, + + /// Field not found in reader + #[error("Field not found: {0}")] + FieldNotFound(String), + + /// Field not parsed yet + #[error("Field not parsed: {0}")] + FieldNotParsed(&'static str), } diff --git a/zaino-fetch/src/chain/mempool.rs b/zaino-fetch/src/chain/mempool.rs deleted file mode 100644 index 481399c07..000000000 --- a/zaino-fetch/src/chain/mempool.rs +++ /dev/null @@ -1,158 +0,0 @@ -//! Zingo-Indexer mempool state functionality. - -use std::{collections::HashSet, time::SystemTime}; -use tokio::sync::{Mutex, RwLock}; - -use crate::{chain::error::MempoolError, jsonrpc::connector::JsonRpcConnector}; - -/// Mempool state information. -pub struct Mempool { - /// Txids currently in the mempool. - txids: RwLock>, - /// Txids that have already been added to Zingo-Indexer's mempool. - txids_seen: Mutex>, - /// System time when the mempool was last updated. - last_sync_time: Mutex, - /// Blockchain data, used to check when a new block has been mined. - best_block_hash: RwLock>, -} - -impl Default for Mempool { - fn default() -> Self { - Self::new() - } -} - -impl Mempool { - /// Returns an empty mempool. 
- pub fn new() -> Self { - Mempool { - txids: RwLock::new(Vec::new()), - txids_seen: Mutex::new(HashSet::new()), - last_sync_time: Mutex::new(SystemTime::now()), - best_block_hash: RwLock::new(None), - } - } - - /// Updates the mempool, returns true if the current block in the mempool has been mined. - pub async fn update(&self, zebrad_uri: &http::Uri) -> Result { - self.update_last_sync_time().await?; - let mined = self.check_and_update_best_block_hash(zebrad_uri).await?; - if mined { - self.reset_txids().await?; - self.update_txids(zebrad_uri).await?; - Ok(true) - } else { - self.update_txids(zebrad_uri).await?; - Ok(false) - } - } - - /// Updates the txids in the mempool. - async fn update_txids(&self, zebrad_uri: &http::Uri) -> Result<(), MempoolError> { - let node_txids = JsonRpcConnector::new( - zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await? - .get_raw_mempool() - .await? - .transactions; - let mut txids_seen = self.txids_seen.lock().await; - let mut txids = self.txids.write().await; - for txid in node_txids { - if !txids_seen.contains(&txid) { - txids.push(txid.clone()); - } - txids_seen.insert(txid); - } - Ok(()) - } - - /// Updates the system last sync time. - async fn update_last_sync_time(&self) -> Result<(), MempoolError> { - let mut last_sync_time = self.last_sync_time.lock().await; - *last_sync_time = SystemTime::now(); - Ok(()) - } - - /// Updates the mempool blockchain info, returns true if the current block in the mempool has been mined. - async fn check_and_update_best_block_hash( - &self, - zebrad_uri: &http::Uri, - ) -> Result { - let node_best_block_hash = JsonRpcConnector::new( - zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await? - .get_blockchain_info() - .await? 
- .best_block_hash; - - let mut last_best_block_hash = self.best_block_hash.write().await; - - if let Some(ref last_hash) = *last_best_block_hash { - if node_best_block_hash == *last_hash { - return Ok(false); - } - } - - *last_best_block_hash = Some(node_best_block_hash); - Ok(true) - } - - /// Clears the txids currently held in the mempool. - async fn reset_txids(&self) -> Result<(), MempoolError> { - let mut txids = self.txids.write().await; - txids.clear(); - let mut txids_seen = self.txids_seen.lock().await; - txids_seen.clear(); - Ok(()) - } - - /// Returns the txids currently in the mempool. - pub async fn get_mempool_txids(&self) -> Result, MempoolError> { - let txids = self.txids.read().await; - Ok(txids.clone()) - } - - /// Returns the txids currently in the mempool, filtered by exclude_txids. - /// - /// NOTE: THIS impl is inefficient and should be refactored with the addition of the internal mempool. - pub async fn get_filtered_mempool_txids( - &self, - exclude_txids: Vec, - ) -> Result, MempoolError> { - let mempool_txids = self.txids.read().await.clone(); - - let mut txids_to_exclude: HashSet = HashSet::new(); - for exclude_txid in &exclude_txids { - let matching_txids: Vec<&String> = mempool_txids - .iter() - .filter(|txid| txid.starts_with(exclude_txid)) - .collect(); - - if matching_txids.len() == 1 { - txids_to_exclude.insert(matching_txids[0].clone()); - } - } - - let filtered_txids: Vec = mempool_txids - .into_iter() - .filter(|txid| !txids_to_exclude.contains(txid)) - .collect(); - - Ok(filtered_txids) - } - - /// Returns the hash of the block currently in the mempool. 
- pub async fn get_best_block_hash( - &self, - ) -> Result, MempoolError> { - let best_block_hash = self.best_block_hash.read().await; - Ok(*best_block_hash) - } -} diff --git a/zaino-fetch/src/chain/transaction.rs b/zaino-fetch/src/chain/transaction.rs index 4df03477c..db9a9f2b7 100644 --- a/zaino-fetch/src/chain/transaction.rs +++ b/zaino-fetch/src/chain/transaction.rs @@ -2,28 +2,40 @@ use crate::chain::{ error::ParseError, - utils::{read_bytes, read_u32, read_u64, skip_bytes, CompactSize, ParseFromSlice}, + utils::{read_bytes, read_i64, read_u32, read_u64, skip_bytes, CompactSize, ParseFromSlice}, }; use std::io::Cursor; -use zaino_proto::proto::compact_formats::{ - CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactTx, +use zaino_proto::proto::{ + compact_formats::{ + CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactTx, CompactTxIn, + TxOut as CompactTxOut, + }, + utils::PoolTypeFilter, }; -/// Txin format as described in https://en.bitcoin.it/wiki/Transaction +/// Txin format as described in #[derive(Debug, Clone)] -struct TxIn { - // PrevTxHash [IGNORED] - Size[bytes]: 32 - // PrevTxOutIndex [IGNORED] - Size[bytes]: 4 +pub struct TxIn { + // PrevTxHash - Size\[bytes\]: 32 + prev_txid: Vec, + // PrevTxOutIndex - Size\[bytes\]: 4 + prev_index: u32, /// CompactSize-prefixed, could be a pubkey or a script /// - /// Size[bytes]: CompactSize + /// Size\[bytes\]: CompactSize script_sig: Vec, - // SequenceNumber [IGNORED] - Size[bytes]: 4 + // SequenceNumber \[IGNORED\] - Size\[bytes\]: 4 } impl TxIn { - fn into_inner(self) -> Vec { - self.script_sig + fn into_inner(self) -> (Vec, u32, Vec) { + (self.prev_txid, self.prev_index, self.script_sig) + } + + /// Returns `true` if this `OutPoint` is "null" in the Bitcoin sense: it has txid set to + /// all-zeroes and output index set to `u32::MAX`. 
+ fn is_null(&self) -> bool { + self.prev_txid.as_slice() == [0u8; 32] && self.prev_index == u32::MAX } } @@ -45,8 +57,8 @@ impl ParseFromSlice for TxIn { } let mut cursor = Cursor::new(data); - skip_bytes(&mut cursor, 32, "Error skipping TxIn::PrevTxHash")?; - skip_bytes(&mut cursor, 4, "Error skipping TxIn::PrevTxOutIndex")?; + let prev_txid = read_bytes(&mut cursor, 32, "Error reading TxIn::PrevTxHash")?; + let prev_index = read_u32(&mut cursor, "Error reading TxIn::PrevTxOutIndex")?; let script_sig = { let compact_length = CompactSize::read(&mut cursor)?; read_bytes( @@ -57,23 +69,31 @@ impl ParseFromSlice for TxIn { }; skip_bytes(&mut cursor, 4, "Error skipping TxIn::SequenceNumber")?; - Ok((&data[cursor.position() as usize..], TxIn { script_sig })) + Ok(( + &data[cursor.position() as usize..], + TxIn { + prev_txid, + prev_index, + script_sig, + }, + )) } } -/// Txout format as described in https://en.bitcoin.it/wiki/Transaction +/// Txout format as described in #[derive(Debug, Clone)] -struct TxOut { +pub struct TxOut { /// Non-negative int giving the number of zatoshis to be transferred /// - /// Size[bytes]: 8 + /// Size\[bytes\]: 8 value: u64, - // Script [IGNORED] - Size[bytes]: CompactSize + // Script - Size\[bytes\]: CompactSize + script_hash: Vec, } impl TxOut { - fn into_inner(self) -> u64 { - self.value + fn into_inner(self) -> (u64, Vec) { + (self.value, self.script_hash) } } @@ -96,14 +116,19 @@ impl ParseFromSlice for TxOut { let mut cursor = Cursor::new(data); let value = read_u64(&mut cursor, "Error TxOut::reading Value")?; - let compact_length = CompactSize::read(&mut cursor)?; - skip_bytes( - &mut cursor, - compact_length as usize, - "Error skipping TxOut::Script", - )?; + let script_hash = { + let compact_length = CompactSize::read(&mut cursor)?; + read_bytes( + &mut cursor, + compact_length as usize, + "Error reading TxOut::ScriptHash", + )? 
+ }; - Ok((&data[cursor.position() as usize..], TxOut { value })) + Ok(( + &data[cursor.position() as usize..], + TxOut { script_hash, value }, + )) } } @@ -131,19 +156,19 @@ fn parse_transparent(data: &[u8]) -> Result<(&[u8], Vec, Vec), Pars Ok((&data[cursor.position() as usize..], tx_ins, tx_outs)) } -/// spend is a Sapling Spend Description as described in 7.3 of the Zcash +/// Spend is a Sapling Spend Description as described in 7.3 of the Zcash /// protocol specification. #[derive(Debug, Clone)] -struct Spend { - // Cv [IGNORED] - Size[bytes]: 32 - // Anchor [IGNORED] - Size[bytes]: 32 +pub struct Spend { + // Cv \[IGNORED\] - Size\[bytes\]: 32 + // Anchor \[IGNORED\] - Size\[bytes\]: 32 /// A nullifier to a sapling note. /// - /// Size[bytes]: 32 + /// Size\[bytes\]: 32 nullifier: Vec, - // Rk [IGNORED] - Size[bytes]: 32 - // Zkproof [IGNORED] - Size[bytes]: 192 - // SpendAuthSig [IGNORED] - Size[bytes]: 64 + // Rk \[IGNORED\] - Size\[bytes\]: 32 + // Zkproof \[IGNORED\] - Size\[bytes\]: 192 + // SpendAuthSig \[IGNORED\] - Size\[bytes\]: 64 } impl Spend { @@ -188,23 +213,23 @@ impl ParseFromSlice for Spend { /// output is a Sapling Output Description as described in section 7.4 of the /// Zcash protocol spec. #[derive(Debug, Clone)] -struct Output { - // Cv [IGNORED] - Size[bytes]: 32 +pub struct Output { + // Cv \[IGNORED\] - Size\[bytes\]: 32 /// U-coordinate of the note commitment, derived from the note's value, recipient, and a /// random value. /// - /// Size[bytes]: 32 + /// Size\[bytes\]: 32 cmu: Vec, /// Ephemeral public key for Diffie-Hellman key exchange. /// - /// Size[bytes]: 32 + /// Size\[bytes\]: 32 ephemeral_key: Vec, /// Encrypted transaction details including value transferred and an optional memo. 
/// - /// Size[bytes]: 580 + /// Size\[bytes\]: 580 enc_ciphertext: Vec, - // OutCiphertext [IGNORED] - Size[bytes]: 80 - // Zkproof [IGNORED] - Size[bytes]: 192 + // OutCiphertext \[IGNORED\] - Size\[bytes\]: 80 + // Zkproof \[IGNORED\] - Size\[bytes\]: 192 } impl Output { @@ -258,16 +283,16 @@ impl ParseFromSlice for Output { /// NOTE: Legacy, no longer used but included for consistency. #[derive(Debug, Clone)] struct JoinSplit { - //vpubOld [IGNORED] - Size[bytes]: 8 - //vpubNew [IGNORED] - Size[bytes]: 8 - //anchor [IGNORED] - Size[bytes]: 32 - //nullifiers [IGNORED] - Size[bytes]: 64/32 - //commitments [IGNORED] - Size[bytes]: 64/32 - //ephemeralKey [IGNORED] - Size[bytes]: 32 - //randomSeed [IGNORED] - Size[bytes]: 32 - //vmacs [IGNORED] - Size[bytes]: 64/32 - //proofGroth16 [IGNORED] - Size[bytes]: 192 - //encCiphertexts [IGNORED] - Size[bytes]: 1202 + //vpubOld \[IGNORED\] - Size\[bytes\]: 8 + //vpubNew \[IGNORED\] - Size\[bytes\]: 8 + //anchor \[IGNORED\] - Size\[bytes\]: 32 + //nullifiers \[IGNORED\] - Size\[bytes\]: 64/32 + //commitments \[IGNORED\] - Size\[bytes\]: 64/32 + //ephemeralKey \[IGNORED\] - Size\[bytes\]: 32 + //randomSeed \[IGNORED\] - Size\[bytes\]: 32 + //vmacs \[IGNORED\] - Size\[bytes\]: 64/32 + //proofGroth16 \[IGNORED\] - Size\[bytes\]: 192 + //encCiphertexts \[IGNORED\] - Size\[bytes\]: 1202 } impl ParseFromSlice for JoinSplit { @@ -281,11 +306,16 @@ impl ParseFromSlice for JoinSplit { "txid must be None for JoinSplit::parse_from_slice".to_string(), )); } - if tx_version.is_some() { - return Err(ParseError::InvalidData( - "tx_version must be None for JoinSplit::parse_from_slice".to_string(), - )); - } + let proof_size = match tx_version { + Some(2) | Some(3) => 296, // BCTV14 proof for v2/v3 transactions + Some(4) => 192, // Groth16 proof for v4 transactions + None => 192, // Default to Groth16 for unknown versions + _ => { + return Err(ParseError::InvalidData(format!( + "Unsupported tx_version {tx_version:?} for 
JoinSplit::parse_from_slice" + ))) + } + }; let mut cursor = Cursor::new(data); skip_bytes(&mut cursor, 8, "Error skipping JoinSplit::vpubOld")?; @@ -296,7 +326,11 @@ impl ParseFromSlice for JoinSplit { skip_bytes(&mut cursor, 32, "Error skipping JoinSplit::ephemeralKey")?; skip_bytes(&mut cursor, 32, "Error skipping JoinSplit::randomSeed")?; skip_bytes(&mut cursor, 64, "Error skipping JoinSplit::vmacs")?; - skip_bytes(&mut cursor, 192, "Error skipping JoinSplit::proofGroth16")?; + skip_bytes( + &mut cursor, + proof_size, + &format!("Error skipping JoinSplit::proof (size {proof_size})"), + )?; skip_bytes( &mut cursor, 1202, @@ -310,25 +344,25 @@ impl ParseFromSlice for JoinSplit { /// An Orchard action. #[derive(Debug, Clone)] struct Action { - // Cv [IGNORED] - Size[bytes]: 32 + // Cv \[IGNORED\] - Size\[bytes\]: 32 /// A nullifier to a orchard note. /// - /// Size[bytes]: 32 + /// Size\[bytes\]: 32 nullifier: Vec, - // Rk [IGNORED] - Size[bytes]: 32 + // Rk \[IGNORED\] - Size\[bytes\]: 32 /// X-coordinate of the commitment to the note. /// - /// Size[bytes]: 32 + /// Size\[bytes\]: 32 cmx: Vec, /// Ephemeral public key. /// - /// Size[bytes]: 32 + /// Size\[bytes\]: 32 ephemeral_key: Vec, /// Encrypted details of the new note, including its value and recipient's data. /// - /// Size[bytes]: 580 + /// Size\[bytes\]: 580 enc_ciphertext: Vec, - // OutCiphertext [IGNORED] - Size[bytes]: 80 + // OutCiphertext \[IGNORED\] - Size\[bytes\]: 80 } impl Action { @@ -380,59 +414,261 @@ impl ParseFromSlice for Action { } } -/// Full Zcash Transactrion data. +/// Full Zcash transaction data. #[derive(Debug, Clone)] struct TransactionData { /// Indicates if the transaction is an Overwinter-enabled transaction. /// - /// Size[bytes]: [in 4 byte header] + /// Size\[bytes\]: [in 4 byte header] f_overwintered: bool, /// The transaction format version. 
/// - /// Size[bytes]: [in 4 byte header] + /// Size\[bytes\]: [in 4 byte header] version: u32, /// Version group ID, used to specify transaction type and validate its components. /// - /// Size[bytes]: 4 - n_version_group_id: u32, + /// Size\[bytes\]: 4 + n_version_group_id: Option, /// Consensus branch ID, used to identify the network upgrade that the transaction is valid for. /// - /// Size[bytes]: 4 + /// Size\[bytes\]: 4 consensus_branch_id: u32, /// List of transparent inputs in a transaction. /// - /// Size[bytes]: Vec<40+CompactSize> + /// Size\[bytes\]: Vec<40+CompactSize> transparent_inputs: Vec, /// List of transparent outputs in a transaction. /// - /// Size[bytes]: Vec<8+CompactSize> + /// Size\[bytes\]: Vec<8+CompactSize> transparent_outputs: Vec, - // NLockTime [IGNORED] - Size[bytes]: 4 - // NExpiryHeight [IGNORED] - Size[bytes]: 4 - // ValueBalanceSapling [IGNORED] - Size[bytes]: 8 + // NLockTime \[IGNORED\] - Size\[bytes\]: 4 + // NExpiryHeight \[IGNORED\] - Size\[bytes\]: 4 + // ValueBalanceSapling - Size\[bytes\]: 8 + /// Value balance for the Sapling pool (v4/v5). None if not present. + value_balance_sapling: Option, /// List of shielded spends from the Sapling pool /// - /// Size[bytes]: Vec<384> + /// Size\[bytes\]: Vec<384> shielded_spends: Vec, /// List of shielded outputs from the Sapling pool /// - /// Size[bytes]: Vec<948> + /// Size\[bytes\]: Vec<948> shielded_outputs: Vec, /// List of JoinSplit descriptions in a transaction, no longer supported. /// - /// Size[bytes]: Vec<1602-1698> + /// Size\[bytes\]: Vec<1602-1698> #[allow(dead_code)] join_splits: Vec, - //joinSplitPubKey [IGNORED] - Size[bytes]: 32 - //joinSplitSig [IGNORED] - Size[bytes]: 64 - //bindingSigSapling [IGNORED] - Size[bytes]: 64 - ///List of Orchard actions. + /// joinSplitPubKey \[IGNORED\] - Size\[bytes\]: 32 + /// joinSplitSig \[IGNORED\] - Size\[bytes\]: 64 + /// bindingSigSapling \[IGNORED\] - Size\[bytes\]: 64 + /// List of Orchard actions. 
 ///
- /// Size[bytes]: Vec<820>
+ /// Size\[bytes\]: Vec<820>
 orchard_actions: Vec,
+ /// ValueBalanceOrchard - Size\[bytes\]: 8
+ /// Value balance for the Orchard pool (v5 only). None if not present.
+ value_balance_orchard: Option,
+ /// AnchorOrchard - Size\[bytes\]: 32
+ /// In non-coinbase transactions, this is the anchor (authDataRoot) of a prior block's Orchard note commitment tree.
+ /// In the coinbase transaction, this commits to the final Orchard tree state for the current block — i.e., it *is* the block's authDataRoot.
+ /// Present in v5 transactions only, if any Orchard actions exist in the block.
+ anchor_orchard: Option>,
 }

 impl TransactionData {
+ /// Parses a v1 transaction.
+ ///
+ /// A v1 transaction contains the following fields:
+ ///
+ /// - header: u32
+ /// - tx_in_count: usize
+ /// - tx_in: tx_in
+ /// - tx_out_count: usize
+ /// - tx_out: tx_out
+ /// - lock_time: u32
+ pub(crate) fn parse_v1(data: &[u8], version: u32) -> Result<(&[u8], Self), ParseError> {
+ let mut cursor = Cursor::new(data);
+
+ let (remaining_data, transparent_inputs, transparent_outputs) =
+ parse_transparent(&data[cursor.position() as usize..])?;
+ cursor.set_position(data.len() as u64 - remaining_data.len() as u64);
+
+ // let lock_time = read_u32(&mut cursor, "Error reading TransactionData::lock_time")?;
+ skip_bytes(&mut cursor, 4, "Error skipping TransactionData::nLockTime")?;
+
+ Ok((
+ &data[cursor.position() as usize..],
+ TransactionData {
+ f_overwintered: false,
+ version,
+ consensus_branch_id: 0,
+ transparent_inputs,
+ transparent_outputs,
+ // lock_time: Some(lock_time),
+ n_version_group_id: None,
+ value_balance_sapling: None,
+ shielded_spends: Vec::new(),
+ shielded_outputs: Vec::new(),
+ join_splits: Vec::new(),
+ orchard_actions: Vec::new(),
+ value_balance_orchard: None,
+ anchor_orchard: None,
+ },
+ ))
+ }
+
+ /// Parses a v2 transaction.
+ ///
+ /// A v2 transaction contains the following fields:
+ ///
+ /// - header: u32
+ /// - tx_in_count: usize
+ /// - tx_in: tx_in
+ /// - tx_out_count: usize
+ /// - tx_out: tx_out
+ /// - lock_time: u32
+ /// - nJoinSplit: compactSize <- New
+ /// - vJoinSplit: JSDescriptionBCTV14\[nJoinSplit\] <- New
+ /// - joinSplitPubKey: byte\[32\] <- New
+ /// - joinSplitSig: byte\[64\] <- New
+ pub(crate) fn parse_v2(data: &[u8], version: u32) -> Result<(&[u8], Self), ParseError> {
+ let mut cursor = Cursor::new(data);
+
+ let (remaining_data, transparent_inputs, transparent_outputs) =
+ parse_transparent(&data[cursor.position() as usize..])?;
+ cursor.set_position(data.len() as u64 - remaining_data.len() as u64);
+
+ skip_bytes(&mut cursor, 4, "Error skipping TransactionData::nLockTime")?;
+
+ let join_split_count = CompactSize::read(&mut cursor)?;
+ let mut join_splits = Vec::with_capacity(join_split_count as usize);
+ for _ in 0..join_split_count {
+ let (remaining_data, join_split) = JoinSplit::parse_from_slice(
+ &data[cursor.position() as usize..],
+ None,
+ Some(version),
+ )?;
+ join_splits.push(join_split);
+ cursor.set_position(data.len() as u64 - remaining_data.len() as u64);
+ }
+
+ if join_split_count > 0 {
+ skip_bytes(
+ &mut cursor,
+ 32,
+ "Error skipping TransactionData::joinSplitPubKey",
+ )?;
+ skip_bytes(
+ &mut cursor,
+ 64,
+ "could not skip TransactionData::joinSplitSig",
+ )?;
+ }
+
+ Ok((
+ &data[cursor.position() as usize..],
+ TransactionData {
+ f_overwintered: false,
+ version,
+ consensus_branch_id: 0,
+ transparent_inputs,
+ transparent_outputs,
+ join_splits,
+ n_version_group_id: None,
+ value_balance_sapling: None,
+ shielded_spends: Vec::new(),
+ shielded_outputs: Vec::new(),
+ orchard_actions: Vec::new(),
+ value_balance_orchard: None,
+ anchor_orchard: None,
+ },
+ ))
+ }
+
+ /// Parses a v3 transaction.
+ ///
+ /// A v3 transaction contains the following fields:
+ ///
+ /// - header: u32
+ /// - nVersionGroupId: u32 = 0x03C48270 <- New
+ /// - tx_in_count: usize
+ /// - tx_in: tx_in
+ /// - tx_out_count: usize
+ /// - tx_out: tx_out
+ /// - lock_time: u32
+ /// - nExpiryHeight: u32 <- New
+ /// - nJoinSplit: compactSize
+ /// - vJoinSplit: JSDescriptionBCTV14\[nJoinSplit\]
+ /// - joinSplitPubKey: byte\[32\]
+ /// - joinSplitSig: byte\[64\]
+ pub(crate) fn parse_v3(
+ data: &[u8],
+ version: u32,
+ n_version_group_id: u32,
+ ) -> Result<(&[u8], Self), ParseError> {
+ if n_version_group_id != 0x03C48270 {
+ return Err(ParseError::InvalidData(
+ "n_version_group_id must be 0x03C48270".to_string(),
+ ));
+ }
+ let mut cursor = Cursor::new(data);
+
+ let (remaining_data, transparent_inputs, transparent_outputs) =
+ parse_transparent(&data[cursor.position() as usize..])?;
+ cursor.set_position(data.len() as u64 - remaining_data.len() as u64);
+
+ skip_bytes(&mut cursor, 4, "Error skipping TransactionData::nLockTime")?;
+ skip_bytes(
+ &mut cursor,
+ 4,
+ "Error skipping TransactionData::nExpiryHeight",
+ )?;
+
+ let join_split_count = CompactSize::read(&mut cursor)?;
+ let mut join_splits = Vec::with_capacity(join_split_count as usize);
+ for _ in 0..join_split_count {
+ let (remaining_data, join_split) = JoinSplit::parse_from_slice(
+ &data[cursor.position() as usize..],
+ None,
+ Some(version),
+ )?;
+ join_splits.push(join_split);
+ cursor.set_position(data.len() as u64 - remaining_data.len() as u64);
+ }
+
+ if join_split_count > 0 {
+ skip_bytes(
+ &mut cursor,
+ 32,
+ "Error skipping TransactionData::joinSplitPubKey",
+ )?;
+ skip_bytes(
+ &mut cursor,
+ 64,
+ "could not skip TransactionData::joinSplitSig",
+ )?;
+ }
+ Ok((
+ &data[cursor.position() as usize..],
+ TransactionData {
+ f_overwintered: true,
+ version,
+ consensus_branch_id: 0,
+ transparent_inputs,
+ transparent_outputs,
+ join_splits,
+ n_version_group_id: Some(n_version_group_id),
+ value_balance_sapling: None,
+ 
shielded_spends: Vec::new(), + shielded_outputs: Vec::new(), + orchard_actions: Vec::new(), + value_balance_orchard: None, + anchor_orchard: None, + }, + )) + } + fn parse_v4( data: &[u8], version: u32, @@ -440,8 +676,7 @@ impl TransactionData { ) -> Result<(&[u8], Self), ParseError> { if n_version_group_id != 0x892F2085 { return Err(ParseError::InvalidData(format!( - "version group ID {:x} must be 0x892F2085 for v4 transactions", - n_version_group_id + "version group ID {n_version_group_id:x} must be 0x892F2085 for v4 transactions" ))); } let mut cursor = Cursor::new(data); @@ -456,11 +691,10 @@ impl TransactionData { 4, "Error skipping TransactionData::nExpiryHeight", )?; - skip_bytes( + let value_balance_sapling = Some(read_i64( &mut cursor, - 8, - "Error skipping TransactionData::valueBalance", - )?; + "Error reading TransactionData::valueBalanceSapling", + )?); let spend_count = CompactSize::read(&mut cursor)?; let mut shielded_spends = Vec::with_capacity(spend_count as usize); @@ -481,8 +715,11 @@ impl TransactionData { let join_split_count = CompactSize::read(&mut cursor)?; let mut join_splits = Vec::with_capacity(join_split_count as usize); for _ in 0..join_split_count { - let (remaining_data, join_split) = - JoinSplit::parse_from_slice(&data[cursor.position() as usize..], None, None)?; + let (remaining_data, join_split) = JoinSplit::parse_from_slice( + &data[cursor.position() as usize..], + None, + Some(version), + )?; join_splits.push(join_split); cursor.set_position(data.len() as u64 - remaining_data.len() as u64); } @@ -512,14 +749,17 @@ impl TransactionData { TransactionData { f_overwintered: true, version, - n_version_group_id, + n_version_group_id: Some(n_version_group_id), consensus_branch_id: 0, transparent_inputs, transparent_outputs, + value_balance_sapling, shielded_spends, shielded_outputs, join_splits, orchard_actions: Vec::new(), + value_balance_orchard: None, + anchor_orchard: None, }, )) } @@ -531,8 +771,7 @@ impl TransactionData { ) -> 
Result<(&[u8], Self), ParseError> {
 if n_version_group_id != 0x26A7270A {
 return Err(ParseError::InvalidData(format!(
- "version group ID {:x} must be 0x892F2085 for v5 transactions",
- n_version_group_id
+ "version group ID {n_version_group_id:x} must be 0x26A7270A for v5 transactions"
 )));
 }
 let mut cursor = Cursor::new(data);
@@ -556,8 +795,7 @@ impl TransactionData {
 let spend_count = CompactSize::read(&mut cursor)?;
 if spend_count >= (1 << 16) {
 return Err(ParseError::InvalidData(format!(
- "spendCount ({}) must be less than 2^16",
- spend_count
+ "spendCount ({spend_count}) must be less than 2^16"
 )));
 }
 let mut shielded_spends = Vec::with_capacity(spend_count as usize);
@@ -570,8 +808,7 @@
 let output_count = CompactSize::read(&mut cursor)?;
 if output_count >= (1 << 16) {
 return Err(ParseError::InvalidData(format!(
- "outputCount ({}) must be less than 2^16",
- output_count
+ "outputCount ({output_count}) must be less than 2^16"
 )));
 }
 let mut shielded_outputs = Vec::with_capacity(output_count as usize);
@@ -582,13 +819,14 @@
 cursor.set_position(data.len() as u64 - remaining_data.len() as u64);
 }

- if spend_count + output_count > 0 {
- skip_bytes(
+ let value_balance_sapling = if spend_count + output_count > 0 {
+ Some(read_i64(
 &mut cursor,
- 8,
- "Error skipping TransactionData::valueBalance",
- )?;
- }
+ "Error reading TransactionData::valueBalanceSapling",
+ )?)
+ } else { + None + }; if spend_count > 0 { skip_bytes( &mut cursor, @@ -624,8 +862,7 @@ impl TransactionData { let actions_count = CompactSize::read(&mut cursor)?; if actions_count >= (1 << 16) { return Err(ParseError::InvalidData(format!( - "actionsCount ({}) must be less than 2^16", - actions_count + "actionsCount ({actions_count}) must be less than 2^16" ))); } let mut orchard_actions = Vec::with_capacity(actions_count as usize); @@ -636,23 +873,23 @@ impl TransactionData { cursor.set_position(data.len() as u64 - remaining_data.len() as u64); } + let mut value_balance_orchard = None; + let mut anchor_orchard = None; if actions_count > 0 { skip_bytes( &mut cursor, 1, "Error skipping TransactionData::flagsOrchard", )?; - skip_bytes( + value_balance_orchard = Some(read_i64( &mut cursor, - 8, - "Error skipping TransactionData::valueBalanceOrchard", - )?; - skip_bytes( + "Error reading TransactionData::valueBalanceOrchard", + )?); + anchor_orchard = Some(read_bytes( &mut cursor, 32, - "Error skipping TransactionData::anchorOrchard", - )?; - + "Error reading TransactionData::anchorOrchard", + )?); let proofs_count = CompactSize::read(&mut cursor)?; skip_bytes( &mut cursor, @@ -676,14 +913,17 @@ impl TransactionData { TransactionData { f_overwintered: true, version, - n_version_group_id, + n_version_group_id: Some(n_version_group_id), consensus_branch_id, transparent_inputs, transparent_outputs, + value_balance_sapling, shielded_spends, shielded_outputs, join_splits: Vec::new(), orchard_actions, + value_balance_orchard, + anchor_orchard, }, )) } @@ -713,6 +953,7 @@ impl ParseFromSlice for FullTransaction { "txid must be used for FullTransaction::parse_from_slice".to_string(), ) })?; + // TODO: 🤯 if tx_version.is_some() { return Err(ParseError::InvalidData( "tx_version must be None for FullTransaction::parse_from_slice".to_string(), @@ -722,35 +963,63 @@ impl ParseFromSlice for FullTransaction { let header = read_u32(&mut cursor, "Error reading 
FullTransaction::header")?; let f_overwintered = (header >> 31) == 1; - if !f_overwintered { - return Err(ParseError::InvalidData( - "fOverwinter flag must be set".to_string(), - )); - } + let version = header & 0x7FFFFFFF; - if version < 4 { - return Err(ParseError::InvalidData(format!( - "version number {} must be greater or equal to 4", - version - ))); + + match version { + 1 | 2 => { + if f_overwintered { + return Err(ParseError::InvalidData( + "fOverwintered must be unset for tx versions 1 and 2".to_string(), + )); + } + } + 3..=5 => { + if !f_overwintered { + return Err(ParseError::InvalidData( + "fOverwintered must be set for tx versions 3 and above".to_string(), + )); + } + } + _ => { + return Err(ParseError::InvalidData(format!( + "Unsupported tx version {version}" + ))) + } } - let n_version_group_id = read_u32( - &mut cursor, - "Error reading FullTransaction::n_version_group_id", - )?; - let (remaining_data, transaction_data) = if version <= 4 { - TransactionData::parse_v4( + let n_version_group_id: Option = match version { + 3..=5 => Some(read_u32( + &mut cursor, + "Error reading FullTransaction::n_version_group_id", + )?), + _ => None, + }; + + let (remaining_data, transaction_data) = match version { + 1 => TransactionData::parse_v1(&data[cursor.position() as usize..], version)?, + 2 => TransactionData::parse_v2(&data[cursor.position() as usize..], version)?, + 3 => TransactionData::parse_v3( &data[cursor.position() as usize..], version, - n_version_group_id, - )? - } else { - TransactionData::parse_v5( + n_version_group_id.unwrap(), // This won't fail, because of the above match + )?, + 4 => TransactionData::parse_v4( &data[cursor.position() as usize..], version, - n_version_group_id, - )? 
+ n_version_group_id.unwrap(), // This won't fail, because of the above match + )?, + 5 => TransactionData::parse_v5( + &data[cursor.position() as usize..], + version, + n_version_group_id.unwrap(), // This won't fail, because of the above match + )?, + + _ => { + return Err(ParseError::InvalidData(format!( + "Unsupported tx version {version}" + ))) + } }; let full_transaction = FullTransaction { @@ -775,7 +1044,7 @@ impl FullTransaction { } /// Returns the transaction version group id. - pub fn n_version_group_id(&self) -> u32 { + pub fn n_version_group_id(&self) -> Option { self.raw_transaction.n_version_group_id } @@ -784,8 +1053,8 @@ impl FullTransaction { self.raw_transaction.consensus_branch_id } - /// Returns a vec of transparent input script_sigs for the transaction. - pub fn transparent_inputs(&self) -> Vec> { + /// Returns a vec of transparent inputs: (prev_txid, prev_index, script_sig). + pub fn transparent_inputs(&self) -> Vec<(Vec, u32, Vec)> { self.raw_transaction .transparent_inputs .iter() @@ -793,15 +1062,25 @@ impl FullTransaction { .collect() } - /// Returns a vec of transparent output values for the transaction. - pub fn transparent_outputs(&self) -> Vec { + /// Returns a vec of transparent outputs: (value, script_hash). + pub fn transparent_outputs(&self) -> Vec<(u64, Vec)> { self.raw_transaction .transparent_outputs .iter() - .map(|input| input.clone().into_inner()) + .map(|output| output.clone().into_inner()) .collect() } + /// Returns sapling and orchard value balances for the transaction. + /// + /// Returned as (Option\, Option\). + pub fn value_balances(&self) -> (Option, Option) { + ( + self.raw_transaction.value_balance_sapling, + self.raw_transaction.value_balance_orchard, + ) + } + /// Returns a vec of sapling nullifiers for the transaction. pub fn shielded_spends(&self) -> Vec> { self.raw_transaction @@ -835,6 +1114,13 @@ impl FullTransaction { .collect() } + /// Returns the orchard anchor of the transaction. 
+ /// + /// If this is the Coinbase transaction then this returns the AuthDataRoot of the block. + pub fn anchor_orchard(&self) -> Option> { + self.raw_transaction.anchor_orchard.clone() + } + /// Returns the transaction as raw bytes. pub fn raw_bytes(&self) -> Vec { self.raw_bytes.clone() @@ -846,58 +1132,330 @@ impl FullTransaction { } /// Converts a zcash full transaction into a compact transaction. + #[deprecated] pub fn to_compact(self, index: u64) -> Result { - let hash = self.tx_id; + self.to_compact_tx(Some(index), &PoolTypeFilter::default()) + } - // NOTE: LightWalletD currently does not return a fee and is not currently priority here. Please open an Issue or PR at the Zingo-Indexer github (https://github.com/zingolabs/zingo-indexer) if you require this functionality. + /// Converts a Zcash Transaction into a `CompactTx` of the Light wallet protocol. + /// if the transaction you want to convert is a mempool transaction you can specify `None`. + /// specify the `PoolType`s that the transaction should include in the `pool_types` argument + /// with a `PoolTypeFilter` indicating which pools the compact block should include. + pub fn to_compact_tx( + self, + index: Option, + pool_types: &PoolTypeFilter, + ) -> Result { + let hash = self.tx_id(); + + // NOTE: LightWalletD currently does not return a fee and is not currently priority here. + // Please open an Issue or PR at the Zingo-Indexer github (https://github.com/zingolabs/zingo-indexer) + // if you require this functionality. 
let fee = 0; - let spends = self - .raw_transaction - .shielded_spends - .iter() - .map(|spend| CompactSaplingSpend { - nf: spend.nullifier.clone(), - }) - .collect(); + let spends = if pool_types.includes_sapling() { + self.raw_transaction + .shielded_spends + .iter() + .map(|spend| CompactSaplingSpend { + nf: spend.nullifier.clone(), + }) + .collect() + } else { + vec![] + }; - let outputs = self - .raw_transaction - .shielded_outputs - .iter() - .map(|output| CompactSaplingOutput { - cmu: output.cmu.clone(), - ephemeral_key: output.ephemeral_key.clone(), - ciphertext: output.enc_ciphertext[..52].to_vec(), - }) - .collect(); - - let actions = self - .raw_transaction - .orchard_actions - .iter() - .map(|action| CompactOrchardAction { - nullifier: action.nullifier.clone(), - cmx: action.cmx.clone(), - ephemeral_key: action.ephemeral_key.clone(), - ciphertext: action.enc_ciphertext[..52].to_vec(), - }) - .collect(); + let outputs = if pool_types.includes_sapling() { + self.raw_transaction + .shielded_outputs + .iter() + .map(|output| CompactSaplingOutput { + cmu: output.cmu.clone(), + ephemeral_key: output.ephemeral_key.clone(), + ciphertext: output.enc_ciphertext[..52].to_vec(), + }) + .collect() + } else { + vec![] + }; + + let actions = if pool_types.includes_orchard() { + self.raw_transaction + .orchard_actions + .iter() + .map(|action| CompactOrchardAction { + nullifier: action.nullifier.clone(), + cmx: action.cmx.clone(), + ephemeral_key: action.ephemeral_key.clone(), + ciphertext: action.enc_ciphertext[..52].to_vec(), + }) + .collect() + } else { + vec![] + }; + + let vout = if pool_types.includes_transparent() { + self.raw_transaction + .transparent_outputs + .iter() + .map(|t_out| CompactTxOut { + value: t_out.value, + script_pub_key: t_out.script_hash.clone(), + }) + .collect() + } else { + vec![] + }; + + let vin = if pool_types.includes_transparent() { + self.raw_transaction + .transparent_inputs + .iter() + .filter_map(|t_in| { + if t_in.is_null() { + 
None + } else { + Some(CompactTxIn { + prevout_txid: t_in.prev_txid.clone(), + prevout_index: t_in.prev_index, + }) + } + }) + .collect() + } else { + vec![] + }; Ok(CompactTx { - index, - hash, + index: index.unwrap_or(0), // this assumes that mempool txs have a zeroed index + txid: hash, fee, spends, outputs, actions, + vin, + vout, }) } - /// Returns true if the transaction contains either sapling spends or outputs. + /// Returns true if the transaction contains either sapling spends or outputs, or orchard actions. + #[allow(dead_code)] pub(crate) fn has_shielded_elements(&self) -> bool { !self.raw_transaction.shielded_spends.is_empty() || !self.raw_transaction.shielded_outputs.is_empty() || !self.raw_transaction.orchard_actions.is_empty() } } + +#[cfg(test)] +mod tests { + use super::*; + use zaino_testvectors::transactions::get_test_vectors; + + /// Test parsing v1 transactions using test vectors. + /// Validates that FullTransaction::parse_from_slice correctly handles v1 transaction format. + #[test] + fn test_v1_transaction_parsing_with_test_vectors() { + let test_vectors = get_test_vectors(); + let v1_vectors: Vec<_> = test_vectors.iter().filter(|tv| tv.version == 1).collect(); + + assert!(!v1_vectors.is_empty(), "No v1 test vectors found"); + + for (i, vector) in v1_vectors.iter().enumerate() { + let result = FullTransaction::parse_from_slice( + &vector.tx, + Some(vec![vector.txid.to_vec()]), + None, + ); + + assert!( + result.is_ok(), + "Failed to parse v1 test vector #{}: {:?}. 
Description: {}", + i, + result.err(), + vector.description + ); + + let (remaining, parsed_tx) = result.unwrap(); + assert!( + remaining.is_empty(), + "Should consume all data for v1 transaction #{i}" + ); + + // Verify version matches + assert_eq!( + parsed_tx.raw_transaction.version, 1, + "Version mismatch for v1 transaction #{i}" + ); + + // Verify transaction properties match test vector expectations + assert_eq!( + parsed_tx.raw_transaction.transparent_inputs.len(), + vector.transparent_inputs, + "Transparent inputs mismatch for v1 transaction #{i}" + ); + + assert_eq!( + parsed_tx.raw_transaction.transparent_outputs.len(), + vector.transparent_outputs, + "Transparent outputs mismatch for v1 transaction #{i}" + ); + } + } + + /// Test parsing v2 transactions using test vectors. + /// Validates that FullTransaction::parse_from_slice correctly handles v2 transaction format. + #[test] + fn test_v2_transaction_parsing_with_test_vectors() { + let test_vectors = get_test_vectors(); + let v2_vectors: Vec<_> = test_vectors.iter().filter(|tv| tv.version == 2).collect(); + + assert!(!v2_vectors.is_empty(), "No v2 test vectors found"); + + for (i, vector) in v2_vectors.iter().enumerate() { + let result = FullTransaction::parse_from_slice( + &vector.tx, + Some(vec![vector.txid.to_vec()]), + None, + ); + + assert!( + result.is_ok(), + "Failed to parse v2 test vector #{}: {:?}. 
Description: {}", + i, + result.err(), + vector.description + ); + + let (remaining, parsed_tx) = result.unwrap(); + assert!( + remaining.is_empty(), + "Should consume all data for v2 transaction #{}: {} bytes remaining, total length: {}", + i, remaining.len(), vector.tx.len() + ); + + // Verify version matches + assert_eq!( + parsed_tx.raw_transaction.version, 2, + "Version mismatch for v2 transaction #{i}" + ); + + // Verify transaction properties match test vector expectations + assert_eq!( + parsed_tx.raw_transaction.transparent_inputs.len(), + vector.transparent_inputs, + "Transparent inputs mismatch for v2 transaction #{i}" + ); + + assert_eq!( + parsed_tx.raw_transaction.transparent_outputs.len(), + vector.transparent_outputs, + "Transparent outputs mismatch for v2 transaction #{i}" + ); + } + } + + /// Test parsing v3 transactions using test vectors. + /// Validates that FullTransaction::parse_from_slice correctly handles v3 transaction format. + #[test] + fn test_v3_transaction_parsing_with_test_vectors() { + let test_vectors = get_test_vectors(); + let v3_vectors: Vec<_> = test_vectors.iter().filter(|tv| tv.version == 3).collect(); + + assert!(!v3_vectors.is_empty(), "No v3 test vectors found"); + + for (i, vector) in v3_vectors.iter().enumerate() { + let result = FullTransaction::parse_from_slice( + &vector.tx, + Some(vec![vector.txid.to_vec()]), + None, + ); + + assert!( + result.is_ok(), + "Failed to parse v3 test vector #{}: {:?}. 
Description: {}", + i, + result.err(), + vector.description + ); + + let (remaining, parsed_tx) = result.unwrap(); + assert!( + remaining.is_empty(), + "Should consume all data for v3 transaction #{}: {} bytes remaining, total length: {}", + i, remaining.len(), vector.tx.len() + ); + + // Verify version matches + assert_eq!( + parsed_tx.raw_transaction.version, 3, + "Version mismatch for v3 transaction #{i}" + ); + + // Verify transaction properties match test vector expectations + assert_eq!( + parsed_tx.raw_transaction.transparent_inputs.len(), + vector.transparent_inputs, + "Transparent inputs mismatch for v3 transaction #{i}" + ); + + assert_eq!( + parsed_tx.raw_transaction.transparent_outputs.len(), + vector.transparent_outputs, + "Transparent outputs mismatch for v3 transaction #{i}" + ); + } + } + + /// Test parsing v4 transactions using test vectors. + /// Validates that FullTransaction::parse_from_slice correctly handles v4 transaction format. + /// This also serves as a regression test for current v4 functionality. + #[test] + fn test_v4_transaction_parsing_with_test_vectors() { + let test_vectors = get_test_vectors(); + let v4_vectors: Vec<_> = test_vectors.iter().filter(|tv| tv.version == 4).collect(); + + assert!(!v4_vectors.is_empty(), "No v4 test vectors found"); + + for (i, vector) in v4_vectors.iter().enumerate() { + let result = FullTransaction::parse_from_slice( + &vector.tx, + Some(vec![vector.txid.to_vec()]), + None, + ); + + assert!( + result.is_ok(), + "Failed to parse v4 test vector #{}: {:?}. 
Description: {}", + i, + result.err(), + vector.description + ); + + let (remaining, parsed_tx) = result.unwrap(); + assert!( + remaining.is_empty(), + "Should consume all data for v4 transaction #{i}" + ); + + // Verify version matches + assert_eq!( + parsed_tx.raw_transaction.version, 4, + "Version mismatch for v4 transaction #{i}" + ); + + // Verify transaction properties match test vector expectations + assert_eq!( + parsed_tx.raw_transaction.transparent_inputs.len(), + vector.transparent_inputs, + "Transparent inputs mismatch for v4 transaction #{i}" + ); + + assert_eq!( + parsed_tx.raw_transaction.transparent_outputs.len(), + vector.transparent_outputs, + "Transparent outputs mismatch for v4 transaction #{i}" + ); + } + } +} diff --git a/zaino-fetch/src/chain/utils.rs b/zaino-fetch/src/chain/utils.rs index 8ba450dd5..4570a3a7c 100644 --- a/zaino-fetch/src/chain/utils.rs +++ b/zaino-fetch/src/chain/utils.rs @@ -9,11 +9,12 @@ use crate::chain::error::ParseError; pub trait ParseFromSlice { /// Reads data from a bytestring, consuming data read, and returns an instance of self along with the remaining data in the bytestring given. /// - /// txid is givin as an input as this is taken from a get_block verbose=1 call. + /// txid is giving as an input as this is taken from a get_block verbose=1 call. /// /// tx_version is used for deserializing sapling spends and outputs. fn parse_from_slice( data: &[u8], + // TODO: Why is txid a vec of vecs? txid: Option>>, tx_version: Option, ) -> Result<(&[u8], Self), ParseError> @@ -34,7 +35,7 @@ pub(crate) fn skip_bytes( Ok(()) } -/// Reads the next n bytes from cursor into a vec, returns error message given if eof is reached. +/// Reads the next n bytes from cursor into a `vec`, returns error message given if eof is reached. 
pub(crate) fn read_bytes( cursor: &mut Cursor<&[u8]>, n: usize, @@ -63,6 +64,14 @@ pub(crate) fn read_u32(cursor: &mut Cursor<&[u8]>, error_msg: &str) -> Result, error_msg: &str) -> Result { + cursor + .read_i64::() + .map_err(ParseError::from) + .map_err(|_| ParseError::InvalidData(error_msg.to_string())) +} + /// Reads the next 4 bytes from cursor into an i32, returns error message given if eof is reached. pub(crate) fn read_i32(cursor: &mut Cursor<&[u8]>, error_msg: &str) -> Result { cursor @@ -163,7 +172,7 @@ impl CompactSize { } } - /// Reads an integer encoded in contact form and performs checked conversion + /// Reads an integer encoded in compact form and performs checked conversion /// to the target type. #[allow(dead_code)] pub(crate) fn read_t>(mut reader: R) -> io::Result { @@ -195,20 +204,3 @@ impl CompactSize { } } } - -/// Takes a vec of big endian hex encoded txids and returns them as a vec of little endian raw bytes. -pub(crate) fn display_txids_to_server(txids: Vec) -> Result>, ParseError> { - txids - .iter() - .map(|txid| { - txid.as_bytes() - .chunks(2) - .map(|chunk| { - let hex_pair = std::str::from_utf8(chunk).map_err(ParseError::from)?; - u8::from_str_radix(hex_pair, 16).map_err(ParseError::from) - }) - .rev() - .collect::, _>>() - }) - .collect::>, _>>() -} diff --git a/zaino-fetch/src/jsonrpc/connector.rs b/zaino-fetch/src/jsonrpc/connector.rs deleted file mode 100644 index ee5b35d60..000000000 --- a/zaino-fetch/src/jsonrpc/connector.rs +++ /dev/null @@ -1,434 +0,0 @@ -//! JsonRPC client implementation. -//! -//! TODO: - Add option for http connector. -//! - Refactor JsonRpcConnectorError into concrete error types and implement fmt::display [https://github.com/zingolabs/zaino/issues/67]. 
- -use http::Uri; -use reqwest::{Client, Url}; -use serde::{Deserialize, Serialize}; -use serde_json::Value; -use std::sync::atomic::{AtomicI32, Ordering}; - -use crate::jsonrpc::{ - error::JsonRpcConnectorError, - response::{ - GetBalanceResponse, GetBlockResponse, GetBlockchainInfoResponse, GetInfoResponse, - GetSubtreesResponse, GetTransactionResponse, GetTreestateResponse, GetUtxosResponse, - SendTransactionResponse, TxidsResponse, - }, -}; - -#[derive(Serialize, Deserialize, Debug)] -struct RpcRequest { - jsonrpc: String, - method: String, - params: T, - id: i32, -} - -#[derive(Serialize, Deserialize, Debug)] -struct RpcResponse { - id: i32, - jsonrpc: Option, - result: T, - error: Option, -} - -#[derive(Serialize, Deserialize, Debug)] -struct RpcError { - code: i32, - message: String, - data: Option, -} - -/// JsonRPC Client config data. -#[derive(Debug)] -pub struct JsonRpcConnector { - url: Url, - id_counter: AtomicI32, - user: Option, - password: Option, -} - -impl JsonRpcConnector { - /// Returns a new JsonRpcConnector instance, tests uri and returns error if connection is not established. - pub async fn new( - uri: Uri, - user: Option, - password: Option, - ) -> Result { - let url = reqwest::Url::parse(&uri.to_string())?; - Ok(Self { - url, - id_counter: AtomicI32::new(0), - user, - password, - }) - } - - /// Returns the http::uri the JsonRpcConnector is configured to send requests to. - pub fn uri(&self) -> Result { - Ok(self.url.as_str().parse()?) - } - - /// Returns the reqwest::url the JsonRpcConnector is configured to send requests to. - pub fn url(&self) -> Url { - self.url.clone() - } - - /// Sends a jsonRPC request and returns the response. - /// - /// TODO: This function currently resends the call up to 5 times on a server response of "Work queue depth exceeded". - /// This is because the node's queue can become overloaded and stop servicing RPCs. 
- /// This functionality is weak and should be incorporated in Zaino's queue mechanism [WIP] that handles various errors appropriately. - async fn send_request Deserialize<'de>>( - &self, - method: &str, - params: T, - ) -> Result { - let id = self.id_counter.fetch_add(1, Ordering::SeqCst); - let req = RpcRequest { - jsonrpc: "2.0".to_string(), - method: method.to_string(), - params, - id, - }; - let max_attempts = 5; - let mut attempts = 0; - loop { - attempts += 1; - let client = Client::builder() - .connect_timeout(std::time::Duration::from_secs(2)) - .timeout(std::time::Duration::from_secs(5)) - .redirect(reqwest::redirect::Policy::none()) - .build()?; - - let mut request_builder = client - .post(self.url.clone()) - .header("Content-Type", "application/json"); - if let (Some(user), Some(password)) = (&self.user, &self.password) { - request_builder = request_builder.basic_auth(user.clone(), Some(password.clone())); - } - let request_body = - serde_json::to_string(&req).map_err(JsonRpcConnectorError::SerdeJsonError)?; - let response = request_builder - .body(request_body) - .send() - .await - .map_err(JsonRpcConnectorError::ReqwestError)?; - - let status = response.status(); - let body_bytes = response - .bytes() - .await - .map_err(JsonRpcConnectorError::ReqwestError)?; - let body_str = String::from_utf8_lossy(&body_bytes); - - if body_str.contains("Work queue depth exceeded") { - if attempts >= max_attempts { - return Err(JsonRpcConnectorError::new( - "Error: The node's rpc queue depth was exceeded after multiple attempts", - )); - } - tokio::time::sleep(std::time::Duration::from_millis(500)).await; - continue; - } - if !status.is_success() { - return Err(JsonRpcConnectorError::new(format!( - "Error: Error status from node's rpc server: {}, {}", - status, body_str - ))); - } - - let response: RpcResponse = serde_json::from_slice(&body_bytes) - .map_err(JsonRpcConnectorError::SerdeJsonError)?; - return match response.error { - Some(error) => 
Err(JsonRpcConnectorError::new(format!( - "Error: Error from node's rpc server: {} - {}", - error.code, error.message - ))), - None => Ok(response.result), - }; - } - } - - /// Returns software information from the RPC server, as a [`GetInfo`] JSON struct. - /// - /// zcashd reference: [`getinfo`](https://zcash.github.io/rpc/getinfo.html) - /// method: post - /// tags: control - pub async fn get_info(&self) -> Result { - self.send_request::<(), GetInfoResponse>("getinfo", ()) - .await - } - - /// Returns blockchain state information, as a [`GetBlockChainInfo`] JSON struct. - /// - /// zcashd reference: [`getblockchaininfo`](https://zcash.github.io/rpc/getblockchaininfo.html) - /// method: post - /// tags: blockchain - pub async fn get_blockchain_info( - &self, - ) -> Result { - self.send_request::<(), GetBlockchainInfoResponse>("getblockchaininfo", ()) - .await - } - - /// Returns the total balance of a provided `addresses` in an [`AddressBalance`] instance. - /// - /// zcashd reference: [`getaddressbalance`](https://zcash.github.io/rpc/getaddressbalance.html) - /// method: post - /// tags: address - /// - /// # Parameters - /// - /// - `address_strings`: (object, example={"addresses": ["tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ"]}) A JSON map with a single entry - /// - `addresses`: (array of strings) A list of base-58 encoded addresses. - pub async fn get_address_balance( - &self, - addresses: Vec, - ) -> Result { - let params = vec![serde_json::json!({ "addresses": addresses })]; - self.send_request("getaddressbalance", params).await - } - - /// Sends the raw bytes of a signed transaction to the local node's mempool, if the transaction is valid. - /// Returns the [`SentTransactionHash`] for the transaction, as a JSON string. 
- /// - /// zcashd reference: [`sendrawtransaction`](https://zcash.github.io/rpc/sendrawtransaction.html) - /// method: post - /// tags: transaction - /// - /// # Parameters - /// - /// - `raw_transaction_hex`: (string, required, example="signedhex") The hex-encoded raw transaction bytes. - pub async fn send_raw_transaction( - &self, - raw_transaction_hex: String, - ) -> Result { - let params = vec![serde_json::to_value(raw_transaction_hex)?]; - self.send_request("sendrawtransaction", params).await - } - - /// Returns the requested block by hash or height, as a [`GetBlock`] JSON string. - /// If the block is not in Zebra's state, returns - /// [error code `-8`.](https://github.com/zcash/zcash/issues/5758) - /// - /// zcashd reference: [`getblock`](https://zcash.github.io/rpc/getblock.html) - /// method: post - /// tags: blockchain - /// - /// # Parameters - /// - /// - `hash_or_height`: (string, required, example="1") The hash or height for the block to be returned. - /// - `verbosity`: (number, optional, default=1, example=1) 0 for hex encoded data, 1 for a json object, and 2 for json object with transaction data. - pub async fn get_block( - &self, - hash_or_height: String, - verbosity: Option, - ) -> Result { - let params = match verbosity { - Some(v) => vec![ - serde_json::to_value(hash_or_height)?, - serde_json::to_value(v)?, - ], - None => vec![ - serde_json::to_value(hash_or_height)?, - serde_json::to_value(1)?, - ], - }; - self.send_request("getblock", params).await - } - - /// Returns all transaction ids in the memory pool, as a JSON array. - /// - /// zcashd reference: [`getrawmempool`](https://zcash.github.io/rpc/getrawmempool.html) - /// method: post - /// tags: blockchain - pub async fn get_raw_mempool(&self) -> Result { - self.send_request::<(), TxidsResponse>("getrawmempool", ()) - .await - } - - /// Returns information about the given block's Sapling & Orchard tree state. 
- /// - /// zcashd reference: [`z_gettreestate`](https://zcash.github.io/rpc/z_gettreestate.html) - /// method: post - /// tags: blockchain - /// - /// # Parameters - /// - /// - `hash | height`: (string, required, example="00000000febc373a1da2bd9f887b105ad79ddc26ac26c2b28652d64e5207c5b5") The block hash or height. - pub async fn get_treestate( - &self, - hash_or_height: String, - ) -> Result { - let params = vec![serde_json::to_value(hash_or_height)?]; - self.send_request("z_gettreestate", params).await - } - - /// Returns information about a range of Sapling or Orchard subtrees. - /// - /// zcashd reference: [`z_getsubtreesbyindex`](https://zcash.github.io/rpc/z_getsubtreesbyindex.html) - TODO: fix link - /// method: post - /// tags: blockchain - /// - /// # Parameters - /// - /// - `pool`: (string, required) The pool from which subtrees should be returned. Either "sapling" or "orchard". - /// - `start_index`: (number, required) The index of the first 2^16-leaf subtree to return. - /// - `limit`: (number, optional) The maximum number of subtree values to return. - pub async fn get_subtrees_by_index( - &self, - pool: String, - start_index: u16, - limit: Option, - ) -> Result { - let params = match limit { - Some(v) => vec![ - serde_json::to_value(pool)?, - serde_json::to_value(start_index)?, - serde_json::to_value(v)?, - ], - None => vec![ - serde_json::to_value(pool)?, - serde_json::to_value(start_index)?, - ], - }; - self.send_request("z_getsubtreesbyindex", params).await - } - - /// Returns the raw transaction data, as a [`GetRawTransaction`] JSON string or structure. - /// - /// zcashd reference: [`getrawtransaction`](https://zcash.github.io/rpc/getrawtransaction.html) - /// method: post - /// tags: transaction - /// - /// # Parameters - /// - /// - `txid`: (string, required, example="mytxid") The transaction ID of the transaction to be returned. 
- /// - `verbose`: (number, optional, default=0, example=1) If 0, return a string of hex-encoded data, otherwise return a JSON object. - pub async fn get_raw_transaction( - &self, - txid_hex: String, - verbose: Option, - ) -> Result { - let params = match verbose { - Some(v) => vec![serde_json::to_value(txid_hex)?, serde_json::to_value(v)?], - None => vec![serde_json::to_value(txid_hex)?, serde_json::to_value(0)?], - }; - - self.send_request("getrawtransaction", params).await - } - - /// Returns the transaction ids made by the provided transparent addresses. - /// - /// zcashd reference: [`getaddresstxids`](https://zcash.github.io/rpc/getaddresstxids.html) - /// method: post - /// tags: address - /// - /// # Parameters - /// - /// - `request`: (object, required, example={\"addresses\": [\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"], \"start\": 1000, \"end\": 2000}) A struct with the following named fields: - /// - `addresses`: (json array of string, required) The addresses to get transactions from. - /// - `start`: (numeric, required) The lower height to start looking for transactions (inclusive). - /// - `end`: (numeric, required) The top height to stop looking for transactions (inclusive). - pub async fn get_address_txids( - &self, - addresses: Vec, - start: u32, - end: u32, - ) -> Result { - let params = serde_json::json!({ - "addresses": addresses, - "start": start, - "end": end - }); - - self.send_request("getaddresstxids", vec![params]).await - } - - /// Returns all unspent outputs for a list of addresses. - /// - /// zcashd reference: [`getaddressutxos`](https://zcash.github.io/rpc/getaddressutxos.html) - /// method: post - /// tags: address - /// - /// # Parameters - /// - /// - `addresses`: (array, required, example={\"addresses\": [\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"]}) The addresses to get outputs from. 
- pub async fn get_address_utxos( - &self, - addresses: Vec, - ) -> Result, JsonRpcConnectorError> { - let params = vec![serde_json::json!({ "addresses": addresses })]; - self.send_request("getaddressutxos", params).await - } -} - -/// Tests connection with zebrad / zebrad. -async fn test_node_connection( - url: Url, - user: Option, - password: Option, -) -> Result<(), JsonRpcConnectorError> { - let client = Client::builder() - .connect_timeout(std::time::Duration::from_secs(2)) - .timeout(std::time::Duration::from_secs(5)) - .redirect(reqwest::redirect::Policy::none()) - .build()?; - - let user = user.unwrap_or_else(|| "xxxxxx".to_string()); - let password = password.unwrap_or_else(|| "xxxxxx".to_string()); - let request_body = r#"{"jsonrpc":"2.0","method":"getinfo","params":[],"id":1}"#; - let mut request_builder = client - .post(url.clone()) - .header("Content-Type", "application/json") - .body(request_body); - request_builder = request_builder.basic_auth(user, Some(password)); // Used basic_auth method - - let response = request_builder - .send() - .await - .map_err(JsonRpcConnectorError::ReqwestError)?; - let body_bytes = response - .bytes() - .await - .map_err(JsonRpcConnectorError::ReqwestError)?; - let _response: RpcResponse = - serde_json::from_slice(&body_bytes).map_err(JsonRpcConnectorError::SerdeJsonError)?; - Ok(()) -} - -/// Tries to connect to zebrad/zcashd using IPv4 and IPv6 and returns the correct uri type, exits program with error message if connection cannot be established. 
-pub async fn test_node_and_return_uri( - port: &u16, - user: Option, - password: Option, -) -> Result { - let ipv4_uri: Url = format!("http://127.0.0.1:{}", port).parse()?; - let ipv6_uri: Url = format!("http://[::1]:{}", port).parse()?; - let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500)); - for _ in 0..3 { - match test_node_connection(ipv4_uri.clone(), user.clone(), password.clone()).await { - Ok(_) => { - println!("Connected to node using IPv4 at address {}.", ipv4_uri); - return Ok(ipv4_uri.as_str().parse()?); - } - Err(_e_ipv4) => { - match test_node_connection(ipv6_uri.clone(), user.clone(), password.clone()).await { - Ok(_) => { - println!("Connected to node using IPv6 at address {}.", ipv6_uri); - return Ok(ipv6_uri.as_str().parse()?); - } - Err(_e_ipv6) => { - tokio::time::sleep(std::time::Duration::from_secs(3)).await; - } - } - } - } - interval.tick().await; - } - eprintln!("Error: Could not establish connection with node. \nPlease check config and confirm node is listening at the correct address and the correct authorisation details have been entered. \nExiting.."); - std::process::exit(1); -} diff --git a/zaino-fetch/src/jsonrpc/error.rs b/zaino-fetch/src/jsonrpc/error.rs deleted file mode 100644 index e575c7d6e..000000000 --- a/zaino-fetch/src/jsonrpc/error.rs +++ /dev/null @@ -1,47 +0,0 @@ -//! Hold error types for the JsonRpcConnector and related functionality. - -/// General error type for handling JsonRpcConnector errors. -#[derive(Debug, thiserror::Error)] -pub enum JsonRpcConnectorError { - /// Type for errors without an underlying source. - #[error("Error: {0}")] - JsonRpcClientError(String), - - /// Serialization/Deserialization Errors. - #[error("Error: Serialization/Deserialization Error: {0}")] - SerdeJsonError(#[from] serde_json::Error), - - /// Reqwest Based Errors. - #[error("Error: HTTP Request Error: {0}")] - ReqwestError(#[from] reqwest::Error), - - /// Invalid URI Errors. 
- #[error("Error: Invalid URI: {0}")] - InvalidUriError(#[from] http::uri::InvalidUri), - - /// URL Parse Errors. - #[error("Error: Invalid URL:{0}")] - UrlParseError(#[from] url::ParseError), -} - -impl JsonRpcConnectorError { - /// Constructor for errors without an underlying source - pub fn new(msg: impl Into) -> Self { - JsonRpcConnectorError::JsonRpcClientError(msg.into()) - } - - /// Converts JsonRpcConnectorError to tonic::Status - /// - /// TODO: This impl should be changed to return the correct status [https://github.com/zcash/lightwalletd/issues/497] before release, - /// however propagating the server error is useful durin development. - pub fn to_grpc_status(&self) -> tonic::Status { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - tonic::Status::internal(format!("Error: JsonRPC Client Error: {}", self)) - } -} - -impl From for tonic::Status { - fn from(err: JsonRpcConnectorError) -> Self { - err.to_grpc_status() - } -} diff --git a/zaino-fetch/src/jsonrpc/response.rs b/zaino-fetch/src/jsonrpc/response.rs deleted file mode 100644 index 88c2774f7..000000000 --- a/zaino-fetch/src/jsonrpc/response.rs +++ /dev/null @@ -1,592 +0,0 @@ -//! Response types for jsonRPC client. - -/// Response to a `getinfo` RPC request. -/// -/// This is used for the output parameter of [`JsonRpcConnector::get_info`]. -#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] -pub struct GetInfoResponse { - /// The node version build number - pub build: String, - /// The server sub-version identifier, used as the network protocol user-agent - pub subversion: String, -} - -/// Response to a `getblockchaininfo` RPC request. -/// -/// This is used for the output parameter of [`JsonRpcConnector::get_blockchain_info`]. 
-#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] -pub struct GetBlockchainInfoResponse { - /// Current network name as defined in BIP70 (main, test, regtest) - pub chain: String, - - /// The current number of blocks processed in the server, numeric - pub blocks: zebra_chain::block::Height, - - /// The hash of the currently best block, in big-endian order, hex-encoded - #[serde(rename = "bestblockhash", with = "hex")] - pub best_block_hash: zebra_chain::block::Hash, - - /// If syncing, the estimated height of the chain, else the current best height, numeric. - /// - /// In Zebra, this is always the height estimate, so it might be a little inaccurate. - #[serde(rename = "estimatedheight")] - pub estimated_height: zebra_chain::block::Height, - - /// Status of network upgrades - pub upgrades: indexmap::IndexMap< - zebra_rpc::methods::ConsensusBranchIdHex, - zebra_rpc::methods::NetworkUpgradeInfo, - >, - - /// Branch IDs of the current and upcoming consensus rules - pub consensus: zebra_rpc::methods::TipConsensusBranch, -} - -/// The transparent balance of a set of addresses. -/// -/// This is used for the output parameter of [`JsonRpcConnector::get_address_balance`]. -#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] -pub struct GetBalanceResponse { - /// The total transparent balance. - pub balance: u64, -} - -/// Contains the hex-encoded hash of the sent transaction. -/// -/// This is used for the output parameter of [`JsonRpcConnector::send_raw_transaction`]. -#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] -pub struct SendTransactionResponse(#[serde(with = "hex")] pub zebra_chain::transaction::Hash); - -/// Response to a `getbestblockhash` and `getblockhash` RPC request. -/// -/// Contains the hex-encoded hash of the requested block. -/// -/// Also see the notes for the [`Rpc::get_best_block_hash`] and `get_block_hash` methods. 
-#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] -#[serde(transparent)] -pub struct GetBlockHash(#[serde(with = "hex")] pub zebra_chain::block::Hash); - -impl Default for GetBlockHash { - fn default() -> Self { - GetBlockHash(zebra_chain::block::Hash([0; 32])) - } -} - -/// A wrapper struct for a zebra serialized block. -/// -/// Stores bytes that are guaranteed to be deserializable into a [`Block`]. -#[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub struct SerializedBlock(zebra_chain::block::SerializedBlock); - -impl std::ops::Deref for SerializedBlock { - type Target = zebra_chain::block::SerializedBlock; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl AsRef<[u8]> for SerializedBlock { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -impl From> for SerializedBlock { - fn from(bytes: Vec) -> Self { - Self(zebra_chain::block::SerializedBlock::from(bytes)) - } -} - -impl From for SerializedBlock { - fn from(inner: zebra_chain::block::SerializedBlock) -> Self { - SerializedBlock(inner) - } -} - -impl hex::FromHex for SerializedBlock { - type Error = hex::FromHexError; - - fn from_hex>(hex: T) -> Result { - hex::decode(hex).map(SerializedBlock::from) - } -} - -impl<'de> serde::Deserialize<'de> for SerializedBlock { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - struct HexVisitor; - - impl<'de> serde::de::Visitor<'de> for HexVisitor { - type Value = SerializedBlock; - - fn expecting(&self, formatter: &mut core::fmt::Formatter) -> core::fmt::Result { - formatter.write_str("a hex-encoded string") - } - - fn visit_str(self, value: &str) -> Result - where - E: serde::de::Error, - { - let bytes = hex::decode(value).map_err(serde::de::Error::custom)?; - Ok(SerializedBlock::from(bytes)) - } - } - - deserializer.deserialize_str(HexVisitor) - } -} - -/// Sapling note commitment tree information. 
-/// -/// Wrapper struct for zebra's SaplingTrees -#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] -pub struct SaplingTrees { - size: u64, -} - -/// Orchard note commitment tree information. -/// -/// Wrapper struct for zebra's OrchardTrees -#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] -pub struct OrchardTrees { - size: u64, -} - -/// Information about the sapling and orchard note commitment trees if any. -/// -/// Wrapper struct for zebra's GetBlockTrees -#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] -pub struct GetBlockTrees { - sapling: Option, - orchard: Option, -} - -impl GetBlockTrees { - /// Returns sapling data held by ['GetBlockTrees']. - pub fn sapling(&self) -> u64 { - self.sapling.map_or(0, |s| s.size) - } - - /// Returns orchard data held by ['GetBlockTrees']. - pub fn orchard(&self) -> u64 { - self.orchard.map_or(0, |o| o.size) - } -} - -impl From for zebra_rpc::methods::GetBlockTrees { - fn from(val: GetBlockTrees) -> Self { - zebra_rpc::methods::GetBlockTrees::new(val.sapling(), val.orchard()) - } -} - -/// Contains the hex-encoded hash of the sent transaction. -/// -/// This is used for the output parameter of [`JsonRpcConnector::get_block`]. -#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] -#[serde(untagged)] -pub enum GetBlockResponse { - /// The request block, hex-encoded. - Raw(#[serde(with = "hex")] SerializedBlock), - /// The block object. - Object { - /// The hash of the requested block. - hash: GetBlockHash, - - /// The number of confirmations of this block in the best chain, - /// or -1 if it is not in the best chain. - confirmations: i64, - - /// The height of the requested block. - #[serde(skip_serializing_if = "Option::is_none")] - height: Option, - - /// The height of the requested block. 
- #[serde(skip_serializing_if = "Option::is_none")] - time: Option, - - /// List of transaction IDs in block order, hex-encoded. - tx: Vec, - - /// Information about the note commitment trees. - trees: GetBlockTrees, - }, -} - -/// Vec of transaction ids, as a JSON array. -/// -/// This is used for the output parameter of [`JsonRpcConnector::get_raw_mempool`] and [`JsonRpcConnector::get_address_txids`]. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] -pub struct TxidsResponse { - /// Vec of txids. - pub transactions: Vec, -} - -impl<'de> serde::Deserialize<'de> for TxidsResponse { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let v = serde_json::Value::deserialize(deserializer)?; - - let transactions = v - .as_array() - .ok_or_else(|| serde::de::Error::custom("Expected the JSON to be an array"))? - .iter() - .filter_map(|item| item.as_str().map(String::from)) - .collect::>(); - - Ok(TxidsResponse { transactions }) - } -} - -/// Contains the hex-encoded Sapling & Orchard note commitment trees, and their -/// corresponding [`block::Hash`], [`Height`], and block time. -/// -/// This is used for the output parameter of [`JsonRpcConnector::get_treestate`]. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] -pub struct GetTreestateResponse { - /// The block height corresponding to the treestate, numeric. - pub height: i32, - - /// The block hash corresponding to the treestate, hex-encoded. - pub hash: String, - - /// Unix time when the block corresponding to the treestate was mined, numeric. - /// - /// UTC seconds since the Unix 1970-01-01 epoch. - pub time: u32, - - /// A treestate containing a Sapling note commitment tree, hex-encoded. - pub sapling: zebra_rpc::methods::trees::Treestate, - - /// A treestate containing an Orchard note commitment tree, hex-encoded. 
- pub orchard: zebra_rpc::methods::trees::Treestate, -} - -impl<'de> serde::Deserialize<'de> for GetTreestateResponse { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let v = serde_json::Value::deserialize(deserializer)?; - let height = v["height"] - .as_i64() - .ok_or_else(|| serde::de::Error::missing_field("height"))? as i32; - let hash = v["hash"] - .as_str() // This directly accesses the string value - .ok_or_else(|| serde::de::Error::missing_field("hash"))? // Converts Option to Result - .to_string(); - let time = v["time"] - .as_i64() - .ok_or_else(|| serde::de::Error::missing_field("time"))? as u32; - let sapling_final_state = v["sapling"]["commitments"]["finalState"] - .as_str() - .ok_or_else(|| serde::de::Error::missing_field("sapling final state"))? - .to_string(); - let orchard_final_state = v["orchard"]["commitments"]["finalState"] - .as_str() - .ok_or_else(|| serde::de::Error::missing_field("orchard final state"))? - .to_string(); - Ok(GetTreestateResponse { - height, - hash, - time, - sapling: zebra_rpc::methods::trees::Treestate::new( - zebra_rpc::methods::trees::Commitments::new(sapling_final_state), - ), - orchard: zebra_rpc::methods::trees::Treestate::new( - zebra_rpc::methods::trees::Commitments::new(orchard_final_state), - ), - }) - } -} - -/// A wrapper struct for a zebra serialized transaction. -/// -/// Stores bytes that are guaranteed to be deserializable into a [`Transaction`]. -/// -/// Sorts in lexicographic order of the transaction's serialized data. 
-#[derive(Debug, Clone, Eq, PartialEq)] -pub struct SerializedTransaction(zebra_chain::transaction::SerializedTransaction); - -impl std::ops::Deref for SerializedTransaction { - type Target = zebra_chain::transaction::SerializedTransaction; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl AsRef<[u8]> for SerializedTransaction { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -impl From> for SerializedTransaction { - fn from(bytes: Vec) -> Self { - Self(zebra_chain::transaction::SerializedTransaction::from(bytes)) - } -} - -impl From for SerializedTransaction { - fn from(inner: zebra_chain::transaction::SerializedTransaction) -> Self { - SerializedTransaction(inner) - } -} - -impl hex::FromHex for SerializedTransaction { - type Error = as hex::FromHex>::Error; - - fn from_hex>(hex: T) -> Result { - let bytes = >::from_hex(hex)?; - - Ok(bytes.into()) - } -} - -impl<'de> serde::Deserialize<'de> for SerializedTransaction { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let v = serde_json::Value::deserialize(deserializer)?; - if let Some(hex_str) = v.as_str() { - let bytes = hex::decode(hex_str).map_err(serde::de::Error::custom)?; - Ok(SerializedTransaction( - zebra_chain::transaction::SerializedTransaction::from(bytes), - )) - } else { - Err(serde::de::Error::custom("expected a hex string")) - } - } -} - -/// Contains raw transaction, encoded as hex bytes. -/// -/// This is used for the output parameter of [`JsonRpcConnector::get_raw_transaction`]. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] -pub enum GetTransactionResponse { - /// The raw transaction, encoded as hex bytes. - Raw(#[serde(with = "hex")] SerializedTransaction), - /// The transaction object. - Object { - /// The raw transaction, encoded as hex bytes. 
- #[serde(with = "hex")] - hex: SerializedTransaction, - /// The height of the block in the best chain that contains the transaction, or -1 if - /// the transaction is in the mempool. - height: i32, - /// The confirmations of the block in the best chain that contains the transaction, - /// or 0 if the transaction is in the mempool. - confirmations: u32, - }, -} - -impl<'de> serde::Deserialize<'de> for GetTransactionResponse { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let v = serde_json::Value::deserialize(deserializer)?; - if v.get("height").is_some() && v.get("confirmations").is_some() { - let hex = serde_json::from_value(v["hex"].clone()).map_err(serde::de::Error::custom)?; - let height = v["height"] - .as_i64() - .ok_or_else(|| serde::de::Error::custom("Missing or invalid height"))? - as i32; - let confirmations = v["confirmations"] - .as_u64() - .ok_or_else(|| serde::de::Error::custom("Missing or invalid confirmations"))? - as u32; - let obj = GetTransactionResponse::Object { - hex, - height, - confirmations, - }; - Ok(obj) - } else if v.get("hex").is_some() && v.get("txid").is_some() { - let hex = serde_json::from_value(v["hex"].clone()).map_err(serde::de::Error::custom)?; - let obj = GetTransactionResponse::Object { - hex, - height: -1, - confirmations: 0, - }; - Ok(obj) - } else { - let raw = GetTransactionResponse::Raw( - serde_json::from_value(v.clone()).map_err(serde::de::Error::custom)?, - ); - Ok(raw) - } - } -} - -/// Wrapper struct for a zebra SubtreeRpcData. 
-#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)] -pub struct SubtreeRpcData(zebra_rpc::methods::trees::SubtreeRpcData); - -impl std::ops::Deref for SubtreeRpcData { - type Target = zebra_rpc::methods::trees::SubtreeRpcData; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl From for SubtreeRpcData { - fn from(inner: zebra_rpc::methods::trees::SubtreeRpcData) -> Self { - SubtreeRpcData(inner) - } -} - -impl hex::FromHex for SubtreeRpcData { - type Error = hex::FromHexError; - - fn from_hex>(hex: T) -> Result { - let hex_str = std::str::from_utf8(hex.as_ref()) - .map_err(|_| hex::FromHexError::InvalidHexCharacter { c: '�', index: 0 })?; - - if hex_str.len() < 8 { - return Err(hex::FromHexError::OddLength); - } - - let root_end_index = hex_str.len() - 8; - let (root_hex, height_hex) = hex_str.split_at(root_end_index); - - let root = root_hex.to_string(); - let height = u32::from_str_radix(height_hex, 16) - .map_err(|_| hex::FromHexError::InvalidHexCharacter { c: '�', index: 0 })?; - - Ok(SubtreeRpcData(zebra_rpc::methods::trees::SubtreeRpcData { - root, - end_height: zebra_chain::block::Height(height), - })) - } -} - -impl<'de> serde::Deserialize<'de> for SubtreeRpcData { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - #[derive(serde::Deserialize)] - struct SubtreeDataHelper { - root: String, - end_height: u32, - } - let helper = SubtreeDataHelper::deserialize(deserializer)?; - Ok(SubtreeRpcData(zebra_rpc::methods::trees::SubtreeRpcData { - root: helper.root, - end_height: zebra_chain::block::Height(helper.end_height), - })) - } -} - -/// Contains the Sapling or Orchard pool label, the index of the first subtree in the list, -/// and a list of subtree roots and end heights. -/// -/// This is used for the output parameter of [`JsonRpcConnector::get_subtrees_by_index`]. 
-#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] -pub struct GetSubtreesResponse { - /// The shielded pool to which the subtrees belong. - pub pool: String, - - /// The index of the first subtree. - pub start_index: zebra_chain::subtree::NoteCommitmentSubtreeIndex, - - /// A sequential list of complete subtrees, in `index` order. - /// - /// The generic subtree root type is a hex-encoded Sapling or Orchard subtree root string. - // #[serde(skip_serializing_if = "Vec::is_empty")] - pub subtrees: Vec, -} - -/// Wrapper struct for a zebra Scrypt. -/// -/// # Correctness -/// -/// Consensus-critical serialization uses [`ZcashSerialize`]. -/// [`serde`]-based hex serialization must only be used for RPCs and testing. -#[derive(Debug, Clone, Eq, PartialEq, serde::Serialize)] -pub struct Script(zebra_chain::transparent::Script); - -impl std::ops::Deref for Script { - type Target = zebra_chain::transparent::Script; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl AsRef<[u8]> for Script { - fn as_ref(&self) -> &[u8] { - self.0.as_raw_bytes() - } -} - -impl From> for Script { - fn from(bytes: Vec) -> Self { - Self(zebra_chain::transparent::Script::new(bytes.as_ref())) - } -} - -impl From for Script { - fn from(inner: zebra_chain::transparent::Script) -> Self { - Script(inner) - } -} - -impl hex::FromHex for Script { - type Error = as hex::FromHex>::Error; - - fn from_hex>(hex: T) -> Result { - let bytes = Vec::from_hex(hex)?; - let inner = zebra_chain::transparent::Script::new(&bytes); - Ok(Script(inner)) - } -} - -impl<'de> serde::Deserialize<'de> for Script { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let v = serde_json::Value::deserialize(deserializer)?; - if let Some(hex_str) = v.as_str() { - let bytes = hex::decode(hex_str).map_err(serde::de::Error::custom)?; - let inner = zebra_chain::transparent::Script::new(&bytes); - Ok(Script(inner)) - } else { - 
Err(serde::de::Error::custom("expected a hex string")) - } - } -} - -/// This is used for the output parameter of [`JsonRpcConnector::get_address_utxos`]. -#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] -pub struct GetUtxosResponse { - /// The transparent address, base58check encoded - pub address: zebra_chain::transparent::Address, - - /// The output txid, in big-endian order, hex-encoded - #[serde(with = "hex")] - pub txid: zebra_chain::transaction::Hash, - - /// The transparent output index, numeric - #[serde(rename = "outputIndex")] - pub output_index: u32, - - /// The transparent output script, hex encoded - #[serde(with = "hex")] - pub script: Script, - - /// The amount of zatoshis in the transparent output - pub satoshis: u64, - - /// The block height, numeric. - pub height: zebra_chain::block::Height, -} diff --git a/zaino-fetch/src/jsonrpc.rs b/zaino-fetch/src/jsonrpsee.rs similarity index 100% rename from zaino-fetch/src/jsonrpc.rs rename to zaino-fetch/src/jsonrpsee.rs diff --git a/zaino-fetch/src/jsonrpsee/connector.rs b/zaino-fetch/src/jsonrpsee/connector.rs new file mode 100644 index 000000000..b59519d5f --- /dev/null +++ b/zaino-fetch/src/jsonrpsee/connector.rs @@ -0,0 +1,946 @@ +//! JsonRPSee client implementation. +//! +//! TODO: - Add option for http connector. +//! - Refactor JsonRPSeecConnectorError into concrete error types and implement fmt::display []. 
+use base64::{engine::general_purpose, Engine}; +use http::Uri; +use reqwest::{Client, ClientBuilder, Url}; +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; +use std::{ + any::type_name, + convert::Infallible, + fmt, fs, + net::SocketAddr, + path::Path, + sync::{ + atomic::{AtomicI32, Ordering}, + Arc, + }, + time::Duration, +}; +use tracing::error; +use zebra_rpc::client::ValidateAddressResponse; + +use crate::jsonrpsee::response::address_deltas::GetAddressDeltasError; +use crate::jsonrpsee::{ + error::{JsonRpcError, TransportError}, + response::{ + address_deltas::{GetAddressDeltasParams, GetAddressDeltasResponse}, + block_deltas::{BlockDeltas, BlockDeltasError}, + block_header::{GetBlockHeader, GetBlockHeaderError}, + block_subsidy::GetBlockSubsidy, + mining_info::GetMiningInfoWire, + peer_info::GetPeerInfo, + GetBalanceError, GetBalanceResponse, GetBlockCountResponse, GetBlockError, GetBlockHash, + GetBlockResponse, GetBlockchainInfoResponse, GetInfoResponse, GetMempoolInfoResponse, + GetSubtreesError, GetSubtreesResponse, GetTransactionResponse, GetTreestateError, + GetTreestateResponse, GetUtxosError, GetUtxosResponse, SendTransactionError, + SendTransactionResponse, TxidsError, TxidsResponse, + }, +}; + +use super::response::{GetDifficultyResponse, GetNetworkSolPsResponse}; + +#[derive(Serialize, Deserialize, Debug)] +struct RpcRequest { + jsonrpc: String, + method: String, + params: T, + id: i32, +} + +#[derive(Serialize, Deserialize, Debug)] +struct RpcResponse { + id: i64, + jsonrpc: Option, + result: Option, + error: Option, +} + +/// Json RPSee Error type. +#[derive(Serialize, Deserialize, Debug)] +pub struct RpcError { + /// Error Code. + pub code: i64, + /// Error Message. + pub message: String, + /// Error Data. 
+ pub data: Option, +} + +impl RpcError { + /// Creates a new `RpcError` from zebra's `LegacyCode` enum + pub fn new_from_legacycode( + code: zebra_rpc::server::error::LegacyCode, + message: impl Into, + ) -> Self { + RpcError { + code: code as i64, + message: message.into(), + data: None, + } + } + /// Creates a new `RpcError` from jsonrpsee-types `ErrorObject`. + pub fn new_from_errorobject( + error_obj: jsonrpsee_types::ErrorObject<'_>, + fallback_message: impl Into, + ) -> Self { + RpcError { + // We can use the actual JSON-RPC code: + code: error_obj.code() as i64, + + // Or combine the fallback with the original message: + message: format!("{}: {}", fallback_message.into(), error_obj.message()), + + // If you want to store the data too: + data: error_obj + .data() + .map(|raw| serde_json::from_str(raw.get()).unwrap()), + } + } +} + +impl fmt::Display for RpcError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "RPC Error (code: {}): {}", self.code, self.message) + } +} + +impl std::error::Error for RpcError {} + +// Helper function to read and parse the cookie file content. +// Zebra's RPC server expects Basic Auth with username "__cookie__" +// and the token from the cookie file as the password. +// The cookie file itself is formatted as "__cookie__:". +// This function extracts just the part. +fn read_and_parse_cookie_token(cookie_path: &Path) -> Result { + let cookie_content = + fs::read_to_string(cookie_path).map_err(TransportError::CookieReadError)?; + let trimmed_content = cookie_content.trim(); + if let Some(stripped) = trimmed_content.strip_prefix("__cookie__:") { + Ok(stripped.to_string()) + } else { + // If the prefix is not present, use the entire trimmed content. + // This maintains compatibility with older formats or other cookie sources. 
+ Ok(trimmed_content.to_string()) + } +} + +#[derive(Debug, Clone)] +enum AuthMethod { + Basic { username: String, password: String }, + Cookie { cookie: String }, +} + +/// Trait to convert a JSON-RPC response to an error. +pub trait ResponseToError: Sized { + /// The error type. + type RpcError: std::fmt::Debug + + TryFrom; + + /// Converts a JSON-RPC response to an error. + fn to_error(self) -> Result { + Ok(self) + } +} + +/// Error type for JSON-RPC requests. +#[derive(Debug, thiserror::Error)] +pub enum RpcRequestError { + /// Error variant for errors related to the transport layer. + #[error("Transport error: {0}")] + Transport(#[from] TransportError), + + /// Error variant for errors related to the JSON-RPC method being called. + #[error("Method error: {0:?}")] + Method(MethodError), + + /// The provided input failed to serialize. + #[error("request input failed to serialize: {0:?}")] + JsonRpc(serde_json::Error), + + /// Internal unrecoverable error. + #[error("Internal unrecoverable error: {0}")] + InternalUnrecoverable(String), + + /// Server at capacity + #[error("rpc server at capacity, please try again")] + ServerWorkQueueFull, + + /// An error related to the specific JSON-RPC method being called, that + /// wasn't accounted for as a MethodError. This means that either + /// Zaino has not yet accounted for the possibilty of this error, + /// or the Node returned an undocumented/malformed error response. + #[error("unexpected error response from server: {0}")] + UnexpectedErrorResponse(Box), +} + +/// JsonRpSee Client config data. +#[derive(Debug, Clone)] +pub struct JsonRpSeeConnector { + url: Url, + id_counter: Arc, + client: Client, + auth_method: AuthMethod, +} + +impl JsonRpSeeConnector { + /// Creates a new JsonRpSeeConnector with Basic Authentication. 
+ pub fn new_with_basic_auth( + url: Url, + username: String, + password: String, + ) -> Result { + let client = ClientBuilder::new() + .connect_timeout(Duration::from_secs(2)) + .timeout(Duration::from_secs(5)) + .redirect(reqwest::redirect::Policy::none()) + .build() + .map_err(TransportError::ReqwestError)?; + + Ok(Self { + url, + id_counter: Arc::new(AtomicI32::new(0)), + client, + auth_method: AuthMethod::Basic { username, password }, + }) + } + + /// Creates a new JsonRpSeeConnector with Cookie Authentication. + pub fn new_with_cookie_auth(url: Url, cookie_path: &Path) -> Result { + let cookie_password = read_and_parse_cookie_token(cookie_path)?; + + let client = ClientBuilder::new() + .connect_timeout(Duration::from_secs(2)) + .timeout(Duration::from_secs(5)) + .redirect(reqwest::redirect::Policy::none()) + .cookie_store(true) + .build() + .map_err(TransportError::ReqwestError)?; + + Ok(Self { + url, + id_counter: Arc::new(AtomicI32::new(0)), + client, + auth_method: AuthMethod::Cookie { + cookie: cookie_password, + }, + }) + } + + /// Helper function to create from parts of a StateServiceConfig or FetchServiceConfig. + /// Accepts both hostname:port (e.g., "zebra:18232") and ip:port (e.g., "127.0.0.1:18232") formats. 
+ pub async fn new_from_config_parts( + validator_rpc_address: &str, + validator_rpc_user: String, + validator_rpc_password: String, + validator_cookie_path: Option, + ) -> Result { + match validator_cookie_path.is_some() { + true => JsonRpSeeConnector::new_with_cookie_auth( + test_node_and_return_url( + validator_rpc_address, + validator_cookie_path.clone(), + None, + None, + ) + .await?, + Path::new( + &validator_cookie_path + .clone() + .expect("validator cookie authentication path missing"), + ), + ), + false => JsonRpSeeConnector::new_with_basic_auth( + test_node_and_return_url( + validator_rpc_address, + None, + Some(validator_rpc_user.clone()), + Some(validator_rpc_password.clone()), + ) + .await?, + validator_rpc_user.clone(), + validator_rpc_password.clone(), + ), + } + } + + /// Returns the http::uri the JsonRpSeeConnector is configured to send requests to. + pub fn uri(&self) -> Result { + Ok(self.url.as_str().parse()?) + } + + /// Returns the reqwest::url the JsonRpSeeConnector is configured to send requests to. + pub fn url(&self) -> Url { + self.url.clone() + } + + /// Sends a jsonRPC request and returns the response. + /// NOTE: This function currently resends the call up to 5 times on a server response of "Work queue depth exceeded". + /// This is because the node's queue can become overloaded and stop servicing RPCs. 
+ async fn send_request< + T: std::fmt::Debug + Serialize, + R: std::fmt::Debug + for<'de> Deserialize<'de> + ResponseToError, + >( + &self, + method: &str, + params: T, + ) -> Result> + where + R::RpcError: Send + Sync + 'static, + { + let id = self.id_counter.fetch_add(1, Ordering::SeqCst); + + let max_attempts = 5; + let mut attempts = 0; + loop { + attempts += 1; + + let request_builder = self + .build_request(method, ¶ms, id) + .map_err(RpcRequestError::JsonRpc)?; + + let response = request_builder + .send() + .await + .map_err(|e| RpcRequestError::Transport(TransportError::ReqwestError(e)))?; + + let status = response.status(); + + let body_bytes = response + .bytes() + .await + .map_err(|e| RpcRequestError::Transport(TransportError::ReqwestError(e)))?; + + let body_str = String::from_utf8_lossy(&body_bytes); + + if body_str.contains("Work queue depth exceeded") { + if attempts >= max_attempts { + return Err(RpcRequestError::ServerWorkQueueFull); + } + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + continue; + } + + let code = status.as_u16(); + return match code { + // Invalid + ..100 | 600.. 
=> Err(RpcRequestError::Transport( + TransportError::InvalidStatusCode(code), + )), + // Informational | Redirection + 100..200 | 300..400 => Err(RpcRequestError::Transport( + TransportError::UnexpectedStatusCode(code), + )), + // Success + 200..300 => { + let response: RpcResponse = serde_json::from_slice(&body_bytes) + .map_err(|e| TransportError::BadNodeData(Box::new(e), type_name::()))?; + + match (response.error, response.result) { + (Some(error), _) => Err(RpcRequestError::Method( + R::RpcError::try_from(error).map_err(|e| { + RpcRequestError::UnexpectedErrorResponse(Box::new(e)) + })?, + )), + (None, Some(result)) => match result.to_error() { + Ok(r) => Ok(r), + Err(e) => Err(RpcRequestError::Method(e)), + }, + (None, None) => Err(RpcRequestError::Transport( + TransportError::EmptyResponseBody, + )), + } + // Error + } + 400..600 => Err(RpcRequestError::Transport(TransportError::ErrorStatusCode( + code, + ))), + }; + } + } + + /// Builds a request from a given method, params, and id. + fn build_request( + &self, + method: &str, + params: T, + id: i32, + ) -> serde_json::Result { + let req = RpcRequest { + jsonrpc: "2.0".to_string(), + method: method.to_string(), + params, + id, + }; + + let mut request_builder = self + .client + .post(self.url.clone()) + .header("Content-Type", "application/json"); + + match &self.auth_method { + AuthMethod::Basic { username, password } => { + request_builder = request_builder.basic_auth(username, Some(password)); + } + AuthMethod::Cookie { cookie } => { + request_builder = request_builder.header( + reqwest::header::AUTHORIZATION, + format!( + "Basic {}", + general_purpose::STANDARD.encode(format!("__cookie__:{cookie}")) + ), + ); + } + } + + let request_body = serde_json::to_string(&req)?; + request_builder = request_builder.body(request_body); + + Ok(request_builder) + } + + /// Returns all changes for an address. 
+ /// + /// Returns information about all changes to the given transparent addresses within the given block range (inclusive) + /// + /// block height range, default is the full blockchain. + /// If start or end are not specified, they default to zero. + /// If start is greater than the latest block height, it's interpreted as that height. + /// + /// If end is zero, it's interpreted as the latest block height. + /// + /// [Original zcashd implementation](https://github.com/zcash/zcash/blob/18238d90cd0b810f5b07d5aaa1338126aa128c06/src/rpc/misc.cpp#L881) + /// + /// zcashd reference: [`getaddressdeltas`](https://zcash.github.io/rpc/getaddressdeltas.html) + /// method: post + /// tags: address + pub async fn get_address_deltas( + &self, + params: GetAddressDeltasParams, + ) -> Result> { + let params = vec![serde_json::to_value(params).map_err(RpcRequestError::JsonRpc)?]; + self.send_request("getaddressdeltas", params).await + } + + /// Returns software information from the RPC server, as a [`crate::jsonrpsee::connector::GetInfoResponse`] JSON struct. + /// + /// zcashd reference: [`getinfo`](https://zcash.github.io/rpc/getinfo.html) + /// method: post + /// tags: control + pub async fn get_info(&self) -> Result> { + self.send_request::<(), GetInfoResponse>("getinfo", ()) + .await + } + + /// + /// zcashd reference: [`getblockchaininfo`](https://zcash.github.io/rpc/getblockchaininfo.html) + /// method: post + /// tags: blockchain + pub async fn get_blockchain_info( + &self, + ) -> Result> { + self.send_request::<(), GetBlockchainInfoResponse>("getblockchaininfo", ()) + .await + } + + /// Returns details on the active state of the TX memory pool. 
+ /// + /// online zcash rpc reference: [`getmempoolinfo`](https://zcash.github.io/rpc/getmempoolinfo.html) + /// method: post + /// tags: mempool + /// + /// Canonical source code implementation: [`getmempoolinfo`](https://github.com/zcash/zcash/blob/18238d90cd0b810f5b07d5aaa1338126aa128c06/src/rpc/blockchain.cpp#L1555) + /// + /// Zebra does not support this RPC directly. + pub async fn get_mempool_info( + &self, + ) -> Result> { + self.send_request::<(), GetMempoolInfoResponse>("getmempoolinfo", ()) + .await + } + + /// Returns data about each connected network node as a json array of objects. + /// + /// zcashd reference: [`getpeerinfo`](https://zcash.github.io/rpc/getpeerinfo.html) + /// tags: network + /// + /// Current `zebrad` does not include the same fields as `zcashd`. + pub async fn get_peer_info(&self) -> Result> { + self.send_request::<(), GetPeerInfo>("getpeerinfo", ()) + .await + } + + /// Returns the proof-of-work difficulty as a multiple of the minimum difficulty. + /// + /// zcashd reference: [`getdifficulty`](https://zcash.github.io/rpc/getdifficulty.html) + /// method: post + /// tags: blockchain + pub async fn get_difficulty( + &self, + ) -> Result> { + self.send_request::<(), GetDifficultyResponse>("getdifficulty", ()) + .await + } + + /// Returns block subsidy reward, taking into account the mining slow start and the founders reward, of block at index provided. + /// + /// zcashd reference: [`getblocksubsidy`](https://zcash.github.io/rpc/getblocksubsidy.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `height`: (number, optional) The block height. If not provided, defaults to the current height of the chain. 
+ pub async fn get_block_subsidy( + &self, + height: u32, + ) -> Result> { + let params = vec![serde_json::to_value(height).map_err(RpcRequestError::JsonRpc)?]; + self.send_request("getblocksubsidy", params).await + } + + /// Returns the total balance of a provided `addresses` in an [`crate::jsonrpsee::response::GetBalanceResponse`] instance. + /// + /// zcashd reference: [`getaddressbalance`](https://zcash.github.io/rpc/getaddressbalance.html) + /// method: post + /// tags: address + /// + /// # Parameters + /// + /// - `address_strings`: (object, example={"addresses": ["tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ"]}) A JSON map with a single entry + /// - `addresses`: (array of strings) A list of base-58 encoded addresses. + pub async fn get_address_balance( + &self, + addresses: Vec, + ) -> Result> { + let params = vec![serde_json::json!({ "addresses": addresses })]; + self.send_request("getaddressbalance", params).await + } + + /// Sends the raw bytes of a signed transaction to the local node's mempool, if the transaction is valid. + /// + /// zcashd reference: [`sendrawtransaction`](https://zcash.github.io/rpc/sendrawtransaction.html) + /// method: post + /// tags: transaction + /// + /// # Parameters + /// + /// - `raw_transaction_hex`: (string, required, example="signedhex") The hex-encoded raw transaction bytes. + pub async fn send_raw_transaction( + &self, + raw_transaction_hex: String, + ) -> Result> { + let params = + vec![serde_json::to_value(raw_transaction_hex).map_err(RpcRequestError::JsonRpc)?]; + self.send_request("sendrawtransaction", params).await + } + + /// Returns the requested block by hash or height, as a [`GetBlockResponse`]. 
+ /// If the block is not in Zebra's state, returns + /// [error code `-8`.](https://github.com/zcash/zcash/issues/5758) + /// + /// zcashd reference: [`getblock`](https://zcash.github.io/rpc/getblock.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `hash_or_height`: (string, required, example="1") The hash or height for the block to be returned. + /// - `verbosity`: (number, optional, default=1, example=1) 0 for hex encoded data, 1 for a json object, and 2 for json object with transaction data. + pub async fn get_block( + &self, + hash_or_height: String, + verbosity: Option, + ) -> Result> { + let v = verbosity.unwrap_or(1); + let params = [ + serde_json::to_value(hash_or_height).map_err(RpcRequestError::JsonRpc)?, + serde_json::to_value(v).map_err(RpcRequestError::JsonRpc)?, + ]; + + if v == 0 { + self.send_request("getblock", params) + .await + .map(GetBlockResponse::Raw) + } else { + self.send_request("getblock", params) + .await + .map(GetBlockResponse::Object) + } + } + + /// Returns information about the given block and its transactions. + /// + /// zcashd reference: [`getblockdeltas`](https://zcash.github.io/rpc/getblockdeltas.html) + /// method: post + /// tags: blockchain + pub async fn get_block_deltas( + &self, + hash: String, + ) -> Result> { + let params = vec![serde_json::to_value(hash).map_err(RpcRequestError::JsonRpc)?]; + self.send_request("getblockdeltas", params).await + } + + /// If verbose is false, returns a string that is serialized, hex-encoded data for blockheader `hash`. + /// If verbose is true, returns an Object with information about blockheader `hash`. 
+ /// + /// # Parameters + /// + /// - hash: (string, required) The block hash + /// - verbose: (boolean, optional, default=true) true for a json object, false for the hex encoded data + /// + /// zcashd reference: [`getblockheader`](https://zcash.github.io/rpc/getblockheader.html) + /// method: post + /// tags: blockchain + pub async fn get_block_header( + &self, + hash: String, + verbose: bool, + ) -> Result> { + let params = [ + serde_json::to_value(hash).map_err(RpcRequestError::JsonRpc)?, + serde_json::to_value(verbose).map_err(RpcRequestError::JsonRpc)?, + ]; + self.send_request("getblockheader", params).await + } + + /// Returns the hash of the best block (tip) of the longest chain. + /// zcashd reference: [`getbestblockhash`](https://zcash.github.io/rpc/getbestblockhash.html) + /// method: post + /// tags: blockchain + /// + /// # Notes + /// + /// The zcashd doc reference above says there are no parameters and the result is a "hex" (string) of the block hash hex encoded. + /// The Zcash source code is considered canonical. + /// [In the rpc definition](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/common.h#L48) there are no required params, or optional params. + /// [The function in rpc/blockchain.cpp](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/blockchain.cpp#L325) + /// where `return chainActive.Tip()->GetBlockHash().GetHex();` is the [return expression](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/blockchain.cpp#L339)returning a `std::string` + pub async fn get_best_blockhash(&self) -> Result> { + self.send_request::<(), GetBlockHash>("getbestblockhash", ()) + .await + } + + /// Returns the height of the most recent block in the best valid block chain + /// (equivalently, the number of blocks in this chain excluding the genesis block). 
+ /// + /// zcashd reference: [`getblockcount`](https://zcash.github.io/rpc/getblockcount.html) + /// method: post + /// tags: blockchain + pub async fn get_block_count( + &self, + ) -> Result> { + self.send_request::<(), GetBlockCountResponse>("getblockcount", ()) + .await + } + + /// Return information about the given Zcash address. + /// + /// # Parameters + /// - `address`: (string, required, example="tmHMBeeYRuc2eVicLNfP15YLxbQsooCA6jb") The Zcash transparent address to validate. + /// + /// zcashd reference: [`validateaddress`](https://zcash.github.io/rpc/validateaddress.html) + /// method: post + /// tags: blockchain + pub async fn validate_address( + &self, + address: String, + ) -> Result> { + let params = vec![serde_json::to_value(address).map_err(RpcRequestError::JsonRpc)?]; + self.send_request("validateaddress", params).await + } + + /// Returns all transaction ids in the memory pool, as a JSON array. + /// + /// zcashd reference: [`getrawmempool`](https://zcash.github.io/rpc/getrawmempool.html) + /// method: post + /// tags: blockchain + pub async fn get_raw_mempool(&self) -> Result> { + self.send_request::<(), TxidsResponse>("getrawmempool", ()) + .await + } + + /// Returns information about the given block's Sapling & Orchard tree state. + /// + /// zcashd reference: [`z_gettreestate`](https://zcash.github.io/rpc/z_gettreestate.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `hash | height`: (string, required, example="00000000febc373a1da2bd9f887b105ad79ddc26ac26c2b28652d64e5207c5b5") The block hash or height. + pub async fn get_treestate( + &self, + hash_or_height: String, + ) -> Result> { + let params = vec![serde_json::to_value(hash_or_height).map_err(RpcRequestError::JsonRpc)?]; + self.send_request("z_gettreestate", params).await + } + + /// Returns information about a range of Sapling or Orchard subtrees. 
+ /// + /// zcashd reference: [`z_getsubtreesbyindex`](https://zcash.github.io/rpc/z_getsubtreesbyindex.html) - TODO: fix link + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `pool`: (string, required) The pool from which subtrees should be returned. Either "sapling" or "orchard". + /// - `start_index`: (number, required) The index of the first 2^16-leaf subtree to return. + /// - `limit`: (number, optional) The maximum number of subtree values to return. + pub async fn get_subtrees_by_index( + &self, + pool: String, + start_index: u16, + limit: Option, + ) -> Result> { + let params = match limit { + Some(v) => vec![ + serde_json::to_value(pool).map_err(RpcRequestError::JsonRpc)?, + serde_json::to_value(start_index).map_err(RpcRequestError::JsonRpc)?, + serde_json::to_value(v).map_err(RpcRequestError::JsonRpc)?, + ], + None => vec![ + serde_json::to_value(pool).map_err(RpcRequestError::JsonRpc)?, + serde_json::to_value(start_index).map_err(RpcRequestError::JsonRpc)?, + ], + }; + self.send_request("z_getsubtreesbyindex", params).await + } + + /// Returns the raw transaction data, as a [`GetTransactionResponse`]. + /// + /// zcashd reference: [`getrawtransaction`](https://zcash.github.io/rpc/getrawtransaction.html) + /// method: post + /// tags: transaction + /// + /// # Parameters + /// + /// - `txid`: (string, required, example="mytxid") The transaction ID of the transaction to be returned. + /// - `verbose`: (number, optional, default=0, example=1) If 0, return a string of hex-encoded data, otherwise return a JSON object. 
+ pub async fn get_raw_transaction( + &self, + txid_hex: String, + verbose: Option, + ) -> Result> { + let params = match verbose { + Some(v) => vec![ + serde_json::to_value(txid_hex).map_err(RpcRequestError::JsonRpc)?, + serde_json::to_value(v).map_err(RpcRequestError::JsonRpc)?, + ], + None => vec![ + serde_json::to_value(txid_hex).map_err(RpcRequestError::JsonRpc)?, + serde_json::to_value(0).map_err(RpcRequestError::JsonRpc)?, + ], + }; + + self.send_request("getrawtransaction", params).await + } + + /// Returns the transaction ids made by the provided transparent addresses. + /// + /// zcashd reference: [`getaddresstxids`](https://zcash.github.io/rpc/getaddresstxids.html) + /// method: post + /// tags: address + /// + /// # Parameters + /// + /// - `request`: (object, required, example={\"addresses\": [\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"], \"start\": 1000, \"end\": 2000}) A struct with the following named fields: + /// - `addresses`: (json array of string, required) The addresses to get transactions from. + /// - `start`: (numeric, required) The lower height to start looking for transactions (inclusive). + /// - `end`: (numeric, required) The top height to stop looking for transactions (inclusive). + pub async fn get_address_txids( + &self, + addresses: Vec, + start: u32, + end: u32, + ) -> Result> { + let params = serde_json::json!({ + "addresses": addresses, + "start": start, + "end": end + }); + + self.send_request("getaddresstxids", vec![params]).await + } + + /// Returns all unspent outputs for a list of addresses. + /// + /// zcashd reference: [`getaddressutxos`](https://zcash.github.io/rpc/getaddressutxos.html) + /// method: post + /// tags: address + /// + /// # Parameters + /// + /// - `addresses`: (array, required, example={\"addresses\": [\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"]}) The addresses to get outputs from. 
+ pub async fn get_address_utxos( + &self, + addresses: Vec, + ) -> Result, RpcRequestError> { + let params = vec![serde_json::json!({ "addresses": addresses })]; + self.send_request("getaddressutxos", params).await + } + + /// Returns a json object containing mining-related information. + /// + /// `zcashd` reference (may be outdated): [`getmininginfo`](https://zcash.github.io/rpc/getmininginfo.html) + pub async fn get_mining_info(&self) -> Result> { + self.send_request("getmininginfo", ()).await + } + + /// Returns the estimated network solutions per second based on the last n blocks. + /// + /// zcashd reference: [`getnetworksolps`](https://zcash.github.io/rpc/getnetworksolps.html) + /// method: post + /// tags: blockchain + /// + /// This RPC is implemented in the [mining.cpp](https://github.com/zcash/zcash/blob/d00fc6f4365048339c83f463874e4d6c240b63af/src/rpc/mining.cpp#L104) + /// file of the Zcash repository. The Zebra implementation can be found [here](https://github.com/ZcashFoundation/zebra/blob/19bca3f1159f9cb9344c9944f7e1cb8d6a82a07f/zebra-rpc/src/methods.rs#L2687). + /// + /// # Parameters + /// + /// - `blocks`: (number, optional, default=120) Number of blocks, or -1 for blocks over difficulty averaging window. + /// - `height`: (number, optional, default=-1) To estimate network speed at the time of a specific block height. + pub async fn get_network_sol_ps( + &self, + blocks: Option, + height: Option, + ) -> Result> { + let mut params = Vec::new(); + + // check whether the blocks parameter is present + if let Some(b) = blocks { + params.push(serde_json::json!(b)); + } else { + params.push(serde_json::json!(120_i32)) + } + + // check whether the height parameter is present + if let Some(h) = height { + params.push(serde_json::json!(h)); + } else { + // default to -1 + params.push(serde_json::json!(-1_i32)) + } + + self.send_request("getnetworksolps", params).await + } +} + +/// Tests connection with zebrad / zcashd. 
+async fn test_node_connection(url: Url, auth_method: AuthMethod) -> Result<(), TransportError> { + let client = Client::builder() + .connect_timeout(std::time::Duration::from_secs(2)) + .timeout(std::time::Duration::from_secs(5)) + .redirect(reqwest::redirect::Policy::none()) + .build()?; + + let request_body = r#"{"jsonrpc":"2.0","method":"getinfo","params":[],"id":1}"#; + let mut request_builder = client + .post(url.clone()) + .header("Content-Type", "application/json") + .body(request_body); + + match &auth_method { + AuthMethod::Basic { username, password } => { + request_builder = request_builder.basic_auth(username, Some(password)); + } + AuthMethod::Cookie { cookie } => { + request_builder = request_builder.header( + reqwest::header::AUTHORIZATION, + format!( + "Basic {}", + general_purpose::STANDARD.encode(format!("__cookie__:{cookie}")) + ), + ); + } + } + + let response = request_builder + .send() + .await + .map_err(TransportError::ReqwestError)?; + let body_bytes = response + .bytes() + .await + .map_err(TransportError::ReqwestError)?; + let _response: RpcResponse = serde_json::from_slice(&body_bytes) + .map_err(|e| TransportError::BadNodeData(Box::new(e), ""))?; + Ok(()) +} + +/// Resolves an address string (hostname:port or ip:port) to a SocketAddr. +fn resolve_address(address: &str) -> Result { + zaino_common::net::resolve_socket_addr(address) + .map_err(|e| TransportError::BadNodeData(Box::new(e), "address resolution")) +} + +/// Tries to connect to zebrad/zcashd using the provided address and returns the correct URL. +/// Accepts both hostname:port (e.g., "zebra:18232") and ip:port (e.g., "127.0.0.1:18232") formats. 
+pub async fn test_node_and_return_url( + address: &str, + cookie_path: Option, + user: Option, + password: Option, +) -> Result { + let addr = resolve_address(address)?; + + let auth_method = match cookie_path.is_some() { + true => { + let cookie_file_path_str = cookie_path.expect("validator rpc cookie path missing"); + let cookie_password = read_and_parse_cookie_token(Path::new(&cookie_file_path_str))?; + AuthMethod::Cookie { + cookie: cookie_password, + } + } + false => AuthMethod::Basic { + username: user.unwrap_or_else(|| "xxxxxx".to_string()), + password: password.unwrap_or_else(|| "xxxxxx".to_string()), + }, + }; + + let host = match addr { + SocketAddr::V4(_) => addr.ip().to_string(), + SocketAddr::V6(_) => format!("[{}]", addr.ip()), + }; + + let url: Url = format!("http://{}:{}", host, addr.port()).parse()?; + + let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500)); + for _ in 0..3 { + match test_node_connection(url.clone(), auth_method.clone()).await { + Ok(_) => { + return Ok(url); + } + Err(_) => { + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + } + } + interval.tick().await; + } + error!("Error: Could not establish connection with node. Please check config and confirm node is listening at {url} and the correct authorisation details have been entered. 
Exiting.."); + std::process::exit(1); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_resolve_address_wraps_common_function() { + // Verify the wrapper correctly converts io::Error to TransportError + let result = resolve_address("127.0.0.1:8080"); + assert!(result.is_ok()); + assert_eq!(result.unwrap().port(), 8080); + + let result = resolve_address("invalid"); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + TransportError::BadNodeData(_, "address resolution") + )); + } +} diff --git a/zaino-fetch/src/jsonrpsee/error.rs b/zaino-fetch/src/jsonrpsee/error.rs new file mode 100644 index 000000000..d2034ad1b --- /dev/null +++ b/zaino-fetch/src/jsonrpsee/error.rs @@ -0,0 +1,79 @@ +//! Hold error types for the JsonRpSeeConnector and related functionality. + +use std::io; + +/// Error type for JSON-RPC responses. +#[derive(Debug, serde::Serialize, serde::Deserialize)] +pub struct JsonRpcError { + /// The JSON-RPC error code + pub code: i32, + + /// The JSON-RPC error message + pub message: String, + + /// The JSON-RPC error data + pub data: Option, +} + +/// General error type for handling JsonRpSeeConnector errors. +#[derive(Debug, thiserror::Error)] +pub enum TransportError { + /// The cookie file used to authenticate with zebra could not be read + #[error("could not read zebra authentication cookie file: {0}")] + CookieReadError(io::Error), + + /// Reqwest Based Errors. + #[error("Error: HTTP Request Error: {0}")] + ReqwestError(#[from] reqwest::Error), + + /// Invalid URI Errors. + #[error("Error: Invalid URI: {0}")] + InvalidUriError(#[from] http::uri::InvalidUri), + + /// URL Parse Errors. 
+ #[error("Error: Invalid URL:{0}")] + UrlParseError(#[from] url::ParseError), + + // Above this line, zaino failed to connect to a node + // ----------------------------------- + // below this line, zaino connected to a node which returned a bad response + /// Node returned a non-canonical status code + #[error("validator returned invalid status code: {0}")] + InvalidStatusCode(u16), + + /// Node returned a status code we don't expect + #[error("validator returned unexpected status code: {0}")] + UnexpectedStatusCode(u16), + + /// Node returned a status code we don't expect + #[error("validator returned error code: {0}")] + ErrorStatusCode(u16), + + /// The data returned by the validator was invalid. + #[error("validator returned invalid {1} data: {0}")] + BadNodeData( + Box, + &'static str, + ), + + /// Validator returned empty response body + #[error("no response body")] + EmptyResponseBody, +} + +impl TransportError { + /// Converts TransportError to tonic::Status + /// + /// TODO: This impl should be changed to return the correct status [per this issue](https://github.com/zcash/lightwalletd/issues/497) before release, + /// however propagating the server error is useful during development. + pub fn to_grpc_status(&self) -> tonic::Status { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + tonic::Status::internal(format!("Error: JsonRpSee Client Error: {self}")) + } +} + +impl From for tonic::Status { + fn from(err: TransportError) -> Self { + err.to_grpc_status() + } +} diff --git a/zaino-fetch/src/jsonrpsee/response.rs b/zaino-fetch/src/jsonrpsee/response.rs new file mode 100644 index 000000000..7329328fd --- /dev/null +++ b/zaino-fetch/src/jsonrpsee/response.rs @@ -0,0 +1,1555 @@ +//! Response types for jsonRPSeeConnector. +//! +//! These types are redefined rather than imported from zebra_rpc +//! 
to prevent locking consumers into a zebra_rpc version + +pub mod address_deltas; +pub mod block_deltas; +pub mod block_header; +pub mod block_subsidy; +pub mod common; +pub mod mining_info; +pub mod peer_info; + +use std::{convert::Infallible, num::ParseIntError}; + +use hex::FromHex; +use serde::{de::Error as DeserError, Deserialize, Deserializer, Serialize}; + +use zebra_chain::{ + amount::{Amount, NonNegative}, + block::Height, + value_balance::ValueBalance, + work::difficulty::CompactDifficulty, +}; +use zebra_rpc::{ + client::{Commitments, GetBlockchainInfoBalance, Treestate, ValidateAddressResponse}, + methods::opthex, +}; + +use crate::jsonrpsee::connector::ResponseToError; + +use super::connector::RpcError; + +impl TryFrom for Infallible { + type Error = RpcError; + + fn try_from(err: RpcError) -> Result { + Err(err) + } +} + +/// Response to a `getinfo` RPC request. +/// +/// This is used for the output parameter of [`crate::jsonrpsee::connector::JsonRpSeeConnector::get_info`]. +#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct GetInfoResponse { + /// The node version + #[serde(default)] + version: u64, + /// The node version build number + pub build: String, + /// The server sub-version identifier, used as the network protocol user-agent + pub subversion: String, + /// The protocol version + #[serde(default)] + #[serde(rename = "protocolversion")] + protocol_version: u32, + + /// The current number of blocks processed in the server + #[serde(default)] + blocks: u32, + + /// The total (inbound and outbound) number of connections the node has + #[serde(default)] + connections: usize, + + /// The proxy (if any) used by the server. Currently always `None` in Zebra. 
+ #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + proxy: Option, + + /// The current network difficulty + #[serde(default)] + difficulty: f64, + + /// True if the server is running in testnet mode, false otherwise + #[serde(default)] + testnet: bool, + + /// The minimum transaction fee in ZEC/kB + #[serde(default)] + #[serde(rename = "paytxfee")] + pay_tx_fee: f64, + + /// The minimum relay fee for non-free transactions in ZEC/kB + #[serde(default)] + #[serde(rename = "relayfee")] + relay_fee: f64, + + /// The last error or warning message, or "no errors" if there are no errors + #[serde(default)] + errors: String, + + /// The time of the last error or warning message, or "no errors timestamp" if there are no errors + #[serde(default)] + #[serde(rename = "errorstimestamp")] + errors_timestamp: ErrorsTimestamp, +} + +impl ResponseToError for GetInfoResponse { + type RpcError = Infallible; +} + +impl ResponseToError for GetDifficultyResponse { + type RpcError = Infallible; +} + +#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)] +#[serde(untagged)] +/// A wrapper to allow both types of error timestamp +pub enum ErrorsTimestamp { + /// Returned from zcashd, the timestamp is an integer unix timestamp + Num(usize), + /// Returned from zebrad, the timestamp is a string representing a timestamp + Int(i64), +} + +/// This infallible version is unsafe. 
Use only when sure that the +/// timestamp will never exceed i64 +impl From for i64 { + fn from(value: ErrorsTimestamp) -> Self { + match value { + ErrorsTimestamp::Num(n) => n as i64, + ErrorsTimestamp::Int(i) => i, + } + } +} + +impl ResponseToError for ErrorsTimestamp { + type RpcError = Infallible; +} +impl std::fmt::Display for ErrorsTimestamp { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ErrorsTimestamp::Num(n) => write!(f, "{}", n), + ErrorsTimestamp::Int(i) => write!(f, "{}", i), + } + } +} + +impl Default for ErrorsTimestamp { + fn default() -> Self { + ErrorsTimestamp::Int(0) + } +} + +impl From for zebra_rpc::methods::GetInfo { + fn from(response: GetInfoResponse) -> Self { + zebra_rpc::methods::GetInfo::new( + response.version, + response.build, + response.subversion, + response.protocol_version, + response.blocks, + response.connections, + response.proxy, + response.difficulty, + response.testnet, + response.pay_tx_fee, + response.relay_fee, + response.errors, + i64::from(response.errors_timestamp), + ) + } +} + +/// Response to a `getblockchaininfo` RPC request. +/// +/// This is used for the output parameter of [`crate::jsonrpsee::connector::JsonRpSeeConnector::get_blockchain_info`]. +#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct GetBlockchainInfoResponse { + /// Current network name as defined in BIP70 (main, test, regtest) + pub chain: String, + + /// The current number of blocks processed in the server, numeric + pub blocks: zebra_chain::block::Height, + + /// The hash of the currently best block, in big-endian order, hex-encoded + #[serde(rename = "bestblockhash", with = "hex")] + pub best_block_hash: zebra_chain::block::Hash, + + /// If syncing, the estimated height of the chain, else the current best height, numeric. + /// + /// In Zebra, this is always the height estimate, so it might be a little inaccurate. 
+ #[serde(rename = "estimatedheight")] + pub estimated_height: zebra_chain::block::Height, + + /// Chain supply balance + #[serde(default)] + #[serde(rename = "chainSupply")] + chain_supply: ChainBalance, + + /// Status of network upgrades + pub upgrades: indexmap::IndexMap< + zebra_rpc::methods::ConsensusBranchIdHex, + zebra_rpc::methods::NetworkUpgradeInfo, + >, + + /// Value pool balances + #[serde(rename = "valuePools")] + value_pools: [ChainBalance; 5], + + /// Branch IDs of the current and upcoming consensus rules + pub consensus: zebra_rpc::methods::TipConsensusBranch, + + /// The current number of headers we have validated in the best chain, that is, + /// the height of the best chain. + #[serde(default = "default_header")] + headers: Height, + + /// The estimated network solution rate in Sol/s. + #[serde(default)] + difficulty: f64, + + /// The verification progress relative to the estimated network chain tip. + #[serde(default)] + #[serde(rename = "verificationprogress")] + verification_progress: f64, + + /// The total amount of work in the best chain, hex-encoded. + #[serde(default)] + #[serde(rename = "chainwork")] + chain_work: ChainWork, + + /// Whether this node is pruned, currently always false in Zebra. + #[serde(default)] + pruned: bool, + + /// The estimated size of the block and undo files on disk + #[serde(default)] + size_on_disk: u64, + + /// The current number of note commitments in the commitment tree + #[serde(default)] + commitments: u64, +} + +impl ResponseToError for GetBlockchainInfoResponse { + type RpcError = Infallible; +} + +/// Response to a `getdifficulty` RPC request. +#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct GetDifficultyResponse(pub f64); + +/// Response to a `getnetworksolps` RPC request. 
+#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct GetNetworkSolPsResponse(pub u64); + +impl ResponseToError for GetNetworkSolPsResponse { + type RpcError = Infallible; +} + +fn default_header() -> Height { + Height(0) +} + +#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)] +#[serde(untagged)] +/// A wrapper type to allow both kinds of ChainWork +pub enum ChainWork { + /// Returned from zcashd, a chainwork is a String representing a + /// base-16 integer + Str(String), + /// Returned from zebrad, a chainwork is an integer + Num(u64), +} + +/// Error type used for the `chainwork` field of the `getblockchaininfo` RPC request. +#[derive(Debug, thiserror::Error)] +pub enum ChainWorkError {} + +impl ResponseToError for ChainWork { + type RpcError = ChainWorkError; +} +impl TryFrom for ChainWorkError { + type Error = RpcError; + + fn try_from(value: RpcError) -> Result { + // TODO: attempt to convert RpcError into errors specific to this RPC response + Err(value) + } +} + +impl TryFrom for u64 { + type Error = ParseIntError; + + fn try_from(value: ChainWork) -> Result { + match value { + ChainWork::Str(s) => u64::from_str_radix(&s, 16), + ChainWork::Num(u) => Ok(u), + } + } +} + +impl Default for ChainWork { + fn default() -> Self { + ChainWork::Num(0) + } +} + +/// Wrapper struct for a Zebra [`GetBlockchainInfoBalance`], enabling custom +/// deserialisation logic to handle both zebrad and zcashd. 
+#[derive(Clone, Debug, PartialEq, Serialize)] +pub struct ChainBalance(GetBlockchainInfoBalance); + +impl ResponseToError for ChainBalance { + type RpcError = Infallible; +} + +impl<'de> Deserialize<'de> for ChainBalance { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize, Debug)] + struct TempBalance { + #[serde(default)] + id: String, + #[serde(rename = "chainValue")] + chain_value: f64, + #[serde(rename = "chainValueZat")] + chain_value_zat: u64, + #[allow(dead_code)] + #[serde(default)] + monitored: bool, + } + let temp = TempBalance::deserialize(deserializer)?; + let computed_zat = (temp.chain_value * 100_000_000.0).round() as u64; + if computed_zat != temp.chain_value_zat { + return Err(D::Error::custom(format!( + "chainValue and chainValueZat mismatch: computed {} but got {}", + computed_zat, temp.chain_value_zat + ))); + } + let amount = Amount::::from_bytes(temp.chain_value_zat.to_le_bytes()) + .map_err(|e| DeserError::custom(e.to_string()))?; + match temp.id.as_str() { + "transparent" => Ok(ChainBalance(GetBlockchainInfoBalance::transparent( + amount, None, /*TODO: handle optional delta*/ + ))), + "sprout" => Ok(ChainBalance(GetBlockchainInfoBalance::sprout( + amount, None, /*TODO: handle optional delta*/ + ))), + "sapling" => Ok(ChainBalance(GetBlockchainInfoBalance::sapling( + amount, None, /*TODO: handle optional delta*/ + ))), + "orchard" => Ok(ChainBalance(GetBlockchainInfoBalance::orchard( + amount, None, /*TODO: handle optional delta*/ + ))), + // TODO: Investigate source of undocumented 'lockbox' value + // that likely is intended to be 'deferred' + "lockbox" | "deferred" => Ok(ChainBalance(GetBlockchainInfoBalance::deferred( + amount, None, + ))), + "" => Ok(ChainBalance(GetBlockchainInfoBalance::chain_supply( + // The pools are immediately summed internally, which pool we pick doesn't matter here + ValueBalance::from_transparent_amount(amount), + ))), + otherwise => todo!("error: invalid chain 
id deser {otherwise}"), + } + } +} + +impl Default for ChainBalance { + fn default() -> Self { + Self(GetBlockchainInfoBalance::chain_supply(ValueBalance::zero())) + } +} + +impl TryFrom for zebra_rpc::methods::GetBlockchainInfoResponse { + fn try_from(response: GetBlockchainInfoResponse) -> Result { + Ok(zebra_rpc::methods::GetBlockchainInfoResponse::new( + response.chain, + response.blocks, + response.best_block_hash, + response.estimated_height, + response.chain_supply.0, + response.value_pools.map(|pool| pool.0), + response.upgrades, + response.consensus, + response.headers, + response.difficulty, + response.verification_progress, + response.chain_work.try_into()?, + response.pruned, + response.size_on_disk, + response.commitments, + )) + } + + type Error = ParseIntError; +} + +/// The transparent balance of a set of addresses. +/// +/// This is used for the output parameter of [`crate::jsonrpsee::connector::JsonRpSeeConnector::get_address_balance`]. +#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct GetBalanceResponse { + /// The total transparent balance. + pub balance: u64, + #[serde(default)] + /// The total balance received, including change + pub received: u64, +} + +/// Error type for the `get_address_balance` RPC request. +#[derive(Debug, thiserror::Error)] +pub enum GetBalanceError { + /// Invalid number of provided addresses. + #[error("Invalid number of addresses: {0}")] + InvalidAddressesAmount(i16), + + /// Invalid encoding. 
+ #[error("Invalid encoding: {0}")] + InvalidEncoding(String), +} + +impl ResponseToError for GetBalanceResponse { + type RpcError = GetBalanceError; +} +impl TryFrom for GetBalanceError { + type Error = RpcError; + + fn try_from(value: RpcError) -> Result { + // TODO: attempt to convert RpcError into errors specific to this RPC response + Err(value) + } +} + +impl From for zebra_rpc::methods::AddressBalance { + fn from(response: GetBalanceResponse) -> Self { + zebra_rpc::methods::GetAddressBalanceResponse::new(response.balance, response.received) + } +} + +/// Contains the hex-encoded hash of the sent transaction. +/// +/// This is used for the output parameter of [`crate::jsonrpsee::connector::JsonRpSeeConnector::send_raw_transaction`]. +#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct SendTransactionResponse(#[serde(with = "hex")] pub zebra_chain::transaction::Hash); + +/// Error type for the `sendrawtransaction` RPC request. +/// TODO: should we track state here? (`Rejected`, `MissingInputs`) +#[derive(Debug, thiserror::Error)] +pub enum SendTransactionError { + /// Decoding failed. + #[error("Decoding failed")] + DeserializationError, + + /// Transaction rejected due to `expiryheight` being under `TX_EXPIRING_SOON_THRESHOLD`. + /// This is used for DoS mitigation. + #[error("Transaction expiring soon: {0}")] + ExpiringSoon(u64), + + /// Transaction has no inputs. + #[error("Missing inputs")] + MissingInputs, + + /// Transaction already in the blockchain. + #[error("Already in chain")] + AlreadyInChain, + + /// Transaction rejected. 
+ #[error("Transaction rejected")] + Rejected(String), +} + +impl ResponseToError for SendTransactionResponse { + type RpcError = SendTransactionError; +} +impl TryFrom for SendTransactionError { + type Error = RpcError; + + fn try_from(value: RpcError) -> Result { + // TODO: attempt to convert RpcError into errors specific to this RPC response + Err(value) + } +} + +impl From for zebra_rpc::methods::SentTransactionHash { + fn from(value: SendTransactionResponse) -> Self { + zebra_rpc::methods::SentTransactionHash::new(value.0) + } +} + +/// Response to a `getbestblockhash` and `getblockhash` RPC request. +/// +/// Contains the hex-encoded hash of the requested block. +#[derive( + Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize, derive_more::From, +)] +#[serde(transparent)] +pub struct GetBlockHash(#[serde(with = "hex")] pub zebra_chain::block::Hash); + +impl ResponseToError for GetBlockHash { + type RpcError = Infallible; +} + +impl Default for GetBlockHash { + fn default() -> Self { + GetBlockHash(zebra_chain::block::Hash([0; 32])) + } +} + +impl From for zebra_rpc::methods::GetBlockHash { + fn from(value: GetBlockHash) -> Self { + zebra_rpc::methods::GetBlockHashResponse::new(value.0) + } +} + +/// A wrapper struct for a zebra serialized block. +/// +/// Stores bytes that are guaranteed to be deserializable into a [`zebra_chain::block::Block`]. 
+#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub struct SerializedBlock(zebra_chain::block::SerializedBlock); + +impl std::ops::Deref for SerializedBlock { + type Target = zebra_chain::block::SerializedBlock; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl AsRef<[u8]> for SerializedBlock { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl From> for SerializedBlock { + fn from(bytes: Vec) -> Self { + Self(zebra_chain::block::SerializedBlock::from(bytes)) + } +} + +impl From for SerializedBlock { + fn from(inner: zebra_chain::block::SerializedBlock) -> Self { + SerializedBlock(inner) + } +} + +impl hex::FromHex for SerializedBlock { + type Error = hex::FromHexError; + + fn from_hex>(hex: T) -> Result { + hex::decode(hex).map(SerializedBlock::from) + } +} + +impl<'de> serde::Deserialize<'de> for SerializedBlock { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + struct HexVisitor; + + impl serde::de::Visitor<'_> for HexVisitor { + type Value = SerializedBlock; + + fn expecting(&self, formatter: &mut core::fmt::Formatter) -> core::fmt::Result { + formatter.write_str("a hex-encoded string") + } + + fn visit_str(self, value: &str) -> Result + where + E: DeserError, + { + let bytes = hex::decode(value).map_err(DeserError::custom)?; + Ok(SerializedBlock::from(bytes)) + } + } + + deserializer.deserialize_str(HexVisitor) + } +} + +/// Sapling note commitment tree information. +/// +/// Wrapper struct for zebra's SaplingTrees +#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct SaplingTrees { + size: u64, +} + +/// Orchard note commitment tree information. +/// +/// Wrapper struct for zebra's OrchardTrees +#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct OrchardTrees { + size: u64, +} + +/// Information about the sapling and orchard note commitment trees if any. 
+/// +/// Wrapper struct for zebra's GetBlockTrees +#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct GetBlockTrees { + #[serde(default)] + sapling: Option, + #[serde(default)] + orchard: Option, +} + +impl GetBlockTrees { + /// Returns sapling data held by [`GetBlockTrees`]. + pub fn sapling(&self) -> u64 { + self.sapling.map_or(0, |s| s.size) + } + + /// Returns orchard data held by [`GetBlockTrees`]. + pub fn orchard(&self) -> u64 { + self.orchard.map_or(0, |o| o.size) + } +} + +impl From for zebra_rpc::methods::GetBlockTrees { + fn from(val: GetBlockTrees) -> Self { + zebra_rpc::methods::GetBlockTrees::new(val.sapling(), val.orchard()) + } +} + +/// Wrapper struct for a zebra `Solution`. +/// +/// *** NOTE / TODO: ToHex should be implemented in zebra to avoid the use of a wrapper struct. *** +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Solution(pub zebra_chain::work::equihash::Solution); + +impl std::ops::Deref for Solution { + type Target = zebra_chain::work::equihash::Solution; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl hex::ToHex for Solution { + fn encode_hex>(&self) -> T { + self.0.encode_hex() + } + + fn encode_hex_upper>(&self) -> T { + self.0.encode_hex_upper() + } +} + +impl hex::FromHex for Solution { + type Error = zebra_chain::serialization::SerializationError; + + fn from_hex>(hex: T) -> Result { + let hex_str = std::str::from_utf8(hex.as_ref()).map_err(|_| { + zebra_chain::serialization::SerializationError::Parse("invalid UTF-8 in hex input") + })?; + let bytes = hex::decode(hex_str).map_err(|_| { + zebra_chain::serialization::SerializationError::Parse("failed to decode hex string") + })?; + zebra_chain::work::equihash::Solution::from_bytes(&bytes).map(Solution) + } +} + +impl From for zebra_chain::work::equihash::Solution { + fn from(value: Solution) -> Self { + value.0 + } +} + +/// Response to a `getblock` RPC request. 
+/// +/// This is used for the output parameter of [`crate::jsonrpsee::connector::JsonRpSeeConnector::get_block`]. +#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)] +#[serde(untagged)] +pub enum GetBlockResponse { + /// The request block, hex-encoded. + Raw(#[serde(with = "hex")] SerializedBlock), + /// The block object. + Object(Box), +} + +impl ResponseToError for SerializedBlock { + type RpcError = GetBlockError; +} +impl TryFrom for GetBlockError { + type Error = RpcError; + + fn try_from(value: RpcError) -> Result { + // If the block is not in Zebra's state, returns + // [error code `-8`.](https://github.com/zcash/zcash/issues/5758) + if value.code == -8 { + Ok(Self::MissingBlock(value.message)) + } else { + Err(value) + } + } +} + +impl ResponseToError for BlockObject { + type RpcError = GetBlockError; +} + +/// Error type for the `getblock` RPC request. +#[derive(Debug, thiserror::Error)] +pub enum GetBlockError { + /// Verbosity not in range from 0 to 2. + #[error("Invalid verbosity: {0}")] + InvalidVerbosity(i8), + + /// Not found. + #[error("Block not found")] + BlockNotFound, + + /// Block was pruned. + #[error("Block not available, pruned data: {0}")] + BlockNotAvailable(String), + + /// TODO: Cannot read block from disk. 
+ #[error("Cannot read block")] + CannotReadBlock, + /// TODO: temporary variant + #[error("Custom error: {0}")] + Custom(String), + /// The requested block hash or height could not be found + #[error("Block not found: {0}")] + MissingBlock(String), +} + +// impl std::fmt::Display for GetBlockError { +// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +// f.write_str("block not found") +// } +// } + +impl ResponseToError for GetBlockResponse { + type RpcError = GetBlockError; +} + +/// Contains the height of the most recent block in the best valid block chain +#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct GetBlockCountResponse(Height); + +impl ResponseToError for GetBlockCountResponse { + type RpcError = Infallible; +} + +impl From for Height { + fn from(value: GetBlockCountResponse) -> Self { + value.0 + } +} + +impl ResponseToError for ValidateAddressResponse { + type RpcError = Infallible; +} + +/// A block object containing data and metadata about a block. +/// +/// This is used for the output parameter of [`crate::jsonrpsee::connector::JsonRpSeeConnector::get_block`]. +#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct BlockObject { + /// The hash of the requested block. + pub hash: GetBlockHash, + + /// The number of confirmations of this block in the best chain, + /// or -1 if it is not in the best chain. + pub confirmations: i64, + + /// The block size. TODO: fill it + #[serde(default, skip_serializing_if = "Option::is_none")] + pub size: Option, + + /// The height of the requested block. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub height: Option, + + /// The version field of the requested block. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, + + /// The merkle root of the requested block. 
+ #[serde(with = "opthex", rename = "merkleroot")] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub merkle_root: Option, + + /// The blockcommitments field of the requested block. Its interpretation changes + /// depending on the network and height. + #[serde(with = "opthex", rename = "blockcommitments")] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub block_commitments: Option<[u8; 32]>, + + /// The root of the Sapling commitment tree after applying this block. + #[serde(with = "opthex", rename = "finalsaplingroot")] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub final_sapling_root: Option<[u8; 32]>, + + /// The root of the Orchard commitment tree after applying this block. + #[serde(with = "opthex", rename = "finalorchardroot")] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub final_orchard_root: Option<[u8; 32]>, + + /// The height of the requested block. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub time: Option, + + /// The nonce of the requested block header. + #[serde(with = "opthex")] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub nonce: Option<[u8; 32]>, + + /// The Equihash solution in the requested block header. + /// Note: presence of this field in getblock is not documented in zcashd. + #[serde(with = "opthex")] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub solution: Option, + + /// The difficulty threshold of the requested block header displayed in compact form. + #[serde(with = "opthex")] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub bits: Option, + + /// Floating point number that represents the difficulty limit for this block as a multiple + /// of the minimum difficulty for the network. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub difficulty: Option, + + /// List of transaction IDs in block order, hex-encoded. 
+ pub tx: Vec, + + /// Chain supply balance + #[serde(default)] + #[serde(rename = "chainSupply")] + chain_supply: Option, + /// Value pool balances + /// + #[serde(rename = "valuePools")] + value_pools: Option<[ChainBalance; 5]>, + + /// Information about the note commitment trees. + pub trees: GetBlockTrees, + + /// The previous block hash of the requested block header. + #[serde( + rename = "previousblockhash", + default, + skip_serializing_if = "Option::is_none" + )] + pub previous_block_hash: Option, + + /// The next block hash after the requested block header. + #[serde( + rename = "nextblockhash", + default, + skip_serializing_if = "Option::is_none" + )] + pub next_block_hash: Option, +} + +impl TryFrom for zebra_rpc::methods::GetBlock { + type Error = zebra_chain::serialization::SerializationError; + + fn try_from(response: GetBlockResponse) -> Result { + match response { + GetBlockResponse::Raw(serialized_block) => { + Ok(zebra_rpc::methods::GetBlock::Raw(serialized_block.0)) + } + GetBlockResponse::Object(block) => { + let tx_ids: Result, _> = block + .tx + .into_iter() + .map(|txid| { + txid.parse::() + .map(zebra_rpc::methods::GetBlockTransaction::Hash) + }) + .collect(); + + Ok(zebra_rpc::methods::GetBlock::Object(Box::new( + zebra_rpc::client::BlockObject::new( + block.hash.0, + block.confirmations, + block.size, + block.height, + block.version, + block.merkle_root, + block.block_commitments, + block.final_sapling_root, + block.final_orchard_root, + tx_ids?, + block.time, + block.nonce, + block.solution.map(Into::into), + block.bits, + block.difficulty, + block.chain_supply.map(|supply| supply.0), + block.value_pools.map( + |[transparent, sprout, sapling, orchard, deferred]| { + [transparent.0, sprout.0, sapling.0, orchard.0, deferred.0] + }, + ), + block.trees.into(), + block.previous_block_hash.map(|hash| hash.0), + block.next_block_hash.map(|hash| hash.0), + ), + ))) + } + } + } +} + +/// Vec of transaction ids, as a JSON array. 
+/// +/// This is used for the output parameter of [`crate::jsonrpsee::connector::JsonRpSeeConnector::get_raw_mempool`] and [`crate::jsonrpsee::connector::JsonRpSeeConnector::get_address_txids`]. +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +pub struct TxidsResponse { + /// Vec of txids. + pub transactions: Vec, +} + +/// Error type for the `get_address_txids` RPC method. +#[derive(Debug, thiserror::Error)] +pub enum TxidsError { + /// TODO: double check. + /// + /// If start is greater than the latest block height, + /// it's interpreted as that height. + #[error("invalid start block height: {0}")] + InvalidStartBlockHeight(i64), + + /// TODO: check which cases this can happen. + #[error("invalid end block height: {0}")] + InvalidEndBlockHeight(i64), + + /// Invalid address encoding. + #[error("Invalid encoding: {0}")] + InvalidEncoding(String), +} + +impl ResponseToError for TxidsResponse { + type RpcError = TxidsError; +} +impl TryFrom for TxidsError { + type Error = RpcError; + + fn try_from(value: RpcError) -> Result { + // TODO: attempt to convert RpcError into errors specific to this RPC response + Err(value) + } +} + +/// Separate response for the `get_raw_mempool` RPC method. +/// +/// Even though the output type is the same as [`TxidsResponse`], +/// errors are different. +pub struct RawMempoolResponse { + /// Vec of txids. + pub transactions: Vec, +} + +impl ResponseToError for RawMempoolResponse { + type RpcError = Infallible; + + fn to_error(self) -> Result { + Ok(self) + } +} + +impl<'de> serde::Deserialize<'de> for TxidsResponse { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let v = serde_json::Value::deserialize(deserializer)?; + + let transactions = v + .as_array() + .ok_or_else(|| DeserError::custom("Expected the JSON to be an array"))? 
+ .iter() + .filter_map(|item| item.as_str().map(String::from)) + .collect::>(); + + Ok(TxidsResponse { transactions }) + } +} + +/// Contains the hex-encoded Sapling & Orchard note commitment trees, and their +/// corresponding `block::Hash`, `Height`, and block time. +/// +/// Encoded using v0 frontier encoding. +/// +/// This is used for the output parameter of [`crate::jsonrpsee::connector::JsonRpSeeConnector::get_treestate`]. +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +pub struct GetTreestateResponse { + /// The block height corresponding to the treestate, numeric. + pub height: i32, + + /// The block hash corresponding to the treestate, hex-encoded. + pub hash: String, + + /// Unix time when the block corresponding to the treestate was mined, numeric. + /// + /// UTC seconds since the Unix 1970-01-01 epoch. + pub time: u32, + + /// A treestate containing a Sapling note commitment tree, hex-encoded. + pub sapling: zebra_rpc::client::Treestate, + + /// A treestate containing an Orchard note commitment tree, hex-encoded. + pub orchard: zebra_rpc::client::Treestate, +} + +/// Error type for the `get_treestate` RPC request. +#[derive(Debug, thiserror::Error)] +pub enum GetTreestateError { + /// Invalid hash or height. 
+ #[error("invalid hash or height: {0}")] + InvalidHashOrHeight(String), +} + +impl ResponseToError for GetTreestateResponse { + type RpcError = GetTreestateError; +} +impl TryFrom for GetTreestateError { + type Error = RpcError; + + fn try_from(value: RpcError) -> Result { + // TODO: attempt to convert RpcError into errors specific to this RPC response + Err(value) + } +} + +impl TryFrom for zebra_rpc::client::GetTreestateResponse { + type Error = zebra_chain::serialization::SerializationError; + + fn try_from(value: GetTreestateResponse) -> Result { + let parsed_hash = zebra_chain::block::Hash::from_hex(&value.hash)?; + let height_u32 = u32::try_from(value.height).map_err(|_| { + zebra_chain::serialization::SerializationError::Parse("negative block height") + })?; + + let sapling_bytes = value.sapling.commitments().final_state().clone(); + let sapling = Treestate::new(Commitments::new(None, sapling_bytes)); + + let orchard_bytes = value.orchard.commitments().final_state().clone(); + let orchard = Treestate::new(Commitments::new(None, orchard_bytes)); + + Ok(zebra_rpc::client::GetTreestateResponse::new( + parsed_hash, + zebra_chain::block::Height(height_u32), + value.time, + // Sprout + None, + sapling, + orchard, + )) + } +} + +/// Contains raw transaction, encoded as hex bytes. +/// +/// This is used for the output parameter of [`crate::jsonrpsee::connector::JsonRpSeeConnector::get_raw_transaction`]. +#[derive(Clone, Debug, PartialEq, serde::Serialize)] +pub enum GetTransactionResponse { + /// The raw transaction, encoded as hex bytes. + Raw(#[serde(with = "hex")] zebra_chain::transaction::SerializedTransaction), + /// The transaction object. 
+ Object(Box), +} + +impl ResponseToError for GetTransactionResponse { + type RpcError = Infallible; +} + +impl<'de> serde::Deserialize<'de> for GetTransactionResponse { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + use zebra_rpc::client::{ + Input, Orchard, Output, ShieldedOutput, ShieldedSpend, TransactionObject, + }; + + let tx_value = serde_json::Value::deserialize(deserializer)?; + + if let Some(hex_value) = tx_value.get("hex") { + let hex_str = hex_value + .as_str() + .ok_or_else(|| DeserError::custom("expected hex to be a string"))?; + + let hex = zebra_chain::transaction::SerializedTransaction::from_hex(hex_str) + .map_err(DeserError::custom)?; + + // Convert `mempool tx height = -1` (Zcashd) to `None` (Zebrad). + let height = match tx_value.get("height").and_then(|v| v.as_i64()) { + Some(-1) | None => None, + Some(h) if h < -1 => { + return Err(DeserError::custom("invalid height returned in block")) + } + Some(h) => match TryInto::::try_into(h) { + Ok(h) => Some(h), + Err(_e) => { + return Err(DeserError::custom( + "invalid height returned in block: `{h}`", + )) + } + }, + }; + + macro_rules! get_tx_value_fields{ + ($(let $field:ident: $kind:ty = $transaction_json:ident[$field_name:literal]; )+) => { + $(let $field = $transaction_json + .get($field_name) + .map(|v| ::serde_json::from_value::<$kind>(v.clone())) + .transpose() + .map_err(::serde::de::Error::custom)?; + )+ + } + } + + let confirmations = tx_value + .get("confirmations") + .and_then(|v| v.as_u64()) + .map(|v| v as u32); + + // if let Some(vin_value) = tx_value.get("vin") { + // match serde_json::from_value::>(vin_value.clone()) { + // Ok(_inputs) => { /* continue */ } + // Err(err) => { + // eprintln!("Failed to parse vin: {err}"); + // eprintln!( + // "Offending JSON:\n{}", + // serde_json::to_string_pretty(vin_value).unwrap() + // ); + // return Err(serde::de::Error::custom("Failed to deserialize vin")); + // } + // } + // } + get_tx_value_fields! 
{ + // We don't need this, as it should always be true if and only if height is Some + // There's no reason to rely on this field being present when we can determine + // it correctly in all cases + let _in_active_chain: bool = tx_value["in_active_chain"]; + let inputs: Vec = tx_value["vin"]; + let outputs: Vec = tx_value["vout"]; + let shielded_spends: Vec = tx_value["vShieldedSpend"]; + let shielded_outputs: Vec = tx_value["vShieldedOutput"]; + let orchard: Orchard = tx_value["orchard"]; + let value_balance: f64 = tx_value["valueBalance"]; + let value_balance_zat: i64 = tx_value["valueBalanceZat"]; + let size: i64 = tx_value["size"]; + let time: i64 = tx_value["time"]; + let txid: String = tx_value["txid"]; + let auth_digest: String = tx_value["authdigest"]; + let overwintered: bool = tx_value["overwintered"]; + let version: u32 = tx_value["version"]; + let version_group_id: String = tx_value["versiongroupid"]; + let lock_time: u32 = tx_value["locktime"]; + let expiry_height: Height = tx_value["expiryheight"]; + let block_hash: String = tx_value["blockhash"]; + let block_time: i64 = tx_value["blocktime"]; + } + + let txid = txid.ok_or(DeserError::missing_field("txid"))?; + + let txid = zebra_chain::transaction::Hash::from_hex(txid) + .map_err(|e| DeserError::custom(format!("txid was not valid hash: {e}")))?; + let block_hash = block_hash + .map(|bh| { + zebra_chain::block::Hash::from_hex(bh).map_err(|e| { + DeserError::custom(format!("blockhash was not valid hash: {e}")) + }) + }) + .transpose()?; + let auth_digest = auth_digest + .map(|ad| { + zebra_chain::transaction::AuthDigest::from_hex(ad).map_err(|e| { + DeserError::custom(format!("authdigest was not valid hash: {e}")) + }) + }) + .transpose()?; + let version_group_id = version_group_id + .map(hex::decode) + .transpose() + .map_err(|e| DeserError::custom(format!("txid was not valid hash: {e}")))?; + + Ok(GetTransactionResponse::Object(Box::new( + TransactionObject::new( + // optional, but we can infer from 
height + Some(height.is_some()), + hex, + // optional + height, + // optional + confirmations, + inputs.unwrap_or_default(), + outputs.unwrap_or_default(), + shielded_spends.unwrap_or_default(), + shielded_outputs.unwrap_or_default(), + // TODO: sprout joinsplits + vec![], + None, + None, + None, + // optional + orchard, + // optional + value_balance, + // optional + value_balance_zat, + // optional + size, + // optional + time, + txid, + // optional + auth_digest, + overwintered.unwrap_or(false), + version.ok_or(DeserError::missing_field("version"))?, + // optional + version_group_id, + lock_time.ok_or(DeserError::missing_field("locktime"))?, + // optional + expiry_height, + // optional + block_hash, + // optional + block_time, + ), + ))) + } else if let Some(hex_str) = tx_value.as_str() { + let raw = zebra_chain::transaction::SerializedTransaction::from_hex(hex_str) + .map_err(DeserError::custom)?; + Ok(GetTransactionResponse::Raw(raw)) + } else { + Err(DeserError::custom("Unexpected transaction format")) + } + } +} + +impl From for zebra_rpc::methods::GetRawTransaction { + fn from(value: GetTransactionResponse) -> Self { + match value { + GetTransactionResponse::Raw(serialized_transaction) => { + zebra_rpc::methods::GetRawTransaction::Raw(serialized_transaction) + } + + GetTransactionResponse::Object(obj) => zebra_rpc::methods::GetRawTransaction::Object( + Box::new(zebra_rpc::client::TransactionObject::new( + obj.in_active_chain(), + obj.hex().clone(), + obj.height(), + obj.confirmations(), + obj.inputs().clone(), + obj.outputs().clone(), + obj.shielded_spends().clone(), + obj.shielded_outputs().clone(), + //TODO: sprout joinspits + vec![], + None, + None, + None, + obj.orchard().clone(), + obj.value_balance(), + obj.value_balance_zat(), + obj.size(), + obj.time(), + obj.txid(), + obj.auth_digest(), + obj.overwintered(), + obj.version(), + obj.version_group_id().clone(), + obj.lock_time(), + obj.expiry_height(), + obj.block_hash(), + obj.block_time(), + )), + ), 
+ } + } +} + +/// Wrapper struct for a zebra SubtreeRpcData. +#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)] +pub struct SubtreeRpcData(zebra_rpc::client::SubtreeRpcData); + +impl std::ops::Deref for SubtreeRpcData { + type Target = zebra_rpc::client::SubtreeRpcData; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl From for SubtreeRpcData { + fn from(inner: zebra_rpc::client::SubtreeRpcData) -> Self { + SubtreeRpcData(inner) + } +} + +impl hex::FromHex for SubtreeRpcData { + type Error = hex::FromHexError; + + fn from_hex>(hex: T) -> Result { + let hex_str = std::str::from_utf8(hex.as_ref()) + .map_err(|_| hex::FromHexError::InvalidHexCharacter { c: '�', index: 0 })?; + + if hex_str.len() < 8 { + return Err(hex::FromHexError::OddLength); + } + + let root_end_index = hex_str.len() - 8; + let (root_hex, height_hex) = hex_str.split_at(root_end_index); + + let root = root_hex.to_string(); + let height = u32::from_str_radix(height_hex, 16) + .map_err(|_| hex::FromHexError::InvalidHexCharacter { c: '�', index: 0 })?; + + Ok(SubtreeRpcData(zebra_rpc::client::SubtreeRpcData { + root, + end_height: zebra_chain::block::Height(height), + })) + } +} + +impl<'de> serde::Deserialize<'de> for SubtreeRpcData { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + #[derive(serde::Deserialize)] + struct SubtreeDataHelper { + root: String, + end_height: u32, + } + let helper = SubtreeDataHelper::deserialize(deserializer)?; + Ok(SubtreeRpcData(zebra_rpc::client::SubtreeRpcData { + root: helper.root, + end_height: zebra_chain::block::Height(helper.end_height), + })) + } +} + +/// Contains the Sapling or Orchard pool label, the index of the first subtree in the list, +/// and a list of subtree roots and end heights. +/// +/// This is used for the output parameter of [`crate::jsonrpsee::connector::JsonRpSeeConnector::get_subtrees_by_index`]. 
+#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct GetSubtreesResponse { + /// The shielded pool to which the subtrees belong. + pub pool: String, + + /// The index of the first subtree. + pub start_index: zebra_chain::subtree::NoteCommitmentSubtreeIndex, + + /// A sequential list of complete subtrees, in `index` order. + /// + /// The generic subtree root type is a hex-encoded Sapling or Orchard subtree root string. + // #[serde(skip_serializing_if = "Vec::is_empty")] + pub subtrees: Vec, +} + +/// Error type for the `z_getsubtreesbyindex` RPC request. +#[derive(Debug, thiserror::Error)] +pub enum GetSubtreesError { + /// Invalid pool + #[error("Invalid pool: {0}")] + InvalidPool(String), + + /// Invalid start index + #[error("Invalid start index")] + InvalidStartIndex, + + /// Invalid limit + #[error("Invalid limit")] + InvalidLimit, +} + +impl ResponseToError for GetSubtreesResponse { + type RpcError = GetSubtreesError; +} +impl TryFrom for GetSubtreesError { + type Error = RpcError; + + fn try_from(value: RpcError) -> Result { + // TODO: attempt to convert RpcError into errors specific to this RPC response + Err(value) + } +} + +impl From for zebra_rpc::client::GetSubtreesByIndexResponse { + fn from(value: GetSubtreesResponse) -> Self { + zebra_rpc::client::GetSubtreesByIndexResponse::new( + value.pool, + value.start_index, + value + .subtrees + .into_iter() + .map(|wrapped_subtree| wrapped_subtree.0) + .collect(), + ) + } +} + +/// Wrapper struct for a zebra Scrypt. +/// +/// # Correctness +/// +/// Consensus-critical serialization uses `ZcashSerialize`. +/// [`serde`]-based hex serialization must only be used for RPCs and testing. 
+#[derive(Debug, Clone, Eq, PartialEq, serde::Serialize)] +pub struct Script(zebra_chain::transparent::Script); + +impl std::ops::Deref for Script { + type Target = zebra_chain::transparent::Script; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl AsRef<[u8]> for Script { + fn as_ref(&self) -> &[u8] { + self.0.as_raw_bytes() + } +} + +impl From> for Script { + fn from(bytes: Vec) -> Self { + Self(zebra_chain::transparent::Script::new(bytes.as_ref())) + } +} + +impl From for Script { + fn from(inner: zebra_chain::transparent::Script) -> Self { + Script(inner) + } +} + +impl hex::FromHex for Script { + type Error = as hex::FromHex>::Error; + + fn from_hex>(hex: T) -> Result { + let bytes = Vec::from_hex(hex)?; + let inner = zebra_chain::transparent::Script::new(&bytes); + Ok(Script(inner)) + } +} + +impl<'de> serde::Deserialize<'de> for Script { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let v = serde_json::Value::deserialize(deserializer)?; + if let Some(hex_str) = v.as_str() { + let bytes = hex::decode(hex_str).map_err(DeserError::custom)?; + let inner = zebra_chain::transparent::Script::new(&bytes); + Ok(Script(inner)) + } else { + Err(DeserError::custom("expected a hex string")) + } + } +} + +/// This is used for the output parameter of [`crate::jsonrpsee::connector::JsonRpSeeConnector::get_address_utxos`]. 
+#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct GetUtxosResponse { + /// The transparent address, base58check encoded + pub address: zebra_chain::transparent::Address, + + /// The output txid, in big-endian order, hex-encoded + #[serde(with = "hex")] + pub txid: zebra_chain::transaction::Hash, + + /// The transparent output index, numeric + #[serde(rename = "outputIndex")] + pub output_index: u32, + + /// The transparent output script, hex encoded + #[serde(with = "hex")] + pub script: Script, + + /// The amount of zatoshis in the transparent output + pub satoshis: u64, + + /// The block height, numeric. + pub height: zebra_chain::block::Height, +} + +/// Error type for the `getaddressutxos` RPC request. +#[derive(Debug, thiserror::Error)] +pub enum GetUtxosError { + /// Invalid encoding + #[error("Invalid encoding: {0}")] + InvalidEncoding(String), +} + +impl ResponseToError for GetUtxosResponse { + type RpcError = GetUtxosError; +} +impl TryFrom for GetUtxosError { + type Error = RpcError; + + fn try_from(value: RpcError) -> Result { + // TODO: attempt to convert RpcError into errors specific to this RPC response + Err(value) + } +} + +impl ResponseToError for Vec { + type RpcError = GetUtxosError; +} + +impl From for zebra_rpc::methods::GetAddressUtxos { + fn from(value: GetUtxosResponse) -> Self { + zebra_rpc::methods::GetAddressUtxos::new( + value.address, + value.txid, + zebra_chain::transparent::OutputIndex::from_index(value.output_index), + value.script.0, + value.satoshis, + value.height, + ) + } +} + +impl ResponseToError for Box +where + T::RpcError: Send + Sync + 'static, +{ + type RpcError = T::RpcError; +} + +/// Response type for the `getmempoolinfo` RPC request +/// Details on the state of the TX memory pool. +/// In Zaino, this RPC call information is gathered from the local Zaino state instead of directly reflecting the full node's mempool. 
This state is populated from a gRPC stream, sourced from the full node. +/// The Zcash source code is considered canonical: +/// [from the rpc definition](), [this function is called to produce the return value](>). +/// the `size` field is called by [this line of code](), and returns an int64. +/// `size` represents the number of transactions currently in the mempool. +/// the `bytes` field is called by [this line of code](), and returns an int64 from [this variable](). +/// `bytes` is the sum memory size in bytes of all transactions in the mempool: the sum of all transaction byte sizes. +/// the `usage` field is called by [this line of code](), and returns an int64 derived from the return of this function(), which includes a number of elements. +/// `usage` is the total memory usage for the mempool, in bytes. +/// the [optional `fullyNotified` field](), is only utilized for zcashd regtests, is deprecated, and is not included. +#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct GetMempoolInfoResponse { + /// Current tx count + pub size: u64, + /// Sum of all tx sizes + pub bytes: u64, + /// Total memory usage for the mempool + pub usage: u64, +} + +impl ResponseToError for GetMempoolInfoResponse { + type RpcError = Infallible; +} diff --git a/zaino-fetch/src/jsonrpsee/response/address_deltas.rs b/zaino-fetch/src/jsonrpsee/response/address_deltas.rs new file mode 100644 index 000000000..7178e1933 --- /dev/null +++ b/zaino-fetch/src/jsonrpsee/response/address_deltas.rs @@ -0,0 +1,536 @@ +//! Types associated with the `getaddressdeltas` RPC request. + +use serde::{Deserialize, Serialize}; +use zebra_rpc::client::{Input, Output, TransactionObject}; + +use crate::jsonrpsee::connector::{ResponseToError, RpcError}; + +/// Request parameters for the `getaddressdeltas` RPC method. 
+#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] +#[serde(untagged)] +pub enum GetAddressDeltasParams { + /// Extends the basic address/height range with chaininfo and multiple address support. + Filtered { + /// List of base58check encoded addresses + addresses: Vec, + + /// Start block height (inclusive) + #[serde(default)] + start: u32, + + /// End block height (inclusive) + #[serde(default)] + end: u32, + + /// Whether to include chain info in response (defaults to false) + #[serde(default, rename = "chainInfo")] + chain_info: bool, + }, + + /// Get deltas for a single transparent address + Address(String), +} + +impl GetAddressDeltasParams { + /// Creates a new [`GetAddressDeltasParams::Filtered`] instance. + pub fn new_filtered(addresses: Vec, start: u32, end: u32, chain_info: bool) -> Self { + GetAddressDeltasParams::Filtered { + addresses, + start, + end, + chain_info, + } + } + + /// Creates a new [`GetAddressDeltasParams::Address`] instance. + pub fn new_address(addr: impl Into) -> Self { + GetAddressDeltasParams::Address(addr.into()) + } +} + +/// Response to a `getaddressdeltas` RPC request. +/// +/// This enum supports both simple array responses and extended responses with chain info. +/// The format depends on the `chaininfo` parameter in the request. +#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)] +#[serde(untagged)] +pub enum GetAddressDeltasResponse { + /// Simple array format (chaininfo = false or not specified) + /// Returns: [AddressDelta, AddressDelta, ...] + Simple(Vec), + /// Extended format with chain info (chaininfo = true) + /// Returns: {"deltas": [...], "start": {...}, "end": {...}} + WithChainInfo { + /// The address deltas + deltas: Vec, + + /// Information about the start block + start: BlockInfo, + + /// Information about the end block + end: BlockInfo, + }, +} + +impl GetAddressDeltasResponse { + /// Processes transaction objects into address deltas for specific addresses. 
+ /// This is a pure function that can be easily unit tested. + pub fn process_transactions_to_deltas( + transactions: &[Box], + target_addresses: &[String], + ) -> Vec { + let mut deltas: Vec = transactions + .iter() + .filter(|tx| tx.height().unwrap_or(0) > 0) + .flat_map(|tx| { + let txid = tx.txid().to_string(); + let height = tx.height().unwrap(); // height > 0 due to previous filter + + // Inputs (negative deltas) + let input_deltas = tx.inputs().iter().enumerate().filter_map({ + let input_txid = txid.clone(); + move |(input_index, input)| { + AddressDelta::from_input( + input, + input_index as u32, + &input_txid, + height as u32, // Height is known to be non-negative + target_addresses, + None, + ) + } + }); + + // Outputs (positive deltas) + let output_deltas = tx.outputs().iter().flat_map({ + let output_txid = txid; + move |output| { + AddressDelta::from_output( + output, + &output_txid, + height as u32, // Height is known to be non-negative + target_addresses, + None, + ) + } + }); + + input_deltas.chain(output_deltas) + }) + .collect(); + // zcashd-like ordering: (height ASC, blockindex ASC, index ASC) + deltas.sort_by_key(|d| (d.height, d.block_index.unwrap_or(u32::MAX), d.index)); + deltas + } +} + +/// Error type used for the `getaddressdeltas` RPC request. +#[derive(Debug, thiserror::Error)] +pub enum GetAddressDeltasError { + /// Invalid encoding + #[error("Invalid encoding: {0}")] + InvalidEncoding(String), + + /// Wrong block range + #[error("Invalid block range. Start = {0}, End = {1}")] + InvalidBlockRange(u32, u32), +} + +impl ResponseToError for GetAddressDeltasResponse { + type RpcError = GetAddressDeltasError; +} + +impl TryFrom for GetAddressDeltasError { + type Error = RpcError; + + fn try_from(value: RpcError) -> Result { + Err(value) + } +} + +/// Represents a change in the balance of a transparent address. 
+#[derive(Debug, Clone, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct AddressDelta { + /// The difference in zatoshis (or satoshis equivalent in Zcash) + satoshis: i64, + + /// The related transaction ID in hex string format + txid: String, + + /// The related input or output index + pub index: u32, + + /// The block height where the change occurred + pub height: u32, + + /// The base58check encoded address + address: String, + + #[serde(rename = "blockindex", skip_serializing_if = "Option::is_none")] + /// Zero-based position of the transaction within its containing block. + pub block_index: Option, +} + +impl AddressDelta { + /// Create a delta from a transaction input (spend - negative value) + pub fn from_input( + input: &Input, + input_index: u32, + txid: &str, + height: u32, + target_addresses: &[String], + block_index: Option, + ) -> Option { + match input { + Input::NonCoinbase { + address: Some(addr), + value_zat: Some(value), + .. + } => { + // Check if this address is in our target addresses + if target_addresses.iter().any(|req_addr| req_addr == addr) { + Some(AddressDelta { + satoshis: -value, // Negative for inputs (spends) + txid: txid.to_string(), + index: input_index, + height, + address: addr.clone(), + block_index, + }) + } else { + None + } + } + _ => None, // Skip coinbase inputs or inputs without address/value + } + } + + /// Create a delta from a transaction output (receive - positive value) + pub fn from_output( + output: &Output, + txid: &str, + height: u32, + target_addresses: &[String], + block_index: Option, + ) -> Vec { + if let Some(output_addresses) = &output.script_pub_key().addresses() { + output_addresses + .iter() + .filter(|addr| target_addresses.iter().any(|req_addr| req_addr == *addr)) + .map(|addr| AddressDelta { + satoshis: output.value_zat(), // Positive for outputs (receives) + txid: txid.to_string(), + index: output.n(), + height, + address: addr.clone(), + block_index, + }) + .collect() + } else { + 
Vec::new() + } + } +} + +/// Block information for `getaddressdeltas` responses with `chaininfo = true`. +#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct BlockInfo { + /// The block hash in hex-encoded display order + pub hash: String, + /// The block height + pub height: u32, +} + +impl BlockInfo { + /// Creates a new BlockInfo from a hash in hex-encoded display order and height. + pub fn new(hash: String, height: u32) -> Self { + Self { hash, height } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn sample_delta_with_block_index(i: u32, bi: Option) -> AddressDelta { + AddressDelta { + satoshis: if i.is_multiple_of(2) { 1_000 } else { -500 }, + txid: format!("deadbeef{:02x}", i), + index: i, + height: 123_456 + i, + address: format!("tmSampleAddress{:02}", i), + block_index: bi, + } + } + + mod serde { + mod params { + use serde_json::{json, Value}; + + use crate::jsonrpsee::response::address_deltas::GetAddressDeltasParams; + + #[test] + fn params_deser_filtered_with_camel_case_and_defaults() { + let json_value = json!({ + "addresses": ["tmA", "tmB"], + "start": 1000, + "end": 0, + "chainInfo": true + }); + + let params: GetAddressDeltasParams = + serde_json::from_value(json_value).expect("deserialize Filtered"); + match params { + GetAddressDeltasParams::Filtered { + addresses, + start, + end, + chain_info, + } => { + assert_eq!(addresses, vec!["tmA".to_string(), "tmB".to_string()]); + assert_eq!(start, 1000); + assert_eq!(end, 0); + assert!(chain_info); + } + _ => panic!("expected Filtered variant"), + } + } + + #[test] + fn params_deser_filtered_defaults_when_missing() { + // Only required field is addresses. Others default to 0/false. 
+ let json_value = json!({ "addresses": ["tmOnly"] }); + let params: GetAddressDeltasParams = + serde_json::from_value(json_value).expect("deserialize Filtered minimal"); + match params { + GetAddressDeltasParams::Filtered { + addresses, + start, + end, + chain_info, + } => { + assert_eq!(addresses, vec!["tmOnly".to_string()]); + assert_eq!(start, 0); + assert_eq!(end, 0); + assert!(!chain_info); + } + _ => panic!("expected Filtered variant"), + } + } + + #[test] + fn params_deser_single_address_variant() { + let json_value = Value::String("tmSingleAddress".into()); + let params: GetAddressDeltasParams = + serde_json::from_value(json_value).expect("deserialize Address"); + match params { + GetAddressDeltasParams::Address(s) => assert_eq!(s, "tmSingleAddress"), + _ => panic!("expected Address variant"), + } + } + + #[test] + fn params_ser_filtered_has_expected_keys_no_block_index() { + let params = + GetAddressDeltasParams::new_filtered(vec!["tmA".into()], 100, 200, true); + let json_value = serde_json::to_value(¶ms).expect("serialize"); + let json_object = json_value.as_object().expect("object"); + assert!(json_object.get("addresses").is_some()); + assert_eq!(json_object.get("start").and_then(Value::as_u64), Some(100)); + assert_eq!(json_object.get("end").and_then(Value::as_u64), Some(200)); + assert!(json_object.get("chainInfo").is_some()); + + // Critically: no blockindex in params + assert!(json_object.get("blockindex").is_none()); + } + } + mod address_delta { + use serde_json::Value; + + use crate::jsonrpsee::response::address_deltas::{ + tests::sample_delta_with_block_index, AddressDelta, + }; + + #[test] + fn address_delta_ser_deser_roundtrip_with_block_index() { + let delta_0 = sample_delta_with_block_index(0, Some(7)); + let json_str = serde_json::to_string(&delta_0).expect("serialize delta"); + let delta_1: AddressDelta = + serde_json::from_str(&json_str).expect("deserialize delta"); + assert_eq!(delta_0, delta_1); + + // JSON contains the key with the 
value + let json_value: Value = serde_json::from_str(&json_str).unwrap(); + assert_eq!( + json_value.get("blockindex").and_then(Value::as_u64), + Some(7) + ); + } + + #[test] + fn address_delta_ser_deser_roundtrip_without_block_index() { + let delta_0 = sample_delta_with_block_index(1, None); + let json_str = serde_json::to_string(&delta_0).expect("serialize delta"); + let delta_1: AddressDelta = + serde_json::from_str(&json_str).expect("deserialize delta"); + assert_eq!(delta_0, delta_1); + + let json_value: Value = serde_json::from_str(&json_str).unwrap(); + match json_value.get("blockindex") { + None => {} // Omitted + Some(val) => assert!(val.is_null(), "if present, it should be null when None"), + } + } + } + + mod response { + use serde_json::{json, Value}; + + use crate::jsonrpsee::response::address_deltas::{ + tests::sample_delta_with_block_index, BlockInfo, GetAddressDeltasResponse, + }; + + #[test] + fn response_ser_simple_array_shape_includes_delta_block_index() { + let deltas = vec![ + sample_delta_with_block_index(0, Some(2)), + sample_delta_with_block_index(1, None), + ]; + let resp = GetAddressDeltasResponse::Simple(deltas.clone()); + let json_value = serde_json::to_value(&resp).expect("serialize response"); + assert!( + json_value.is_array(), + "Simple response must be a JSON array" + ); + let json_array = json_value.as_array().unwrap(); + assert_eq!(json_array.len(), deltas.len()); + + // First delta has blockindex = 2 + assert_eq!( + json_array[0].get("blockindex").and_then(Value::as_u64), + Some(2) + ); + + // Second delta may omit or null blockindex + match json_array[1].get("blockindex") { + None => {} + Some(val) => assert!(val.is_null()), + } + } + + #[test] + fn response_ser_with_chain_info_shape_deltas_carry_block_index() { + let source_deltas = vec![ + sample_delta_with_block_index(2, Some(5)), + sample_delta_with_block_index(3, None), + ]; + let start = BlockInfo { + hash: "00..aa".into(), + height: 1000, + }; + let end = BlockInfo { + 
hash: "00..bb".into(), + height: 2000, + }; + let response = GetAddressDeltasResponse::WithChainInfo { + deltas: source_deltas, + start, + end, + }; + + let json_value = serde_json::to_value(&response).expect("serialize response"); + let json_object = json_value.as_object().expect("object"); + assert!(json_object.get("deltas").is_some()); + assert!(json_object.get("start").is_some()); + assert!(json_object.get("end").is_some()); + + let deltas = json_object + .get("deltas") + .unwrap() + .as_array() + .expect("deltas array"); + + // First delta has blockindex=5 + assert_eq!(deltas[0].get("blockindex").and_then(Value::as_u64), Some(5)); + + // Second delta may omit or null blockindex + match deltas[1].get("blockindex") { + None => {} + Some(val) => assert!(val.is_null()), + } + + assert!(json_object.get("blockindex").is_none()); + assert!(json_object.get("blockindex").is_none()); + } + + #[test] + fn response_deser_simple_from_array_with_and_without_block_index() { + let deltas_source = json!([ + { + "satoshis": 1000, + "txid": "deadbeef00", + "index": 0, + "height": 123456, + "address": "tmX", + "blockindex": 9 + }, + { + "satoshis": -500, + "txid": "deadbeef01", + "index": 1, + "height": 123457, + "address": "tmY" + // blockindex missing + } + ]); + let response: GetAddressDeltasResponse = + serde_json::from_value(deltas_source).expect("deserialize simple"); + match response { + GetAddressDeltasResponse::Simple(ds) => { + assert_eq!(ds.len(), 2); + assert_eq!(ds[0].txid, "deadbeef00"); + assert_eq!(ds[0].block_index, Some(9)); + assert_eq!(ds[1].txid, "deadbeef01"); + assert_eq!(ds[1].block_index, None); + } + _ => panic!("expected Simple variant"), + } + } + + #[test] + fn response_deser_with_chain_info_from_object_delays_block_index_per_delta() { + let deltas_source = json!({ + "deltas": [{ + "satoshis": -500, + "txid": "deadbeef02", + "index": 1, + "height": 123457, + "address": "tmY", + "blockindex": 4 + }, { + "satoshis": 2500, + "txid": "deadbeef03", + 
"index": 2, + "height": 123458, + "address": "tmZ" + // no blockindex + }], + "start": { "hash": "aa", "height": 1000 }, + "end": { "hash": "bb", "height": 2000 } + }); + let response: GetAddressDeltasResponse = + serde_json::from_value(deltas_source).expect("deserialize with chain info"); + match response { + GetAddressDeltasResponse::WithChainInfo { deltas, start, end } => { + assert_eq!(deltas.len(), 2); + assert_eq!(deltas[0].block_index, Some(4)); + assert_eq!(deltas[1].block_index, None); + assert_eq!(start.height, 1000); + assert_eq!(end.height, 2000); + } + _ => panic!("expected WithChainInfo variant"), + } + } + } + } +} diff --git a/zaino-fetch/src/jsonrpsee/response/block_deltas.rs b/zaino-fetch/src/jsonrpsee/response/block_deltas.rs new file mode 100644 index 000000000..23f34a11b --- /dev/null +++ b/zaino-fetch/src/jsonrpsee/response/block_deltas.rs @@ -0,0 +1,156 @@ +//! Types associated with the `getblockdeltas` RPC request. + +use zebra_chain::amount::{Amount, NonNegative}; + +use crate::jsonrpsee::connector::{ResponseToError, RpcError}; + +/// Error type for the `getblockdeltas` RPC request. +#[derive(Debug, thiserror::Error)] +pub enum BlockDeltasError { + /// Block not found. + #[error("Block not found: {0}")] + BlockNotFound(String), + + /// Error while calculating median time past + #[error("Error while calculating median time past")] + CalculationError, + + /// Received a raw block when expecting a block object + #[error("Received a raw block when expecting a block object")] + UnexpectedRawBlock, +} + +/// Response to a `getblockdeltas` RPC request. +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, PartialEq)] +pub struct BlockDeltas { + /// The hash of the block. + pub hash: String, + + /// The number of confirmations. + pub confirmations: i64, + + /// Serialized block size in bytes. + pub size: i64, + + /// Block height in the best chain. + pub height: u32, + + /// Block header version. 
+ pub version: u32, + + /// The merkle root of the block. + #[serde(rename = "merkleroot")] + pub merkle_root: String, + + /// Per-transaction transparent deltas for this block. + /// Each entry corresponds to a transaction at position `index` in the block and + /// contains: + /// - `inputs`: non-coinbase vins with **negative** zatoshi amounts and their prevouts, + /// - `outputs`: vouts with exactly one transparent address and **positive** amounts. + pub deltas: Vec, + + /// Block header timestamp as set by the miner. + pub time: i64, + + /// Median-Time-Past (MTP) of this block, i.e. the median of the timestamps of + /// this block and up to the 10 previous blocks `[N-10 … N]` (Unix epoch seconds). + #[serde(rename = "mediantime")] + pub median_time: i64, + + /// Block header nonce encoded as hex (Equihash nonce). + pub nonce: String, + + /// Compact target (“nBits”) as a hex string, e.g. `"1d00ffff"`. + pub bits: String, + + /// Difficulty corresponding to `bits` (relative to minimum difficulty, e.g. `1.0`). + pub difficulty: f64, + + // `chainwork` would be here, but Zebra does not plan to support it + // pub chainwork: Vec, + /// Previous block hash as hex, or `None` for genesis. + #[serde(skip_serializing_if = "Option::is_none", rename = "previousblockhash")] + pub previous_block_hash: Option, + + /// Next block hash in the active chain, if known. Omitted for the current tip + /// or for blocks not in the active chain. + #[serde(skip_serializing_if = "Option::is_none", rename = "nextblockhash")] + pub next_block_hash: Option, +} + +impl ResponseToError for BlockDeltas { + type RpcError = BlockDeltasError; +} + +impl TryFrom for BlockDeltasError { + type Error = RpcError; + + fn try_from(value: RpcError) -> Result { + if value.code == -8 { + Ok(Self::UnexpectedRawBlock) + } else { + Err(value) + } + } +} + +/// Per-transaction transparent deltas within a block, as returned by +/// `getblockdeltas`. 
One `BlockDelta` is emitted for each transaction in +/// the block, at the transaction’s position (`index`). +#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq)] +pub struct BlockDelta { + /// Transaction hash. + pub txid: String, + + /// Zero-based position of this transaction within the block. + pub index: u32, + + /// Transparent input deltas (non-coinbase only). + /// + /// Each entry spends a previous transparent output and records a **negative** + /// amount in zatoshis. Inputs that do not resolve to exactly one transparent + /// address are omitted. + pub inputs: Vec, + + /// Transparent output deltas. + /// + /// Each entry pays exactly one transparent address and records a **positive** + /// amount in zatoshis. Outputs without a single transparent address (e.g., + /// OP_RETURN, bare multisig with multiple addresses) are omitted. + pub outputs: Vec, +} + +/// A single transparent input delta within a transaction. +/// +/// Represents spending of a specific previous output (`prevtxid`/`prevout`) +/// to a known transparent address. Amounts are **negative** (funds leaving). +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] +pub struct InputDelta { + /// Transparent address that the spent prevout paid to. + pub address: String, + + /// Amount in zatoshis, **negative** for inputs/spends. + pub satoshis: Amount, + + /// Zero-based vin index within the transaction. + pub index: u32, + + /// Hash of the previous transaction containing the spent output. + pub prevtxid: String, + + /// Output index (`vout`) in `prevtxid` that is being spent. + pub prevout: u32, +} + +/// A single transparent output delta within a transaction. +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] +pub struct OutputDelta { + /// Transparent address paid by this output. + pub address: String, + + /// Amount in zatoshis, **non-negative**. + pub satoshis: Amount, + + /// Zero-based vout index within the transaction. 
+ pub index: u32, +} diff --git a/zaino-fetch/src/jsonrpsee/response/block_header.rs b/zaino-fetch/src/jsonrpsee/response/block_header.rs new file mode 100644 index 000000000..2edeb23de --- /dev/null +++ b/zaino-fetch/src/jsonrpsee/response/block_header.rs @@ -0,0 +1,396 @@ +//! Types associated with the `getblockheader` RPC request. + +use serde::{Deserialize, Serialize}; + +use zebra_rpc::methods::opthex; + +use crate::jsonrpsee::connector::{ResponseToError, RpcError}; + +/// Response to a `getblockheader` RPC request. +#[allow(clippy::large_enum_variant)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] +pub enum GetBlockHeader { + /// The verbose variant of the response. Returned when `verbose` is set to `true`. + Verbose(VerboseBlockHeader), + + /// The compact variant of the response. Returned when `verbose` is set to `false`. + Compact(String), + + /// An unknown response shape. + Unknown(serde_json::Value), +} + +/// Error type for the `getblockheader` RPC request. +#[derive(Debug, thiserror::Error)] +pub enum GetBlockHeaderError { + /// Verbosity not valid + #[error("Invalid verbosity: {0}")] + InvalidVerbosity(i8), + + /// The requested block hash or height could not be found + #[error("Block not found: {0}")] + MissingBlock(String), +} + +/// Verbose response to a `getblockheader` RPC request. +/// +/// See the notes for the `get_block_header` method. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct VerboseBlockHeader { + /// The hash of the requested block. + #[serde(with = "hex")] + pub hash: zebra_chain::block::Hash, + + /// The number of confirmations of this block in the best chain, + /// or -1 if it is not in the best chain. + pub confirmations: i64, + + /// The height of the requested block. + pub height: u32, + + /// The version field of the requested block. + pub version: u32, + + /// The merkle root of the requesteed block. 
+ #[serde(with = "hex", rename = "merkleroot")] + pub merkle_root: zebra_chain::block::merkle::Root, + + /// The blockcommitments field of the requested block. Its interpretation changes + /// depending on the network and height. + /// + /// This field is only present in Zebra. It was added [here](https://github.com/ZcashFoundation/zebra/pull/9217). + #[serde( + with = "opthex", + rename = "blockcommitments", + default, + skip_serializing_if = "Option::is_none" + )] + pub block_commitments: Option<[u8; 32]>, + + /// The root of the Sapling commitment tree after applying this block. + #[serde(with = "opthex", rename = "finalsaplingroot")] + #[serde(skip_serializing_if = "Option::is_none")] + pub final_sapling_root: Option<[u8; 32]>, + + /// The block time of the requested block header in non-leap seconds since Jan 1 1970 GMT. + pub time: i64, + + /// The nonce of the requested block header. + pub nonce: String, + + /// The Equihash solution in the requested block header. + pub solution: String, + + /// The difficulty threshold of the requested block header displayed in compact form. + pub bits: String, + + /// Floating point number that represents the difficulty limit for this block as a multiple + /// of the minimum difficulty for the network. + pub difficulty: f64, + + /// Cumulative chain work for this block (hex). + /// + /// Present in zcashd, omitted by Zebra. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub chainwork: Option, + + /// The previous block hash of the requested block header. + #[serde( + rename = "previousblockhash", + default, + skip_serializing_if = "Option::is_none" + )] + pub previous_block_hash: Option, + + /// The next block hash after the requested block header. 
+ #[serde( + rename = "nextblockhash", + default, + skip_serializing_if = "Option::is_none" + )] + pub next_block_hash: Option, +} + +impl ResponseToError for GetBlockHeader { + type RpcError = GetBlockHeaderError; +} + +impl TryFrom for GetBlockHeaderError { + type Error = RpcError; + + fn try_from(value: RpcError) -> Result { + // If the block is not in Zebra's state, returns + // [error code `-8`.](https://github.com/zcash/zcash/issues/5758) + if value.code == -8 { + Ok(Self::MissingBlock(value.message)) + } else { + Err(value) + } + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use super::*; + use hex::FromHex; + use serde_json::{json, Value}; + use zebra_chain::block; + + /// Zcashd verbose response. + fn zcashd_verbose_json() -> &'static str { + r#"{ + "hash": "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f", + "confirmations": 10, + "height": 123456, + "version": 4, + "merkleroot": "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f", + "finalsaplingroot": "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f", + "time": 1700000000, + "nonce": "11nonce", + "solution": "22solution", + "bits": "1d00ffff", + "difficulty": 123456.789, + "chainwork": "0000000000000000000000000000000000000000000000000000000000001234", + "previousblockhash": "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f", + "nextblockhash": "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f" + }"# + } + + // Zebra verbose response + fn zebra_verbose_json() -> &'static str { + r#"{ + "hash": "00000000001b76b932f31289beccd3988d098ec3c8c6e4a0c7bcaf52e9bdead1", + "confirmations": 3, + "height": 42, + "version": 5, + "merkleroot": "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f", + "blockcommitments": "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f", + "finalsaplingroot": "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f", + "time": 1699999999, + "nonce": 
"33nonce", + "solution": "44solution", + "bits": "1c654321", + "difficulty": 7890.123, + "previousblockhash": "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f" + }"# + } + + #[test] + fn deserialize_verbose_zcashd_includes_chainwork() { + match serde_json::from_str::(zcashd_verbose_json()) { + Ok(block_header) => { + assert_eq!( + block_header.hash, + block::Hash::from_str( + "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f" + ) + .unwrap() + ); + assert_eq!(block_header.confirmations, 10); + assert_eq!(block_header.height, 123_456); + assert_eq!(block_header.version, 4); + assert_eq!( + block_header.merkle_root, + block::merkle::Root::from_hex( + "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f" + ) + .unwrap() + ); + assert_eq!( + block_header.final_sapling_root.unwrap(), + <[u8; 32]>::from_hex( + "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f" + ) + .unwrap() + ); + assert_eq!(block_header.time, 1_700_000_000); + assert_eq!(block_header.nonce, "11nonce"); + assert_eq!(block_header.solution, "22solution"); + assert_eq!(block_header.bits, "1d00ffff"); + assert!((block_header.difficulty - 123_456.789).abs() < f64::EPSILON); + + assert_eq!( + block_header.chainwork.as_deref(), + Some("0000000000000000000000000000000000000000000000000000000000001234") + ); + + assert_eq!( + block_header.previous_block_hash.as_deref(), + Some("000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f") + ); + assert_eq!( + block_header.next_block_hash.as_deref(), + Some("000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f") + ); + } + Err(e) => { + panic!( + "VerboseBlockHeader failed at {}:{} — {}", + e.line(), + e.column(), + e + ); + } + } + } + + #[test] + fn deserialize_verbose_zebra_includes_blockcommitments_and_omits_chainwork() { + match serde_json::from_str::(zebra_verbose_json()) { + Ok(block_header) => { + assert_eq!( + block_header.hash, + block::Hash::from_str( + 
"00000000001b76b932f31289beccd3988d098ec3c8c6e4a0c7bcaf52e9bdead1" + ) + .unwrap() + ); + assert_eq!(block_header.confirmations, 3); + assert_eq!(block_header.height, 42); + assert_eq!(block_header.version, 5); + assert_eq!( + block_header.merkle_root, + block::merkle::Root::from_hex( + "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f" + ) + .unwrap() + ); + + assert_eq!( + block_header.block_commitments.unwrap(), + <[u8; 32]>::from_hex( + "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f" + ) + .unwrap() + ); + + assert_eq!( + block_header.final_sapling_root.unwrap(), + <[u8; 32]>::from_hex( + "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f" + ) + .unwrap() + ); + + assert_eq!(block_header.time, 1_699_999_999); + assert_eq!(block_header.nonce, "33nonce"); + assert_eq!(block_header.solution, "44solution"); + assert_eq!(block_header.bits, "1c654321"); + assert!((block_header.difficulty - 7890.123).abs() < f64::EPSILON); + + assert!(block_header.chainwork.is_none()); + + // Zebra always sets previous + assert_eq!( + block_header.previous_block_hash.as_deref(), + Some("000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f") + ); + assert!(block_header.next_block_hash.is_none()); + } + Err(e) => { + panic!( + "VerboseBlockHeader failed at {}:{} — {}", + e.line(), + e.column(), + e + ); + } + } + } + + #[test] + fn compact_header_is_hex_string() { + let s = r#""040102deadbeef""#; + let block_header: GetBlockHeader = serde_json::from_str(s).unwrap(); + match block_header.clone() { + GetBlockHeader::Compact(hex) => assert_eq!(hex, "040102deadbeef"), + _ => panic!("expected Compact variant"), + } + + // Roundtrip + let out = serde_json::to_string(&block_header).unwrap(); + assert_eq!(out, s); + } + + #[test] + fn unknown_shape_falls_back_to_unknown_variant() { + let weird = r#"{ "weird": 1, "unexpected": ["a","b","c"] }"#; + let block_header: GetBlockHeader = serde_json::from_str(weird).unwrap(); + match 
block_header { + GetBlockHeader::Unknown(v) => { + assert_eq!(v["weird"], json!(1)); + assert_eq!(v["unexpected"], json!(["a", "b", "c"])); + } + _ => panic!("expected Unknown variant"), + } + } + + #[test] + fn zebra_roundtrip_does_not_inject_chainwork_field() { + let block_header: GetBlockHeader = serde_json::from_str(zebra_verbose_json()).unwrap(); + let header_value: Value = serde_json::to_value(&block_header).unwrap(); + + let header_object = header_value + .as_object() + .expect("verbose should serialize to object"); + assert!(!header_object.contains_key("chainwork")); + + assert_eq!( + header_object.get("blockcommitments"), + Some(&json!( + "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f" + )) + ); + } + + #[test] + fn zcashd_roundtrip_preserves_chainwork() { + let block_header: GetBlockHeader = serde_json::from_str(zcashd_verbose_json()).unwrap(); + let header_value: Value = serde_json::to_value(&block_header).unwrap(); + let header_object = header_value.as_object().unwrap(); + + assert_eq!( + header_object.get("chainwork"), + Some(&json!( + "0000000000000000000000000000000000000000000000000000000000001234" + )) + ); + } + + #[test] + fn previous_and_next_optional_edges() { + // Simulate genesis + let genesis_like = r#"{ + "hash": "00000000001b76b932f31289beccd3988d098ec3c8c6e4a0c7bcaf52e9bdead1", + "confirmations": 1, + "height": 0, + "version": 4, + "merkleroot": "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f", + "finalsaplingroot": "000000000053d2771290ff1b57181bd067ae0e55a367ba8ddee2d961ea27a14f", + "time": 1477641369, + "nonce": "nonce", + "solution": "solution", + "bits": "1d00ffff", + "difficulty": 1.0 + }"#; + + match serde_json::from_str::(genesis_like) { + Ok(block_header) => { + assert!(block_header.previous_block_hash.is_none()); + assert!(block_header.next_block_hash.is_none()); + } + Err(e) => { + panic!( + "VerboseBlockHeader failed at {}:{} — {}", + e.line(), + e.column(), + e + ); + } + } + } +} diff 
--git a/zaino-fetch/src/jsonrpsee/response/block_subsidy.rs b/zaino-fetch/src/jsonrpsee/response/block_subsidy.rs new file mode 100644 index 000000000..0594f1356 --- /dev/null +++ b/zaino-fetch/src/jsonrpsee/response/block_subsidy.rs @@ -0,0 +1,337 @@ +//! Types associated with the `getblocksubsidy` RPC request. + +use std::convert::Infallible; + +use crate::jsonrpsee::{ + connector::ResponseToError, + response::common::amount::{Zatoshis, ZecAmount}, +}; +use serde::{Deserialize, Deserializer, Serialize}; +use serde_json::Value; + +/// Struct used to represent a funding stream. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(deny_unknown_fields)] +pub struct FundingStream { + /// A description of the funding stream recipient. + pub recipient: String, + + /// A URL for the specification of this funding stream. + pub specification: String, + + /// The funding stream amount in ZEC. + /// + /// Amount as ZEC on the wire (string or number), normalized to zatoshis. + #[serde(rename = "value")] + pub value: ZecAmount, + + /// Amount as zatoshis on the wire. + #[serde(rename = "valueZat")] + pub value_zat: Zatoshis, + + /// The address of the funding stream recipient. + #[serde(default)] + pub address: Option, +} + +/// Struct used to represent a lockbox stream. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(deny_unknown_fields)] +pub struct LockBoxStream { + /// A description of the funding stream recipient, or the lockbox. + pub recipient: String, + + /// A URL for the specification of this lockbox. + pub specification: String, + + /// The amount locked in ZEC. + /// + /// Amount as ZEC on the wire (string or number), normalized to zatoshis. + #[serde(rename = "value")] + pub value: ZecAmount, + + /// The amount locked in zatoshis. + #[serde(rename = "valueZat")] + pub value_zat: Zatoshis, +} + +/// Response to a `getblocksubsidy` RPC request. Used for both `zcashd` and `zebrad`. 
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(deny_unknown_fields)] +pub struct BlockSubsidy { + /// The mining reward amount in ZEC. + pub miner: ZecAmount, + + /// The founders' reward amount in ZEC. + pub founders: ZecAmount, + + /// The total value of direct funding streams in ZEC. + #[serde(rename = "fundingstreamstotal")] + pub funding_streams_total: ZecAmount, + + /// The total value sent to development funding lockboxes in ZEC. + #[serde(rename = "lockboxtotal")] + pub lockbox_total: ZecAmount, + + /// The total value of the block subsidy in ZEC. + #[serde(rename = "totalblocksubsidy")] + pub total_block_subsidy: ZecAmount, + + /// An array of funding stream descriptions (present only when funding streams are active). + #[serde( + rename = "fundingstreams", + default, + skip_serializing_if = "Vec::is_empty" + )] + pub funding_streams: Vec, + + /// An array of development fund lockbox stream descriptions (present only when lockbox streams are active). + #[serde( + rename = "lockboxstreams", + default, + skip_serializing_if = "Vec::is_empty" + )] + pub lockbox_streams: Vec, +} + +/// Response to a `getblocksubsidy` RPC request. 
+#[derive(Debug, Clone, Serialize, PartialEq, Eq)] +#[serde(untagged)] +pub enum GetBlockSubsidy { + /// Validated payload + Known(BlockSubsidy), + + /// Unrecognized shape + Unknown(Value), +} + +impl ResponseToError for GetBlockSubsidy { + type RpcError = Infallible; +} + +impl<'de> Deserialize<'de> for GetBlockSubsidy { + fn deserialize>(de: D) -> Result { + let v = Value::deserialize(de)?; + if let Ok(bs) = serde_json::from_value::(v.clone()) { + Ok(GetBlockSubsidy::Known(bs)) + } else { + Ok(GetBlockSubsidy::Unknown(v)) + } + } +} + +#[cfg(test)] +mod tests { + + use crate::jsonrpsee::response::{ + block_subsidy::{BlockSubsidy, GetBlockSubsidy}, + common::amount::Zatoshis, + }; + + #[test] + fn zcashd_decimals_parse_to_zats() { + let j = serde_json::json!({ + "miner": 2.5, + "founders": 0.0, + "fundingstreamstotal": 0.5, + "lockboxtotal": 0.0, + "totalblocksubsidy": 3.0, + "fundingstreams": [ + {"recipient":"ZCG","specification":"https://spec","value":0.5,"valueZat":50_000_000,"address":"t1abc"} + ] + }); + let r: GetBlockSubsidy = serde_json::from_value(j).unwrap(); + match r { + GetBlockSubsidy::Known(x) => { + assert_eq!(u64::from(x.miner), 250_000_000); + assert_eq!(u64::from(x.funding_streams_total), 50_000_000); + assert_eq!(u64::from(x.total_block_subsidy), 300_000_000); + assert_eq!(x.funding_streams.len(), 1); + assert_eq!(x.funding_streams[0].value_zat, Zatoshis(50_000_000)); + assert_eq!(x.funding_streams[0].address.as_deref(), Some("t1abc")); + } + _ => panic!("expected Known"), + } + } + + #[test] + fn zebrad_strings_parse_to_zats() { + let j = serde_json::json!({ + "fundingstreams": [], + "lockboxstreams": [], + "miner": "2.5", + "founders": "0.0", + "fundingstreamstotal": "0.5", + "lockboxtotal": "0.0", + "totalblocksubsidy": "3.0" + }); + let r: GetBlockSubsidy = serde_json::from_value(j).unwrap(); + match r { + GetBlockSubsidy::Known(x) => { + assert_eq!(u64::from(x.miner), 250_000_000); + assert_eq!(u64::from(x.total_block_subsidy), 
300_000_000); + assert!(x.funding_streams.is_empty()); + assert!(x.lockbox_streams.is_empty()); + } + _ => panic!("expected Known"), + } + } + + #[test] + fn lockbox_streams_parse_and_match_totals_single() { + // Top-level amounts given in zatoshis (integers) to avoid unit ambiguity. + let j = serde_json::json!({ + "miner": 3.0, // 3.0 ZEC + "founders": 0.0, + "fundingstreamstotal": 0.0, + "lockboxtotal": 0.5, // 0.5 ZEC + "totalblocksubsidy": 3.5, // 3.5 ZEC + "lockboxstreams": [ + { + "recipient":"Lockbox A", + "specification":"https://spec", + "value": 0.5, // ZEC decimal on wire means parsed to zats by ZecAmount + "valueZat": 50_000_000 // integer zats on wire + } + ] + }); + + let r: GetBlockSubsidy = serde_json::from_value(j).unwrap(); + match r { + GetBlockSubsidy::Known(x) => { + assert_eq!(x.miner.as_zatoshis(), 300_000_000); + assert_eq!(x.lockbox_total.as_zatoshis(), 50_000_000); + assert_eq!(x.total_block_subsidy.as_zatoshis(), 350_000_000); + + assert!(x.funding_streams.is_empty()); + assert_eq!(x.lockbox_streams.len(), 1); + + let lb = &x.lockbox_streams[0]; + assert_eq!(lb.value.as_zatoshis(), lb.value_zat.0); + assert_eq!(lb.recipient, "Lockbox A"); + } + _ => panic!("expected Known"), + } + } + + #[test] + fn lockbox_streams_multiple_items_sum_matches_total() { + let j = serde_json::json!({ + "miner": 0, + "founders": 0, + "fundingstreamstotal": 0, + "lockboxtotal": 1.5, // 1.5 ZEC + "totalblocksubsidy": 1.5, + "lockboxstreams": [ + { "recipient":"L1","specification":"s1","value": "1.0","valueZat": 100_000_000 }, + { "recipient":"L2","specification":"s2","value": "0.5","valueZat": 50_000_000 } + ] + }); + + let r: GetBlockSubsidy = serde_json::from_value(j).unwrap(); + match r { + GetBlockSubsidy::Known(x) => { + assert_eq!(u64::from(x.lockbox_total), 150_000_000); + let sum: u64 = x.lockbox_streams.iter().map(|s| s.value_zat.0).sum(); + assert_eq!(sum, u64::from(x.lockbox_total)); + } + _ => panic!("expected Known"), + } + } + + #[test] + fn 
lockbox_stream_rejects_address_field() { + // LockBoxStream has no `address` field. + // Note that this would actually get matched to the `Unknown` variant. + let j = serde_json::json!({ + "miner": 0, "founders": 0, "fundingstreamstotal": 0, + "lockboxtotal": 1, "totalblocksubsidy": 1, + "lockboxstreams": [ + { "recipient":"L","specification":"s","value":"0.00000001","valueZat":1, "address":"t1should_not_be_here" } + ] + }); + + let err = serde_json::from_value::(j).unwrap_err(); + assert!( + err.to_string().contains("unknown field") && err.to_string().contains("address"), + "expected unknown field error, got: {err}" + ); + } + + #[test] + fn block_subsidy_full_roundtrip_everything() { + use crate::jsonrpsee::response::{ + block_subsidy::{BlockSubsidy, FundingStream, GetBlockSubsidy, LockBoxStream}, + common::amount::{Zatoshis, ZecAmount}, + }; + + let bs = BlockSubsidy { + // 3.0 ZEC miner, 0 founders, 0.5 funding streams, 1.5 lockboxes = 5.0 total + miner: ZecAmount::try_from_zec_f64(3.0).unwrap(), + founders: ZecAmount::from_zats(0), + funding_streams_total: ZecAmount::try_from_zec_f64(0.5).unwrap(), + lockbox_total: ZecAmount::try_from_zec_f64(1.5).unwrap(), + total_block_subsidy: ZecAmount::try_from_zec_f64(5.0).unwrap(), + + funding_streams: vec![FundingStream { + recipient: "ZCG".into(), + specification: "https://spec".into(), + value: ZecAmount::from_zats(50_000_000), // 0.5 ZEC + value_zat: Zatoshis(50_000_000), + address: Some("t1abc".into()), + }], + lockbox_streams: vec![ + LockBoxStream { + recipient: "Lockbox A".into(), + specification: "https://boxA".into(), + value: ZecAmount::from_zats(100_000_000), // 1.0 ZEC + value_zat: Zatoshis(100_000_000), + }, + LockBoxStream { + recipient: "Lockbox B".into(), + specification: "https://boxB".into(), + value: ZecAmount::from_zats(50_000_000), // 0.5 ZEC + value_zat: Zatoshis(50_000_000), + }, + ], + }; + + let wrapped = GetBlockSubsidy::Known(bs.clone()); + + // Serialize to JSON + let s = 
serde_json::to_string(&wrapped).unwrap(); + + let v: serde_json::Value = serde_json::from_str(&s).unwrap(); + // Top-level amounts are integers (zats) + assert!(v["miner"].is_number()); + assert!(v["totalblocksubsidy"].is_number()); + + // Funding stream value is a decimal number, valueZat is an integer + let fs0 = &v["fundingstreams"][0]; + assert!(fs0["value"].is_number()); + assert!(fs0["valueZat"].is_u64()); + assert!(fs0.get("address").is_some()); + + // Lockbox streams have no address + let lb0 = &v["lockboxstreams"][0]; + assert!(lb0.get("address").is_none()); + + // Deserialize back + let back: GetBlockSubsidy = serde_json::from_str(&s).unwrap(); + + // Struct-level equality must hold + assert_eq!(back, GetBlockSubsidy::Known(bs)); + + // Totals match sums + if let GetBlockSubsidy::Known(x) = back { + let sum_funding: u64 = x.funding_streams.iter().map(|f| f.value_zat.0).sum(); + let sum_lockbox: u64 = x.lockbox_streams.iter().map(|l| l.value_zat.0).sum(); + assert_eq!(sum_funding, u64::from(x.funding_streams_total)); + assert_eq!(sum_lockbox, u64::from(x.lockbox_total)); + assert_eq!( + u64::from(x.miner) + u64::from(x.founders) + sum_funding + sum_lockbox, + u64::from(x.total_block_subsidy) + ); + } + } +} diff --git a/zaino-fetch/src/jsonrpsee/response/common.rs b/zaino-fetch/src/jsonrpsee/response/common.rs new file mode 100644 index 000000000..bb198dd82 --- /dev/null +++ b/zaino-fetch/src/jsonrpsee/response/common.rs @@ -0,0 +1,128 @@ +//! Common types used across jsonrpsee responses + +pub mod amount; + +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +/// The identifier for a Zcash node. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(transparent)] +pub struct NodeId(pub i64); + +/// The height of a Zcash block. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(transparent)] +pub struct BlockHeight(pub u32); + +impl From for BlockHeight { + fn from(v: u32) -> Self { + BlockHeight(v) + } +} + +/// The height of a Zcash block, or None if unknown. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct MaybeHeight(pub Option); + +impl Serialize for MaybeHeight { + fn serialize(&self, ser: S) -> Result { + match self.0 { + Some(BlockHeight(h)) => ser.serialize_u32(h), + None => ser.serialize_i64(-1), + } + } +} + +impl<'de> Deserialize<'de> for MaybeHeight { + fn deserialize>(de: D) -> Result { + // Accept either a number or null. + // Negative → None; non-negative → Some(height). + let opt = Option::::deserialize(de)?; + match opt { + None => Ok(MaybeHeight(None)), + Some(n) if n < 0 => Ok(MaybeHeight(None)), + Some(n) => { + let h = u32::try_from(n).map_err(serde::de::Error::custom)?; + Ok(MaybeHeight(Some(BlockHeight(h)))) + } + } + } +} + +/// Unix timestamp. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(transparent)] +pub struct UnixTime(pub i64); + +impl UnixTime { + /// Converts to a [`SystemTime`]. + pub fn as_system_time(self) -> SystemTime { + if self.0 >= 0 { + UNIX_EPOCH + Duration::from_secs(self.0 as u64) + } else { + UNIX_EPOCH - Duration::from_secs(self.0.unsigned_abs()) + } + } +} + +/// Duration in seconds. +#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct SecondsF64(pub f64); + +impl SecondsF64 { + /// Converts to a [`Duration`]. + pub fn as_duration(self) -> Duration { + Duration::from_secs_f64(self.0) + } +} + +/// Protocol version. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(transparent)] +pub struct ProtocolVersion(pub i64); + +/// A byte array. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(transparent)] +pub struct Bytes(pub u64); + +/// Time offset in seconds. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(transparent)] +pub struct TimeOffsetSeconds(pub i64); + +#[cfg(test)] +mod tests { + use crate::jsonrpsee::response::common::{BlockHeight, MaybeHeight}; + + #[test] + fn maybeheight_deser_accepts_minus_one_and_null() { + let a: MaybeHeight = serde_json::from_str("-1").unwrap(); + assert!(a.0.is_none()); + + let b: MaybeHeight = serde_json::from_str("null").unwrap(); + assert!(b.0.is_none()); + + let c: MaybeHeight = serde_json::from_str("123").unwrap(); + assert_eq!(c.0.unwrap().0, 123); + } + + #[test] + fn maybeheight_serializes_none_as_minus_one() { + let m = MaybeHeight(None); + let s = serde_json::to_string(&m).unwrap(); + assert_eq!(s, "-1"); + } + + #[test] + fn maybeheight_roundtrips_some() { + let m = MaybeHeight(Some(BlockHeight(42))); + let s = serde_json::to_string(&m).unwrap(); + assert_eq!(s, "42"); + let back: MaybeHeight = serde_json::from_str(&s).unwrap(); + assert_eq!(back.0.unwrap().0, 42); + } +} diff --git a/zaino-fetch/src/jsonrpsee/response/common/amount.rs b/zaino-fetch/src/jsonrpsee/response/common/amount.rs new file mode 100644 index 000000000..cdd03ca1e --- /dev/null +++ b/zaino-fetch/src/jsonrpsee/response/common/amount.rs @@ -0,0 +1,284 @@ +//! Common types for handling ZEC and Zatoshi amounts. + +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +/// Zatoshis per ZEC. +pub const ZATS_PER_ZEC: u64 = 100_000_000; +/// Represents an amount in Zatoshis. +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)] +#[serde(transparent)] +pub struct Zatoshis(pub u64); + +impl<'de> Deserialize<'de> for Zatoshis { + fn deserialize>(de: D) -> Result { + /// For floats, use [`ZecAmount`]. 
+ #[derive(Deserialize)] + #[serde(untagged)] + enum IntLike { + U64(u64), + I64(i64), + Str(String), + } + + match IntLike::deserialize(de)? { + IntLike::U64(u) => Ok(Zatoshis(u)), + IntLike::I64(i) if i >= 0 => Ok(Zatoshis(i as u64)), + IntLike::I64(_) => Err(serde::de::Error::custom("negative amount")), + IntLike::Str(s) => { + let s = s.trim(); + if s.is_empty() || !s.bytes().all(|b| b.is_ascii_digit()) { + return Err(serde::de::Error::custom("expected integer zatoshis")); + } + s.parse::() + .map(Zatoshis) + .map_err(serde::de::Error::custom) + } + } + } +} + +/// Represents a ZEC amount. The amount is stored in zatoshis. +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ZecAmount(u64); + +impl ZecAmount { + /// Returns the amount in zatoshis. + pub fn as_zatoshis(self) -> u64 { + self.0 + } + + /// Construct from integer zatoshis. + pub const fn from_zats(z: u64) -> Self { + Self(z) + } + + /// Construct from a ZEC decimal (f64). + pub fn try_from_zec_f64(zec: f64) -> Result { + if !zec.is_finite() || zec < 0.0 { + return Err("invalid amount"); + } + let z = (zec * ZATS_PER_ZEC as f64).round(); + if z < 0.0 || z > u64::MAX as f64 { + return Err("overflow"); + } + Ok(Self(z as u64)) + } +} + +impl<'de> Deserialize<'de> for ZecAmount { + fn deserialize>(de: D) -> Result { + #[derive(Deserialize)] + #[serde(untagged)] + enum NumLike { + U64(u64), + I64(i64), + F64(f64), + Str(String), + } + + match NumLike::deserialize(de)? 
{ + NumLike::U64(u) => u + .checked_mul(ZATS_PER_ZEC) + .map(ZecAmount) + .ok_or_else(|| serde::de::Error::custom("overflow")), + NumLike::I64(i) if i >= 0 => (i as u64) + .checked_mul(ZATS_PER_ZEC) + .map(ZecAmount) + .ok_or_else(|| serde::de::Error::custom("overflow")), + NumLike::I64(_) => Err(serde::de::Error::custom("negative amount")), + NumLike::F64(f) => { + if !f.is_finite() || f < 0.0 { + return Err(serde::de::Error::custom("invalid amount")); + } + Ok(ZecAmount((f * (ZATS_PER_ZEC as f64)).round() as u64)) + } + NumLike::Str(s) => { + // Parse "int.frac" with up to 8 fractional digits into zats + let s = s.trim(); + if s.starts_with('-') { + return Err(serde::de::Error::custom("negative amount")); + } + let (int, frac) = s.split_once('.').unwrap_or((s, "")); + if frac.len() > 8 { + return Err(serde::de::Error::custom("too many fractional digits")); + } + let int_part: u64 = if int.is_empty() { + 0 + } else { + int.parse().map_err(serde::de::Error::custom)? + }; + let mut frac_buf = frac.as_bytes().to_vec(); + while frac_buf.len() < 8 { + frac_buf.push(b'0'); + } + let frac_part: u64 = if frac_buf.is_empty() { + 0 + } else { + std::str::from_utf8(&frac_buf) + .unwrap() + .parse() + .map_err(serde::de::Error::custom)? + }; + let base = int_part + .checked_mul(ZATS_PER_ZEC) + .ok_or_else(|| serde::de::Error::custom("overflow"))?; + base.checked_add(frac_part) + .map(ZecAmount) + .ok_or_else(|| serde::de::Error::custom("overflow")) + } + } + } +} + +impl Serialize for ZecAmount { + fn serialize(&self, ser: S) -> Result { + // Emit a JSON number in ZEC. 
+ let zec = (self.0 as f64) / 100_000_000.0; + ser.serialize_f64(zec) + } +} + +impl From for u64 { + fn from(z: ZecAmount) -> u64 { + z.0 + } +} + +#[cfg(test)] +mod tests { + + mod zatoshis { + use crate::jsonrpsee::response::common::amount::Zatoshis; + + #[test] + fn zatoshis_integer_number_is_zats() { + let z: Zatoshis = serde_json::from_str("625000000").unwrap(); + assert_eq!(z.0, 625_000_000); + } + + #[test] + fn zatoshis_string_digits_are_zats() { + let z: Zatoshis = serde_json::from_str(r#""625000000""#).unwrap(); + assert_eq!(z.0, 625_000_000); + } + + #[test] + fn zatoshis_rejects_float_number() { + let result = serde_json::from_str::("2.5"); + assert!(result.is_err()); + } + + #[test] + fn zatoshis_rejects_decimal_string() { + let err = serde_json::from_str::(r#""2.5""#).unwrap_err(); + assert!(err.to_string().contains("expected integer")); + } + + #[test] + fn zatoshis_rejects_negative() { + let err = serde_json::from_str::("-1").unwrap_err(); + assert!(err.to_string().contains("negative")); + } + + #[test] + fn zatoshis_rejects_non_digit_string() { + let err = serde_json::from_str::(r#""abc""#).unwrap_err(); + assert!(err.to_string().contains("expected integer")); + } + } + + mod zecamount { + use crate::jsonrpsee::response::common::amount::ZecAmount; + + #[test] + fn zecamount_from_float_decimal() { + let a: ZecAmount = serde_json::from_str("2.5").unwrap(); + assert_eq!(a.as_zatoshis(), 250_000_000); + } + + #[test] + fn zecamount_from_string_decimal() { + let a: ZecAmount = serde_json::from_str(r#""0.00000001""#).unwrap(); + assert_eq!(a.as_zatoshis(), 1); + } + + #[test] + fn zecamount_from_integer_number_interpreted_as_zec() { + // 2 ZEC + let a: ZecAmount = serde_json::from_str("2").unwrap(); + assert_eq!(a.as_zatoshis(), 200_000_000); + } + + #[test] + fn zecamount_from_integer_string_interpreted_as_zec() { + // 2 ZEC + let a: ZecAmount = serde_json::from_str(r#""2""#).unwrap(); + assert_eq!(a.as_zatoshis(), 200_000_000); + } + + #[test] + fn 
zecamount_rejects_negative() { + let err = serde_json::from_str::("-0.1").unwrap_err(); + assert!( + err.to_string().contains("invalid amount") || err.to_string().contains("negative") + ); + } + + #[test] + fn zecamount_rejects_more_than_8_fractional_digits() { + let err = serde_json::from_str::(r#""1.000000000""#).unwrap_err(); + assert!(err.to_string().contains("fractional")); + } + + #[test] + fn zecamount_overflow_on_huge_integer_zec() { + // From u64::MAX ZEC, multiplying by 1e8 should overflow + let huge = format!("{}", u64::MAX); + let err = serde_json::from_str::(&huge).unwrap_err(); + assert!( + err.to_string().contains("overflow"), + "expected overflow, got: {err}" + ); + } + + #[test] + fn zecamount_boundary_integer_ok() { + // Max integer ZEC that fits when scaled: floor(u64::MAX / 1e8) + let max_int_zec = 184_467_440_737u64; + let a: ZecAmount = serde_json::from_str(&max_int_zec.to_string()).unwrap(); + assert_eq!(a.as_zatoshis(), 18_446_744_073_700_000_000); + } + + #[test] + fn zecamount_overflow_on_large_integer_zec() { + // Just over the boundary must overflow + let too_big = 184_467_440_738u64; + let err = serde_json::from_str::(&too_big.to_string()).unwrap_err(); + assert!(err.to_string().contains("overflow")); + } + + #[test] + fn zecamount_serializes_as_decimal_number() { + let a = ZecAmount::from_zats(250_000_000); // 2.5 ZEC + let s = serde_json::to_string(&a).unwrap(); + // Parse back and compare as f64 to avoid formatting quirks (e.g., 1e-8) + let v: serde_json::Value = serde_json::from_str(&s).unwrap(); + let f = v.as_f64().unwrap(); + assert!((f - 2.5).abs() < 1e-12, "serialized {s} parsed {f}"); + } + + #[test] + fn zecamount_roundtrip_small_fraction() { + // 1 zat + let a: ZecAmount = serde_json::from_str(r#""0.00000001""#).unwrap(); + let s = serde_json::to_string(&a).unwrap(); + let v: serde_json::Value = serde_json::from_str(&s).unwrap(); + let f = v.as_f64().unwrap(); + assert!( + (f - 0.00000001f64).abs() < 1e-20, + "serialized {s} 
parsed {f}" + ); + assert_eq!(a.as_zatoshis(), 1); + } + } +} diff --git a/zaino-fetch/src/jsonrpsee/response/mining_info.rs b/zaino-fetch/src/jsonrpsee/response/mining_info.rs new file mode 100644 index 000000000..4dd463701 --- /dev/null +++ b/zaino-fetch/src/jsonrpsee/response/mining_info.rs @@ -0,0 +1,296 @@ +//! Types associated with the `getmininginfo` RPC request. + +use std::{collections::HashMap, convert::Infallible}; + +use serde::{Deserialize, Serialize}; + +use crate::jsonrpsee::connector::ResponseToError; + +impl ResponseToError for GetMiningInfoWire { + type RpcError = Infallible; +} + +/// Wire superset compatible with `zcashd` and `zebrad`. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct GetMiningInfoWire { + #[serde(rename = "blocks")] + tip_height: u64, + + #[serde(rename = "currentblocksize", default)] + current_block_size: Option, + + #[serde(rename = "currentblocktx", default)] + current_block_tx: Option, + + #[serde(default)] + networksolps: Option, + #[serde(default)] + networkhashps: Option, + + // Present on both zcashd and zebrad + #[serde(default)] + chain: String, + #[serde(default)] + testnet: bool, + + // zcashd + #[serde(default)] + difficulty: Option, + #[serde(default)] + errors: Option, + #[serde(default)] + errorstimestamp: Option, + #[serde(default)] + genproclimit: Option, + #[serde(default)] + localsolps: Option, + #[serde(default)] + pooledtx: Option, + #[serde(default)] + generate: Option, + + #[serde(flatten)] + extras: HashMap, +} + +/// Internal representation of `GetMiningInfoWire`. +#[derive(Debug, Clone)] +pub struct MiningInfo { + /// Current tip height. + pub tip_height: u64, + + /// Size of the last mined block, if present. + pub current_block_size: Option, + + /// Transaction count in the last mined block, if present. + pub current_block_tx: Option, + + /// Estimated network solution rate (Sol/s), if present. 
+ pub network_solution_rate: Option, + + /// Estimated network hash rate (H/s), if present. + pub network_hash_rate: Option, + + /// Network name (e.g., "main", "test"). + pub chain: String, + + /// Whether the node is on testnet. + pub testnet: bool, + + /// Current difficulty, if present. + pub difficulty: Option, + + /// Upstream error/status message, if present. + pub errors: Option, + + /// Extra upstream fields. + pub extras: HashMap, +} + +impl From for MiningInfo { + fn from(w: GetMiningInfoWire) -> Self { + Self { + tip_height: w.tip_height, + current_block_size: w.current_block_size, + current_block_tx: w.current_block_tx, + network_solution_rate: w.networksolps, + network_hash_rate: w.networkhashps, + chain: w.chain, + testnet: w.testnet, + difficulty: w.difficulty, + errors: w.errors, + extras: w.extras, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + fn zebrad_json() -> String { + serde_json::to_string(&json!({ + "blocks": 1_234_567u64, + "currentblocksize": 1_000_000u64, + "currentblocktx": 100u64, + "networksolps": 1234, + "networkhashps": 5678, + "chain": "main", + "testnet": false, + "errors": null, + "somefuture": { "x": 1 } + })) + .unwrap() + } + + fn zcashd_json() -> String { + serde_json::to_string(&json!({ + "blocks": 765_432u64, + "currentblocksize": 999_999u64, + "currentblocktx": 99u64, + "networksolps": 1_000_000u64, + "networkhashps": 2_000_000u64, + "chain": "main", + "testnet": false, + "difficulty": 100_000.0_f64, + "errors": "", + "errorstimestamp": 1700000000_i64, + "genproclimit": 0_i64, + "localsolps": 2500_u64, + "pooledtx": 5_u64, + "generate": false + })) + .unwrap() + } + + #[test] + fn deser_zebrad_then_roundtrip() { + let json_str = zebrad_json(); + let wire: GetMiningInfoWire = serde_json::from_str(&json_str).expect("deserialize zebrad"); + + assert_eq!(wire.tip_height, 1_234_567); + assert_eq!(wire.current_block_size, Some(1_000_000)); + assert_eq!(wire.current_block_tx, Some(100)); + 
assert_eq!(wire.chain, "main"); + assert!(!wire.testnet); + assert_eq!(wire.difficulty, None); + assert_eq!(wire.errors, None); + + assert_eq!(wire.networksolps, Some(1_234)); + assert_eq!(wire.networkhashps, Some(5_678)); + + assert!(wire.extras.contains_key("somefuture")); + assert_eq!(wire.extras["somefuture"], json!({"x": 1})); + + let str_from_wire = serde_json::to_string(&wire).unwrap(); + let wire2: GetMiningInfoWire = serde_json::from_str(&str_from_wire).unwrap(); + assert_eq!(wire, wire2); + } + + #[test] + fn deser_zcashd_integers_then_roundtrip() { + let json_str = zcashd_json(); + let wire: GetMiningInfoWire = serde_json::from_str(&json_str).expect("deserialize zcashd"); + + assert_eq!(wire.tip_height, 765_432); + assert_eq!(wire.current_block_size, Some(999_999)); + assert_eq!(wire.current_block_tx, Some(99)); + assert_eq!(wire.chain, "main"); + assert!(!wire.testnet); + assert_eq!(wire.difficulty, Some(100_000.0)); + assert_eq!(wire.errors.as_deref(), Some("")); + + assert_eq!(wire.networksolps, Some(1_000_000)); + assert_eq!(wire.networkhashps, Some(2_000_000)); + + assert!(wire.errorstimestamp.is_some()); + assert_eq!(wire.genproclimit, Some(0)); + assert_eq!(wire.localsolps, Some(2500)); + assert_eq!(wire.pooledtx, Some(5)); + assert_eq!(wire.generate, Some(false)); + + let s = serde_json::to_string(&wire).unwrap(); + let wire2: GetMiningInfoWire = serde_json::from_str(&s).unwrap(); + assert_eq!(wire, wire2); + } + + #[test] + fn minimal_payload_defaults() { + let blocks = r#"{ "blocks": 0 }"#; + let wire: GetMiningInfoWire = serde_json::from_str(blocks).unwrap(); + + assert_eq!(wire.tip_height, 0); + assert_eq!(wire.current_block_size, None); + assert_eq!(wire.current_block_tx, None); + assert_eq!(wire.networksolps, None); + assert_eq!(wire.networkhashps, None); + + assert_eq!(wire.chain, ""); + assert!(!wire.testnet); + + assert_eq!(wire.difficulty, None); + assert_eq!(wire.errors, None); + assert!(wire.extras.is_empty()); + + let 
blocks_deserialized = serde_json::to_string(&wire).unwrap(); + let wire2: GetMiningInfoWire = serde_json::from_str(&blocks_deserialized).unwrap(); + assert_eq!(wire, wire2); + } + + #[test] + fn convert_to_internal_from_zebrad() { + let wire: GetMiningInfoWire = serde_json::from_str(&zebrad_json()).unwrap(); + let mining_info: MiningInfo = wire.clone().into(); + + assert_eq!(mining_info.tip_height, wire.tip_height); + assert_eq!(mining_info.current_block_size, wire.current_block_size); + assert_eq!(mining_info.current_block_tx, wire.current_block_tx); + + assert_eq!(mining_info.network_solution_rate, wire.networksolps); + assert_eq!(mining_info.network_hash_rate, wire.networkhashps); + + assert_eq!(mining_info.chain, wire.chain); + assert!(!mining_info.testnet); + assert_eq!(mining_info.difficulty, wire.difficulty); + assert_eq!(mining_info.errors, wire.errors); + assert!(mining_info.extras.contains_key("somefuture")); + } + + #[test] + fn convert_to_internal_from_zcashd() { + let wire: GetMiningInfoWire = serde_json::from_str(&zcashd_json()).unwrap(); + let mining_info: MiningInfo = wire.clone().into(); + + assert_eq!(mining_info.tip_height, wire.tip_height); + assert_eq!(mining_info.current_block_size, wire.current_block_size); + assert_eq!(mining_info.current_block_tx, wire.current_block_tx); + + assert_eq!(mining_info.network_solution_rate, wire.networksolps); + assert_eq!(mining_info.network_hash_rate, wire.networkhashps); + + assert_eq!(mining_info.chain, wire.chain); + assert!(!mining_info.testnet); + assert_eq!(mining_info.difficulty, wire.difficulty); + assert_eq!(mining_info.errors, wire.errors); + } + + #[test] + fn invalid_numeric_type_errors() { + let bad_str = r#"{ "blocks": 1, "networksolps": "not-a-number" }"#; + assert!(serde_json::from_str::(bad_str).is_err()); + + let bad_float = r#"{ "blocks": 1, "networkhashps": 1234.5 }"#; + assert!(serde_json::from_str::(bad_float).is_err()); + + let bad_negative = r#"{ "blocks": 1, "networkhashps": -1 }"#; + 
assert!(serde_json::from_str::(bad_negative).is_err()); + } + + #[test] + fn localsolps_roundtrip_and_reject_float() { + let integer_payload_json = json!({ + "blocks": 3, + "localsolps": 42_u64 + }); + let wire_int: GetMiningInfoWire = serde_json::from_value(integer_payload_json).unwrap(); + assert_eq!(wire_int.localsolps, Some(42)); + let wire_after_roundtrip: GetMiningInfoWire = + serde_json::from_str(&serde_json::to_string(&wire_int).unwrap()).unwrap(); + assert_eq!(wire_int, wire_after_roundtrip); + + let float_payload_json_str = r#"{ "blocks": 2, "localsolps": 12.5 }"#; + assert!(serde_json::from_str::(float_payload_json_str).is_err()); + } + + #[test] + fn missing_network_rates_convert_to_none() { + let json_str = r#"{ "blocks": 111, "chain": "test", "testnet": true }"#; + let wire: GetMiningInfoWire = serde_json::from_str(json_str).unwrap(); + let mining_info: MiningInfo = wire.into(); + assert_eq!(mining_info.network_solution_rate, None); + assert_eq!(mining_info.network_hash_rate, None); + assert_eq!(mining_info.chain, "test"); + assert!(mining_info.testnet); + } +} diff --git a/zaino-fetch/src/jsonrpsee/response/peer_info.rs b/zaino-fetch/src/jsonrpsee/response/peer_info.rs new file mode 100644 index 000000000..046101604 --- /dev/null +++ b/zaino-fetch/src/jsonrpsee/response/peer_info.rs @@ -0,0 +1,465 @@ +//! Types associated with the `getpeerinfo` RPC request. +//! +//! Although the current threat model assumes that `zaino` connects to a trusted validator, +//! the `getpeerinfo` RPC performs some light validation. + +use std::convert::Infallible; + +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use serde_json::Value; + +use crate::jsonrpsee::{ + connector::ResponseToError, + response::common::{ + BlockHeight, Bytes, MaybeHeight, NodeId, ProtocolVersion, SecondsF64, TimeOffsetSeconds, + UnixTime, + }, +}; + +/// Response to a `getpeerinfo` RPC request. 
+#[derive(Debug, Clone, Serialize, PartialEq)] +#[serde(untagged)] +pub enum GetPeerInfo { + /// The `zcashd` typed response. + Zcashd(Vec), + + /// The `zebrad` typed response. + Zebrad(Vec), + + /// Unrecognized shape. Only enforced to be an array. + Unknown(Vec), +} + +/// Response to a `getpeerinfo` RPC request coming from `zebrad`. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(deny_unknown_fields)] +pub struct ZebradPeerInfo { + /// Remote address `host:port`. + pub addr: String, + /// Whether the connection is inbound. + pub inbound: bool, +} + +// TODO: Do not use primitive types +/// Response to a `getpeerinfo` RPC request coming from `zcashd`. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(deny_unknown_fields)] +pub struct ZcashdPeerInfo { + /// Peer index (NodeId). + pub id: NodeId, + + /// Remote address `host:port`. + pub addr: String, + + /// Typed representation of the hex-encoded service flags. + pub services: ServiceFlags, + + /// Whether the peer asked us to relay transactions. + pub relaytxes: bool, + + /// Last send time (Unix seconds). + pub lastsend: UnixTime, + + /// Last receive time (Unix seconds). + pub lastrecv: UnixTime, + + /// Total bytes sent. + pub bytessent: Bytes, + + /// Total bytes received. + pub bytesrecv: Bytes, + + /// Connection time (Unix seconds). + pub conntime: UnixTime, + + /// Clock offset (seconds, can be negative). + pub timeoffset: TimeOffsetSeconds, + + /// Ping time (seconds). + pub pingtime: SecondsF64, + + /// Protocol version. + pub version: ProtocolVersion, + + /// User agent string. + pub subver: String, + + /// Whether the connection is inbound. + pub inbound: bool, + + /// Starting block height advertised by the peer. + pub startingheight: MaybeHeight, + + /// Count of processed addr messages. + pub addr_processed: u64, + + /// Count of rate-limited addr messages. + pub addr_rate_limited: u64, + + /// Whether the peer is whitelisted. 
+ pub whitelisted: bool, + + /// Local address `host:port`. + #[serde(default)] + pub addrlocal: Option, + + /// Ping wait time in seconds. Only present if > 0.0. + #[serde(default)] + pub pingwait: Option, + + /// Grouped validation/sync state (present when zcashd exposes state stats). + #[serde(flatten)] + pub state: Option, +} + +impl<'de> Deserialize<'de> for GetPeerInfo { + /// Deserialize either a `ZcashdPeerInfo` or a `ZebradPeerInfo` depending on the shape of the JSON. + /// + /// In the `Unkown` variant, the raw array is preserved for passthrough/logging. + /// If the value is not an array, an error is returned. + fn deserialize(de: D) -> Result + where + D: Deserializer<'de>, + { + let v = Value::deserialize(de)?; + + // zcashd first + if let Ok(zd) = serde_json::from_value::>(v.clone()) { + return Ok(GetPeerInfo::Zcashd(zd)); + } + // zebrad + if let Ok(zebra) = serde_json::from_value::>(v.clone()) { + return Ok(GetPeerInfo::Zebrad(zebra)); + } + // unknown + if v.is_array() { + let raw: Vec = serde_json::from_value(v).map_err(serde::de::Error::custom)?; + Ok(GetPeerInfo::Unknown(raw)) + } else { + Err(serde::de::Error::custom("getpeerinfo: expected JSON array")) + } + } +} + +impl ResponseToError for GetPeerInfo { + type RpcError = Infallible; +} + +/// Bitflags for the peer's advertised services (backed by a u64). +/// Serialized as a zero-padded 16-digit lowercase hex string. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct ServiceFlags(pub u64); + +impl ServiceFlags { + /// Returns the underlying bits + pub fn bits(self) -> u64 { + self.0 + } + + /// Returns true if the given bit is set + pub fn has(self, mask: u64) -> bool { + (self.0 & mask) != 0 + } + + /// Node offers full network services (bit 0). + pub const NODE_NETWORK: u64 = 1 << 0; + + /// Legacy Bloom filter support (bit 2). 
+ pub const NODE_BLOOM: u64 = 1 << 2; + + /// Returns true if the `NODE_NETWORK` bit is set + pub fn has_node_network(self) -> bool { + self.has(Self::NODE_NETWORK) + } + + /// Returns true if the `NODE_BLOOM` bit is set + pub fn has_node_bloom(self) -> bool { + self.has(Self::NODE_BLOOM) + } + + /// Bits not recognized by this crate. + pub fn unknown_bits(self) -> u64 { + let known = Self::NODE_NETWORK | Self::NODE_BLOOM; + self.bits() & !known + } +} + +impl From for ServiceFlags { + fn from(x: u64) -> Self { + ServiceFlags(x) + } +} +impl From for u64 { + fn from(f: ServiceFlags) -> Self { + f.0 + } +} + +impl Serialize for ServiceFlags { + fn serialize(&self, ser: S) -> Result { + ser.serialize_str(&format!("{:016x}", self.0)) + } +} +impl<'de> Deserialize<'de> for ServiceFlags { + fn deserialize>(de: D) -> Result { + let s = String::deserialize(de)?; + + // Optional `0x` + let s = s.strip_prefix("0x").unwrap_or(&s); + u64::from_str_radix(s, 16) + .map(ServiceFlags) + .map_err(|e| serde::de::Error::custom(format!("invalid services hex: {e}"))) + } +} + +/// Per-peer validation/sync state. Present when state stats are set. `zcashd` only. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct PeerStateStats { + /// Misbehavior score. + pub banscore: i64, + /// Last header height in common. + pub synced_headers: BlockHeight, + /// Last block height in common. + pub synced_blocks: BlockHeight, + /// Block heights currently requested from this peer. 
+ pub inflight: Vec, +} + +#[cfg(test)] +mod tests { + use super::*; + // use pretty_assertions::assert_eq; + + // TODO: get a real testvector + #[test] + fn parses_zcashd_payload() { + let zcashd_json = r#" + [ + { + "id": 1, + "addr": "127.0.0.1:8233", + "services": "0000000000000001", + "relaytxes": true, + "lastsend": 1690000000, + "lastrecv": 1690000100, + "bytessent": 1234, + "bytesrecv": 5678, + "conntime": 1690000000, + "timeoffset": 0, + "pingtime": 0.001, + "version": 170002, + "subver": "/MagicBean:5.8.0/", + "inbound": false, + "startingheight": 2000000, + "addr_processed": 1, + "addr_rate_limited": 0, + "whitelisted": false, + "addrlocal": "192.168.1.10:8233", + "pingwait": 0.1, + "banscore": 0, + "synced_headers": 1999999, + "synced_blocks": 1999999, + "inflight": [2000000, 2000001] + } + ] + "#; + + let parsed: GetPeerInfo = serde_json::from_str(zcashd_json).unwrap(); + match parsed { + GetPeerInfo::Zcashd(items) => { + let p = &items[0]; + assert_eq!(p.id, NodeId(1)); + assert_eq!(p.addr, "127.0.0.1:8233"); + assert_eq!(p.version, ProtocolVersion(170002)); + assert!(!p.inbound); + assert_eq!(p.pingwait, Some(SecondsF64(0.1))); + + let st = p.state.as_ref().expect("expected state stats"); + assert_eq!(st.synced_blocks, BlockHeight::from(1999999)); + assert_eq!(st.synced_headers, BlockHeight::from(1999999)); + assert_eq!(st.banscore, 0); + assert_eq!( + st.inflight, + vec![BlockHeight::from(2000000), BlockHeight::from(2000001),] + ); + } + other => panic!("expected Zcashd, got: {:?}", other), + } + } + + // TODO: get a real testvector + #[test] + fn parses_zebrad_payload() { + let zebrad_json = r#" + [ + { "addr": "1.2.3.4:8233", "inbound": true }, + { "addr": "5.6.7.8:8233", "inbound": false } + ] + "#; + + let parsed: GetPeerInfo = serde_json::from_str(zebrad_json).unwrap(); + match parsed { + GetPeerInfo::Zebrad(items) => { + assert_eq!(items.len(), 2); + assert_eq!(items[0].addr, "1.2.3.4:8233"); + assert!(items[0].inbound); + 
assert_eq!(items[1].addr, "5.6.7.8:8233"); + assert!(!items[1].inbound); + } + other => panic!("expected Zebrad variant, got: {:?}", other), + } + } + + #[test] + fn zcashd_rejects_extra_fields() { + let j = r#"[{ + "id":1,"addr":"127.0.0.1:8233","services":"0000000000000001", + "relaytxes":true,"lastsend":1,"lastrecv":2,"bytessent":3,"bytesrecv":4, + "conntime":5,"timeoffset":0,"pingtime":0.1,"version":170002,"subver":"/X/","inbound":false, + "startingheight":-1,"addr_processed":0,"addr_rate_limited":0,"whitelisted":false, + "unexpected":"oops" + }]"#; + + // zcashd fails due to unknown field + let err = serde_json::from_str::>(j).unwrap_err(); + assert!(err.to_string().contains("unknown field")); + + // Should be `Unknown` + let parsed = serde_json::from_str::(j).unwrap(); + matches!(parsed, GetPeerInfo::Unknown(_)); + } + + /// Integrity test that ensures no Downgrade-to-Zebrad via type poisoning is possible. + #[test] + fn zebrad_does_not_act_as_catchall() { + let invalid_zcashd = r#" + [ + { "addr": "1.2.3.4:8233", "inbound": false, "whitelisted": "true" } + ] + "#; + + let parsed: GetPeerInfo = serde_json::from_str(invalid_zcashd).unwrap(); + + match parsed { + GetPeerInfo::Unknown(items) => { + assert_eq!(items.len(), 1); + } + other => { + panic!("expected Unknown variant, got: {:?}", other); + } + } + } + + // TODO: get a real testvector + #[test] + fn falls_back_to_unknown_for_unrecognized_shape() { + let unknown_json = r#" + [ + { "foo": 1, "bar": "baz" }, + { "weird": [1,2,3] } + ] + "#; + + let parsed: GetPeerInfo = serde_json::from_str(unknown_json).unwrap(); + match parsed { + GetPeerInfo::Unknown(items) => { + assert_eq!(items.len(), 2); + assert!(items[0].get("foo").is_some()); + } + other => panic!("expected Unknown variant, got: {:?}", other), + } + } + + // TODO: get a real testvector + #[test] + fn fails_on_non_array() { + let non_array_json = r#"{"foo": 1, "bar": "baz"}"#; + let err = serde_json::from_str::(non_array_json).unwrap_err(); + 
assert_eq!(err.to_string(), "getpeerinfo: expected JSON array"); + } + + #[test] + fn getpeerinfo_serializes_as_raw_array() { + let val = GetPeerInfo::Zcashd(Vec::new()); + let s = serde_json::to_string(&val).unwrap(); + assert_eq!(s, "[]"); + } + + #[test] + fn getpeerinfo_unknown_serializes_as_raw_array() { + let val = GetPeerInfo::Unknown(vec![serde_json::json!({"foo":1})]); + let s = serde_json::to_string(&val).unwrap(); + assert_eq!(s, r#"[{"foo":1}]"#); + } + + mod serviceflags { + use crate::jsonrpsee::response::{ + common::{ + BlockHeight, Bytes, MaybeHeight, NodeId, ProtocolVersion, SecondsF64, + TimeOffsetSeconds, UnixTime, + }, + peer_info::{ServiceFlags, ZcashdPeerInfo}, + }; + + #[test] + fn serviceflags_roundtrip() { + let f = ServiceFlags(0x0000_0000_0000_0001); + let s = serde_json::to_string(&f).unwrap(); + assert_eq!(s, r#""0000000000000001""#); // zero-padded, lowercase + let back: ServiceFlags = serde_json::from_str(&s).unwrap(); + assert_eq!(back.bits(), 1); + assert!(back.has(1)); + } + + #[test] + fn zcashd_peerinfo_deser_with_typed_services() { + let j = r#"[{ + "id":1, + "addr":"127.0.0.1:8233", + "services":"0000000000000003", + "relaytxes":true, + "lastsend":1,"lastrecv":2,"bytessent":3,"bytesrecv":4, + "conntime":5,"timeoffset":0,"pingtime":0.001, + "version":170002,"subver":"/MagicBean:5.8.0/","inbound":false, + "startingheight":2000000,"addr_processed":7,"addr_rate_limited":8,"whitelisted":false + }]"#; + + let v: Vec = serde_json::from_str(j).unwrap(); + assert_eq!(v[0].services.bits(), 3); + assert!(v[0].services.has(1)); + assert!(v[0].services.has(2)); + } + + #[test] + fn zcashd_peerinfo_serializes_back_to_hex() { + let pi = ZcashdPeerInfo { + id: NodeId(1), + addr: "127.0.0.1:8233".into(), + services: ServiceFlags(0x0A0B_0C0D_0E0F), + relaytxes: true, + lastsend: UnixTime(1), + lastrecv: UnixTime(2), + bytessent: Bytes(3), + bytesrecv: Bytes(4), + conntime: UnixTime(5), + timeoffset: TimeOffsetSeconds(0), + pingtime: 
SecondsF64(0.1), + version: ProtocolVersion(170002), + subver: "/X/".into(), + inbound: false, + startingheight: MaybeHeight(Some(BlockHeight::from(42))), + addr_processed: 0, + addr_rate_limited: 0, + whitelisted: false, + addrlocal: None, + pingwait: None, + state: None, + }; + + let v = serde_json::to_value(&pi).unwrap(); + let services_str = v["services"].as_str().unwrap(); + let expected = format!("{:016x}", u64::from(pi.services)); + assert_eq!(services_str, expected); // "00000a0b0c0d0e0f" + } + } +} diff --git a/zaino-fetch/src/lib.rs b/zaino-fetch/src/lib.rs index c08ea3c7c..3a0c69e63 100644 --- a/zaino-fetch/src/lib.rs +++ b/zaino-fetch/src/lib.rs @@ -1,9 +1,9 @@ -//! A mempool-fetching, chain-fetching and transaction submission service that uses zebra's RPC interface. +//! A mempool-fetching, chain-fetching and transaction submission service that uses zcashd's JsonRPC interface. //! -//! Used primarily as a backup and legacy option for backwards compatibility. +//! Usable as a backwards-compatible, legacy option. #![warn(missing_docs)] #![forbid(unsafe_code)] pub mod chain; -pub mod jsonrpc; +pub mod jsonrpsee; diff --git a/zaino-proto/CHANGELOG.md b/zaino-proto/CHANGELOG.md new file mode 100644 index 000000000..60dfd28f9 --- /dev/null +++ b/zaino-proto/CHANGELOG.md @@ -0,0 +1,16 @@ +# Changelog +All notable changes to this library will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this library adheres to Rust's notion of +[Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + + +### Added +- `ValidatedBlockRangeRequest` type that encapsulates validations of the + `GetBlockRange` RPC request +- utils submodule to handle `PoolType` conversions +- `PoolTypeError` defines conversion errors between i32 and known `PoolType` variants +- `PoolTypeFilter` indicates which pools need to be returned in a compact block. 
diff --git a/zaino-proto/Cargo.toml b/zaino-proto/Cargo.toml index e34fbcb46..347f7f64b 100644 --- a/zaino-proto/Cargo.toml +++ b/zaino-proto/Cargo.toml @@ -1,18 +1,26 @@ [package] name = "zaino-proto" description = "Holds tonic files and build logic for the lightwallet and darkside RPCs." -edition = { workspace = true } authors = { workspace = true } -license = { workspace = true } repository = { workspace = true } +homepage = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +version = { workspace = true } [dependencies] +zebra-state = { workspace = true } +zebra-chain = { workspace = true } + # Miscellaneous Workspace tonic = { workspace = true } # Miscellaneous Crate prost = { workspace = true } +tonic-prost = "0.14" [build-dependencies] -tonic-build = { workspace = true, features = ["prost"] } +tonic-build = { workspace = true } which = { workspace = true } +tonic-prost-build = "0.14" +prost-build = "0.14" \ No newline at end of file diff --git a/zaino-proto/README.md b/zaino-proto/README.md new file mode 100644 index 000000000..780175b6c --- /dev/null +++ b/zaino-proto/README.md @@ -0,0 +1,65 @@ +# Zaino Proto files module + +This module encapsulates the lightclient-protocol functionality and imports the canonicals files +using `git subtree`. 
+
+
+Below you can see the structure of the module
+
+```
+zaino-proto
+├── build.rs
+├── build.rs.bak
+├── Cargo.toml
+├── CHANGELOG.md
+├── lightwallet-protocol <=== this is the git subtree
+│   ├── CHANGELOG.md
+│   ├── LICENSE
+│   └── walletrpc
+│   ├── compact_formats.proto
+│   └── service.proto
+├── proto
+│   ├── compact_formats.proto -> ../lightwallet-protocol/walletrpc/compact_formats.proto
+│   ├── proposal.proto
+│   └── service.proto -> ../lightwallet-protocol/walletrpc/service.proto
+└── src
+    ├── lib.rs
+    ├── proto
+    │   ├── compact_formats.rs
+    │   ├── proposal.rs
+    │   ├── service.rs
+    │   └── utils.rs
+    └── proto.rs
+```
+
+Maintaining the git subtree history has its own tricks. We recommend that developers updating
+zaino-proto be wary of these shortcomings.
+
+If you need to update the canonical files for your feature, maintain a linear and simple git
+commit history in your PR.
+
+We recommend that PRs that change the reference to the git subtree do so in this fashion.
+
+For example:
+============
+
+when doing
+```
+git subtree --prefix=zaino-proto/lightwallet-protocol pull git@github.com:zcash/lightwallet-protocol.git v0.4.0 --squash
+```
+
+your branch's commits must be sequenced like this.
+
+```
+ your-branch-name
+ - commit applying the git subtree command
+ - commit merging the canonical files
+ - commits fixing compiler errors
+ - commit indicating the version adopted in the CHANGELOG.md of zaino-proto
+```
+
+If you are developing the `lightwallet-protocol` and adopting it on Zaino, it is recommended that
+you don't do subsequent `git subtree` pulls to intermediate revisions and always rebase against the latest version
+that you will be using in your latest commit, to avoid rebasing issues and to keep a coherent
+git commit history for when your branch merges to `dev`.
+ diff --git a/zaino-proto/build.rs.bak b/zaino-proto/build.rs similarity index 92% rename from zaino-proto/build.rs.bak rename to zaino-proto/build.rs index bf72a1d83..682b025ae 100644 --- a/zaino-proto/build.rs.bak +++ b/zaino-proto/build.rs @@ -2,7 +2,7 @@ use std::env; use std::fs; use std::io; use std::path::{Path, PathBuf}; -use std::process::Command; +use tonic_prost_build::{configure, compile_protos}; const COMPACT_FORMATS_PROTO: &str = "proto/compact_formats.proto"; const PROPOSAL_PROTO: &str = "proto/proposal.proto"; @@ -28,7 +28,7 @@ fn build() -> io::Result<()> { .into(); // Build the compact format types. - tonic_build::compile_protos(COMPACT_FORMATS_PROTO)?; + compile_protos(COMPACT_FORMATS_PROTO)?; // Copy the generated types into the source tree so changes can be committed. fs::copy( @@ -37,7 +37,7 @@ fn build() -> io::Result<()> { )?; // Build the gRPC types and client. - tonic_build::configure() + configure() .build_server(true) // .client_mod_attribute( // "cash.z.wallet.sdk.rpc", @@ -67,10 +67,10 @@ fn build() -> io::Result<()> { ".cash.z.wallet.sdk.rpc.CompactOrchardAction", "crate::proto::compact_formats::CompactOrchardAction", ) - .compile(&[SERVICE_PROTO], &["proto/"])?; + .compile_protos(&[SERVICE_PROTO], &["proto/"])?; // Build the proposal types. - tonic_build::compile_protos(PROPOSAL_PROTO)?; + compile_protos(PROPOSAL_PROTO)?; // Copy the generated types into the source tree so changes can be committed. 
fs::copy( diff --git a/zaino-proto/lightwallet-protocol/LICENSE b/zaino-proto/lightwallet-protocol/LICENSE new file mode 100644 index 000000000..a8b65b3ce --- /dev/null +++ b/zaino-proto/lightwallet-protocol/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Electric Coin Company + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/zaino-proto/lightwallet-protocol/walletrpc/compact_formats.proto b/zaino-proto/lightwallet-protocol/walletrpc/compact_formats.proto new file mode 100644 index 000000000..c62c7acbb --- /dev/null +++ b/zaino-proto/lightwallet-protocol/walletrpc/compact_formats.proto @@ -0,0 +1,125 @@ +// Copyright (c) 2019-2021 The Zcash developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or https://www.opensource.org/licenses/mit-license.php . + +syntax = "proto3"; +package cash.z.wallet.sdk.rpc; +option go_package = "lightwalletd/walletrpc"; +option swift_prefix = ""; + +// REMINDER: proto3 fields are all optional. 
A field that is not present will be set to its zero/false/empty +// value. + +// Information about the state of the chain as of a given block. +message ChainMetadata { + uint32 saplingCommitmentTreeSize = 1; // the size of the Sapling note commitment tree as of the end of this block + uint32 orchardCommitmentTreeSize = 2; // the size of the Orchard note commitment tree as of the end of this block +} + +// A compact representation of a Zcash block. +// +// CompactBlock is a packaging of ONLY the data from a block that's needed to: +// 1. Detect a payment to your Shielded address +// 2. Detect a spend of your Shielded notes +// 3. Update your witnesses to generate new spend proofs. +// 4. Spend UTXOs associated to t-addresses of your wallet. +message CompactBlock { + uint32 protoVersion = 1; // the version of this wire format, for storage + uint64 height = 2; // the height of this block + bytes hash = 3; // the ID (hash) of this block, same as in block explorers + bytes prevHash = 4; // the ID (hash) of this block's predecessor + uint32 time = 5; // Unix epoch time when the block was mined + bytes header = 6; // full header (as returned by the getblock RPC) + repeated CompactTx vtx = 7; // zero or more compact transactions from this block + ChainMetadata chainMetadata = 8; // information about the state of the chain as of this block +} + +// A compact representation of a Zcash transaction. +// +// CompactTx contains the minimum information for a wallet to know if this transaction +// is relevant to it (either pays to it or spends from it) via shielded elements. Additionally, +// it can optionally include the minimum necessary data to detect payments to transparent addresses +// related to your wallet. +message CompactTx { + // The index of the transaction within the block. 
+ uint64 index = 1; + + // The id of the transaction as defined in + // [§ 7.1.1 ‘Transaction Identifiers’](https://zips.z.cash/protocol/protocol.pdf#txnidentifiers) + // This byte array MUST be in protocol order and MUST NOT be reversed + // or hex-encoded; the byte-reversed and hex-encoded representation is + // exclusively a textual representation of a txid. + bytes txid = 2; + + // The transaction fee: present if server can provide. In the case of a + // stateless server and a transaction with transparent inputs, this will be + // unset because the calculation requires reference to prior transactions. + // If there are no transparent inputs, the fee will be calculable as: + // valueBalanceSapling + valueBalanceOrchard + sum(vPubNew) - sum(vPubOld) - sum(tOut) + uint32 fee = 3; + + repeated CompactSaplingSpend spends = 4; + repeated CompactSaplingOutput outputs = 5; + repeated CompactOrchardAction actions = 6; + + // `CompactTxIn` values corresponding to the `vin` entries of the full transaction. + // + // Note: the single null-outpoint input for coinbase transactions is omitted. Light + // clients can test `CompactTx.index == 0` to determine whether a `CompactTx` + // represents a coinbase transaction, as the coinbase transaction is always the + // first transaction in any block. + repeated CompactTxIn vin = 7; + + // A sequence of transparent outputs being created by the transaction. + repeated TxOut vout = 8; +} + +// A compact representation of a transparent transaction input. +message CompactTxIn { + // The id of the transaction that generated the output being spent. This + // byte array must be in protocol order and MUST NOT be reversed or + // hex-encoded. + bytes prevoutTxid = 1; + + // The index of the output being spent in the `vout` array of the + // transaction referred to by `prevoutTxid`. + uint32 prevoutIndex = 2; +} + +// A transparent output being created by the transaction. 
+// +// This contains identical data to the `TxOut` type in the transaction itself, and +// thus it is not "compact". +message TxOut { + // The value of the output, in Zatoshis. + uint64 value = 1; + + // The script pubkey that must be satisfied in order to spend this output. + bytes scriptPubKey = 2; +} + +// A compact representation of a [Sapling Spend](https://zips.z.cash/protocol/protocol.pdf#spendencodingandconsensus). +// +// CompactSaplingSpend is a Sapling Spend Description as described in 7.3 of the Zcash +// protocol specification. +message CompactSaplingSpend { + bytes nf = 1; // Nullifier (see the Zcash protocol specification) +} + +// A compact representation of a [Sapling Output](https://zips.z.cash/protocol/protocol.pdf#outputencodingandconsensus). +// +// It encodes the `cmu` field, `ephemeralKey` field, and a 52-byte prefix of the +// `encCiphertext` field of a Sapling Output Description. Total size is 116 bytes. +message CompactSaplingOutput { + bytes cmu = 1; // Note commitment u-coordinate. + bytes ephemeralKey = 2; // Ephemeral public key. + bytes ciphertext = 3; // First 52 bytes of ciphertext. +} + +// A compact representation of an [Orchard Action](https://zips.z.cash/protocol/protocol.pdf#actionencodingandconsensus). 
+message CompactOrchardAction { + bytes nullifier = 1; // [32] The nullifier of the input note + bytes cmx = 2; // [32] The x-coordinate of the note commitment for the output note + bytes ephemeralKey = 3; // [32] An encoding of an ephemeral Pallas public key + bytes ciphertext = 4; // [52] The first 52 bytes of the encCiphertext field +} diff --git a/zaino-proto/lightwallet-protocol/walletrpc/service.proto b/zaino-proto/lightwallet-protocol/walletrpc/service.proto new file mode 100644 index 000000000..d3dc8ba04 --- /dev/null +++ b/zaino-proto/lightwallet-protocol/walletrpc/service.proto @@ -0,0 +1,303 @@ +// Copyright (c) 2019-2020 The Zcash developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or https://www.opensource.org/licenses/mit-license.php . + +syntax = "proto3"; +package cash.z.wallet.sdk.rpc; +option go_package = "lightwalletd/walletrpc"; +option swift_prefix = ""; +import "compact_formats.proto"; + +// An identifier for a Zcash value pool. +enum PoolType { + POOL_TYPE_INVALID = 0; + TRANSPARENT = 1; + SAPLING = 2; + ORCHARD = 3; +} + +// A BlockID message contains identifiers to select a block: a height or a +// hash. Specification by hash is not implemented, but may be in the future. +message BlockID { + uint64 height = 1; + bytes hash = 2; +} + +// BlockRange specifies a series of blocks from start to end inclusive. +// Both BlockIDs must be heights; specification by hash is not yet supported. +// +// If no pool types are specified, the server should default to the legacy +// behavior of returning only data relevant to the shielded (Sapling and +// Orchard) pools; otherwise, the server should prune `CompactBlocks` returned +// to include only data relevant to the requested pool types. Clients MUST +// verify that the version of the server they are connected to are capable +// of returning pruned and/or transparent data before setting `poolTypes` +// to a non-empty value. 
+message BlockRange { + BlockID start = 1; + BlockID end = 2; + repeated PoolType poolTypes = 3; +} + +// A TxFilter contains the information needed to identify a particular +// transaction: either a block and an index, or a direct transaction hash. +// Currently, only specification by hash is supported. +message TxFilter { + BlockID block = 1; // block identifier, height or hash + uint64 index = 2; // index within the block + bytes hash = 3; // transaction ID (hash, txid) +} + +// RawTransaction contains the complete transaction data. It also optionally includes +// the block height in which the transaction was included, or, when returned +// by GetMempoolStream(), the latest block height. +// +// FIXME: the documentation here about mempool status contradicts the documentation +// for the `height` field. See https://github.com/zcash/librustzcash/issues/1484 +message RawTransaction { + // The serialized representation of the Zcash transaction. + bytes data = 1; + // The height at which the transaction is mined, or a sentinel value. + // + // Due to an error in the original protobuf definition, it is necessary to + // reinterpret the result of the `getrawtransaction` RPC call. Zcashd will + // return the int64 value `-1` for the height of transactions that appear + // in the block index, but which are not mined in the main chain. Here, the + // height field of `RawTransaction` was erroneously created as a `uint64`, + // and as such we must map the response from the zcashd RPC API to be + // representable within this space. Additionally, the `height` field will + // be absent for transactions in the mempool, resulting in the default + // value of `0` being set. 
Therefore, the meanings of the `height` field of + // the `RawTransaction` type are as follows: + // + // * height 0: the transaction is in the mempool + // * height 0xffffffffffffffff: the transaction has been mined on a fork that + // is not currently the main chain + // * any other height: the transaction has been mined in the main chain at the + // given height + uint64 height = 2; +} + +// A SendResponse encodes an error code and a string. It is currently used +// only by SendTransaction(). If error code is zero, the operation was +// successful; if non-zero, it and the message specify the failure. +message SendResponse { + int32 errorCode = 1; + string errorMessage = 2; +} + +// Chainspec is a placeholder to allow specification of a particular chain fork. +message ChainSpec {} + +// Empty is for gRPCs that take no arguments, currently only GetLightdInfo. +message Empty {} + +// LightdInfo returns various information about this lightwalletd instance +// and the state of the blockchain. 
+message LightdInfo { + string version = 1; + string vendor = 2; + bool taddrSupport = 3; // true + string chainName = 4; // either "main" or "test" + uint64 saplingActivationHeight = 5; // depends on mainnet or testnet + string consensusBranchId = 6; // protocol identifier, see consensus/upgrades.cpp + uint64 blockHeight = 7; // latest block on the best chain + string gitCommit = 8; + string branch = 9; + string buildDate = 10; + string buildUser = 11; + uint64 estimatedHeight = 12; // less than tip height if zcashd is syncing + string zcashdBuild = 13; // example: "v4.1.1-877212414" + string zcashdSubversion = 14; // example: "/MagicBean:4.1.1/" + string donationAddress = 15; // Zcash donation UA address + string upgradeName = 16; // name of next pending network upgrade, empty if none scheduled + uint64 upgradeHeight = 17; // height of next pending upgrade, zero if none is scheduled + string lightwalletProtocolVersion = 18; // version of https://github.com/zcash/lightwallet-protocol served by this server +} + +// TransparentAddressBlockFilter restricts the results of the GRPC methods that +// use it to the transactions that involve the given address and were mined in +// the specified block range. Non-default values for both the address and the +// block range must be specified. Mempool transactions are not included. +// +// The `poolTypes` field of the `range` argument should be ignored. +// Implementations MAY consider it an error if any pool types are specified. +message TransparentAddressBlockFilter { + string address = 1; // t-address + BlockRange range = 2; // start, end heights only +} + +// Duration is currently used only for testing, so that the Ping rpc +// can simulate a delay, to create many simultaneous connections. Units +// are microseconds. +message Duration { + int64 intervalUs = 1; +} + +// PingResponse is used to indicate concurrency, how many Ping rpcs +// are executing upon entry and upon exit (after the delay). 
+// This rpc is used for testing only. +message PingResponse { + int64 entry = 1; + int64 exit = 2; +} + +message Address { + string address = 1; +} +message AddressList { + repeated string addresses = 1; +} +message Balance { + int64 valueZat = 1; +} + +// Request parameters for the `GetMempoolTx` RPC. +message GetMempoolTxRequest { + // A list of transaction ID byte string suffixes that should be excluded + // from the response. These suffixes may be produced either directly from + // the underlying txid bytes, or, if the source values are encoded txid + // strings, by truncating the hexadecimal representation of each + // transaction ID to an even number of characters, and then hex-decoding + // and then byte-reversing this value to obtain the byte representation. + repeated bytes exclude_txid_suffixes = 1; + // We reserve field number 2 for a potential future `exclude_txid_prefixes` + // field. + reserved 2; + // The server must prune `CompactTx`s returned to include only data + // relevant to the requested pool types. If no pool types are specified, + // the server should default to the legacy behavior of returning only data + // relevant to the shielded (Sapling and Orchard) pools. + repeated PoolType poolTypes = 3; +} + +// The TreeState is derived from the Zcash z_gettreestate rpc. +message TreeState { + string network = 1; // "main" or "test" + uint64 height = 2; // block height + string hash = 3; // block id + uint32 time = 4; // Unix epoch time when the block was mined + string saplingTree = 5; // sapling commitment tree state + string orchardTree = 6; // orchard commitment tree state +} + +enum ShieldedProtocol { + sapling = 0; + orchard = 1; +} + +message GetSubtreeRootsArg { + uint32 startIndex = 1; // Index identifying where to start returning subtree roots + ShieldedProtocol shieldedProtocol = 2; // Shielded protocol to return subtree roots for + uint32 maxEntries = 3; // Maximum number of entries to return, or 0 for all entries. 
+} +message SubtreeRoot { + bytes rootHash = 2; // The 32-byte Merkle root of the subtree. + bytes completingBlockHash = 3; // The hash of the block that completed this subtree. + uint64 completingBlockHeight = 4; // The height of the block that completed this subtree in the main chain. +} + +// Results are sorted by height, which makes it easy to issue another +// request that picks up from where the previous left off. +message GetAddressUtxosArg { + repeated string addresses = 1; + uint64 startHeight = 2; + uint32 maxEntries = 3; // zero means unlimited +} +message GetAddressUtxosReply { + string address = 6; + bytes txid = 1; + int32 index = 2; + bytes script = 3; + int64 valueZat = 4; + uint64 height = 5; +} +message GetAddressUtxosReplyList { + repeated GetAddressUtxosReply addressUtxos = 1; +} + +service CompactTxStreamer { + // Return the BlockID of the block at the tip of the best chain + rpc GetLatestBlock(ChainSpec) returns (BlockID) {} + + // Return the compact block corresponding to the given block identifier + rpc GetBlock(BlockID) returns (CompactBlock) {} + + // Same as GetBlock except the returned CompactBlock value contains only + // nullifiers. + // + // Note: this method is deprecated. Implementations should ignore any + // `PoolType::TRANSPARENT` member of the `poolTypes` argument. + rpc GetBlockNullifiers(BlockID) returns (CompactBlock) {} + + // Return a list of consecutive compact blocks in the specified range, + // which is inclusive of `range.end`. + // + // If range.start <= range.end, blocks are returned increasing height order; + // otherwise blocks are returned in decreasing height order. + rpc GetBlockRange(BlockRange) returns (stream CompactBlock) {} + + // Same as GetBlockRange except the returned CompactBlock values contain + // only nullifiers. + // + // Note: this method is deprecated. Implementations should ignore any + // `PoolType::TRANSPARENT` member of the `poolTypes` argument. 
+ rpc GetBlockRangeNullifiers(BlockRange) returns (stream CompactBlock) {} + + // Return the requested full (not compact) transaction (as from zcashd) + rpc GetTransaction(TxFilter) returns (RawTransaction) {} + + // Submit the given transaction to the Zcash network + rpc SendTransaction(RawTransaction) returns (SendResponse) {} + + // Return RawTransactions that match the given transparent address filter. + // + // Note: This function is misnamed, it returns complete `RawTransaction` values, not TxIds. + // NOTE: this method is deprecated, please use GetTaddressTransactions instead. + rpc GetTaddressTxids(TransparentAddressBlockFilter) returns (stream RawTransaction) {} + + // Return the transactions corresponding to the given t-address within the given block range. + // Mempool transactions are not included in the results. + rpc GetTaddressTransactions(TransparentAddressBlockFilter) returns (stream RawTransaction) {} + + rpc GetTaddressBalance(AddressList) returns (Balance) {} + rpc GetTaddressBalanceStream(stream Address) returns (Balance) {} + + // Returns a stream of the compact transaction representation for transactions + // currently in the mempool. The results of this operation may be a few + // seconds out of date. If the `exclude_txid_suffixes` list is empty, + // return all transactions; otherwise return all *except* those in the + // `exclude_txid_suffixes` list (if any); this allows the client to avoid + // receiving transactions that it already has (from an earlier call to this + // RPC). The transaction IDs in the `exclude_txid_suffixes` list can be + // shortened to any number of bytes to make the request more + // bandwidth-efficient; if two or more transactions in the mempool match a + // txid suffix, none of the matching transactions are excluded. Txid + // suffixes in the exclude list that don't match any transactions in the + // mempool are ignored. 
+ rpc GetMempoolTx(GetMempoolTxRequest) returns (stream CompactTx) {} + + // Return a stream of current Mempool transactions. This will keep the output stream open while + // there are mempool transactions. It will close the returned stream when a new block is mined. + rpc GetMempoolStream(Empty) returns (stream RawTransaction) {} + + // GetTreeState returns the note commitment tree state corresponding to the given block. + // See section 3.7 of the Zcash protocol specification. It returns several other useful + // values also (even though they can be obtained using GetBlock). + // The block can be specified by either height or hash. + rpc GetTreeState(BlockID) returns (TreeState) {} + rpc GetLatestTreeState(Empty) returns (TreeState) {} + + // Returns a stream of information about roots of subtrees of the note commitment tree + // for the specified shielded protocol (Sapling or Orchard). + rpc GetSubtreeRoots(GetSubtreeRootsArg) returns (stream SubtreeRoot) {} + + rpc GetAddressUtxos(GetAddressUtxosArg) returns (GetAddressUtxosReplyList) {} + rpc GetAddressUtxosStream(GetAddressUtxosArg) returns (stream GetAddressUtxosReply) {} + + // Return information about this lightwalletd instance and the blockchain + rpc GetLightdInfo(Empty) returns (LightdInfo) {} + + // Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) + rpc Ping(Duration) returns (PingResponse) {} +} diff --git a/zaino-proto/proto/compact_formats.proto b/zaino-proto/proto/compact_formats.proto deleted file mode 100644 index e39e5225d..000000000 --- a/zaino-proto/proto/compact_formats.proto +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2019-2021 The Zcash developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or https://www.opensource.org/licenses/mit-license.php . 
- -syntax = "proto3"; -package cash.z.wallet.sdk.rpc; -option go_package = "walletrpc"; -option swift_prefix = ""; - -// Remember that proto3 fields are all optional. A field that is not present will be set to its zero value. -// bytes fields of hashes are in canonical little-endian format. - -// Information about the state of the chain as of a given block. -message ChainMetadata { - uint32 saplingCommitmentTreeSize = 1; // the size of the Sapling note commitment tree as of the end of this block - uint32 orchardCommitmentTreeSize = 2; // the size of the Orchard note commitment tree as of the end of this block -} - -// A compact representation of the shielded data in a Zcash block. -// -// CompactBlock is a packaging of ONLY the data from a block that's needed to: -// 1. Detect a payment to your shielded Sapling address -// 2. Detect a spend of your shielded Sapling notes -// 3. Update your witnesses to generate new Sapling spend proofs. -message CompactBlock { - uint32 protoVersion = 1; // the version of this wire format, for storage - uint64 height = 2; // the height of this block - bytes hash = 3; // the ID (hash) of this block, same as in block explorers - bytes prevHash = 4; // the ID (hash) of this block's predecessor - uint32 time = 5; // Unix epoch time when the block was mined - bytes header = 6; // (hash, prevHash, and time) OR (full header) - repeated CompactTx vtx = 7; // zero or more compact transactions from this block - ChainMetadata chainMetadata = 8; // information about the state of the chain as of this block -} - -// A compact representation of the shielded data in a Zcash transaction. -// -// CompactTx contains the minimum information for a wallet to know if this transaction -// is relevant to it (either pays to it or spends from it) via shielded elements -// only. This message will not encode a transparent-to-transparent transaction. 
-message CompactTx { - // Index and hash will allow the receiver to call out to chain - // explorers or other data structures to retrieve more information - // about this transaction. - uint64 index = 1; // the index within the full block - bytes hash = 2; // the ID (hash) of this transaction, same as in block explorers - - // The transaction fee: present if server can provide. In the case of a - // stateless server and a transaction with transparent inputs, this will be - // unset because the calculation requires reference to prior transactions. - // If there are no transparent inputs, the fee will be calculable as: - // valueBalanceSapling + valueBalanceOrchard + sum(vPubNew) - sum(vPubOld) - sum(tOut) - uint32 fee = 3; - - repeated CompactSaplingSpend spends = 4; - repeated CompactSaplingOutput outputs = 5; - repeated CompactOrchardAction actions = 6; -} - -// A compact representation of a [Sapling Spend](https://zips.z.cash/protocol/protocol.pdf#spendencodingandconsensus). -// -// CompactSaplingSpend is a Sapling Spend Description as described in 7.3 of the Zcash -// protocol specification. -message CompactSaplingSpend { - bytes nf = 1; // Nullifier (see the Zcash protocol specification) -} - -// A compact representation of a [Sapling Output](https://zips.z.cash/protocol/protocol.pdf#outputencodingandconsensus). -// -// It encodes the `cmu` field, `ephemeralKey` field, and a 52-byte prefix of the -// `encCiphertext` field of a Sapling Output Description. Total size is 116 bytes. -message CompactSaplingOutput { - bytes cmu = 1; // Note commitment u-coordinate. - bytes ephemeralKey = 2; // Ephemeral public key. - bytes ciphertext = 3; // First 52 bytes of ciphertext. -} - -// A compact representation of an [Orchard Action](https://zips.z.cash/protocol/protocol.pdf#actionencodingandconsensus). 
-message CompactOrchardAction { - bytes nullifier = 1; // [32] The nullifier of the input note - bytes cmx = 2; // [32] The x-coordinate of the note commitment for the output note - bytes ephemeralKey = 3; // [32] An encoding of an ephemeral Pallas public key - bytes ciphertext = 4; // [52] The first 52 bytes of the encCiphertext field -} diff --git a/zaino-proto/proto/compact_formats.proto b/zaino-proto/proto/compact_formats.proto new file mode 120000 index 000000000..6f7c5efb1 --- /dev/null +++ b/zaino-proto/proto/compact_formats.proto @@ -0,0 +1 @@ +../lightwallet-protocol/walletrpc/compact_formats.proto \ No newline at end of file diff --git a/zaino-proto/proto/service.proto b/zaino-proto/proto/service.proto deleted file mode 100644 index 094566147..000000000 --- a/zaino-proto/proto/service.proto +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright (c) 2019-2020 The Zcash developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or https://www.opensource.org/licenses/mit-license.php . - -syntax = "proto3"; -package cash.z.wallet.sdk.rpc; -option go_package = "lightwalletd/walletrpc"; -option swift_prefix = ""; -import "compact_formats.proto"; - -// A BlockID message contains identifiers to select a block: a height or a -// hash. Specification by hash is not implemented, but may be in the future. -message BlockID { - uint64 height = 1; - bytes hash = 2; -} - -// BlockRange specifies a series of blocks from start to end inclusive. -// Both BlockIDs must be heights; specification by hash is not yet supported. -message BlockRange { - BlockID start = 1; - BlockID end = 2; -} - -// A TxFilter contains the information needed to identify a particular -// transaction: either a block and an index, or a direct transaction hash. -// Currently, only specification by hash is supported. 
-message TxFilter { - BlockID block = 1; // block identifier, height or hash - uint64 index = 2; // index within the block - bytes hash = 3; // transaction ID (hash, txid) -} - -// RawTransaction contains the complete transaction data. It also optionally includes -// the block height in which the transaction was included, or, when returned -// by GetMempoolStream(), the latest block height. -message RawTransaction { - bytes data = 1; // exact data returned by Zcash 'getrawtransaction' - uint64 height = 2; // height that the transaction was mined (or -1) -} - -// A SendResponse encodes an error code and a string. It is currently used -// only by SendTransaction(). If error code is zero, the operation was -// successful; if non-zero, it and the message specify the failure. -message SendResponse { - int32 errorCode = 1; - string errorMessage = 2; -} - -// Chainspec is a placeholder to allow specification of a particular chain fork. -message ChainSpec {} - -// Empty is for gRPCs that take no arguments, currently only GetLightdInfo. -message Empty {} - -// LightdInfo returns various information about this lightwalletd instance -// and the state of the blockchain. -message LightdInfo { - string version = 1; - string vendor = 2; - bool taddrSupport = 3; // true - string chainName = 4; // either "main" or "test" - uint64 saplingActivationHeight = 5; // depends on mainnet or testnet - string consensusBranchId = 6; // protocol identifier, see consensus/upgrades.cpp - uint64 blockHeight = 7; // latest block on the best chain - string gitCommit = 8; - string branch = 9; - string buildDate = 10; - string buildUser = 11; - uint64 estimatedHeight = 12; // less than tip height if zcashd is syncing - string zcashdBuild = 13; // example: "v4.1.1-877212414" - string zcashdSubversion = 14; // example: "/MagicBean:4.1.1/" -} - -// TransparentAddressBlockFilter restricts the results to the given address -// or block range. 
-message TransparentAddressBlockFilter { - string address = 1; // t-address - BlockRange range = 2; // start, end heights -} - -// Duration is currently used only for testing, so that the Ping rpc -// can simulate a delay, to create many simultaneous connections. Units -// are microseconds. -message Duration { - int64 intervalUs = 1; -} - -// PingResponse is used to indicate concurrency, how many Ping rpcs -// are executing upon entry and upon exit (after the delay). -// This rpc is used for testing only. -message PingResponse { - int64 entry = 1; - int64 exit = 2; -} - -message Address { - string address = 1; -} -message AddressList { - repeated string addresses = 1; -} -message Balance { - int64 valueZat = 1; -} - -message Exclude { - repeated bytes txid = 1; -} - -// The TreeState is derived from the Zcash z_gettreestate rpc. -message TreeState { - string network = 1; // "main" or "test" - uint64 height = 2; // block height - string hash = 3; // block id - uint32 time = 4; // Unix epoch time when the block was mined - string saplingTree = 5; // sapling commitment tree state - string orchardTree = 6; // orchard commitment tree state -} - -enum ShieldedProtocol { - sapling = 0; - orchard = 1; -} - -message GetSubtreeRootsArg { - uint32 startIndex = 1; // Index identifying where to start returning subtree roots - ShieldedProtocol shieldedProtocol = 2; // Shielded protocol to return subtree roots for - uint32 maxEntries = 3; // Maximum number of entries to return, or 0 for all entries. -} -message SubtreeRoot { - bytes rootHash = 2; // The 32-byte Merkle root of the subtree. - bytes completingBlockHash = 3; // The hash of the block that completed this subtree. - uint64 completingBlockHeight = 4; // The height of the block that completed this subtree in the main chain. -} - -// Results are sorted by height, which makes it easy to issue another -// request that picks up from where the previous left off. 
-message GetAddressUtxosArg { - repeated string addresses = 1; - uint64 startHeight = 2; - uint32 maxEntries = 3; // zero means unlimited -} -message GetAddressUtxosReply { - string address = 6; - bytes txid = 1; - int32 index = 2; - bytes script = 3; - int64 valueZat = 4; - uint64 height = 5; -} -message GetAddressUtxosReplyList { - repeated GetAddressUtxosReply addressUtxos = 1; -} - -service CompactTxStreamer { - // Return the height of the tip of the best chain - rpc GetLatestBlock(ChainSpec) returns (BlockID) {} - // Return the compact block corresponding to the given block identifier - rpc GetBlock(BlockID) returns (CompactBlock) {} - // Same as GetBlock except actions contain only nullifiers - rpc GetBlockNullifiers(BlockID) returns (CompactBlock) {} - // Return a list of consecutive compact blocks - rpc GetBlockRange(BlockRange) returns (stream CompactBlock) {} - // Same as GetBlockRange except actions contain only nullifiers - rpc GetBlockRangeNullifiers(BlockRange) returns (stream CompactBlock) {} - - // Return the requested full (not compact) transaction (as from zcashd) - rpc GetTransaction(TxFilter) returns (RawTransaction) {} - // Submit the given transaction to the Zcash network - rpc SendTransaction(RawTransaction) returns (SendResponse) {} - - // Return the txids corresponding to the given t-address within the given block range - rpc GetTaddressTxids(TransparentAddressBlockFilter) returns (stream RawTransaction) {} - rpc GetTaddressBalance(AddressList) returns (Balance) {} - rpc GetTaddressBalanceStream(stream Address) returns (Balance) {} - - // Return the compact transactions currently in the mempool; the results - // can be a few seconds out of date. If the Exclude list is empty, return - // all transactions; otherwise return all *except* those in the Exclude list - // (if any); this allows the client to avoid receiving transactions that it - // already has (from an earlier call to this rpc). 
The transaction IDs in the - // Exclude list can be shortened to any number of bytes to make the request - // more bandwidth-efficient; if two or more transactions in the mempool - // match a shortened txid, they are all sent (none is excluded). Transactions - // in the exclude list that don't exist in the mempool are ignored. - rpc GetMempoolTx(Exclude) returns (stream CompactTx) {} - - // Return a stream of current Mempool transactions. This will keep the output stream open while - // there are mempool transactions. It will close the returned stream when a new block is mined. - rpc GetMempoolStream(Empty) returns (stream RawTransaction) {} - - // GetTreeState returns the note commitment tree state corresponding to the given block. - // See section 3.7 of the Zcash protocol specification. It returns several other useful - // values also (even though they can be obtained using GetBlock). - // The block can be specified by either height or hash. - rpc GetTreeState(BlockID) returns (TreeState) {} - rpc GetLatestTreeState(Empty) returns (TreeState) {} - - // Returns a stream of information about roots of subtrees of the Sapling and Orchard - // note commitment trees. 
- rpc GetSubtreeRoots(GetSubtreeRootsArg) returns (stream SubtreeRoot) {} - - rpc GetAddressUtxos(GetAddressUtxosArg) returns (GetAddressUtxosReplyList) {} - rpc GetAddressUtxosStream(GetAddressUtxosArg) returns (stream GetAddressUtxosReply) {} - - // Return information about this lightwalletd instance and the blockchain - rpc GetLightdInfo(Empty) returns (LightdInfo) {} - // Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) - rpc Ping(Duration) returns (PingResponse) {} -} diff --git a/zaino-proto/proto/service.proto b/zaino-proto/proto/service.proto new file mode 120000 index 000000000..431df7923 --- /dev/null +++ b/zaino-proto/proto/service.proto @@ -0,0 +1 @@ +../lightwallet-protocol/walletrpc/service.proto \ No newline at end of file diff --git a/zaino-proto/src/proto.rs b/zaino-proto/src/proto.rs index 7e04b9499..888984b3b 100644 --- a/zaino-proto/src/proto.rs +++ b/zaino-proto/src/proto.rs @@ -1,5 +1,12 @@ //! Holds tonic generated code for the lightwallet service RPCs and compact formats. +#[allow(clippy::all)] +#[rustfmt::skip] pub mod compact_formats; +#[allow(clippy::all)] +#[rustfmt::skip] pub mod proposal; +#[allow(clippy::all)] +#[rustfmt::skip] pub mod service; +pub mod utils; diff --git a/zaino-proto/src/proto/compact_formats.rs b/zaino-proto/src/proto/compact_formats.rs index 44455378f..2de2751ed 100644 --- a/zaino-proto/src/proto/compact_formats.rs +++ b/zaino-proto/src/proto/compact_formats.rs @@ -1,6 +1,6 @@ +// This file is @generated by prost-build. /// Information about the state of the chain as of a given block. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct ChainMetadata { /// the size of the Sapling note commitment tree as of the end of this block #[prost(uint32, tag = "1")] @@ -9,13 +9,14 @@ pub struct ChainMetadata { #[prost(uint32, tag = "2")] pub orchard_commitment_tree_size: u32, } -/// A compact representation of the shielded data in a Zcash block. +/// A compact representation of a Zcash block. /// /// CompactBlock is a packaging of ONLY the data from a block that's needed to: -/// 1. Detect a payment to your shielded Sapling address -/// 2. Detect a spend of your shielded Sapling notes -/// 3. Update your witnesses to generate new Sapling spend proofs. -#[allow(clippy::derive_partial_eq_without_eq)] +/// +/// 1. Detect a payment to your Shielded address +/// 1. Detect a spend of your Shielded notes +/// 1. Update your witnesses to generate new spend proofs. +/// 1. Spend UTXOs associated to t-addresses of your wallet. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CompactBlock { /// the version of this wire format, for storage @@ -33,7 +34,7 @@ pub struct CompactBlock { /// Unix epoch time when the block was mined #[prost(uint32, tag = "5")] pub time: u32, - /// (hash, prevHash, and time) OR (full header) + /// full header (as returned by the getblock RPC) #[prost(bytes = "vec", tag = "6")] pub header: ::prost::alloc::vec::Vec, /// zero or more compact transactions from this block @@ -43,29 +44,29 @@ pub struct CompactBlock { #[prost(message, optional, tag = "8")] pub chain_metadata: ::core::option::Option, } -/// A compact representation of the shielded data in a Zcash transaction. +/// A compact representation of a Zcash transaction. /// /// CompactTx contains the minimum information for a wallet to know if this transaction -/// is relevant to it (either pays to it or spends from it) via shielded elements -/// only. 
This message will not encode a transparent-to-transparent transaction. -#[allow(clippy::derive_partial_eq_without_eq)] +/// is relevant to it (either pays to it or spends from it) via shielded elements. Additionally, +/// it can optionally include the minimum necessary data to detect payments to transparent addresses +/// related to your wallet. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CompactTx { - /// Index and hash will allow the receiver to call out to chain - /// explorers or other data structures to retrieve more information - /// about this transaction. - /// - /// the index within the full block + /// The index of the transaction within the block. #[prost(uint64, tag = "1")] pub index: u64, - /// the ID (hash) of this transaction, same as in block explorers + /// The id of the transaction as defined in + /// [§ 7.1.1 ‘Transaction Identifiers’]() + /// This byte array MUST be in protocol order and MUST NOT be reversed + /// or hex-encoded; the byte-reversed and hex-encoded representation is + /// exclusively a textual representation of a txid. #[prost(bytes = "vec", tag = "2")] - pub hash: ::prost::alloc::vec::Vec, + pub txid: ::prost::alloc::vec::Vec, /// The transaction fee: present if server can provide. In the case of a /// stateless server and a transaction with transparent inputs, this will be /// unset because the calculation requires reference to prior transactions. /// If there are no transparent inputs, the fee will be calculable as: - /// valueBalanceSapling + valueBalanceOrchard + sum(vPubNew) - sum(vPubOld) - sum(tOut) + /// valueBalanceSapling + valueBalanceOrchard + sum(vPubNew) - sum(vPubOld) - sum(tOut) #[prost(uint32, tag = "3")] pub fee: u32, #[prost(message, repeated, tag = "4")] @@ -74,13 +75,49 @@ pub struct CompactTx { pub outputs: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "6")] pub actions: ::prost::alloc::vec::Vec, + /// `CompactTxIn` values corresponding to the `vin` entries of the full transaction. 
+ /// + /// Note: the single null-outpoint input for coinbase transactions is omitted. Light + /// clients can test `CompactTx.index == 0` to determine whether a `CompactTx` + /// represents a coinbase transaction, as the coinbase transaction is always the + /// first transaction in any block. + #[prost(message, repeated, tag = "7")] + pub vin: ::prost::alloc::vec::Vec, + /// A sequence of transparent outputs being created by the transaction. + #[prost(message, repeated, tag = "8")] + pub vout: ::prost::alloc::vec::Vec, +} +/// A compact representation of a transparent transaction input. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct CompactTxIn { + /// The id of the transaction that generated the output being spent. This + /// byte array must be in protocol order and MUST NOT be reversed or + /// hex-encoded. + #[prost(bytes = "vec", tag = "1")] + pub prevout_txid: ::prost::alloc::vec::Vec, + /// The index of the output being spent in the `vout` array of the + /// transaction referred to by `prevoutTxid`. + #[prost(uint32, tag = "2")] + pub prevout_index: u32, +} +/// A transparent output being created by the transaction. +/// +/// This contains identical data to the `TxOut` type in the transaction itself, and +/// thus it is not "compact". +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct TxOut { + /// The value of the output, in Zatoshis. + #[prost(uint64, tag = "1")] + pub value: u64, + /// The script pubkey that must be satisfied in order to spend this output. + #[prost(bytes = "vec", tag = "2")] + pub script_pub_key: ::prost::alloc::vec::Vec, } /// A compact representation of a [Sapling Spend](). /// /// CompactSaplingSpend is a Sapling Spend Description as described in 7.3 of the Zcash /// protocol specification. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct CompactSaplingSpend { /// Nullifier (see the Zcash protocol specification) #[prost(bytes = "vec", tag = "1")] @@ -90,8 +127,7 @@ pub struct CompactSaplingSpend { /// /// It encodes the `cmu` field, `ephemeralKey` field, and a 52-byte prefix of the /// `encCiphertext` field of a Sapling Output Description. Total size is 116 bytes. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct CompactSaplingOutput { /// Note commitment u-coordinate. #[prost(bytes = "vec", tag = "1")] @@ -104,8 +140,7 @@ pub struct CompactSaplingOutput { pub ciphertext: ::prost::alloc::vec::Vec, } /// A compact representation of an [Orchard Action](). -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct CompactOrchardAction { /// \[32\] The nullifier of the input note #[prost(bytes = "vec", tag = "1")] diff --git a/zaino-proto/src/proto/proposal.rs b/zaino-proto/src/proto/proposal.rs index 1ea321afc..499a0b0ba 100644 --- a/zaino-proto/src/proto/proposal.rs +++ b/zaino-proto/src/proto/proposal.rs @@ -1,5 +1,5 @@ +// This file is @generated by prost-build. /// A data structure that describes a series of transactions to be created. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Proposal { /// The version of this serialization format. @@ -20,7 +20,6 @@ pub struct Proposal { } /// A data structure that describes the inputs to be consumed and outputs to /// be produced in a proposed transaction. 
-#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ProposalStep { /// ZIP 321 serialized transaction request @@ -50,8 +49,7 @@ pub struct ProposalStep { /// A mapping from ZIP 321 payment index to the output pool that has been chosen /// for that payment, based upon the payment address and the selected inputs to /// the transaction. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct PaymentOutputPool { #[prost(uint32, tag = "1")] pub payment_index: u32, @@ -60,8 +58,7 @@ pub struct PaymentOutputPool { } /// The unique identifier and value for each proposed input that does not /// require a back-reference to a prior step of the proposal. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ReceivedOutput { #[prost(bytes = "vec", tag = "1")] pub txid: ::prost::alloc::vec::Vec, @@ -74,8 +71,7 @@ pub struct ReceivedOutput { } /// A reference to a payment in a prior step of the proposal. This payment must /// belong to the wallet. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct PriorStepOutput { #[prost(uint32, tag = "1")] pub step_index: u32, @@ -83,8 +79,7 @@ pub struct PriorStepOutput { pub payment_index: u32, } /// A reference to a change or ephemeral output from a prior step of the proposal. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct PriorStepChange { #[prost(uint32, tag = "1")] pub step_index: u32, @@ -92,16 +87,14 @@ pub struct PriorStepChange { pub change_index: u32, } /// The unique identifier and value for an input to be used in the transaction. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProposedInput { #[prost(oneof = "proposed_input::Value", tags = "1, 2, 3")] pub value: ::core::option::Option, } /// Nested message and enum types in `ProposedInput`. pub mod proposed_input { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, Eq, Hash, ::prost::Oneof)] pub enum Value { #[prost(message, tag = "1")] ReceivedOutput(super::ReceivedOutput), @@ -112,7 +105,6 @@ pub mod proposed_input { } } /// The proposed change outputs and fee value. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionBalance { /// A list of change or ephemeral output values. @@ -129,8 +121,7 @@ pub struct TransactionBalance { /// an ephemeral output, which must be spent by a subsequent step. This is /// only supported for transparent outputs. Each ephemeral output will be /// given a unique t-address. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ChangeValue { /// The value of a change or ephemeral output to be created, in zatoshis. #[prost(uint64, tag = "1")] @@ -148,8 +139,7 @@ pub struct ChangeValue { } /// An object wrapper for memo bytes, to facilitate representing the /// `change_memo == None` case. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct MemoBytes { #[prost(bytes = "vec", tag = "1")] pub value: ::prost::alloc::vec::Vec, @@ -176,10 +166,10 @@ impl ValuePool { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - ValuePool::PoolNotSpecified => "PoolNotSpecified", - ValuePool::Transparent => "Transparent", - ValuePool::Sapling => "Sapling", - ValuePool::Orchard => "Orchard", + Self::PoolNotSpecified => "PoolNotSpecified", + Self::Transparent => "Transparent", + Self::Sapling => "Sapling", + Self::Orchard => "Orchard", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -216,10 +206,10 @@ impl FeeRule { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - FeeRule::NotSpecified => "FeeRuleNotSpecified", - FeeRule::PreZip313 => "PreZip313", - FeeRule::Zip313 => "Zip313", - FeeRule::Zip317 => "Zip317", + Self::NotSpecified => "FeeRuleNotSpecified", + Self::PreZip313 => "PreZip313", + Self::Zip313 => "Zip313", + Self::Zip317 => "Zip317", } } /// Creates an enum from field names used in the ProtoBuf definition. diff --git a/zaino-proto/src/proto/service.rs b/zaino-proto/src/proto/service.rs index 2a8dc3e65..559d2efbe 100644 --- a/zaino-proto/src/proto/service.rs +++ b/zaino-proto/src/proto/service.rs @@ -1,7 +1,33 @@ +// This file is @generated by prost-build. +/// A compact representation of a transparent transaction input. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct CompactTxIn { + /// The id of the transaction that generated the output being spent. This + /// byte array must be in protocol order and MUST NOT be reversed or + /// hex-encoded. 
+ #[prost(bytes = "vec", tag = "1")] + pub prevout_txid: ::prost::alloc::vec::Vec, + /// The index of the output being spent in the `vout` array of the + /// transaction referred to by `prevoutTxid`. + #[prost(uint32, tag = "2")] + pub prevout_index: u32, +} +/// A transparent output being created by the transaction. +/// +/// This contains identical data to the `TxOut` type in the transaction itself, and +/// thus it is not "compact". +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct TxOut { + /// The value of the output, in Zatoshis. + #[prost(uint64, tag = "1")] + pub value: u64, + /// The script pubkey that must be satisfied in order to spend this output. + #[prost(bytes = "vec", tag = "2")] + pub script_pub_key: ::prost::alloc::vec::Vec, +} /// A BlockID message contains identifiers to select a block: a height or a /// hash. Specification by hash is not implemented, but may be in the future. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct BlockId { #[prost(uint64, tag = "1")] pub height: u64, @@ -10,19 +36,27 @@ pub struct BlockId { } /// BlockRange specifies a series of blocks from start to end inclusive. /// Both BlockIDs must be heights; specification by hash is not yet supported. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +/// +/// If no pool types are specified, the server should default to the legacy +/// behavior of returning only data relevant to the shielded (Sapling and +/// Orchard) pools; otherwise, the server should prune `CompactBlocks` returned +/// to include only data relevant to the requested pool types. Clients MUST +/// verify that the version of the server they are connected to are capable +/// of returning pruned and/or transparent data before setting `poolTypes` +/// to a non-empty value. 
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct BlockRange { #[prost(message, optional, tag = "1")] pub start: ::core::option::Option, #[prost(message, optional, tag = "2")] pub end: ::core::option::Option, + #[prost(enumeration = "PoolType", repeated, tag = "3")] + pub pool_types: ::prost::alloc::vec::Vec, } /// A TxFilter contains the information needed to identify a particular /// transaction: either a block and an index, or a direct transaction hash. /// Currently, only specification by hash is supported. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct TxFilter { /// block identifier, height or hash #[prost(message, optional, tag = "1")] @@ -37,21 +71,39 @@ pub struct TxFilter { /// RawTransaction contains the complete transaction data. It also optionally includes /// the block height in which the transaction was included, or, when returned /// by GetMempoolStream(), the latest block height. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +/// +/// FIXME: the documentation here about mempool status contradicts the documentation +/// for the `height` field. See +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct RawTransaction { - /// exact data returned by Zcash 'getrawtransaction' + /// The serialized representation of the Zcash transaction. #[prost(bytes = "vec", tag = "1")] pub data: ::prost::alloc::vec::Vec, - /// height that the transaction was mined (or -1) + /// The height at which the transaction is mined, or a sentinel value. + /// + /// Due to an error in the original protobuf definition, it is necessary to + /// reinterpret the result of the `getrawtransaction` RPC call. Zcashd will + /// return the int64 value `-1` for the height of transactions that appear + /// in the block index, but which are not mined in the main chain. 
Here, the + /// height field of `RawTransaction` was erroneously created as a `uint64`, + /// and as such we must map the response from the zcashd RPC API to be + /// representable within this space. Additionally, the `height` field will + /// be absent for transactions in the mempool, resulting in the default + /// value of `0` being set. Therefore, the meanings of the `height` field of + /// the `RawTransaction` type are as follows: + /// + /// * height 0: the transaction is in the mempool + /// * height 0xffffffffffffffff: the transaction has been mined on a fork that + /// is not currently the main chain + /// * any other height: the transaction has been mined in the main chain at the + /// given height #[prost(uint64, tag = "2")] pub height: u64, } /// A SendResponse encodes an error code and a string. It is currently used /// only by SendTransaction(). If error code is zero, the operation was /// successful; if non-zero, it and the message specify the failure. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct SendResponse { #[prost(int32, tag = "1")] pub error_code: i32, @@ -59,17 +111,14 @@ pub struct SendResponse { pub error_message: ::prost::alloc::string::String, } /// Chainspec is a placeholder to allow specification of a particular chain fork. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct ChainSpec {} /// Empty is for gRPCs that take no arguments, currently only GetLightdInfo. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct Empty {} /// LightdInfo returns various information about this lightwalletd instance /// and the state of the blockchain. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct LightdInfo { #[prost(string, tag = "1")] pub version: ::prost::alloc::string::String, @@ -107,24 +156,39 @@ pub struct LightdInfo { /// example: "/MagicBean:4.1.1/" #[prost(string, tag = "14")] pub zcashd_subversion: ::prost::alloc::string::String, + /// Zcash donation UA address + #[prost(string, tag = "15")] + pub donation_address: ::prost::alloc::string::String, + /// name of next pending network upgrade, empty if none scheduled + #[prost(string, tag = "16")] + pub upgrade_name: ::prost::alloc::string::String, + /// height of next pending upgrade, zero if none is scheduled + #[prost(uint64, tag = "17")] + pub upgrade_height: u64, + /// version of served by this server + #[prost(string, tag = "18")] + pub lightwallet_protocol_version: ::prost::alloc::string::String, } -/// TransparentAddressBlockFilter restricts the results to the given address -/// or block range. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +/// TransparentAddressBlockFilter restricts the results of the GRPC methods that +/// use it to the transactions that involve the given address and were mined in +/// the specified block range. Non-default values for both the address and the +/// block range must be specified. Mempool transactions are not included. +/// +/// The `poolTypes` field of the `range` argument should be ignored. +/// Implementations MAY consider it an error if any pool types are specified. 
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct TransparentAddressBlockFilter { /// t-address #[prost(string, tag = "1")] pub address: ::prost::alloc::string::String, - /// start, end heights + /// start, end heights only #[prost(message, optional, tag = "2")] pub range: ::core::option::Option, } /// Duration is currently used only for testing, so that the Ping rpc /// can simulate a delay, to create many simultaneous connections. Units /// are microseconds. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct Duration { #[prost(int64, tag = "1")] pub interval_us: i64, @@ -132,41 +196,48 @@ pub struct Duration { /// PingResponse is used to indicate concurrency, how many Ping rpcs /// are executing upon entry and upon exit (after the delay). /// This rpc is used for testing only. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct PingResponse { #[prost(int64, tag = "1")] pub entry: i64, #[prost(int64, tag = "2")] pub exit: i64, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Address { #[prost(string, tag = "1")] pub address: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct AddressList { #[prost(string, repeated, tag = "1")] pub addresses: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct Balance { #[prost(int64, tag = "1")] pub value_zat: i64, } 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Exclude { +/// Request parameters for the `GetMempoolTx` RPC. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct GetMempoolTxRequest { + /// A list of transaction ID byte string suffixes that should be excluded + /// from the response. These suffixes may be produced either directly from + /// the underlying txid bytes, or, if the source values are encoded txid + /// strings, by truncating the hexadecimal representation of each + /// transaction ID to an even number of characters, and then hex-decoding + /// and then byte-reversing this value to obtain the byte representation. #[prost(bytes = "vec", repeated, tag = "1")] - pub txid: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + pub exclude_txid_suffixes: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + /// The server must prune `CompactTx`s returned to include only data + /// relevant to the requested pool types. If no pool types are specified, + /// the server should default to the legacy behavior of returning only data + /// relevant to the shielded (Sapling and Orchard) pools. + #[prost(enumeration = "PoolType", repeated, tag = "3")] + pub pool_types: ::prost::alloc::vec::Vec, } /// The TreeState is derived from the Zcash z_gettreestate rpc. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct TreeState { /// "main" or "test" #[prost(string, tag = "1")] @@ -187,8 +258,7 @@ pub struct TreeState { #[prost(string, tag = "6")] pub orchard_tree: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetSubtreeRootsArg { /// Index identifying where to start returning subtree roots #[prost(uint32, tag = "1")] @@ -200,8 +270,7 @@ pub struct GetSubtreeRootsArg { #[prost(uint32, tag = "3")] pub max_entries: u32, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct SubtreeRoot { /// The 32-byte Merkle root of the subtree. #[prost(bytes = "vec", tag = "2")] @@ -215,8 +284,7 @@ pub struct SubtreeRoot { } /// Results are sorted by height, which makes it easy to issue another /// request that picks up from where the previous left off. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetAddressUtxosArg { #[prost(string, repeated, tag = "1")] pub addresses: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, @@ -226,8 +294,7 @@ pub struct GetAddressUtxosArg { #[prost(uint32, tag = "3")] pub max_entries: u32, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetAddressUtxosReply { #[prost(string, tag = "6")] pub address: ::prost::alloc::string::String, @@ -242,12 +309,44 @@ pub struct GetAddressUtxosReply { #[prost(uint64, tag = "5")] pub height: u64, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetAddressUtxosReplyList { #[prost(message, repeated, tag = "1")] pub address_utxos: ::prost::alloc::vec::Vec, } +/// An identifier for a Zcash value pool. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum PoolType { + Invalid = 0, + Transparent = 1, + Sapling = 2, + Orchard = 3, +} +impl PoolType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Invalid => "POOL_TYPE_INVALID", + Self::Transparent => "TRANSPARENT", + Self::Sapling => "SAPLING", + Self::Orchard => "ORCHARD", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "POOL_TYPE_INVALID" => Some(Self::Invalid), + "TRANSPARENT" => Some(Self::Transparent), + "SAPLING" => Some(Self::Sapling), + "ORCHARD" => Some(Self::Orchard), + _ => None, + } + } +} #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum ShieldedProtocol { @@ -261,8 +360,8 @@ impl ShieldedProtocol { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - ShieldedProtocol::Sapling => "sapling", - ShieldedProtocol::Orchard => "orchard", + Self::Sapling => "sapling", + Self::Orchard => "orchard", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -276,7 +375,13 @@ impl ShieldedProtocol { } /// Generated client implementations. pub mod compact_tx_streamer_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; #[derive(Debug, Clone)] @@ -296,10 +401,10 @@ pub mod compact_tx_streamer_client { } impl CompactTxStreamerClient where - T: tonic::client::GrpcService, + T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -317,14 +422,14 @@ pub mod compact_tx_streamer_client { F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< - http::Request, + http::Request, Response = http::Response< - >::ResponseBody, + >::ResponseBody, >, >, , - >>::Error: Into + Send + Sync, + http::Request, + >>::Error: Into + std::marker::Send + std::marker::Sync, { 
CompactTxStreamerClient::new(InterceptedService::new(inner, interceptor)) } @@ -359,7 +464,7 @@ pub mod compact_tx_streamer_client { self.inner = self.inner.max_encoding_message_size(limit); self } - /// Return the height of the tip of the best chain + /// Return the BlockID of the block at the tip of the best chain pub async fn get_latest_block( &mut self, request: impl tonic::IntoRequest, @@ -368,12 +473,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock", ); @@ -399,12 +503,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock", ); @@ -418,7 +521,11 @@ pub mod compact_tx_streamer_client { ); self.inner.unary(req, path, codec).await } - /// Same as GetBlock except actions contain only nullifiers + /// Same as GetBlock except the returned CompactBlock value contains only + /// nullifiers. + /// + /// Note: this method is deprecated. Implementations should ignore any + /// `PoolType::TRANSPARENT` member of the `poolTypes` argument. 
pub async fn get_block_nullifiers( &mut self, request: impl tonic::IntoRequest, @@ -430,12 +537,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockNullifiers", ); @@ -449,7 +555,11 @@ pub mod compact_tx_streamer_client { ); self.inner.unary(req, path, codec).await } - /// Return a list of consecutive compact blocks + /// Return a list of consecutive compact blocks in the specified range, + /// which is inclusive of `range.end`. + /// + /// If range.start \<= range.end, blocks are returned increasing height order; + /// otherwise blocks are returned in decreasing height order. pub async fn get_block_range( &mut self, request: impl tonic::IntoRequest, @@ -463,12 +573,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRange", ); @@ -482,7 +591,11 @@ pub mod compact_tx_streamer_client { ); self.inner.server_streaming(req, path, codec).await } - /// Same as GetBlockRange except actions contain only nullifiers + /// Same as GetBlockRange except the returned CompactBlock values contain + /// only nullifiers. + /// + /// Note: this method is deprecated. Implementations should ignore any + /// `PoolType::TRANSPARENT` member of the `poolTypes` argument. 
pub async fn get_block_range_nullifiers( &mut self, request: impl tonic::IntoRequest, @@ -496,12 +609,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRangeNullifiers", ); @@ -524,12 +636,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTransaction", ); @@ -552,12 +663,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/SendTransaction", ); @@ -571,7 +681,10 @@ pub mod compact_tx_streamer_client { ); self.inner.unary(req, path, codec).await } - /// Return the txids corresponding to the given t-address within the given block range + /// Return RawTransactions that match the given transparent address filter. + /// + /// Note: This function is misnamed, it returns complete `RawTransaction` values, not TxIds. + /// NOTE: this method is deprecated, please use GetTaddressTransactions instead. 
pub async fn get_taddress_txids( &mut self, request: impl tonic::IntoRequest, @@ -583,12 +696,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTxids", ); @@ -602,6 +714,37 @@ pub mod compact_tx_streamer_client { ); self.inner.server_streaming(req, path, codec).await } + /// Return the transactions corresponding to the given t-address within the given block range. + /// Mempool transactions are not included in the results. + pub async fn get_taddress_transactions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTransactions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressTransactions", + ), + ); + self.inner.server_streaming(req, path, codec).await + } pub async fn get_taddress_balance( &mut self, request: impl tonic::IntoRequest, @@ -610,12 +753,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( 
"/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalance", ); @@ -637,12 +779,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalanceStream", ); @@ -656,18 +797,21 @@ pub mod compact_tx_streamer_client { ); self.inner.client_streaming(req, path, codec).await } - /// Return the compact transactions currently in the mempool; the results - /// can be a few seconds out of date. If the Exclude list is empty, return - /// all transactions; otherwise return all *except* those in the Exclude list - /// (if any); this allows the client to avoid receiving transactions that it - /// already has (from an earlier call to this rpc). The transaction IDs in the - /// Exclude list can be shortened to any number of bytes to make the request - /// more bandwidth-efficient; if two or more transactions in the mempool - /// match a shortened txid, they are all sent (none is excluded). Transactions - /// in the exclude list that don't exist in the mempool are ignored. + /// Returns a stream of the compact transaction representation for transactions + /// currently in the mempool. The results of this operation may be a few + /// seconds out of date. If the `exclude_txid_suffixes` list is empty, + /// return all transactions; otherwise return all *except* those in the + /// `exclude_txid_suffixes` list (if any); this allows the client to avoid + /// receiving transactions that it already has (from an earlier call to this + /// RPC). 
The transaction IDs in the `exclude_txid_suffixes` list can be + /// shortened to any number of bytes to make the request more + /// bandwidth-efficient; if two or more transactions in the mempool match a + /// txid suffix, none of the matching transactions are excluded. Txid + /// suffixes in the exclude list that don't match any transactions in the + /// mempool are ignored. pub async fn get_mempool_tx( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< tonic::Response< tonic::codec::Streaming, @@ -678,12 +822,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolTx", ); @@ -710,12 +853,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolStream", ); @@ -741,12 +883,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTreeState", ); @@ -768,12 +909,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + 
tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestTreeState", ); @@ -787,8 +927,8 @@ pub mod compact_tx_streamer_client { ); self.inner.unary(req, path, codec).await } - /// Returns a stream of information about roots of subtrees of the Sapling and Orchard - /// note commitment trees. + /// Returns a stream of information about roots of subtrees of the note commitment tree + /// for the specified shielded protocol (Sapling or Orchard). pub async fn get_subtree_roots( &mut self, request: impl tonic::IntoRequest, @@ -800,12 +940,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetSubtreeRoots", ); @@ -830,12 +969,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxos", ); @@ -860,12 +998,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( 
"/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxosStream", ); @@ -888,12 +1025,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLightdInfo", ); @@ -916,12 +1052,11 @@ pub mod compact_tx_streamer_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/Ping", ); @@ -936,12 +1071,18 @@ pub mod compact_tx_streamer_client { } /// Generated server implementations. pub mod compact_tx_streamer_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with CompactTxStreamerServer. #[async_trait] - pub trait CompactTxStreamer: Send + Sync + 'static { - /// Return the height of the tip of the best chain + pub trait CompactTxStreamer: std::marker::Send + std::marker::Sync + 'static { + /// Return the BlockID of the block at the tip of the best chain async fn get_latest_block( &self, request: tonic::Request, @@ -954,7 +1095,11 @@ pub mod compact_tx_streamer_server { tonic::Response, tonic::Status, >; - /// Same as GetBlock except actions contain only nullifiers + /// Same as GetBlock except the returned CompactBlock value contains only + /// nullifiers. 
+ /// + /// Note: this method is deprecated. Implementations should ignore any + /// `PoolType::TRANSPARENT` member of the `poolTypes` argument. async fn get_block_nullifiers( &self, request: tonic::Request, @@ -969,9 +1114,13 @@ pub mod compact_tx_streamer_server { tonic::Status, >, > - + Send + + std::marker::Send + 'static; - /// Return a list of consecutive compact blocks + /// Return a list of consecutive compact blocks in the specified range, + /// which is inclusive of `range.end`. + /// + /// If range.start \<= range.end, blocks are returned increasing height order; + /// otherwise blocks are returned in decreasing height order. async fn get_block_range( &self, request: tonic::Request, @@ -986,9 +1135,13 @@ pub mod compact_tx_streamer_server { tonic::Status, >, > - + Send + + std::marker::Send + 'static; - /// Same as GetBlockRange except actions contain only nullifiers + /// Same as GetBlockRange except the returned CompactBlock values contain + /// only nullifiers. + /// + /// Note: this method is deprecated. Implementations should ignore any + /// `PoolType::TRANSPARENT` member of the `poolTypes` argument. async fn get_block_range_nullifiers( &self, request: tonic::Request, @@ -1010,9 +1163,12 @@ pub mod compact_tx_streamer_server { type GetTaddressTxidsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, > - + Send + + std::marker::Send + 'static; - /// Return the txids corresponding to the given t-address within the given block range + /// Return RawTransactions that match the given transparent address filter. + /// + /// Note: This function is misnamed, it returns complete `RawTransaction` values, not TxIds. + /// NOTE: this method is deprecated, please use GetTaddressTransactions instead. async fn get_taddress_txids( &self, request: tonic::Request, @@ -1020,6 +1176,21 @@ pub mod compact_tx_streamer_server { tonic::Response, tonic::Status, >; + /// Server streaming response type for the GetTaddressTransactions method. 
+ type GetTaddressTransactionsStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + + std::marker::Send + + 'static; + /// Return the transactions corresponding to the given t-address within the given block range. + /// Mempool transactions are not included in the results. + async fn get_taddress_transactions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_taddress_balance( &self, request: tonic::Request, @@ -1035,20 +1206,23 @@ pub mod compact_tx_streamer_server { tonic::Status, >, > - + Send + + std::marker::Send + 'static; - /// Return the compact transactions currently in the mempool; the results - /// can be a few seconds out of date. If the Exclude list is empty, return - /// all transactions; otherwise return all *except* those in the Exclude list - /// (if any); this allows the client to avoid receiving transactions that it - /// already has (from an earlier call to this rpc). The transaction IDs in the - /// Exclude list can be shortened to any number of bytes to make the request - /// more bandwidth-efficient; if two or more transactions in the mempool - /// match a shortened txid, they are all sent (none is excluded). Transactions - /// in the exclude list that don't exist in the mempool are ignored. + /// Returns a stream of the compact transaction representation for transactions + /// currently in the mempool. The results of this operation may be a few + /// seconds out of date. If the `exclude_txid_suffixes` list is empty, + /// return all transactions; otherwise return all *except* those in the + /// `exclude_txid_suffixes` list (if any); this allows the client to avoid + /// receiving transactions that it already has (from an earlier call to this + /// RPC). 
The transaction IDs in the `exclude_txid_suffixes` list can be + /// shortened to any number of bytes to make the request more + /// bandwidth-efficient; if two or more transactions in the mempool match a + /// txid suffix, none of the matching transactions are excluded. Txid + /// suffixes in the exclude list that don't match any transactions in the + /// mempool are ignored. async fn get_mempool_tx( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< tonic::Response, tonic::Status, @@ -1057,7 +1231,7 @@ pub mod compact_tx_streamer_server { type GetMempoolStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, > - + Send + + std::marker::Send + 'static; /// Return a stream of current Mempool transactions. This will keep the output stream open while /// there are mempool transactions. It will close the returned stream when a new block is mined. @@ -1084,10 +1258,10 @@ pub mod compact_tx_streamer_server { type GetSubtreeRootsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, > - + Send + + std::marker::Send + 'static; - /// Returns a stream of information about roots of subtrees of the Sapling and Orchard - /// note commitment trees. + /// Returns a stream of information about roots of subtrees of the note commitment tree + /// for the specified shielded protocol (Sapling or Orchard). 
async fn get_subtree_roots( &self, request: tonic::Request, @@ -1106,7 +1280,7 @@ pub mod compact_tx_streamer_server { type GetAddressUtxosStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, > - + Send + + std::marker::Send + 'static; async fn get_address_utxos_stream( &self, @@ -1127,20 +1301,18 @@ pub mod compact_tx_streamer_server { ) -> std::result::Result, tonic::Status>; } #[derive(Debug)] - pub struct CompactTxStreamerServer { - inner: _Inner, + pub struct CompactTxStreamerServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl CompactTxStreamerServer { + impl CompactTxStreamerServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -1190,10 +1362,10 @@ pub mod compact_tx_streamer_server { impl tonic::codegen::Service> for CompactTxStreamerServer where T: CompactTxStreamer, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { - type Response = http::Response; + type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; fn poll_ready( @@ -1203,7 +1375,6 @@ pub mod compact_tx_streamer_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock" => { #[allow(non_camel_case_types)] @@ -1235,9 +1406,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetLatestBlockSvc(inner); - let codec 
= tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1280,9 +1450,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetBlockSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1330,9 +1499,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetBlockNullifiersSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1378,9 +1546,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetBlockRangeSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1429,9 +1596,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetBlockRangeNullifiersSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1476,9 +1642,8 
@@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTransactionSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1523,9 +1688,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = SendTransactionSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1575,9 +1739,59 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTaddressTxidsSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTransactions" => { + #[allow(non_camel_case_types)] + struct GetTaddressTransactionsSvc(pub Arc); + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService< + super::TransparentAddressBlockFilter, + > for GetTaddressTransactionsSvc { + type Response = super::RawTransaction; + type ResponseStream = T::GetTaddressTransactionsStream; + type Future = BoxFuture< + tonic::Response, + 
tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_taddress_transactions( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetTaddressTransactionsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1625,9 +1839,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTaddressBalanceSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1675,9 +1888,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTaddressBalanceStreamSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1697,7 +1909,7 @@ pub mod compact_tx_streamer_server { struct GetMempoolTxSvc(pub Arc); impl< T: CompactTxStreamer, - > tonic::server::ServerStreamingService + > tonic::server::ServerStreamingService for GetMempoolTxSvc { type Response = crate::proto::compact_formats::CompactTx; type ResponseStream = 
T::GetMempoolTxStream; @@ -1707,7 +1919,7 @@ pub mod compact_tx_streamer_server { >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1723,9 +1935,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetMempoolTxSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1774,9 +1985,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetMempoolStreamSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1821,9 +2031,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTreeStateSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1869,9 +2078,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetLatestTreeStateSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( 
accept_compression_encodings, @@ -1917,9 +2125,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetSubtreeRootsSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1964,9 +2171,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetAddressUtxosSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -2015,9 +2221,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetAddressUtxosStreamSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -2060,9 +2265,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetLightdInfoSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -2105,9 +2309,8 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - 
let inner = inner.0; let method = PingSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -2124,20 +2327,27 @@ pub mod compact_tx_streamer_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for CompactTxStreamerServer { + impl Clone for CompactTxStreamerServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -2149,18 +2359,9 @@ pub mod compact_tx_streamer_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService - for CompactTxStreamerServer { - const NAME: &'static str = "cash.z.wallet.sdk.rpc.CompactTxStreamer"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "cash.z.wallet.sdk.rpc.CompactTxStreamer"; + impl tonic::server::NamedService for CompactTxStreamerServer { + const NAME: &'static str = SERVICE_NAME; } } diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs new file mode 100644 index 000000000..1670a7c39 --- /dev/null +++ b/zaino-proto/src/proto/utils.rs @@ -0,0 +1,449 @@ +use crate::proto::{ + compact_formats::{ChainMetadata, CompactBlock, CompactOrchardAction}, + service::{BlockId, BlockRange, PoolType}, +}; 
+use zebra_chain::block::Height; +use zebra_state::HashOrHeight; + +#[derive(Debug, PartialEq, Eq)] +/// Errors that can arise when mapping `PoolType` from an `i32` value. +pub enum PoolTypeError { + /// Pool Type value was map to the enum `PoolType::Invalid`. + InvalidPoolType, + /// Pool Type value was mapped to value that can't be mapped to a known pool type. + UnknownPoolType(i32), +} + +/// Converts a vector of pool_types (i32) into its rich-type representation +/// Returns `PoolTypeError::InvalidPoolType` when invalid `pool_types` are found +/// or `PoolTypeError::UnknownPoolType` if unknown ones are found. +pub fn pool_types_from_vector(pool_types: &[i32]) -> Result, PoolTypeError> { + let pools = if pool_types.is_empty() { + vec![PoolType::Sapling, PoolType::Orchard] + } else { + let mut pools: Vec = vec![]; + + for pool in pool_types.iter() { + match PoolType::try_from(*pool) { + Ok(pool_type) => { + if pool_type == PoolType::Invalid { + return Err(PoolTypeError::InvalidPoolType); + } else { + pools.push(pool_type); + } + } + Err(_) => { + return Err(PoolTypeError::UnknownPoolType(*pool)); + } + }; + } + + pools.clone() + }; + Ok(pools) +} + +/// Converts a `Vec` into a `Vec` +pub fn pool_types_into_i32_vec(pool_types: Vec) -> Vec { + pool_types.iter().map(|p| *p as i32).collect() +} + +/// Errors that can be present in the request of the GetBlockRange RPC +pub enum GetBlockRangeError { + /// Error: No start height given. + NoStartHeightProvided, + /// Error: No end height given. + NoEndHeightProvided, + /// Start height out of range. Failed to convert to u32. + StartHeightOutOfRange, + + /// End height out of range. Failed to convert to u32. + EndHeightOutOfRange, + /// An invalid pool type request was provided. + PoolTypeArgumentError(PoolTypeError), +} + +/// `BlockRange` request that has been validated in terms of the semantics +/// of `GetBlockRange` RPC. +/// +/// # Guarantees +/// +/// - `start` and `end` were provided in the request. 
+/// - `start` and `end` are in the inclusive range `0..=u32::MAX`, so they can be +/// safely converted to `u32` (for example via `u32::try_from(...)`) without +/// failing. +/// - `pool_types` has been validated via `pool_types_from_vector`. +pub struct ValidatedBlockRangeRequest { + start: u64, + end: u64, + pool_types: Vec, +} + +impl ValidatedBlockRangeRequest { + /// Validates a `BlockRange` in terms of the `GetBlockRange` RPC. + /// + /// # Errors + /// + /// Returns: + /// - [`GetBlockRangeError::NoStartHeightProvided`] if `request.start` is `None`. + /// - [`GetBlockRangeError::NoEndHeightProvided`] if `request.end` is `None`. + /// - [`GetBlockRangeError::StartHeightOutOfRange`] if `start` does not fit in a `u32`. + /// - [`GetBlockRangeError::EndHeightOutOfRange`] if `end` does not fit in a `u32`. + /// - [`GetBlockRangeError::PoolTypeArgumentError`] if pool types are invalid. + pub fn new_from_block_range( + request: &BlockRange, + ) -> Result { + let start = match &request.start { + Some(block_id) => block_id.height, + None => { + return Err(GetBlockRangeError::NoStartHeightProvided); + } + }; + let end = match &request.end { + Some(block_id) => block_id.height, + None => { + return Err(GetBlockRangeError::NoEndHeightProvided); + } + }; + + if u32::try_from(start).is_err() { + return Err(GetBlockRangeError::StartHeightOutOfRange); + } + if u32::try_from(end).is_err() { + return Err(GetBlockRangeError::EndHeightOutOfRange); + } + + let pool_types = pool_types_from_vector(&request.pool_types) + .map_err(GetBlockRangeError::PoolTypeArgumentError)?; + + Ok(ValidatedBlockRangeRequest { + start, + end, + pool_types, + }) + } + + /// Start Height of the BlockRange Request + pub fn start(&self) -> u64 { + self.start + } + + /// End Height of the BlockRange Request + pub fn end(&self) -> u64 { + self.end + } + + /// Pool Types of the BlockRange request + pub fn pool_types(&self) -> Vec { + self.pool_types.clone() + } + + /// checks whether this request is 
specified in reversed order + pub fn is_reverse_ordered(&self) -> bool { + self.start > self.end + } + + /// Reverses the order of this request + pub fn reverse(&mut self) { + (self.start, self.end) = (self.end, self.start); + } +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct PoolTypeFilter { + include_transparent: bool, + include_sapling: bool, + include_orchard: bool, +} + +impl std::default::Default for PoolTypeFilter { + /// By default PoolType includes `Sapling` and `Orchard` pools. + fn default() -> Self { + PoolTypeFilter { + include_transparent: false, + include_sapling: true, + include_orchard: true, + } + } +} + +impl PoolTypeFilter { + /// A PoolType Filter that will include all existing pool types. + pub fn includes_all() -> Self { + PoolTypeFilter { + include_transparent: true, + include_sapling: true, + include_orchard: true, + } + } + + /// create a `PoolTypeFilter` from a vector of raw i32 `PoolType`s + /// If the vector is empty it will return `Self::default()`. + /// If the vector contains `PoolType::Invalid` or the vector contains more than 3 elements + /// returns `PoolTypeError::InvalidPoolType` + pub fn new_from_slice(pool_types: &[i32]) -> Result { + let pool_types = pool_types_from_vector(pool_types)?; + + Self::new_from_pool_types(&pool_types) + } + + /// create a `PoolTypeFilter` from a vector of `PoolType` + /// If the vector is empty it will return `Self::default()`. 
+ /// If the vector contains `PoolType::Invalid` or the vector contains more than 3 elements + /// returns `PoolTypeError::InvalidPoolType` + pub fn new_from_pool_types( + pool_types: &Vec, + ) -> Result { + if pool_types.len() > PoolType::Orchard as usize { + return Err(PoolTypeError::InvalidPoolType); + } + + if pool_types.is_empty() { + Ok(Self::default()) + } else { + let mut filter = PoolTypeFilter::empty(); + + for pool_type in pool_types { + match pool_type { + PoolType::Invalid => return Err(PoolTypeError::InvalidPoolType), + PoolType::Transparent => filter.include_transparent = true, + PoolType::Sapling => filter.include_sapling = true, + PoolType::Orchard => filter.include_orchard = true, + } + } + + // guard against returning an invalid state this shouls never happen. + if filter.is_empty() { + Ok(Self::default()) + } else { + Ok(filter) + } + } + } + + /// only internal use. this in an invalid state. + fn empty() -> Self { + Self { + include_transparent: false, + include_sapling: false, + include_orchard: false, + } + } + + /// only internal use + fn is_empty(&self) -> bool { + !self.include_transparent && !self.include_sapling && !self.include_orchard + } + + /// retuns whether the filter includes transparent data + pub fn includes_transparent(&self) -> bool { + self.include_transparent + } + + /// returns whether the filter includes orchard data + pub fn includes_sapling(&self) -> bool { + self.include_sapling + } + + // returnw whether the filter includes orchard data + pub fn includes_orchard(&self) -> bool { + self.include_orchard + } + + /// Convert this filter into the corresponding `Vec`. + /// + /// The resulting vector contains each included pool type at most once. 
+ pub fn to_pool_types_vector(&self) -> Vec { + let mut pool_types: Vec = Vec::new(); + + if self.include_transparent { + pool_types.push(PoolType::Transparent); + } + + if self.include_sapling { + pool_types.push(PoolType::Sapling); + } + + if self.include_orchard { + pool_types.push(PoolType::Orchard); + } + + pool_types + } + + /// testing only + #[allow(dead_code)] + pub(crate) fn from_checked_parts( + include_transparent: bool, + include_sapling: bool, + include_orchard: bool, + ) -> Self { + PoolTypeFilter { + include_transparent, + include_sapling, + include_orchard, + } + } +} + +/// Converts [`BlockId`] into [`HashOrHeight`] Zebra type +pub fn blockid_to_hashorheight(block_id: BlockId) -> Option { + <[u8; 32]>::try_from(block_id.hash) + .map(zebra_chain::block::Hash) + .map(HashOrHeight::from) + .or_else(|_| { + block_id + .height + .try_into() + .map(|height| HashOrHeight::Height(Height(height))) + }) + .ok() +} + +/// prunes a compact block from transaction in formation related to pools not included in the +/// `pool_types` vector. +/// Note: for backwards compatibility an empty vector will return Sapling and Orchard Tx info. +pub fn compact_block_with_pool_types( + mut block: CompactBlock, + pool_types: &[PoolType], +) -> CompactBlock { + if pool_types.is_empty() { + for compact_tx in &mut block.vtx { + // strip out transparent inputs if not Requested + compact_tx.vin.clear(); + compact_tx.vout.clear(); + } + + // Omit transactions that have no Sapling/Orchard elements. 
+ block.vtx.retain(|compact_tx| { + !compact_tx.spends.is_empty() + || !compact_tx.outputs.is_empty() + || !compact_tx.actions.is_empty() + }); + } else { + for compact_tx in &mut block.vtx { + // strip out transparent inputs if not Requested + if !pool_types.contains(&PoolType::Transparent) { + compact_tx.vin.clear(); + compact_tx.vout.clear(); + } + // strip out sapling if not requested + if !pool_types.contains(&PoolType::Sapling) { + compact_tx.spends.clear(); + compact_tx.outputs.clear(); + } + // strip out orchard if not requested + if !pool_types.contains(&PoolType::Orchard) { + compact_tx.actions.clear(); + } + } + + // Omit transactions that have no elements in any requested pool type. + block.vtx.retain(|compact_tx| { + !compact_tx.vin.is_empty() + || !compact_tx.vout.is_empty() + || !compact_tx.spends.is_empty() + || !compact_tx.outputs.is_empty() + || !compact_tx.actions.is_empty() + }); + } + + block +} + +/// Strips the ouputs and from all transactions, retains only +/// the nullifier from all orcard actions, and clears the chain +/// metadata from the block +pub fn compact_block_to_nullifiers(mut block: CompactBlock) -> CompactBlock { + for ctransaction in &mut block.vtx { + ctransaction.outputs = Vec::new(); + for caction in &mut ctransaction.actions { + *caction = CompactOrchardAction { + nullifier: caction.nullifier.clone(), + ..Default::default() + } + } + } + + block.chain_metadata = Some(ChainMetadata { + sapling_commitment_tree_size: 0, + orchard_commitment_tree_size: 0, + }); + block +} + +#[cfg(test)] +mod test { + use crate::proto::{ + service::PoolType, + utils::{PoolTypeError, PoolTypeFilter}, + }; + + #[test] + fn test_pool_type_filter_fails_when_invalid() { + let pools = [ + PoolType::Transparent, + PoolType::Sapling, + PoolType::Orchard, + PoolType::Invalid, + ] + .to_vec(); + + assert_eq!( + PoolTypeFilter::new_from_pool_types(&pools), + Err(PoolTypeError::InvalidPoolType) + ); + } + + #[test] + fn 
test_pool_type_filter_fails_when_too_many_items() { + let pools = [ + PoolType::Transparent, + PoolType::Sapling, + PoolType::Orchard, + PoolType::Orchard, + ] + .to_vec(); + + assert_eq!( + PoolTypeFilter::new_from_pool_types(&pools), + Err(PoolTypeError::InvalidPoolType) + ); + } + + #[test] + fn test_pool_type_filter_t_z_o() { + let pools = [PoolType::Transparent, PoolType::Sapling, PoolType::Orchard].to_vec(); + + assert_eq!( + PoolTypeFilter::new_from_pool_types(&pools), + Ok(PoolTypeFilter::from_checked_parts(true, true, true)) + ); + } + + #[test] + fn test_pool_type_filter_t() { + let pools = [PoolType::Transparent].to_vec(); + + assert_eq!( + PoolTypeFilter::new_from_pool_types(&pools), + Ok(PoolTypeFilter::from_checked_parts(true, false, false)) + ); + } + + #[test] + fn test_pool_type_filter_default() { + assert_eq!( + PoolTypeFilter::new_from_pool_types(&vec![]), + Ok(PoolTypeFilter::default()) + ); + } + + #[test] + fn test_pool_type_filter_includes_all() { + assert_eq!( + PoolTypeFilter::from_checked_parts(true, true, true), + PoolTypeFilter::includes_all() + ); + } +} diff --git a/zaino-serve/Cargo.toml b/zaino-serve/Cargo.toml index 31b2f16e1..dcc814f1d 100644 --- a/zaino-serve/Cargo.toml +++ b/zaino-serve/Cargo.toml @@ -1,33 +1,46 @@ [package] name = "zaino-serve" description = "Crate containing Zingo's gRPC server implementation." -edition = { workspace = true } authors = { workspace = true } -license = { workspace = true } repository = { workspace = true } +homepage = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +version = { workspace = true } + +[features] +# Removes network restrictions. +no_tls_use_unencrypted_traffic = ["tonic/tls-native-roots"] + +# **Experimental and alpha features** +# Exposes the **complete** set of experimental / alpha features currently implemented in Zaino. 
+experimental_features = ["transparent_address_history_experimental"] + +# Activates transparent address history capability in zaino +# +# NOTE: currently this is only implemented in the finalised state. +transparent_address_history_experimental = ["zaino-state/transparent_address_history_experimental"] [dependencies] -zaino-proto = { path = "../zaino-proto" } -zaino-fetch = { path = "../zaino-fetch" } +zaino-proto = { workspace = true } +zaino-fetch = { workspace = true } +zaino-state = { workspace = true } # Zebra zebra-chain = { workspace = true } zebra-rpc = { workspace = true } +# Tracing +tracing = { workspace = true } + # Miscellaneous Workspace tokio = { workspace = true, features = ["full"] } -tonic = { workspace = true } -http = { workspace = true } +tonic = { workspace = true, features = ["tls-native-roots"] } thiserror = { workspace = true } - -# Miscellaneous Crate -prost = { workspace = true } -hex = { workspace = true, features = ["serde"] } -tokio-stream = { workspace = true } futures = { workspace = true } -async-stream = { workspace = true } -crossbeam-channel = { workspace = true } -lazy-regex = { workspace = true } +jsonrpsee = { workspace = true, features = ["server", "macros"] } +serde = { workspace = true, features = ["derive"] } +tower = { workspace = true } [build-dependencies] whoami = { workspace = true } diff --git a/zaino-serve/build.rs b/zaino-serve/build.rs index 120a8c899..24b2a8129 100644 --- a/zaino-serve/build.rs +++ b/zaino-serve/build.rs @@ -31,11 +31,11 @@ fn main() -> io::Result<()> { // Set the build user let build_user = whoami::username(); - println!("cargo:rustc-env=BUILD_USER={}", build_user); + println!("cargo:rustc-env=BUILD_USER={build_user}"); // Set the version from Cargo.toml let version = env::var("CARGO_PKG_VERSION").expect("Failed to get version from Cargo.toml"); - println!("cargo:rustc-env=VERSION={}", version); + println!("cargo:rustc-env=VERSION={version}"); Ok(()) } diff --git a/zaino-serve/src/lib.rs 
b/zaino-serve/src/lib.rs index fc4c59cb6..df52dbd67 100644 --- a/zaino-serve/src/lib.rs +++ b/zaino-serve/src/lib.rs @@ -1,4 +1,4 @@ -//! Holds a gRPC server capable of servicing clients over TCP. +//! Holds gRPC and JSON RPC servers capable of servicing clients over TCP. //! //! - server::ingestor has been built so that other ingestors may be added that use different transport protocols (Nym, TOR). //! @@ -9,4 +9,3 @@ pub mod rpc; pub mod server; -pub(crate) mod utils; diff --git a/zaino-serve/src/rpc.rs b/zaino-serve/src/rpc.rs index a9da92eb6..2b1fd1e7e 100644 --- a/zaino-serve/src/rpc.rs +++ b/zaino-serve/src/rpc.rs @@ -1,17 +1,20 @@ -//! Lightwallet service RPC implementations. +//! gRPC / JsonRPC service implementations. -use std::sync::{atomic::AtomicBool, Arc}; +use zaino_state::{IndexerSubscriber, LightWalletIndexer, ZcashIndexer}; -pub mod service; +pub mod grpc; +pub mod jsonrpc; -#[derive(Debug, Clone)] -/// Configuration data for gRPC server. -pub struct GrpcClient { - /// Lightwalletd uri. - /// Used by grpc_passthrough to pass on unimplemented RPCs. - pub lightwalletd_uri: http::Uri, - /// Zebrad uri. - pub zebrad_uri: http::Uri, - /// Represents the Online status of the gRPC server. - pub online: Arc, +#[derive(Clone)] +/// Zaino gRPC service. +pub struct GrpcClient { + /// Chain fetch service subscriber. + pub service_subscriber: IndexerSubscriber, +} + +#[derive(Clone)] +/// Zaino JSONRPC service. +pub struct JsonRpcClient { + /// Chain fetch service subscriber. + pub service_subscriber: IndexerSubscriber, } diff --git a/zaino-serve/src/rpc/grpc.rs b/zaino-serve/src/rpc/grpc.rs new file mode 100644 index 000000000..da072f594 --- /dev/null +++ b/zaino-serve/src/rpc/grpc.rs @@ -0,0 +1,3 @@ +//! Grpc service implementations. 
+ +pub mod service; diff --git a/zaino-serve/src/rpc/grpc/service.rs b/zaino-serve/src/rpc/grpc/service.rs new file mode 100644 index 000000000..cf723095b --- /dev/null +++ b/zaino-serve/src/rpc/grpc/service.rs @@ -0,0 +1,273 @@ +//! Lightwallet service RPC implementations. + +use futures::StreamExt; +use tracing::info; + +use crate::rpc::GrpcClient; +use zaino_proto::proto::{ + compact_formats::CompactBlock, + service::{ + compact_tx_streamer_server::CompactTxStreamer, Address, AddressList, Balance, BlockId, + BlockRange, ChainSpec, Duration, Empty, GetAddressUtxosArg, GetAddressUtxosReplyList, + GetMempoolTxRequest, GetSubtreeRootsArg, LightdInfo, PingResponse, RawTransaction, + SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, + }, +}; +use zaino_state::{ + AddressStream, CompactBlockStream, CompactTransactionStream, LightWalletIndexer, + RawTransactionStream, SubtreeRootReplyStream, UtxoReplyStream, ZcashIndexer, +}; + +/// A helper macro invoked by implement_client_methods, as the +/// internals differ slightly in the streaming return case +/// compared to the 'normal' case. This should never +/// be invoked directly. +macro_rules! 
client_method_helper { + // all of these are hard-coded by implement_client_methods, + // and need to be passed in due to macros not being able to + // access variables in an outer scope unless they are passed in + ($self:ident $input:ident $method_name:ident) => { + tonic::Response::new( + // offload to the service_subscriber's implementation of + // $method_name, unpacking $input + $self + .service_subscriber + .inner_ref() + .$method_name($input.into_inner()) + .await + .map_err(Into::into)?, + ) + }; + // in the case of Streaming return types, we need an additional + // invocation of Box::pin + (streaming $self:ident $input:ident $method_name:ident) => { + // extra Box::pin here + tonic::Response::new(Box::pin( + $self + .service_subscriber + .inner_ref() + .$method_name($input.into_inner()) + .await + .map_err(Into::into)?, + )) + }; + // for the no-input variant + (empty $self:ident $input:ident $method_name:ident) => { + tonic::Response::new( + $self + .service_subscriber + .inner_ref() + .$method_name() + .await + .map_err(Into::into)?, + ) + }; + // WOMBO-COMBO!! + (streamingempty $self:ident $input:ident $method_name:ident) => { + // extra Box::pin here + tonic::Response::new(Box::pin( + $self + .service_subscriber + .inner_ref() + .$method_name() + .await + .map_err(Into::into)?, + )) + }; +} + +/// A macro to remove the boilerplate of implementing 30-ish client +/// methods with the same logic, aside from the types, names, and +/// streaming/nonstreaming. +/// +/// Arguments: +/// comment method_name(input_type) -> return \[as streaming\], +/// \[comment\] A str literal to be used a doc-comment for the method. 
+/// \[method_name\] The name of the method to implement +/// \[input_type\] The type of the tonic Request to accept as an argument +/// \[as streaming/empty\] the optional literal characters +/// 'as streaming', 'as empty', or 'as streamingempty' +/// needed when the return type is a Streaming type, and/or +/// the argument type isn't used +/// \[return\] the return type of the function +macro_rules! implement_client_methods { + ($($comment:literal $method_name:ident($input_type:ty ) -> $return:ty $( as $streaming:ident)? ,)+) => { + $( + #[doc = $comment] + fn $method_name<'life, 'async_trait>( + &'life self, + __input: tonic::Request<$input_type>, + ) -> core::pin::Pin< + Box< + dyn core::future::Future< + Output = std::result::Result, tonic::Status>, + > + core::marker::Send + + 'async_trait, + >, + > + where + 'life: 'async_trait, + Self: 'async_trait, + { + info!("[TEST] Received call of {}.", stringify!($method_name)); + Box::pin(async { + Ok( + // here we pass in pinbox, to optionally add + // Box::pin to the returned response type for + // streaming + client_method_helper!($($streaming)? self __input $method_name) + ) + }) + } + )+ + }; +} + +impl CompactTxStreamer for GrpcClient +where + Indexer::Error: Into, +{ + implement_client_methods!( + "Return the height of the tip of the best chain." + get_latest_block(ChainSpec) -> BlockId as empty, + "Return the compact block corresponding to the given block identifier." + get_block(BlockId) -> CompactBlock, + "Same as GetBlock except actions contain only nullifiers." + get_block_nullifiers(BlockId) -> CompactBlock, + "Return a list of consecutive compact blocks." + get_block_range(BlockRange) -> Self::GetBlockRangeStream as streaming, + "Same as GetBlockRange except actions contain only nullifiers." + get_block_range_nullifiers(BlockRange) -> Self::GetBlockRangeStream as streaming, + "Return the requested full (not compact) transaction (as from zcashd)." 
+ get_transaction(TxFilter) -> RawTransaction, + "submit the given transaction to the zcash network." + send_transaction(RawTransaction) -> SendResponse, + "Return the transactions corresponding to the given t-address within the given block range" + get_taddress_transactions(TransparentAddressBlockFilter) -> Self::GetTaddressTransactionsStream as streaming, + "This name is misleading, returns the full transactions that have either inputs or outputs connected to the given transparent address." + get_taddress_txids(TransparentAddressBlockFilter) -> Self::GetTaddressTxidsStream as streaming, + "Returns the total balance for a list of taddrs" + get_taddress_balance(AddressList) -> Balance, + + "Returns a stream of the compact transaction representation for transactions \ + currently in the mempool. The results of this operation may be a few \ + seconds out of date. If the `exclude_txid_suffixes` list is empty, \ + return all transactions; otherwise return all *except* those in the \ + `exclude_txid_suffixes` list (if any); this allows the client to avoid \ + receiving transactions that it already has (from an earlier call to this \ + RPC). The transaction IDs in the `exclude_txid_suffixes` list can be \ + shortened to any number of bytes to make the request more \ + bandwidth-efficient; if two or more transactions in the mempool match a \ + txid suffix, none of the matching transactions are excluded. Txid \ + suffixes in the exclude list that don't match any transactions in the \ + mempool are ignored." + get_mempool_tx(GetMempoolTxRequest) -> Self::GetMempoolTxStream as streaming, + "GetTreeState returns the note commitment tree state corresponding to the given block. \ + See section 3.7 of the Zcash protocol specification. It returns several other useful \ + values also (even though they can be obtained using GetBlock). + The block can be specified by either height or hash." 
+ get_tree_state(BlockId) -> TreeState, + "Returns a stream of information about roots of subtrees of the Sapling and Orchard \ + note commitment trees." + get_subtree_roots(GetSubtreeRootsArg) -> Self::GetSubtreeRootsStream as streaming, + "Returns all unspent outputs for a list of addresses. \ + \ + Ignores all utxos below block height [GetAddressUtxosArg.start_height]. \ + Returns max [GetAddressUtxosArg.max_entries] utxos, or \ + unrestricted if [GetAddressUtxosArg.max_entries] = 0. \ + Utxos are collected and returned as a single Vec." + get_address_utxos(GetAddressUtxosArg) -> GetAddressUtxosReplyList, + "Returns all unspent outputs for a list of addresses. \ + Ignores all utxos below block height [GetAddressUtxosArg. start_height]. \ + Returns max [GetAddressUtxosArg.max_entries] utxos, or unrestricted if [GetAddressUtxosArg.max_entries] = 0. \ + Utxos are returned in a stream." + get_address_utxos_stream(GetAddressUtxosArg) -> Self::GetAddressUtxosStreamStream as streaming, + "Return information about this lightwalletd instance and the blockchain" + get_lightd_info(Empty) -> LightdInfo as empty, + "GetLatestTreeState returns the note commitment tree state corresponding to the chain tip." + get_latest_tree_state(Empty) -> TreeState as empty, + "Return a stream of current Mempool transactions. This will keep the output stream open while \ + there are mempool transactions. It will close the returned stream when a new block is mined." + get_mempool_stream(Empty) -> Self::GetMempoolStreamStream as streamingempty, + "Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) [from zebrad] \ + This RPC has not been implemented as it is not currently used by zingolib. \ + If you require this RPC please open an issue or PR at [https://github.com/zingolabs/zaino]\ + (https://github.com/zingolabs/zaino)." + + ping(Duration) -> PingResponse, + ); + + /// Server streaming response type for the GetBlockRange method. 
+ #[doc = "Server streaming response type for the GetBlockRange method."] + type GetBlockRangeStream = std::pin::Pin>; + + /// Server streaming response type for the GetBlockRangeNullifiers method. + #[doc = " Server streaming response type for the GetBlockRangeNullifiers method."] + type GetBlockRangeNullifiersStream = std::pin::Pin>; + + /// Server streaming response type for the GetTaddressTransactions method. + #[doc = "Server streaming response type for the GetTaddressTransactions method."] + type GetTaddressTransactionsStream = std::pin::Pin>; + + /// Server streaming response type for the GetTaddressTxids method. + #[doc = "Server streaming response type for the GetTaddressTxids method."] + type GetTaddressTxidsStream = std::pin::Pin>; + + /// Returns the total balance for a list of taddrs + #[allow(clippy::type_complexity, clippy::type_repetition_in_bounds)] + fn get_taddress_balance_stream<'life0, 'async_trait>( + &'life0 self, + request: tonic::Request>, + ) -> ::core::pin::Pin< + Box< + dyn ::core::future::Future, tonic::Status>> + + ::core::marker::Send + + 'async_trait, + >, + > + where + 'life0: 'async_trait, + Self: 'async_trait, + { + info!("[TEST] Received call of get_taddress_balance_stream."); + Box::pin(async { + let (channel_tx, channel_rx) = + tokio::sync::mpsc::channel::>(32); + let mut request_stream = request.into_inner(); + + tokio::spawn(async move { + while let Some(address_result) = request_stream.next().await { + if channel_tx.send(address_result).await.is_err() { + break; + } + } + drop(channel_tx); + }); + let address_stream = AddressStream::new(channel_rx); + + Ok(tonic::Response::new( + self.service_subscriber + .inner_ref() + .get_taddress_balance_stream(address_stream) + .await + .map_err(Into::into)?, + )) + }) + } + + /// Server streaming response type for the GetMempoolTx method. 
+ #[doc = "Server streaming response type for the GetMempoolTx method."] + type GetMempoolTxStream = std::pin::Pin>; + + /// Server streaming response type for the GetMempoolStream method. + #[doc = "Server streaming response type for the GetMempoolStream method."] + type GetMempoolStreamStream = std::pin::Pin>; + + /// Server streaming response type for the GetSubtreeRoots method. + #[doc = " Server streaming response type for the GetSubtreeRoots method."] + type GetSubtreeRootsStream = std::pin::Pin>; + + /// Server streaming response type for the GetAddressUtxosStream method. + #[doc = "Server streaming response type for the GetAddressUtxosStream method."] + type GetAddressUtxosStreamStream = std::pin::Pin>; +} diff --git a/zaino-serve/src/rpc/jsonrpc.rs b/zaino-serve/src/rpc/jsonrpc.rs new file mode 100644 index 000000000..ec2dff99c --- /dev/null +++ b/zaino-serve/src/rpc/jsonrpc.rs @@ -0,0 +1,3 @@ +//! JsonRPC service implementations. + +pub mod service; diff --git a/zaino-serve/src/rpc/jsonrpc/service.rs b/zaino-serve/src/rpc/jsonrpc/service.rs new file mode 100644 index 000000000..2825ffa1f --- /dev/null +++ b/zaino-serve/src/rpc/jsonrpc/service.rs @@ -0,0 +1,756 @@ +//! Zcash RPC implementations. 
+ +use zaino_fetch::jsonrpsee::response::block_deltas::BlockDeltas; +use zaino_fetch::jsonrpsee::response::block_header::GetBlockHeader; +use zaino_fetch::jsonrpsee::response::block_subsidy::GetBlockSubsidy; +use zaino_fetch::jsonrpsee::response::mining_info::GetMiningInfoWire; +use zaino_fetch::jsonrpsee::response::peer_info::GetPeerInfo; +use zaino_fetch::jsonrpsee::response::{GetMempoolInfoResponse, GetNetworkSolPsResponse}; +use zaino_state::{LightWalletIndexer, ZcashIndexer}; + +use zebra_chain::{block::Height, subtree::NoteCommitmentSubtreeIndex}; +use zebra_rpc::client::{ + GetBlockchainInfoResponse, GetSubtreesByIndexResponse, GetTreestateResponse, + ValidateAddressResponse, +}; +use zebra_rpc::methods::{ + AddressBalance, GetAddressBalanceRequest, GetAddressTxIdsRequest, GetAddressUtxos, GetBlock, + GetBlockHash, GetInfo, GetRawTransaction, SentTransactionHash, +}; + +use jsonrpsee::types::ErrorObjectOwned; +use jsonrpsee::{proc_macros::rpc, types::ErrorCode}; + +use crate::rpc::JsonRpcClient; + +/// Zcash RPC method signatures. +/// +/// Doc comments taken from Zebra for consistency. +#[rpc(server)] +pub trait ZcashIndexerRpc { + /// Returns software information from the RPC server, as a [`GetInfo`] JSON struct. + /// + /// zcashd reference: [`getinfo`](https://zcash.github.io/rpc/getinfo.html) + /// method: post + /// tags: control + /// + /// # Notes + /// + /// [The zcashd reference](https://zcash.github.io/rpc/getinfo.html) might not show some fields + /// in Zebra's [`GetInfo`]. Zebra uses the field names and formats from the + /// [zcashd code](https://github.com/zcash/zcash/blob/v4.6.0-1/src/rpc/misc.cpp#L86-L87). + /// + /// Some fields from the zcashd reference are missing from Zebra's [`GetInfo`]. 
It only contains the fields + /// [required for lightwalletd support.](https://github.com/zcash/lightwalletd/blob/v0.4.9/common/common.go#L91-L95) + #[method(name = "getinfo")] + async fn get_info(&self) -> Result; + + /// Returns blockchain state information, as a [`GetBlockchainInfoResponse`] JSON struct. + /// + /// zcashd reference: [`getblockchaininfo`](https://zcash.github.io/rpc/getblockchaininfo.html) + /// method: post + /// tags: blockchain + /// + /// # Notes + /// + /// Some fields from the zcashd reference are missing from Zebra's [`GetBlockchainInfoResponse`]. It only contains the fields + /// [required for lightwalletd support.](https://github.com/zcash/lightwalletd/blob/v0.4.9/common/common.go#L72-L89) + #[method(name = "getblockchaininfo")] + async fn get_blockchain_info(&self) -> Result; + + /// Returns details on the active state of the TX memory pool. + /// + /// online zcash rpc reference: [`getmempoolinfo`](https://zcash.github.io/rpc/getmempoolinfo.html) + /// method: post + /// tags: mempool + /// + /// Canonical source code implementation: [`getmempoolinfo`](https://github.com/zcash/zcash/blob/18238d90cd0b810f5b07d5aaa1338126aa128c06/src/rpc/blockchain.cpp#L1555) + #[method(name = "getmempoolinfo")] + async fn get_mempool_info(&self) -> Result; + + /// Returns a json object containing mining-related information. + /// + /// `zcashd` reference (may be outdated): [`getmininginfo`](https://zcash.github.io/rpc/getmininginfo.html) + #[method(name = "getmininginfo")] + async fn get_mining_info(&self) -> Result; + + /// Returns the hash of the best block (tip) of the longest chain. + /// zcashd reference: [`getbestblockhash`](https://zcash.github.io/rpc/getbestblockhash.html) + /// method: post + /// tags: blockchain + /// + /// # Notes + /// + /// The zcashd doc reference above says there are no parameters and the result is a "hex" (string) of the block hash hex encoded. 
+ /// The Zcash source code is considered canonical: + /// [In the rpc definition](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/common.h#L48) there are no required params, or optional params. + /// [The function in rpc/blockchain.cpp](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/blockchain.cpp#L325) + /// where `return chainActive.Tip()->GetBlockHash().GetHex();` is the [return expression](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/blockchain.cpp#L339)returning a `std::string` + #[method(name = "getbestblockhash")] + async fn get_best_blockhash(&self) -> Result; + + /// Returns the proof-of-work difficulty as a multiple of the minimum difficulty. + /// + /// zcashd reference: [`getdifficulty`](https://zcash.github.io/rpc/getdifficulty.html) + /// method: post + /// tags: blockchain + #[method(name = "getdifficulty")] + async fn get_difficulty(&self) -> Result; + + /// Returns information about the given block and its transactions. + /// + /// zcashd reference: [`getblockdeltas`](https://zcash.github.io/rpc/getblockdeltas.html) + /// method: post + /// tags: blockchain + #[method(name = "getblockdeltas")] + async fn get_block_deltas(&self, hash: String) -> Result; + + /// Returns data about each connected network node as a json array of objects. + /// + /// zcashd reference: [`getpeerinfo`](https://zcash.github.io/rpc/getpeerinfo.html) + /// tags: network + /// + /// Current `zebrad` does not include the same fields as `zcashd`. + #[method(name = "getpeerinfo")] + async fn get_peer_info(&self) -> Result; + + /// Returns block subsidy reward, taking into account the mining slow start and the founders reward, of block at index provided. 
+ /// + /// zcashd reference: [`getblocksubsidy`](https://zcash.github.io/rpc/getblocksubsidy.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `height`: (number, optional) The block height. If not provided, defaults to the current height of the chain. + #[method(name = "getblocksubsidy")] + async fn get_block_subsidy(&self, height: u32) -> Result; + + /// Returns the current block count in the best valid block chain. + /// + /// zcashd reference: [`getblockcount`](https://zcash.github.io/rpc/getblockcount.html) + /// method: post + /// tags: blockchain + #[method(name = "getblockcount")] + async fn get_block_count(&self) -> Result; + + /// Return information about the given Zcash address. + /// + /// # Parameters + /// - `address`: (string, required, example="tmHMBeeYRuc2eVicLNfP15YLxbQsooCA6jb") The Zcash transparent address to validate. + /// + /// zcashd reference: [`validateaddress`](https://zcash.github.io/rpc/validateaddress.html) + /// method: post + /// tags: blockchain + #[method(name = "validateaddress")] + async fn validate_address( + &self, + address: String, + ) -> Result; + + /// Returns the total balance of a provided `addresses` in an [`AddressBalance`] instance. + /// + /// zcashd reference: [`getaddressbalance`](https://zcash.github.io/rpc/getaddressbalance.html) + /// method: post + /// tags: address + /// + /// # Parameters + /// + /// - `address_strings`: (object, example={"addresses": ["tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ"]}) A JSON map with a single entry + /// - `addresses`: (array of strings) A list of base-58 encoded addresses. + /// + /// # Notes + /// + /// zcashd also accepts a single string parameter instead of an array of strings, but Zebra + /// doesn't because lightwalletd always calls this RPC with an array of addresses. + /// + /// zcashd also returns the total amount of Zatoshis received by the addresses, but Zebra + /// doesn't because lightwalletd doesn't use that information. 
+ /// + /// The RPC documentation says that the returned object has a string `balance` field, but + /// zcashd actually [returns an + /// integer](https://github.com/zcash/lightwalletd/blob/bdaac63f3ee0dbef62bde04f6817a9f90d483b00/common/common.go#L128-L130). + #[method(name = "getaddressbalance")] + async fn z_get_address_balance( + &self, + address_strings: GetAddressBalanceRequest, + ) -> Result; + + /// Sends the raw bytes of a signed transaction to the local node's mempool, if the transaction is valid. + /// Returns the [`SentTransactionHash`] for the transaction, as a JSON string. + /// + /// zcashd reference: [`sendrawtransaction`](https://zcash.github.io/rpc/sendrawtransaction.html) + /// method: post + /// tags: transaction + /// + /// # Parameters + /// + /// - `raw_transaction_hex`: (string, required, example="signedhex") The hex-encoded raw transaction bytes. + /// + /// # Notes + /// + /// zcashd accepts an optional `allowhighfees` parameter. Zebra doesn't support this parameter, + /// because lightwalletd doesn't use it. + #[method(name = "sendrawtransaction")] + async fn send_raw_transaction( + &self, + raw_transaction_hex: String, + ) -> Result; + + /// Returns the requested block by hash or height, as a [`GetBlock`] JSON string. + /// If the block is not in Zebra's state, returns + /// [error code `-8`.](https://github.com/zcash/zcash/issues/5758) if a height was + /// passed or -5 if a hash was passed. + /// + /// zcashd reference: [`getblock`](https://zcash.github.io/rpc/getblock.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `hash_or_height`: (string, required, example="1") The hash or height for the block to be returned. + /// - `verbosity`: (number, optional, default=1, example=1) 0 for hex encoded data, 1 for a json object, and 2 for json object with transaction data. 
+ /// + /// # Notes + /// + /// Zebra previously partially supported verbosity=1 by returning only the + /// fields required by lightwalletd ([`lightwalletd` only reads the `tx` + /// field of the result](https://github.com/zcash/lightwalletd/blob/dfac02093d85fb31fb9a8475b884dd6abca966c7/common/common.go#L152)). + /// That verbosity level was migrated to "3"; so while lightwalletd will + /// still work by using verbosity=1, it will sync faster if it is changed to + /// use verbosity=3. + /// + /// The undocumented `chainwork` field is not returned. + #[method(name = "getblock")] + async fn z_get_block( + &self, + hash_or_height: String, + verbosity: Option, + ) -> Result; + + /// If verbose is false, returns a string that is serialized, hex-encoded data for blockheader `hash`. + /// If verbose is true, returns an Object with information about blockheader `hash`. + /// + /// # Parameters + /// + /// - hash: (string, required) The block hash + /// - verbose: (boolean, optional, default=true) true for a json object, false for the hex encoded data + /// + /// zcashd reference: [`getblockheader`](https://zcash.github.io/rpc/getblockheader.html) + /// zcashd implementation [here](https://github.com/zcash/zcash/blob/16ac743764a513e41dafb2cd79c2417c5bb41e81/src/rpc/blockchain.cpp#L668) + /// + /// method: post + /// tags: blockchain + #[method(name = "getblockheader")] + async fn get_block_header( + &self, + hash: String, + verbose: bool, + ) -> Result; + + /// Returns all transaction ids in the memory pool, as a JSON array. + /// + /// zcashd reference: [`getrawmempool`](https://zcash.github.io/rpc/getrawmempool.html) + /// method: post + /// tags: blockchain + #[method(name = "getrawmempool")] + async fn get_raw_mempool(&self) -> Result, ErrorObjectOwned>; + + /// Returns information about the given block's Sapling & Orchard tree state. 
+ /// + /// zcashd reference: [`z_gettreestate`](https://zcash.github.io/rpc/z_gettreestate.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `hash | height`: (string, required, example="00000000febc373a1da2bd9f887b105ad79ddc26ac26c2b28652d64e5207c5b5") The block hash or height. + /// + /// # Notes + /// + /// The zcashd doc reference above says that the parameter "`height` can be + /// negative where -1 is the last known valid block". On the other hand, + /// `lightwalletd` only uses positive heights, so Zebra does not support + /// negative heights. + #[method(name = "z_gettreestate")] + async fn z_get_treestate( + &self, + hash_or_height: String, + ) -> Result; + + /// Returns information about a range of Sapling or Orchard subtrees. + /// + /// zcashd reference: [`z_getsubtreesbyindex`](https://zcash.github.io/rpc/z_getsubtreesbyindex.html) - TODO: fix link + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `pool`: (string, required) The pool from which subtrees should be returned. Either "sapling" or "orchard". + /// - `start_index`: (number, required) The index of the first 2^16-leaf subtree to return. + /// - `limit`: (number, optional) The maximum number of subtree values to return. + /// + /// # Notes + /// + /// While Zebra is doing its initial subtree index rebuild, subtrees will become available + /// starting at the chain tip. This RPC will return an empty list if the `start_index` subtree + /// exists, but has not been rebuilt yet. This matches `zcashd`'s behaviour when subtrees aren't + /// available yet. (But `zcashd` does its rebuild before syncing any blocks.) + #[method(name = "z_getsubtreesbyindex")] + async fn z_get_subtrees_by_index( + &self, + pool: String, + start_index: NoteCommitmentSubtreeIndex, + limit: Option, + ) -> Result; + + /// Returns the raw transaction data, as a [`GetRawTransaction`] JSON string or structure. 
+ /// + /// zcashd reference: [`getrawtransaction`](https://zcash.github.io/rpc/getrawtransaction.html) + /// method: post + /// tags: transaction + /// + /// # Parameters + /// + /// - `txid`: (string, required, example="mytxid") The transaction ID of the transaction to be returned. + /// - `verbose`: (number, optional, default=0, example=1) If 0, return a string of hex-encoded data, otherwise return a JSON object. + /// + /// # Notes + /// + /// We don't currently support the `blockhash` parameter since lightwalletd does not + /// use it. + /// + /// In verbose mode, we only expose the `hex` and `height` fields since + /// lightwalletd uses only those: + /// + #[method(name = "getrawtransaction")] + async fn get_raw_transaction( + &self, + txid_hex: String, + verbose: Option, + ) -> Result; + + /// Returns the transaction ids made by the provided transparent addresses. + /// + /// zcashd reference: [`getaddresstxids`](https://zcash.github.io/rpc/getaddresstxids.html) + /// method: post + /// tags: address + /// + /// # Parameters + /// + /// - `request`: (object, required, example={\"addresses\": [\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"], \"start\": 1000, \"end\": 2000}) A struct with the following named fields: + /// - `addresses`: (json array of string, required) The addresses to get transactions from. + /// - `start`: (numeric, required) The lower height to start looking for transactions (inclusive). + /// - `end`: (numeric, required) The top height to stop looking for transactions (inclusive). + /// + /// # Notes + /// + /// Only the multi-argument format is used by lightwalletd and this is what we currently support: + /// + #[method(name = "getaddresstxids")] + async fn get_address_tx_ids( + &self, + request: GetAddressTxIdsRequest, + ) -> Result, ErrorObjectOwned>; + + /// Returns all unspent outputs for a list of addresses. 
+ /// + /// zcashd reference: [`getaddressutxos`](https://zcash.github.io/rpc/getaddressutxos.html) + /// method: post + /// tags: address + /// + /// # Parameters + /// + /// - `addresses`: (array, required, example={\"addresses\": [\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"]}) The addresses to get outputs from. + /// + /// # Notes + /// + /// lightwalletd always uses the multi-address request, without chaininfo: + /// + #[method(name = "getaddressutxos")] + async fn z_get_address_utxos( + &self, + address_strings: GetAddressBalanceRequest, + ) -> Result, ErrorObjectOwned>; + + /// Returns the estimated network solutions per second based on the last n blocks. + /// + /// zcashd reference: [`getnetworksolps`](https://zcash.github.io/rpc/getnetworksolps.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `blocks`: (number, optional, default=120) Number of blocks, or -1 for blocks over difficulty averaging window. + /// - `height`: (number, optional, default=-1) To estimate network speed at the time of a specific block height. + #[method(name = "getnetworksolps")] + async fn get_network_sol_ps( + &self, + blocks: Option, + height: Option, + ) -> Result; +} +/// Uses ErrorCode::InvalidParams as this is converted to zcash legacy "minsc" ErrorCode in RPC middleware. +#[jsonrpsee::core::async_trait] +impl ZcashIndexerRpcServer for JsonRpcClient { + async fn get_info(&self) -> Result { + self.service_subscriber + .inner_ref() + .get_info() + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn get_mining_info(&self) -> Result { + Ok(self + .service_subscriber + .inner_ref() + .get_mining_info() + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + })?) 
+ } + + async fn get_best_blockhash(&self) -> Result { + self.service_subscriber + .inner_ref() + .get_best_blockhash() + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn get_blockchain_info(&self) -> Result { + self.service_subscriber + .inner_ref() + .get_blockchain_info() + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn get_mempool_info(&self) -> Result { + self.service_subscriber + .inner_ref() + .get_mempool_info() + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn get_difficulty(&self) -> Result { + self.service_subscriber + .inner_ref() + .get_difficulty() + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn get_block_deltas(&self, hash: String) -> Result { + self.service_subscriber + .inner_ref() + .get_block_deltas(hash) + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn get_peer_info(&self) -> Result { + self.service_subscriber + .inner_ref() + .get_peer_info() + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn get_block_subsidy(&self, height: u32) -> Result { + self.service_subscriber + .inner_ref() + .get_block_subsidy(height) + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn get_block_count(&self) -> Result { + self.service_subscriber + .inner_ref() + .get_block_count() + 
.await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn validate_address( + &self, + address: String, + ) -> Result { + self.service_subscriber + .inner_ref() + .validate_address(address) + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn z_get_address_balance( + &self, + address_strings: GetAddressBalanceRequest, + ) -> Result { + self.service_subscriber + .inner_ref() + .z_get_address_balance(address_strings) + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn send_raw_transaction( + &self, + raw_transaction_hex: String, + ) -> Result { + self.service_subscriber + .inner_ref() + .send_raw_transaction(raw_transaction_hex) + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn z_get_block( + &self, + hash_or_height: String, + verbosity: Option, + ) -> Result { + self.service_subscriber + .inner_ref() + .z_get_block(hash_or_height, verbosity) + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn get_block_header( + &self, + hash: String, + verbose: bool, + ) -> Result { + self.service_subscriber + .inner_ref() + .get_block_header(hash, verbose) + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn get_raw_mempool(&self) -> Result, ErrorObjectOwned> { + self.service_subscriber + .inner_ref() + .get_raw_mempool() + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server 
error", + Some(e.to_string()), + ) + }) + } + + async fn z_get_treestate( + &self, + hash_or_height: String, + ) -> Result { + self.service_subscriber + .inner_ref() + .z_get_treestate(hash_or_height) + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn z_get_subtrees_by_index( + &self, + pool: String, + start_index: NoteCommitmentSubtreeIndex, + limit: Option, + ) -> Result { + self.service_subscriber + .inner_ref() + .z_get_subtrees_by_index(pool, start_index, limit) + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn get_raw_transaction( + &self, + txid_hex: String, + verbose: Option, + ) -> Result { + self.service_subscriber + .inner_ref() + .get_raw_transaction(txid_hex, verbose) + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn get_address_tx_ids( + &self, + request: GetAddressTxIdsRequest, + ) -> Result, ErrorObjectOwned> { + self.service_subscriber + .inner_ref() + .get_address_tx_ids(request) + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + async fn z_get_address_utxos( + &self, + address_strings: GetAddressBalanceRequest, + ) -> Result, ErrorObjectOwned> { + self.service_subscriber + .inner_ref() + .z_get_address_utxos(address_strings) + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } + + /// Returns the estimated network solutions per second based on the last n blocks. 
+ /// + /// zcashd reference: [`getnetworksolps`](https://zcash.github.io/rpc/getnetworksolps.html) + /// method: post + /// tags: blockchain + /// + /// This RPC is implemented in the [mining.cpp](https://github.com/zcash/zcash/blob/d00fc6f4365048339c83f463874e4d6c240b63af/src/rpc/mining.cpp#L104) + /// file of the Zcash repository. The Zebra implementation can be found [here](https://github.com/ZcashFoundation/zebra/blob/19bca3f1159f9cb9344c9944f7e1cb8d6a82a07f/zebra-rpc/src/methods.rs#L2687). + /// + /// # Parameters + /// + /// - `blocks`: (number, optional, default=120) Number of blocks, or -1 for blocks over difficulty averaging window. + /// - `height`: (number, optional, default=-1) To estimate network speed at the time of a specific block height. + async fn get_network_sol_ps( + &self, + blocks: Option, + height: Option, + ) -> Result { + self.service_subscriber + .inner_ref() + .get_network_sol_ps(blocks, height) + .await + .map_err(|e| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + "Internal server error", + Some(e.to_string()), + ) + }) + } +} diff --git a/zaino-serve/src/rpc/service.rs b/zaino-serve/src/rpc/service.rs deleted file mode 100644 index 64a263b8a..000000000 --- a/zaino-serve/src/rpc/service.rs +++ /dev/null @@ -1,1959 +0,0 @@ -//! Lightwallet service RPC implementations. 
- -use futures::StreamExt; -use hex::FromHex; -use tokio::time::timeout; -use tokio_stream::wrappers::ReceiverStream; - -use crate::{rpc::GrpcClient, utils::get_build_info}; -use zaino_fetch::{ - chain::{ - block::{get_block_from_node, get_nullifiers_from_node}, - mempool::Mempool, - transaction::FullTransaction, - utils::ParseFromSlice, - }, - jsonrpc::{connector::JsonRpcConnector, response::{GetBlockResponse, GetTransactionResponse}}, -}; -use zaino_proto::proto::{ - compact_formats::{CompactBlock, CompactTx}, - service::{ - compact_tx_streamer_server::CompactTxStreamer, Address, AddressList, Balance, BlockId, - BlockRange, ChainSpec, Duration, Empty, Exclude, GetAddressUtxosArg, GetAddressUtxosReply, - GetAddressUtxosReplyList, GetSubtreeRootsArg, LightdInfo, PingResponse, RawTransaction, - SendResponse, ShieldedProtocol, SubtreeRoot, TransparentAddressBlockFilter, TreeState, - TxFilter - }, -}; - -/// T Address Regex -static TADDR_REGEX: lazy_regex::Lazy = - lazy_regex::lazy_regex!(r"^t[a-zA-Z0-9]{34}$"); - -/// Checks for valid t Address. -/// -/// Returns Some(taddress) if address is valid else none. -fn check_taddress(taddr: &str) -> Option<&str> { - if TADDR_REGEX.is_match(taddr) { - Some(taddr) - } else { - None - } -} - -/// Stream of RawTransactions, output type of get_taddress_txids. -pub struct RawTransactionStream { - inner: ReceiverStream>, -} - -impl RawTransactionStream { - /// Returns new instanse of RawTransactionStream. 
- pub fn new(rx: tokio::sync::mpsc::Receiver>) -> Self { - RawTransactionStream { - inner: ReceiverStream::new(rx), - } - } -} - -impl futures::Stream for RawTransactionStream { - type Item = Result; - - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - let poll = std::pin::Pin::new(&mut self.inner).poll_next(cx); - match poll { - std::task::Poll::Ready(Some(Ok(raw_tx))) => std::task::Poll::Ready(Some(Ok(raw_tx))), - std::task::Poll::Ready(Some(Err(e))) => std::task::Poll::Ready(Some(Err(e))), - std::task::Poll::Ready(None) => std::task::Poll::Ready(None), - std::task::Poll::Pending => std::task::Poll::Pending, - } - } -} - -/// Stream of RawTransactions, output type of get_taddress_txids. -pub struct CompactTransactionStream { - inner: ReceiverStream>, -} - -impl CompactTransactionStream { - /// Returns new instanse of RawTransactionStream. - pub fn new(rx: tokio::sync::mpsc::Receiver>) -> Self { - CompactTransactionStream { - inner: ReceiverStream::new(rx), - } - } -} - -impl futures::Stream for CompactTransactionStream { - type Item = Result; - - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - let poll = std::pin::Pin::new(&mut self.inner).poll_next(cx); - match poll { - std::task::Poll::Ready(Some(Ok(raw_tx))) => std::task::Poll::Ready(Some(Ok(raw_tx))), - std::task::Poll::Ready(Some(Err(e))) => std::task::Poll::Ready(Some(Err(e))), - std::task::Poll::Ready(None) => std::task::Poll::Ready(None), - std::task::Poll::Pending => std::task::Poll::Pending, - } - } -} - -/// Stream of CompactBlocks, output type of get_block_range. -pub struct CompactBlockStream { - inner: ReceiverStream>, -} - -impl CompactBlockStream { - /// Returns new instanse of CompactBlockStream. 
- pub fn new(rx: tokio::sync::mpsc::Receiver>) -> Self { - CompactBlockStream { - inner: ReceiverStream::new(rx), - } - } -} - -impl futures::Stream for CompactBlockStream { - type Item = Result; - - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - let poll = std::pin::Pin::new(&mut self.inner).poll_next(cx); - match poll { - std::task::Poll::Ready(Some(Ok(raw_tx))) => std::task::Poll::Ready(Some(Ok(raw_tx))), - std::task::Poll::Ready(Some(Err(e))) => std::task::Poll::Ready(Some(Err(e))), - std::task::Poll::Ready(None) => std::task::Poll::Ready(None), - std::task::Poll::Pending => std::task::Poll::Pending, - } - } -} - -/// Stream of CompactBlocks, output type of get_block_range. -pub struct UtxoReplyStream { - inner: ReceiverStream>, -} - -impl UtxoReplyStream { - /// Returns new instanse of CompactBlockStream. - pub fn new( - rx: tokio::sync::mpsc::Receiver>, - ) -> Self { - UtxoReplyStream { - inner: ReceiverStream::new(rx), - } - } -} - -impl futures::Stream for UtxoReplyStream { - type Item = Result; - - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - let poll = std::pin::Pin::new(&mut self.inner).poll_next(cx); - match poll { - std::task::Poll::Ready(Some(Ok(raw_tx))) => std::task::Poll::Ready(Some(Ok(raw_tx))), - std::task::Poll::Ready(Some(Err(e))) => std::task::Poll::Ready(Some(Err(e))), - std::task::Poll::Ready(None) => std::task::Poll::Ready(None), - std::task::Poll::Pending => std::task::Poll::Pending, - } - } -} - -/// Stream of CompactBlocks, output type of get_block_range. -pub struct SubtreeRootReplyStream { - inner: ReceiverStream>, -} - -impl SubtreeRootReplyStream { - /// Returns new instanse of CompactBlockStream. 
- pub fn new( - rx: tokio::sync::mpsc::Receiver>, - ) -> Self { - SubtreeRootReplyStream { - inner: ReceiverStream::new(rx), - } - } -} - -impl futures::Stream for SubtreeRootReplyStream { - type Item = Result; - - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - let poll = std::pin::Pin::new(&mut self.inner).poll_next(cx); - match poll { - std::task::Poll::Ready(Some(Ok(raw_tx))) => std::task::Poll::Ready(Some(Ok(raw_tx))), - std::task::Poll::Ready(Some(Err(e))) => std::task::Poll::Ready(Some(Err(e))), - std::task::Poll::Ready(None) => std::task::Poll::Ready(None), - std::task::Poll::Pending => std::task::Poll::Pending, - } - } -} - -impl CompactTxStreamer for GrpcClient { - /// Return the height of the tip of the best chain. - fn get_latest_block<'life0, 'async_trait>( - &'life0 self, - _request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result, tonic::Status>, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_latest_block."); - Box::pin(async { - let blockchain_info = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await? - .get_blockchain_info() - .await - .map_err(|e| e.to_grpc_status())?; - - let block_id = BlockId { - height: blockchain_info.blocks.0 as u64, - hash: blockchain_info - .best_block_hash - .bytes_in_display_order() - .to_vec(), - }; - - Ok(tonic::Response::new(block_id)) - }) - } - - /// Return the compact block corresponding to the given block identifier. - /// - /// TODO: This implementation is slow. An internal block cache should be implemented that this rpc, along with the get_block rpc, can rely on. - /// - add get_block function that queries the block cache / internal state for block and calls get_block_from_node to fetch block if not present. 
- /// - use chain height held in internal state to validate block height being requested. - fn get_block<'life0, 'async_trait>( - &'life0 self, - request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result, tonic::Status>, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_block."); - Box::pin(async { - let zebrad_uri = self.zebrad_uri.clone(); - let height: u32 = match request.into_inner().height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(tonic::Status::invalid_argument( - "Error: Height out of range. Failed to convert to u32.", - )); - } - }; - match get_block_from_node(&zebrad_uri, &height).await { - Ok(block) => Ok(tonic::Response::new(block)), - Err(e) => { - let chain_height = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await? - .get_blockchain_info() - .await - .map_err(|e| e.to_grpc_status())? - .blocks - .0; - if height >= chain_height { - Err(tonic::Status::out_of_range( - format!( - "Error: Height out of range [{}]. Height requested is greater than the best chain tip [{}].", - height, chain_height, - ) - )) - } else { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - Err(tonic::Status::unknown(format!( - "Error: Failed to retrieve block from node. Server Error: {}", - e, - ))) - } - } - } - }) - } - - /// Same as GetBlock except actions contain only nullifiers. - /// - /// NOTE: This should be reimplemented with the introduction of the BlockCache. - /// - use chain height held in internal state to validate block height being requested. 
- fn get_block_nullifiers<'life0, 'async_trait>( - &'life0 self, - request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result, tonic::Status>, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_block_nullifiers."); - Box::pin(async { - let zebrad_uri = self.zebrad_uri.clone(); - let height: u32 = match request.into_inner().height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(tonic::Status::invalid_argument( - "Error: Height out of range. Failed to convert to u32.", - )); - } - }; - match get_nullifiers_from_node(&zebrad_uri, &height).await { - Ok(block) => Ok(tonic::Response::new(block)), - Err(e) => { - let chain_height = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await? - .get_blockchain_info() - .await - .map_err(|e| e.to_grpc_status())? - .blocks - .0; - if height >= chain_height { - Err(tonic::Status::out_of_range( - format!( - "Error: Height out of range [{}]. Height requested is greater than the best chain tip [{}].", - height, chain_height, - ) - )) - } else { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - Err(tonic::Status::unknown(format!( - "Error: Failed to retrieve nullifiers from node. Server Error: {}", - e, - ))) - } - } - } - }) - } - - /// Server streaming response type for the GetBlockRange method. - #[doc = "Server streaming response type for the GetBlockRange method."] - type GetBlockRangeStream = std::pin::Pin>; - - /// Return a list of consecutive compact blocks. - /// - /// TODO: This implementation is slow. An internal block cache should be implemented that this rpc, along with the get_block rpc, can rely on. - /// - add get_block function that queries the block cache for block and calls get_block_from_node to fetch block if not present. 
- /// - use chain height held in internal state to validate block height being requested. - fn get_block_range<'life0, 'async_trait>( - &'life0 self, - request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result< - tonic::Response, - tonic::Status, - >, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_block_range."); - let zebrad_uri = self.zebrad_uri.clone(); - Box::pin(async move { - let blockrange = request.into_inner(); - let mut start: u32 = match blockrange.start { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(tonic::Status::invalid_argument( - "Error: Start height out of range. Failed to convert to u32.", - )); - } - }, - None => { - return Err(tonic::Status::invalid_argument( - "Error: No start height given.", - )); - } - }; - let mut end: u32 = match blockrange.end { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(tonic::Status::invalid_argument( - "Error: End height out of range. Failed to convert to u32.", - )); - } - }, - None => { - return Err(tonic::Status::invalid_argument( - "Error: No start height given.", - )); - } - }; - let rev_order = if start > end { - (start, end) = (end, start); - true - } else { - false - }; - let chain_height = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await? - .get_blockchain_info() - .await - .map_err(|e| e.to_grpc_status())? - .blocks - .0; - println!("[TEST] Fetching blocks in range: {}-{}.", start, end); - let (channel_tx, channel_rx) = tokio::sync::mpsc::channel(32); - tokio::spawn(async move { - // NOTE: This timeout is so slow due to the blockcache not being implemented. This should be reduced to 30s once functionality is in place. 
- // TODO: Make [rpc_timout] a configurable system variable with [default = 30s] and [mempool_rpc_timout = 4*rpc_timeout] - let timeout = timeout(std::time::Duration::from_secs(120), async { - for height in start..=end { - let height = if rev_order { - end - (height - start) - } else { - height - }; - println!("[TEST] Fetching block at height: {}.", height); - match get_block_from_node(&zebrad_uri, &height).await { - Ok(block) => { - if channel_tx.send(Ok(block)).await.is_err() { - break; - } - } - Err(e) => { - if height >= chain_height { - match channel_tx - .send(Err(tonic::Status::out_of_range(format!( - "Error: Height out of range [{}]. Height requested is greater than the best chain tip [{}].", - height, chain_height, - )))) - .await - - { - Ok(_) => break, - Err(e) => { - eprintln!("Error: Channel closed unexpectedly: {}", e); - break; - } - } - } else { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - if channel_tx - .send(Err(tonic::Status::unknown(e.to_string()))) - .await - .is_err() - { - break; - } - } - } - } - } - }) - .await; - match timeout { - Ok(_) => {} - Err(_) => { - channel_tx - .send(Err(tonic::Status::deadline_exceeded( - "Error: get_block_range gRPC request timed out.", - ))) - .await - .ok(); - } - } - }); - let output_stream = CompactBlockStream::new(channel_rx); - let stream_boxed = Box::pin(output_stream); - Ok(tonic::Response::new(stream_boxed)) - }) - } - - /// Server streaming response type for the GetBlockRangeNullifiers method. - #[doc = " Server streaming response type for the GetBlockRangeNullifiers method."] - type GetBlockRangeNullifiersStream = std::pin::Pin>; - - /// Same as GetBlockRange except actions contain only nullifiers. - /// - /// NOTE: This should be reimplemented with the introduction of the BlockCache. - /// - use chain height held in internal state to validate block height being requested. 
- fn get_block_range_nullifiers<'life0, 'async_trait>( - &'life0 self, - request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result< - tonic::Response, - tonic::Status, - >, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_block_range_nullifiers."); - let zebrad_uri = self.zebrad_uri.clone(); - Box::pin(async move { - let blockrange = request.into_inner(); - let mut start: u32 = match blockrange.start { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(tonic::Status::invalid_argument( - "Error: Start height out of range. Failed to convert to u32.", - )); - } - }, - None => { - return Err(tonic::Status::invalid_argument( - "Error: No start height given.", - )); - } - }; - let mut end: u32 = match blockrange.end { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(tonic::Status::invalid_argument( - "Error: End height out of range. Failed to convert to u32.", - )); - } - }, - None => { - return Err(tonic::Status::invalid_argument( - "Error: No end height given.", - )); - } - }; - let rev_order = if start > end { - (start, end) = (end, start); - true - } else { - false - }; - let chain_height = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await? - .get_blockchain_info() - .await - .map_err(|e| e.to_grpc_status())? - .blocks - .0; - let (channel_tx, channel_rx) = tokio::sync::mpsc::channel(32); - tokio::spawn(async move { - // NOTE: This timeout is so slow due to the blockcache not being implemented. This should be reduced to 30s once functionality is in place. 
- // TODO: Make [rpc_timout] a configurable system variable with [default = 30s] and [mempool_rpc_timout = 4*rpc_timeout] - let timeout = timeout(std::time::Duration::from_secs(120), async { - for height in start..=end { - let height = if rev_order { - end - (height - start) - } else { - height - }; - let compact_block = get_nullifiers_from_node(&zebrad_uri, &height).await; - match compact_block { - Ok(block) => { - if channel_tx.send(Ok(block)).await.is_err() { - break; - } - } - Err(e) => { - if height >= chain_height { - match channel_tx - .send(Err(tonic::Status::out_of_range(format!( - "Error: Height out of range [{}]. Height requested is greater than the best chain tip [{}].", - height, chain_height, - )))) - .await - - { - Ok(_) => break, - Err(e) => { - eprintln!("Error: Channel closed unexpectedly: {}", e); - break; - } - } - } else { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - if channel_tx - .send(Err(tonic::Status::unknown(e.to_string()))) - .await - .is_err() - { - break; - } - } - } - } - } - }) - .await; - match timeout { - Ok(_) => {} - Err(_) => { - channel_tx - .send(Err(tonic::Status::deadline_exceeded( - "Error: get_block_range_nullifiers gRPC request timed out.", - ))) - .await - .ok(); - } - } - }); - let output_stream = CompactBlockStream::new(channel_rx); - let stream_boxed = Box::pin(output_stream); - Ok(tonic::Response::new(stream_boxed)) - }) - } - - /// Return the requested full (not compact) transaction (as from zcashd). 
- fn get_transaction<'life0, 'async_trait>( - &'life0 self, - request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result, tonic::Status>, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_transaction."); - Box::pin(async { - let hash = request.into_inner().hash; - if hash.len() == 32 { - let reversed_hash = hash.iter().rev().copied().collect::>(); - let hash_hex = hex::encode(reversed_hash); - let tx = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await? - .get_raw_transaction(hash_hex, Some(1)) - .await - .map_err(|e| e.to_grpc_status())?; - - let (hex, height) = if let GetTransactionResponse::Object { hex, height, .. } = tx { - (hex, height) - } else { - return Err(tonic::Status::not_found("Error: Transaction not received")); - }; - let height: u64 = height.try_into().map_err(|_e| { - tonic::Status::unknown( - "Error: Invalid response from server - Height conversion failed", - ) - })?; - - Ok(tonic::Response::new(RawTransaction { - data: hex.as_ref().to_vec(), - height, - })) - } else { - Err(tonic::Status::invalid_argument( - "Error: Transaction hash incorrect", - )) - } - }) - } - - /// Submit the given transaction to the Zcash network. - fn send_transaction<'life0, 'async_trait>( - &'life0 self, - request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result, tonic::Status>, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of send_transaction."); - Box::pin(async { - let hex_tx = hex::encode(request.into_inner().data); - let tx_output = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await? 
- .send_raw_transaction(hex_tx) - .await - .map_err(|e| e.to_grpc_status())?; - - Ok(tonic::Response::new(SendResponse { - error_code: 0, - error_message: tx_output.0.to_string(), - })) - }) - } - - /// Server streaming response type for the GetTaddressTxids method. - #[doc = "Server streaming response type for the GetTaddressTxids method."] - type GetTaddressTxidsStream = std::pin::Pin>; - - /// This name is misleading, returns the full transactions that have either inputs or outputs connected to the given transparent address. - fn get_taddress_txids<'life0, 'async_trait>( - &'life0 self, - request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result< - tonic::Response, - tonic::Status, - >, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_taddress_txids."); - Box::pin(async move { - let zebrad_client = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await?; - let chain_height = zebrad_client.get_blockchain_info().await?.blocks.0; - let block_filter = request.into_inner(); - let (start, end) = - match block_filter.range { - Some(range) => match (range.start, range.end) { - (Some(start), Some(end)) => { - let start = match u32::try_from(start.height) { - Ok(height) => height.min(chain_height), - Err(_) => return Err(tonic::Status::invalid_argument( - "Error: Start height out of range. Failed to convert to u32.", - )), - }; - let end = - match u32::try_from(end.height) { - Ok(height) => height.min(chain_height), - Err(_) => return Err(tonic::Status::invalid_argument( - "Error: End height out of range. 
Failed to convert to u32.", - )), - }; - if start > end { - (end, start) - } else { - (start, end) - } - } - _ => { - return Err(tonic::Status::invalid_argument( - "Error: Incomplete block range given.", - )) - } - }, - None => { - return Err(tonic::Status::invalid_argument( - "Error: No block range given.", - )) - } - }; - let txids = zebrad_client - .get_address_txids(vec![block_filter.address], start, end) - .await - .map_err(|e| e.to_grpc_status())?; - let (channel_tx, channel_rx) = tokio::sync::mpsc::channel(32); - tokio::spawn(async move { - // NOTE: This timeout is so slow due to the blockcache not being implemented. This should be reduced to 30s once functionality is in place. - // TODO: Make [rpc_timout] a configurable system variable with [default = 30s] and [mempool_rpc_timout = 4*rpc_timeout] - let timeout = timeout(std::time::Duration::from_secs(120), async { - for txid in txids.transactions { - let transaction = zebrad_client.get_raw_transaction(txid, Some(1)).await; - match transaction { - Ok(GetTransactionResponse::Object { hex, height, .. }) => { - if channel_tx - .send(Ok(RawTransaction { - data: hex.as_ref().to_vec(), - height: height as u64, - })) - .await - .is_err() - { - break; - } - } - Ok(GetTransactionResponse::Raw(_)) => { - if channel_tx - .send(Err(tonic::Status::unknown( - "Received raw transaction type, this should not be impossible.", - ))) - .await - .is_err() - { - break; - } - } - Err(e) => { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. 
- if channel_tx - .send(Err(tonic::Status::unknown(e.to_string()))) - .await - .is_err() - { - break; - } - } - } - } - }) - .await; - match timeout { - Ok(_) => {} - Err(_) => { - channel_tx - .send(Err(tonic::Status::internal( - "Error: get_taddress_txids gRPC request timed out", - ))) - .await - .ok(); - } - } - }); - let output_stream = RawTransactionStream::new(channel_rx); - let stream_boxed = Box::pin(output_stream); - Ok(tonic::Response::new(stream_boxed)) - }) - } - - /// Returns the total balance for a list of taddrs - fn get_taddress_balance<'life0, 'async_trait>( - &'life0 self, - request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result, tonic::Status>, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_taddress_balance."); - Box::pin(async { - let zebrad_client = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await?; - let taddrs = request.into_inner().addresses; - if !taddrs.iter().all(|taddr| check_taddress(taddr).is_some()) { - return Err(tonic::Status::invalid_argument( - "Error: One or more invalid taddresses given.", - )); - } - let balance = zebrad_client.get_address_balance(taddrs).await?; - let checked_balance: i64 = match i64::try_from(balance.balance) { - Ok(balance) => balance, - Err(_) => { - return Err(tonic::Status::unknown( - "Error: Error converting balance from u64 to i64.", - )); - } - }; - Ok(tonic::Response::new(Balance { - value_zat: checked_balance, - })) - }) - } - - /// Returns the total balance for a list of taddrs - #[must_use] - #[allow(clippy::type_complexity, clippy::type_repetition_in_bounds)] - fn get_taddress_balance_stream<'life0, 'async_trait>( - &'life0 self, - request: tonic::Request>, - ) -> ::core::pin::Pin< - Box< - dyn ::core::future::Future, tonic::Status>> - + ::core::marker::Send - + 
'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_taddress_balance_stream."); - Box::pin(async { - let zebrad_client = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await?; - let (channel_tx, mut channel_rx) = tokio::sync::mpsc::channel::(32); - let fetcher_task_handle = tokio::spawn(async move { - // NOTE: This timeout is so slow due to the blockcache not being implemented. This should be reduced to 30s once functionality is in place. - // TODO: Make [rpc_timout] a configurable system variable with [default = 30s] and [mempool_rpc_timout = 4*rpc_timeout] - let fetcher_timeout = timeout(std::time::Duration::from_secs(120), async { - let mut total_balance: u64 = 0; - loop { - match channel_rx.recv().await { - Some(taddr) => { - if check_taddress(taddr.as_str()).is_some() { - let balance = - zebrad_client.get_address_balance(vec![taddr]).await?; - total_balance += balance.balance; - } else { - return Err(tonic::Status::invalid_argument( - "Error: One or more invalid taddresses given.", - )); - } - } - None => { - return Ok(total_balance); - } - } - } - }) - .await; - match fetcher_timeout { - Ok(result) => result, - Err(_) => Err(tonic::Status::deadline_exceeded( - "Error: get_taddress_balance_stream request timed out.", - )), - } - }); - // NOTE: This timeout is so slow due to the blockcache not being implemented. This should be reduced to 30s once functionality is in place. - // TODO: Make [rpc_timout] a configurable system variable with [default = 30s] and [mempool_rpc_timout = 4*rpc_timeout] - let addr_recv_timeout = timeout(std::time::Duration::from_secs(120), async { - let mut address_stream = request.into_inner(); - while let Some(address_result) = address_stream.next().await { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. 
- let address = address_result.map_err(|e| { - tonic::Status::unknown(format!("Failed to read from stream: {}", e)) - })?; - if channel_tx.send(address.address).await.is_err() { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - return Err(tonic::Status::unknown( - "Error: Failed to send address to balance task.", - )); - } - } - drop(channel_tx); - Ok::<(), tonic::Status>(()) - }) - .await; - match addr_recv_timeout { - Ok(Ok(())) => {} - Ok(Err(e)) => { - fetcher_task_handle.abort(); - return Err(e); - } - Err(_) => { - fetcher_task_handle.abort(); - return Err(tonic::Status::deadline_exceeded( - "Error: get_taddress_balance_stream request timed out in address loop.", - )); - } - } - match fetcher_task_handle.await { - Ok(Ok(total_balance)) => { - let checked_balance: i64 = match i64::try_from(total_balance) { - Ok(balance) => balance, - Err(_) => { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - return Err(tonic::Status::unknown( - "Error: Error converting balance from u64 to i64.", - )); - } - }; - Ok(tonic::Response::new(Balance { - value_zat: checked_balance, - })) - } - Ok(Err(e)) => Err(e), - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - Err(e) => Err(tonic::Status::unknown(format!( - "Fetcher Task failed: {}", - e - ))), - } - }) - } - - /// Server streaming response type for the GetMempoolTx method. - #[doc = "Server streaming response type for the GetMempoolTx method."] - type GetMempoolTxStream = std::pin::Pin>; - - /// Return the compact transactions currently in the mempool; the results - /// can be a few seconds out of date. If the Exclude list is empty, return - /// all transactions; otherwise return all *except* those in the Exclude list - /// (if any); this allows the client to avoid receiving transactions that it - /// already has (from an earlier call to this rpc). 
The transaction IDs in the - /// Exclude list can be shortened to any number of bytes to make the request - /// more bandwidth-efficient; if two or more transactions in the mempool - /// match a shortened txid, they are all sent (none is excluded). Transactions - /// in the exclude list that don't exist in the mempool are ignored. - /// - /// NOTE: This implementation is slow and should be re-implemented with the addition of the internal mempool and blockcache. - fn get_mempool_tx<'life0, 'async_trait>( - &'life0 self, - request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result< - tonic::Response, - tonic::Status, - >, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_mempool_tx."); - Box::pin(async { - let zebrad_uri = self.zebrad_uri.clone(); - let zebrad_client = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await?; - let exclude_txids: Vec = request - .into_inner() - .txid - .iter() - .map(|txid_bytes| { - let reversed_txid_bytes: Vec = txid_bytes.iter().cloned().rev().collect(); - hex::encode(&reversed_txid_bytes) - }) - .collect(); - let (channel_tx, channel_rx) = tokio::sync::mpsc::channel(32); - tokio::spawn(async move { - // NOTE: This timeout is so slow due to the blockcache not being implemented. This should be reduced to 30s once functionality is in place. 
- // TODO: Make [rpc_timout] a configurable system variable with [default = 30s] and [mempool_rpc_timout = 4*rpc_timeout] - let timeout = timeout(std::time::Duration::from_secs(480), async { - let mempool = Mempool::new(); - if let Err(e) = mempool.update(&zebrad_uri).await { - channel_tx.send(Err(tonic::Status::unknown(e.to_string()))) - .await - .ok(); - return; - } - match mempool.get_filtered_mempool_txids(exclude_txids).await { - Ok(mempool_txids) => { - for txid in mempool_txids { - match zebrad_client - .get_raw_transaction(txid.clone(), Some(0)) - .await { - Ok(GetTransactionResponse::Object { .. }) => { - if channel_tx - .send(Err(tonic::Status::internal( - "Error: Received transaction object type, this should not be impossible.", - ))) - .await - .is_err() - { - break; - } - - } - Ok(GetTransactionResponse::Raw(raw)) => { - let txid_bytes = match hex::decode(txid) { - Ok(bytes) => bytes, - Err(e) => { - if channel_tx - .send(Err(tonic::Status::unknown(e.to_string()))) - .await - .is_err() - { - break; - } else { - continue; - } - } - }; - match FullTransaction::parse_from_slice(raw.as_ref(), Some(vec!(txid_bytes)), None) { - Ok(transaction) => { - if !transaction.0.is_empty() { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - if channel_tx - .send(Err(tonic::Status::unknown("Error: "))) - .await - .is_err() - { - break; - } - } else { - match transaction.1.to_compact(0) { - Ok(compact_tx) => { - if channel_tx - .send(Ok(compact_tx)) - .await - .is_err() - { - break; - } - } - Err(e) => { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - if channel_tx - .send(Err(tonic::Status::unknown(e.to_string()))) - .await - .is_err() - { - break; - } - } - } - } - } - Err(e) => { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. 
- if channel_tx - .send(Err(tonic::Status::unknown(e.to_string()))) - .await - .is_err() - { - break; - } - } - } - } - Err(e) => { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - if channel_tx - .send(Err(tonic::Status::unknown(e.to_string()))) - .await - .is_err() - { - break; - } - } - } - } - } - Err(e) => { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - if channel_tx - .send(Err(tonic::Status::unknown(e.to_string()))) - .await - .is_err() - { - } - } - } - }) - .await; - match timeout { - Ok(_) => { - } - Err(_) => { - channel_tx - .send(Err(tonic::Status::deadline_exceeded( - "Error: get_mempool_stream gRPC request timed out", - ))) - .await - .ok(); - } - } - }); - let output_stream = CompactTransactionStream::new(channel_rx); - let stream_boxed = Box::pin(output_stream); - Ok(tonic::Response::new(stream_boxed)) - }) - } - - /// Server streaming response type for the GetMempoolStream method. - #[doc = "Server streaming response type for the GetMempoolStream method."] - type GetMempoolStreamStream = std::pin::Pin>; - - /// Return a stream of current Mempool transactions. This will keep the output stream open while - /// there are mempool transactions. It will close the returned stream when a new block is mined. - /// - /// TODO: This implementation is slow. Zingo-Indexer's blockcache state engine should keep its own internal mempool state. - /// - This RPC should query Zingo-Indexer's internal mempool state rather than creating its own mempool and directly querying zebrad. 
- fn get_mempool_stream<'life0, 'async_trait>( - &'life0 self, - _request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result< - tonic::Response, - tonic::Status, - >, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_mempool_stream."); - Box::pin(async { - let zebrad_uri = self.zebrad_uri.clone(); - let zebrad_client = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await?; - let mempool_height = zebrad_client.get_blockchain_info().await?.blocks.0; - let (channel_tx, channel_rx) = tokio::sync::mpsc::channel(32); - tokio::spawn(async move { - // NOTE: This timeout is so slow due to the blockcache not being implemented. This should be reduced to 30s once functionality is in place. - // TODO: Make [rpc_timout] a configurable system variable with [default = 30s] and [mempool_rpc_timout = 4*rpc_timeout] - let timeout = timeout(std::time::Duration::from_secs(480), async { - let mempool = Mempool::new(); - if let Err(e) = mempool.update(&zebrad_uri).await { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - channel_tx.send(Err(tonic::Status::unknown(e.to_string()))) - .await - .ok(); - return; - } - let mut mined = false; - let mut txid_index: usize = 0; - while !mined { - match mempool.get_mempool_txids().await { - Ok(mempool_txids) => { - for txid in &mempool_txids[txid_index..] { - match zebrad_client - .get_raw_transaction(txid.clone(), Some(1)) - .await { - Ok(GetTransactionResponse::Object { hex, height: _, .. 
}) => { - txid_index += 1; - if channel_tx - .send(Ok(RawTransaction { - data: hex.as_ref().to_vec(), - height: mempool_height as u64, - })) - .await - .is_err() - { - break; - } - } - Ok(GetTransactionResponse::Raw(_)) => { - if channel_tx - .send(Err(tonic::Status::internal( - "Error: Received raw transaction type, this should not be impossible.", - ))) - .await - .is_err() - { - break; - } - } - Err(e) => { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - if channel_tx - .send(Err(tonic::Status::unknown(e.to_string()))) - .await - .is_err() - { - break; - } - } - } - } - } - Err(e) => { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - if channel_tx - .send(Err(tonic::Status::unknown(e.to_string()))) - .await - .is_err() - { - break; - } - } - } - tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; - mined = match mempool.update(&zebrad_uri).await { - Ok(mined) => mined, - Err(e) => { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - channel_tx.send(Err(tonic::Status::unknown(e.to_string()))) - .await - .ok(); - break; - } - }; - } - }) - .await; - match timeout { - Ok(_) => { - } - Err(_) => { - channel_tx - .send(Err(tonic::Status::deadline_exceeded( - "Error: get_mempool_stream gRPC request timed out", - ))) - .await - .ok(); - } - } - }); - let output_stream = RawTransactionStream::new(channel_rx); - let stream_boxed = Box::pin(output_stream); - Ok(tonic::Response::new(stream_boxed)) - }) - } - - /// GetTreeState returns the note commitment tree state corresponding to the given block. - /// See section 3.7 of the Zcash protocol specification. It returns several other useful - /// values also (even though they can be obtained using GetBlock). - /// The block can be specified by either height or hash. - /// - /// TODO: This is slow. 
Chain, along with other blockchain info should be saved on startup and used here [blockcache?]. - fn get_tree_state<'life0, 'async_trait>( - &'life0 self, - request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result, tonic::Status>, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_tree_state."); - Box::pin(async { - let zebrad_client = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await?; - let chain_info = zebrad_client - .get_blockchain_info() - .await - .map_err(|e| e.to_grpc_status())?; - let block_id = request.into_inner(); - let hash_or_height = if block_id.height != 0 { - match u32::try_from(block_id.height) { - Ok(height) => { - if height >= chain_info.blocks.0 { - return Err(tonic::Status::out_of_range( - format!( - "Error: Height out of range [{}]. Height requested is greater than the best chain tip [{}].", - height, chain_info.blocks.0, - ) - )); - } else { - height.to_string() - } - } - Err(_) => { - return Err(tonic::Status::invalid_argument( - "Error: Height out of range. Failed to convert to u32.", - )); - } - } - } else { - hex::encode(block_id.hash) - }; - match zebrad_client.get_treestate(hash_or_height).await { - Ok(state) => Ok(tonic::Response::new(TreeState { - network: chain_info.chain, - height: state.height as u64, - hash: state.hash.to_string(), - time: state.time, - sapling_tree: state.sapling.inner().inner().clone(), - orchard_tree: state.orchard.inner().inner().clone(), - })), - Err(e) => { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - Err(tonic::Status::unknown(format!( - "Error: Failed to retrieve treestate from node. Server Error: {}", - e, - ))) - } - } - }) - } - - /// GetLatestTreeState returns the note commitment tree state corresponding to the chain tip. 
- /// - /// TODO: This is slow. Chain, along with other blockchain info should be saved on startup and used here [blockcache?]. - fn get_latest_tree_state<'life0, 'async_trait>( - &'life0 self, - _request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result, tonic::Status>, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_latest_tree_state."); - Box::pin(async { - let zebrad_client = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await?; - let chain_info = zebrad_client - .get_blockchain_info() - .await - .map_err(|e| e.to_grpc_status())?; - match zebrad_client - .get_treestate(chain_info.blocks.0.to_string()) - .await - { - Ok(state) => Ok(tonic::Response::new(TreeState { - network: chain_info.chain, - height: state.height as u64, - hash: state.hash.to_string(), - time: state.time, - sapling_tree: state.sapling.inner().inner().clone(), - orchard_tree: state.orchard.inner().inner().clone(), - })), - Err(e) => { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - Err(tonic::Status::unknown(format!( - "Error: Failed to retrieve treestate from node. Server Error: {}", - e, - ))) - } - } - }) - } - - /// Server streaming response type for the GetSubtreeRoots method. - #[doc = " Server streaming response type for the GetSubtreeRoots method."] - type GetSubtreeRootsStream = std::pin::Pin>; - - - /// Returns a stream of information about roots of subtrees of the Sapling and Orchard - /// note commitment trees. 
- fn get_subtree_roots<'life0, 'async_trait>( - &'life0 self, - request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result< - tonic::Response, - tonic::Status, - >, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_subtree_roots."); - Box::pin(async move { - let zebrad_uri =self.zebrad_uri.clone(); - let zebrad_client = JsonRpcConnector::new( - zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await?; - let subtree_roots_args = request.into_inner(); - let pool = match ShieldedProtocol::try_from(subtree_roots_args.shielded_protocol) { - Ok(protocol) => protocol.as_str_name(), - Err(_) => return Err(tonic::Status::invalid_argument("Error: Invalid shielded protocol value.")), - }; - let start_index = match u16::try_from(subtree_roots_args.start_index) { - Ok(value) => value, - Err(_) => return Err(tonic::Status::invalid_argument("Error: start_index value exceeds u16 range.")), - }; - let limit = if subtree_roots_args.max_entries == 0 { - None - } else { - match u16::try_from(subtree_roots_args.max_entries) { - Ok(value) => Some(value), - Err(_) => return Err(tonic::Status::invalid_argument("Error: max_entries value exceeds u16 range.")), - } - }; - let subtrees = zebrad_client.get_subtrees_by_index(pool.to_string(), start_index, limit).await?; - let (channel_tx, channel_rx) = tokio::sync::mpsc::channel(32); - tokio::spawn(async move { - // NOTE: This timeout is so slow due to the blockcache not being implemented. This should be reduced to 30s once functionality is in place. 
- // TODO: Make [rpc_timout] a configurable system variable with [default = 30s] and [mempool_rpc_timout = 4*rpc_timeout] - let timeout = timeout(std::time::Duration::from_secs(120), async { - for subtree in subtrees.subtrees { - match zebrad_client.get_block(subtree.end_height.0.to_string(), Some(1)).await { - Ok(GetBlockResponse::Object { - hash, - confirmations: _, - height, - time: _, - tx: _, - trees: _, - }) => { - let checked_height = match height { - Some(h) => h.0 as u64, - None => { - match channel_tx - .send(Err(tonic::Status::unknown("Error: No block height returned by node."))) - .await - { - Ok(_) => break, - Err(e) => { - eprintln!("Error: Channel closed unexpectedly: {}", e); - break; - } - } - } - }; - let checked_root_hash = match hex::decode(&subtree.root) { - Ok(hash) => hash, - Err(e) => { - match channel_tx - .send(Err(tonic::Status::unknown(format!("Error: Failed to hex decode root hash: {}.", - e - )))) - .await - { - Ok(_) => break, - Err(e) => { - eprintln!("Error: Channel closed unexpectedly: {}", e); - break; - } - } - } - }; - if channel_tx.send( - Ok(SubtreeRoot { - root_hash: checked_root_hash, - completing_block_hash: hash.0.bytes_in_display_order().to_vec(), - completing_block_height: checked_height, - })).await.is_err() - { - break; - } - - } - Ok(GetBlockResponse::Raw(_)) => { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - if channel_tx - .send(Err(tonic::Status::unknown("Error: Received raw block type, this should not be possible."))) - .await - .is_err() - { - break; - } - } - Err(e) => { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. 
- if channel_tx - .send(Err(tonic::Status::unknown(format!("Error: Could not fetch block at height [{}] from node: {}", - subtree.end_height.0, - e - )))) - .await - .is_err() - { - break; - } - } - } - } - }) - .await; - match timeout { - Ok(_) => { - } - Err(_) => { - channel_tx - .send(Err(tonic::Status::deadline_exceeded( - "Error: get_mempool_stream gRPC request timed out", - ))) - .await - .ok(); - } - } - }); - let output_stream = SubtreeRootReplyStream::new(channel_rx); - let stream_boxed = Box::pin(output_stream); - Ok(tonic::Response::new(stream_boxed)) - }) - } - - /// Returns all unspent outputs for a list of addresses. - /// - /// Ignores all utxos below block height [GetAddressUtxosArg.start_height]. - /// Returns max [GetAddressUtxosArg.max_entries] utxos, or unrestricted if [GetAddressUtxosArg.max_entries] = 0. - /// Utxos are collected and returned as a single Vec. - fn get_address_utxos<'life0, 'async_trait>( - &'life0 self, - request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result< - tonic::Response, - tonic::Status, - >, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_address_utxos."); - Box::pin(async { - let zebrad_client = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await?; - let addr_args = request.into_inner(); - if !addr_args - .addresses - .iter() - .all(|taddr| check_taddress(taddr).is_some()) - { - return Err(tonic::Status::invalid_argument( - "Error: One or more invalid taddresses given.", - )); - } - let utxos = zebrad_client.get_address_utxos(addr_args.addresses).await?; - let mut address_utxos: Vec = Vec::new(); - let mut entries: u32 = 0; - for utxo in utxos { - if (utxo.height.0 as u64) < addr_args.start_height { - continue; - } - entries += 1; - if addr_args.max_entries > 0 && entries > 
addr_args.max_entries { - break; - } - let checked_index = match i32::try_from(utxo.output_index) { - Ok(index) => index, - Err(_) => { - return Err(tonic::Status::unknown( - "Error: Index out of range. Failed to convert to i32.", - )); - } - }; - let checked_satoshis = match i64::try_from(utxo.satoshis) { - Ok(satoshis) => satoshis, - Err(_) => { - return Err(tonic::Status::unknown( - "Error: Satoshis out of range. Failed to convert to i64.", - )); - } - }; - let utxo_reply = GetAddressUtxosReply { - address: utxo.address.to_string(), - txid: utxo.txid.0.to_vec(), - index: checked_index, - script: utxo.script.as_ref().to_vec(), - value_zat: checked_satoshis, - height: utxo.height.0 as u64, - }; - address_utxos.push(utxo_reply) - } - Ok(tonic::Response::new(GetAddressUtxosReplyList { - address_utxos, - })) - }) - } - - /// Server streaming response type for the GetAddressUtxosStream method. - #[doc = "Server streaming response type for the GetAddressUtxosStream method."] - type GetAddressUtxosStreamStream = std::pin::Pin>; - - /// Returns all unspent outputs for a list of addresses. - /// - /// Ignores all utxos below block height [GetAddressUtxosArg.start_height]. - /// Returns max [GetAddressUtxosArg.max_entries] utxos, or unrestricted if [GetAddressUtxosArg.max_entries] = 0. - /// Utxos are returned in a stream. 
- fn get_address_utxos_stream<'life0, 'async_trait>( - &'life0 self, - request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result< - tonic::Response, - tonic::Status, - >, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_address_utxos_stream."); - Box::pin(async { - let zebrad_client = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await?; - let addr_args = request.into_inner(); - if !addr_args - .addresses - .iter() - .all(|taddr| check_taddress(taddr).is_some()) - { - return Err(tonic::Status::invalid_argument( - "Error: One or more invalid taddresses given.", - )); - } - let utxos = zebrad_client.get_address_utxos(addr_args.addresses).await?; - let (channel_tx, channel_rx) = tokio::sync::mpsc::channel(32); - tokio::spawn(async move { - // NOTE: This timeout is so slow due to the blockcache not being implemented. This should be reduced to 30s once functionality is in place. - // TODO: Make [rpc_timout] a configurable system variable with [default = 30s] and [mempool_rpc_timout = 4*rpc_timeout] - let timeout = timeout(std::time::Duration::from_secs(120), async { - let mut entries: u32 = 0; - for utxo in utxos { - if (utxo.height.0 as u64) < addr_args.start_height { - continue; - } - entries += 1; - if addr_args.max_entries > 0 && entries > addr_args.max_entries { - break; - } - let checked_index = match i32::try_from(utxo.output_index) { - Ok(index) => index, - Err(_) => { - let _ = channel_tx - .send(Err(tonic::Status::unknown( - "Error: Index out of range. Failed to convert to i32.", - ))) - .await; - return; - } - }; - let checked_satoshis = match i64::try_from(utxo.satoshis) { - Ok(satoshis) => satoshis, - Err(_) => { - let _ = channel_tx - .send(Err(tonic::Status::unknown( - "Error: Satoshis out of range. 
Failed to convert to i64.", - ))) - .await; - return; - } - }; - let utxo_reply = GetAddressUtxosReply { - address: utxo.address.to_string(), - txid: utxo.txid.0.to_vec(), - index: checked_index, - script: utxo.script.as_ref().to_vec(), - value_zat: checked_satoshis, - height: utxo.height.0 as u64, - }; - if channel_tx.send(Ok(utxo_reply)).await.is_err() { - return; - } - } - }) - .await; - match timeout { - Ok(_) => { - } - Err(_) => { - channel_tx - .send(Err(tonic::Status::deadline_exceeded( - "Error: get_mempool_stream gRPC request timed out", - ))) - .await - .ok(); - } - } - }); - let output_stream = UtxoReplyStream::new(channel_rx); - let stream_boxed = Box::pin(output_stream); - Ok(tonic::Response::new(stream_boxed)) - }) - } - - /// Return information about this lightwalletd instance and the blockchain - fn get_lightd_info<'life0, 'async_trait>( - &'life0 self, - _request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result, tonic::Status>, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of get_lightd_info."); - // TODO: Add user and password as fields of GrpcClient and use here. - Box::pin(async { - let zebrad_client = JsonRpcConnector::new( - self.zebrad_uri.clone(), - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await?; - - let zebra_info = zebrad_client - .get_info() - .await - .map_err(|e| e.to_grpc_status())?; - let blockchain_info = zebrad_client - .get_blockchain_info() - .await - .map_err(|e| e.to_grpc_status())?; - let build_info = get_build_info(); - - let sapling_id = zebra_rpc::methods::ConsensusBranchIdHex::new( - zebra_chain::parameters::ConsensusBranchId::from_hex("76b809bb") - .map_err(|_e| { - tonic::Status::internal( - "Internal Error - Consesnsus Branch ID hex conversion failed", - ) - })? 
- .into(), - ); - let sapling_activation_height = blockchain_info - .upgrades - .get(&sapling_id) - .map_or(zebra_chain::block::Height(1), |sapling_json| { - sapling_json.into_parts().1 - }); - - let consensus_branch_id = zebra_chain::parameters::ConsensusBranchId::from( - blockchain_info.consensus.into_parts().0, - ) - .to_string(); - - Ok(tonic::Response::new(LightdInfo { - version: build_info.version, - vendor: "ZingoLabs ZainoD".to_string(), - taddr_support: true, - chain_name: blockchain_info.chain, - sapling_activation_height: sapling_activation_height.0 as u64, - consensus_branch_id, - block_height: blockchain_info.blocks.0 as u64, - git_commit: build_info.commit_hash, - branch: build_info.branch, - build_date: build_info.build_date, - build_user: build_info.build_user, - estimated_height: blockchain_info.estimated_height.0 as u64, - zcashd_build: zebra_info.build, - zcashd_subversion: zebra_info.subversion, - })) - }) - } - - /// Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) [from zebrad] - /// This RPC has not been implemented as it is not currently used by zingolib. - /// If you require this RPC please open an issue or PR at the Zingo-Indexer github (https://github.com/zingolabs/zingo-indexer). - fn ping<'life0, 'async_trait>( - &'life0 self, - _request: tonic::Request, - ) -> core::pin::Pin< - Box< - dyn core::future::Future< - Output = std::result::Result, tonic::Status>, - > + core::marker::Send - + 'async_trait, - >, - > - where - 'life0: 'async_trait, - Self: 'async_trait, - { - println!("[TEST] Received call of ping."); - Box::pin(async { - Err(tonic::Status::unimplemented("ping not yet implemented. 
If you require this RPC please open an issue or PR at the Zingo-Indexer github (https://github.com/zingolabs/zingo-indexer).")) - }) - } -} diff --git a/zaino-serve/src/server.rs b/zaino-serve/src/server.rs index 9d7ab47d9..1631c2714 100644 --- a/zaino-serve/src/server.rs +++ b/zaino-serve/src/server.rs @@ -1,88 +1,6 @@ -//! Zaino's gRPC server implementation. +//! Zaino's RPC Server implementation. -use std::sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, -}; - -pub mod director; +pub mod config; pub mod error; -pub(crate) mod ingestor; -pub(crate) mod queue; -pub mod request; -pub(crate) mod worker; - -/// Holds a thread safe reperesentation of a StatusType. -/// Possible values: -/// - [0: Spawning] -/// - [1: Listening] -/// - [2: Working] -/// - [3: Inactive] -/// - [4: Closing]. -/// - [>=5: Offline]. -/// - [>=6: Error]. -/// TODO: Define error code spec. -#[derive(Debug, Clone)] -pub struct AtomicStatus(Arc); - -impl AtomicStatus { - /// Creates a new AtomicStatus - pub fn new(status: u16) -> Self { - Self(Arc::new(AtomicUsize::new(status as usize))) - } - - /// Loads the value held in the AtomicStatus - pub fn load(&self) -> usize { - self.0.load(Ordering::SeqCst) - } - - /// Sets the value held in the AtomicStatus - pub fn store(&self, status: usize) { - self.0.store(status, Ordering::SeqCst); - } -} - -/// Status of the server's components. -#[derive(Debug, PartialEq, Clone)] -pub enum StatusType { - /// Running initial startup routine. - Spawning = 0, - /// Waiting for requests from the queue. - Listening = 1, - /// Processing requests from the queue.StatusType - Working = 2, - /// On hold, due to blockcache / node error. - Inactive = 3, - /// Running shutdown routine. - Closing = 4, - /// Offline. - Offline = 5, - /// Offline. 
- Error = 6, -} - -impl From for StatusType { - fn from(value: usize) -> Self { - match value { - 0 => StatusType::Spawning, - 1 => StatusType::Listening, - 2 => StatusType::Working, - 3 => StatusType::Inactive, - 4 => StatusType::Closing, - 5 => StatusType::Offline, - _ => StatusType::Error, - } - } -} - -impl From for usize { - fn from(status: StatusType) -> Self { - status as usize - } -} - -impl From for StatusType { - fn from(status: AtomicStatus) -> Self { - status.load().into() - } -} +pub mod grpc; +pub mod jsonrpc; diff --git a/zaino-serve/src/server/config.rs b/zaino-serve/src/server/config.rs new file mode 100644 index 000000000..31661dd99 --- /dev/null +++ b/zaino-serve/src/server/config.rs @@ -0,0 +1,81 @@ +//! Server configuration data. + +use std::{net::SocketAddr, path::PathBuf}; + +use tonic::transport::{Identity, ServerTlsConfig}; + +use super::error::ServerError; + +/// Settings for a Zaino configured with gRPC TLS: paths to key and certificate. +#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] +pub struct GrpcTls { + /// Path to the TLS certificate file in PEM format. + pub cert_path: PathBuf, + /// Path to the TLS private key file in PEM format. + pub key_path: PathBuf, +} + +/// Configuration data for Zaino's gRPC server. +#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] +pub struct GrpcServerConfig { + /// gRPC server bind addr. + #[serde(alias = "grpc_listen_address")] + pub listen_address: SocketAddr, + /// Enables TLS. + pub tls: Option, +} + +impl GrpcServerConfig { + /// If TLS is enabled, reads the certificate and key files and returns a valid + /// `ServerTlsConfig`. If TLS is not enabled, returns `Ok(None)`. 
+ pub async fn get_valid_tls(&self) -> Result, ServerError> { + match self.tls.clone() { + Some(tls) => { + if !tls.cert_path.exists() { + return Err(ServerError::ServerConfigError( + "TLS enabled but tls_cert_path does not exist".into(), + )); + } + let cert_path = tls.cert_path; + + if !tls.key_path.exists() { + return Err(ServerError::ServerConfigError( + "TLS enabled but tls_key_path does not exist".into(), + )); + } + let key_path = tls.key_path; + let cert = tokio::fs::read(cert_path).await.map_err(|e| { + ServerError::ServerConfigError(format!("Failed to read TLS certificate: {e}")) + })?; + let key = tokio::fs::read(key_path).await.map_err(|e| { + ServerError::ServerConfigError(format!("Failed to read TLS key: {e}")) + })?; + let tls_id = Identity::from_pem(cert, key); + let tls_config = ServerTlsConfig::new().identity(tls_id); + Ok(Some(tls_config)) + } + None => Ok(None), + } + } +} + +/// Configuration data for Zaino's JSON RPC server, capable of servicing clients over TCP. +#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] +pub struct JsonRpcServerConfig { + /// Server bind addr. + pub json_rpc_listen_address: SocketAddr, + + /// Enable cookie-based authentication with a valid `Some()` value: Directory to store authentication cookie file. + /// An empty PathBuf that is still Some will have an emphemeral path assigned to it when zaino loads the config. + #[serde(default)] + pub cookie_dir: Option, +} + +impl Default for JsonRpcServerConfig { + fn default() -> Self { + Self { + json_rpc_listen_address: "127.0.0.1:0".parse().unwrap(), + cookie_dir: None, + } + } +} diff --git a/zaino-serve/src/server/director.rs b/zaino-serve/src/server/director.rs deleted file mode 100644 index 6f5ccc07c..000000000 --- a/zaino-serve/src/server/director.rs +++ /dev/null @@ -1,249 +0,0 @@ -//! Zingo-Indexer gRPC server. 
- -use http::Uri; -use std::{ - net::SocketAddr, - sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, - Arc, - }, -}; - -use crate::server::{ - error::{IngestorError, ServerError, WorkerError}, - ingestor::TcpIngestor, - queue::Queue, - request::ZingoIndexerRequest, - worker::{WorkerPool, WorkerPoolStatus}, - AtomicStatus, StatusType, -}; - -/// Holds the status of the server and all its components. -#[derive(Debug, Clone)] -pub struct ServerStatus { - /// Status of the Server. - pub server_status: AtomicStatus, - tcp_ingestor_status: AtomicStatus, - workerpool_status: WorkerPoolStatus, - request_queue_status: Arc, -} - -impl ServerStatus { - /// Creates a ServerStatus. - pub fn new(max_workers: u16) -> Self { - ServerStatus { - server_status: AtomicStatus::new(5), - tcp_ingestor_status: AtomicStatus::new(5), - workerpool_status: WorkerPoolStatus::new(max_workers), - request_queue_status: Arc::new(AtomicUsize::new(0)), - } - } - - /// Returns the ServerStatus. - pub fn load(&self) -> ServerStatus { - self.server_status.load(); - self.tcp_ingestor_status.load(); - self.workerpool_status.load(); - self.request_queue_status.load(Ordering::SeqCst); - self.clone() - } -} - -/// LightWallet server capable of servicing clients over TCP. -pub struct Server { - /// Listens for incoming gRPC requests over HTTP. - tcp_ingestor: Option, - /// Dynamically sized pool of workers. - worker_pool: WorkerPool, - /// Request queue. - request_queue: Queue, - /// Servers current status. - status: ServerStatus, - /// Represents the Online status of the Server. - pub online: Arc, -} - -impl Server { - /// Spawns a new Server. 
- #[allow(clippy::too_many_arguments)] - pub async fn spawn( - tcp_active: bool, - tcp_ingestor_listen_addr: Option, - lightwalletd_uri: Uri, - zebrad_uri: Uri, - max_queue_size: u16, - max_worker_pool_size: u16, - idle_worker_pool_size: u16, - status: ServerStatus, - online: Arc, - ) -> Result { - if !tcp_active { - return Err(ServerError::ServerConfigError( - "Cannot start server with no ingestors selected.".to_string(), - )); - } - if tcp_active && tcp_ingestor_listen_addr.is_none() { - return Err(ServerError::ServerConfigError( - "TCP is active but no address provided.".to_string(), - )); - } - println!("Launching Server!\n"); - status.server_status.store(0); - let request_queue: Queue = - Queue::new(max_queue_size as usize, status.request_queue_status.clone()); - status.request_queue_status.store(0, Ordering::SeqCst); - let tcp_ingestor = if tcp_active { - println!("Launching TcpIngestor.."); - Some( - TcpIngestor::spawn( - tcp_ingestor_listen_addr - .expect("tcp_ingestor_listen_addr returned none when used."), - request_queue.tx().clone(), - status.tcp_ingestor_status.clone(), - online.clone(), - ) - .await?, - ) - } else { - None - }; - println!("Launching WorkerPool.."); - let worker_pool = WorkerPool::spawn( - max_worker_pool_size, - idle_worker_pool_size, - request_queue.rx().clone(), - request_queue.tx().clone(), - lightwalletd_uri, - zebrad_uri, - status.workerpool_status.clone(), - online.clone(), - ) - .await; - Ok(Server { - tcp_ingestor, - worker_pool, - request_queue, - status: status.clone(), - online, - }) - } - - /// Starts the gRPC service. - /// - /// Launches all components then enters command loop: - /// - Checks request queue and workerpool to spawn / despawn workers as required. - /// - Updates the ServerStatus. - /// - Checks for shutdown signal, shutting down server if received. 
- pub async fn serve(mut self) -> tokio::task::JoinHandle> { - tokio::task::spawn(async move { - // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. - let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); - let mut tcp_ingestor_handle = None; - let mut worker_handles; - if let Some(ingestor) = self.tcp_ingestor.take() { - tcp_ingestor_handle = Some(ingestor.serve().await); - } - worker_handles = self.worker_pool.clone().serve().await; - self.status.server_status.store(1); - loop { - if self.request_queue.queue_length() >= (self.request_queue.max_length() / 4) - && (self.worker_pool.workers() < self.worker_pool.max_size() as usize) - { - match self.worker_pool.push_worker().await { - Ok(handle) => { - worker_handles.push(handle); - } - Err(_e) => { - eprintln!("WorkerPool at capacity"); - } - } - } else if (self.request_queue.queue_length() <= 1) - && (self.worker_pool.workers() > self.worker_pool.idle_size() as usize) - { - let worker_index = self.worker_pool.workers() - 1; - let worker_handle = worker_handles.remove(worker_index); - match self.worker_pool.pop_worker(worker_handle).await { - Ok(_) => {} - Err(e) => { - eprintln!("Failed to pop worker from pool: {}", e); - // TODO: Handle this error. - } - } - } - self.statuses(); - // TODO: Implement check_statuses() and run here. - if self.check_for_shutdown().await { - self.status.server_status.store(4); - let worker_handle_options: Vec< - Option>>, - > = worker_handles.into_iter().map(Some).collect(); - self.shutdown_components(tcp_ingestor_handle, worker_handle_options) - .await; - self.status.server_status.store(5); - return Ok(()); - } - interval.tick().await; - } - }) - } - - /// Checks indexers online status and servers internal status for closure signal. 
- pub async fn check_for_shutdown(&self) -> bool { - if self.status() >= 4 { - return true; - } - if !self.check_online() { - return true; - } - false - } - - /// Sets the servers to close gracefully. - pub async fn shutdown(&mut self) { - self.status.server_status.store(4) - } - - /// Sets the server's components to close gracefully. - async fn shutdown_components( - &mut self, - tcp_ingestor_handle: Option>>, - mut worker_handles: Vec>>>, - ) { - if let Some(handle) = tcp_ingestor_handle { - self.status.tcp_ingestor_status.store(4); - handle.await.ok(); - } - self.worker_pool.shutdown(&mut worker_handles).await; - } - - /// Returns the servers current status usize. - pub fn status(&self) -> usize { - self.status.server_status.load() - } - - /// Returns the servers current statustype. - pub fn statustype(&self) -> StatusType { - StatusType::from(self.status()) - } - - /// Updates and returns the status of the server and its parts. - pub fn statuses(&mut self) -> ServerStatus { - self.status.server_status.load(); - self.status.tcp_ingestor_status.load(); - self.status - .request_queue_status - .store(self.request_queue.queue_length(), Ordering::SeqCst); - self.worker_pool.status(); - self.status.clone() - } - - /// Checks statuses, handling errors. - pub async fn check_statuses(&mut self) { - todo!() - } - - /// Check the online status on the indexer. - fn check_online(&self) -> bool { - self.online.load(Ordering::SeqCst) - } -} diff --git a/zaino-serve/src/server/error.rs b/zaino-serve/src/server/error.rs index 9ca6d999d..f79383c8b 100644 --- a/zaino-serve/src/server/error.rs +++ b/zaino-serve/src/server/error.rs @@ -1,79 +1,13 @@ //! Hold error types for the server and related functionality. -use std::io; -use tokio::sync::mpsc::error::TrySendError; - -use crate::server::request::ZingoIndexerRequest; - -/// Zingo-Indexer queue errors. -#[derive(Debug, thiserror::Error)] -pub enum QueueError { - /// Returned when a requests is pushed to a full queue. 
- #[error("Queue Full")] - QueueFull(T), - /// Returned when a worker or dispatcher tries to receive from an empty queue. - #[error("Queue Empty")] - QueueEmpty, - /// Returned when a worker or dispatcher tries to receive from a closed queue. - #[error("Queue Disconnected")] - QueueClosed, -} - -/// Zingo-Indexer request errors. -#[derive(Debug, thiserror::Error)] -pub enum RequestError { - /// Errors originating from incorrect enum types being called. - #[error("Incorrect variant")] - IncorrectVariant, - /// System time errors. - #[error("System time error: {0}")] - SystemTimeError(#[from] std::time::SystemTimeError), -} - -/// Zingo-Indexer ingestor errors. -#[derive(Debug, thiserror::Error)] -pub enum IngestorError { - /// Request based errors. - #[error("Request error: {0}")] - RequestError(#[from] RequestError), - /// Tcp listener based error. - #[error("Failed to accept TcpStream: {0}")] - ClientConnectionError(#[from] io::Error), - /// Error from failing to send new request to the queue. - #[error("Failed to send request to the queue: {0}")] - QueuePushError(#[from] TrySendError), -} - -/// Zingo-Indexer worker errors. -#[derive(Debug, thiserror::Error)] -pub enum WorkerError { - /// Tonic transport error. - #[error("Tonic transport error: {0}")] - TonicTransportError(#[from] tonic::transport::Error), - /// Tokio join error. - #[error("Tokio join error: {0}")] - TokioJoinError(#[from] tokio::task::JoinError), - /// Worker Pool Full. - #[error("Worker Pool Full")] - WorkerPoolFull, - /// Worker Pool at idle. - #[error("Worker Pool a idle")] - WorkerPoolIdle, -} - /// Zingo-Indexer server errors. #[derive(Debug, thiserror::Error)] pub enum ServerError { - /// Request based errors. - #[error("Request error: {0}")] - RequestError(#[from] RequestError), - /// Ingestor based errors. - #[error("Ingestor error: {0}")] - IngestorError(#[from] IngestorError), - /// Worker based errors. 
- #[error("Worker error: {0}")] - WorkerError(#[from] WorkerError), /// Server configuration errors. #[error("Server configuration error: {0}")] ServerConfigError(String), + + /// Errors returned by Tonic's transport layer. + #[error("Tonic transport error: {0}")] + TonicTransportError(#[from] tonic::transport::Error), } diff --git a/zaino-serve/src/server/grpc.rs b/zaino-serve/src/server/grpc.rs new file mode 100644 index 000000000..fb6ec0559 --- /dev/null +++ b/zaino-serve/src/server/grpc.rs @@ -0,0 +1,97 @@ +//! Zaino's gRPC Server Implementation. + +use std::time::Duration; + +use tokio::time::interval; +use tonic::transport::Server; +use tracing::warn; +use zaino_proto::proto::service::compact_tx_streamer_server::CompactTxStreamerServer; +use zaino_state::{AtomicStatus, IndexerSubscriber, LightWalletIndexer, StatusType, ZcashIndexer}; + +use crate::{ + rpc::GrpcClient, + server::{config::GrpcServerConfig, error::ServerError}, +}; + +/// LightWallet gRPC server capable of servicing clients over TCP. +pub struct TonicServer { + /// Current status of the server. + pub status: AtomicStatus, + /// JoinHandle for the servers `serve` task. + pub server_handle: Option>>, +} + +impl TonicServer { + /// Starts the gRPC service. + /// + /// Launches all components then enters command loop: + /// - Updates the ServerStatus. + /// - Checks for shutdown signal, shutting down server if received. + pub async fn spawn( + service_subscriber: IndexerSubscriber, + server_config: GrpcServerConfig, + ) -> Result { + let status = AtomicStatus::new(StatusType::Spawning); + + let svc = CompactTxStreamerServer::new(GrpcClient { + service_subscriber: service_subscriber.clone(), + }); + + let mut server_builder = Server::builder(); + if let Some(tls_config) = server_config.get_valid_tls().await? 
{ + server_builder = server_builder.tls_config(tls_config).map_err(|e| { + ServerError::ServerConfigError(format!("TLS configuration error: {e}")) + })?; + } + + let shutdown_check_status = status.clone(); + let mut shutdown_check_interval = interval(Duration::from_millis(100)); + let shutdown_signal = async move { + loop { + shutdown_check_interval.tick().await; + if shutdown_check_status.load() == StatusType::Closing { + break; + } + } + }; + let server_future = server_builder + .add_service(svc) + .serve_with_shutdown(server_config.listen_address, shutdown_signal); + + let task_status = status.clone(); + let server_handle = tokio::task::spawn(async move { + task_status.store(StatusType::Ready); + server_future.await?; + task_status.store(StatusType::Offline); + Ok(()) + }); + + Ok(TonicServer { + status, + server_handle: Some(server_handle), + }) + } + + /// Sets the servers to close gracefully. + pub async fn close(&mut self) { + self.status.store(StatusType::Closing); + + if let Some(handle) = self.server_handle.take() { + let _ = handle.await; + } + } + + /// Returns the servers current status. + pub fn status(&self) -> StatusType { + self.status.load() + } +} + +impl Drop for TonicServer { + fn drop(&mut self) { + if let Some(handle) = self.server_handle.take() { + handle.abort(); + warn!("Warning: TonicServer dropped without explicit shutdown. Aborting server task."); + } + } +} diff --git a/zaino-serve/src/server/ingestor.rs b/zaino-serve/src/server/ingestor.rs deleted file mode 100644 index cfec2cb67..000000000 --- a/zaino-serve/src/server/ingestor.rs +++ /dev/null @@ -1,127 +0,0 @@ -//! Holds the server ingestor (listener) implementations. 
- -use std::{ - net::SocketAddr, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, -}; -use tokio::net::TcpListener; - -use crate::server::{ - error::{IngestorError, QueueError}, - queue::QueueSender, - request::ZingoIndexerRequest, - AtomicStatus, StatusType, -}; - -/// Listens for incoming gRPC requests over HTTP. -pub(crate) struct TcpIngestor { - /// Tcp Listener. - ingestor: TcpListener, - /// Used to send requests to the queue. - queue: QueueSender, - /// Current status of the ingestor. - status: AtomicStatus, - /// Represents the Online status of the gRPC server. - online: Arc, -} - -impl TcpIngestor { - /// Creates a Tcp Ingestor. - pub(crate) async fn spawn( - listen_addr: SocketAddr, - queue: QueueSender, - status: AtomicStatus, - online: Arc, - ) -> Result { - status.store(0); - let listener = TcpListener::bind(listen_addr).await?; - println!("TcpIngestor listening at: {}.", listen_addr); - Ok(TcpIngestor { - ingestor: listener, - queue, - online, - status, - }) - } - - /// Starts Tcp service. - pub(crate) async fn serve(self) -> tokio::task::JoinHandle> { - tokio::task::spawn(async move { - // NOTE: This interval may need to be changed or removed / moved once scale testing begins. - let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); - // TODO Check blockcache sync status and wait on server / node if on hold. - self.status.store(1); - loop { - tokio::select! { - _ = interval.tick() => { - if self.check_for_shutdown().await { - self.status.store(5); - return Ok(()); - } - } - incoming = self.ingestor.accept() => { - // NOTE: This may need to be removed / moved for scale use. 
- if self.check_for_shutdown().await { - self.status.store(5); - return Ok(()); - } - match incoming { - Ok((stream, _)) => { - match self.queue.try_send(ZingoIndexerRequest::new_from_grpc(stream)) { - Ok(_) => { - println!("[TEST] Requests in Queue: {}", self.queue.queue_length()); - } - Err(QueueError::QueueFull(_request)) => { - eprintln!("Queue Full."); - // TODO: Return queue full tonic status over tcpstream and close (that TcpStream..). - } - Err(e) => { - eprintln!("Queue Closed. Failed to send request to queue: {}", e); - // TODO: Handle queue closed error here. - } - } - } - Err(e) => { - eprintln!("Failed to accept connection with client: {}", e); - // TODO: Handle failed connection errors here (count errors and restart ingestor / proxy or initiate shotdown?) - } - } - } - } - } - }) - } - - /// Checks indexers online status and ingestors internal status for closure signal. - pub(crate) async fn check_for_shutdown(&self) -> bool { - if self.status() >= 4 { - return true; - } - if !self.check_online() { - return true; - } - false - } - - /// Sets the ingestor to close gracefully. - pub(crate) async fn _shutdown(&mut self) { - self.status.store(4) - } - - /// Returns the ingestor current status usize. - pub(crate) fn status(&self) -> usize { - self.status.load() - } - - /// Returns the ingestor current statustype. - pub(crate) fn _statustype(&self) -> StatusType { - StatusType::from(self.status()) - } - - fn check_online(&self) -> bool { - self.online.load(Ordering::SeqCst) - } -} diff --git a/zaino-serve/src/server/jsonrpc.rs b/zaino-serve/src/server/jsonrpc.rs new file mode 100644 index 000000000..5ae65d076 --- /dev/null +++ b/zaino-serve/src/server/jsonrpc.rs @@ -0,0 +1,150 @@ +//! Zaino's JsonRPC Server Implementation. 
+ +use crate::{ + rpc::{jsonrpc::service::ZcashIndexerRpcServer as _, JsonRpcClient}, + server::{config::JsonRpcServerConfig, error::ServerError}, +}; + +use zaino_state::{AtomicStatus, IndexerSubscriber, LightWalletIndexer, StatusType, ZcashIndexer}; + +use zebra_rpc::server::{ + cookie::{remove_from_disk, write_to_disk, Cookie}, + http_request_compatibility::HttpRequestMiddlewareLayer, + rpc_call_compatibility::FixRpcResponseMiddleware, +}; + +use jsonrpsee::server::{RpcServiceBuilder, ServerBuilder}; +use std::{path::PathBuf, time::Duration}; +use tokio::time::interval; +use tracing::warn; + +/// JSON-RPC server capable of servicing clients over TCP. +pub struct JsonRpcServer { + /// Current status of the server. + pub status: AtomicStatus, + /// JoinHandle for the servers `serve` task. + pub server_handle: Option>>, + /// Cookie dir. + cookie_dir: Option, +} + +impl JsonRpcServer { + /// Starts the JSON-RPC service. + /// + /// Launches all components then enters command loop: + /// - Updates the ServerStatus. + /// - Checks for shutdown signal, shutting down server if received. + pub async fn spawn( + service_subscriber: IndexerSubscriber, + server_config: JsonRpcServerConfig, + ) -> Result { + let status = AtomicStatus::new(StatusType::Spawning); + + let rpc_impl = JsonRpcClient { + service_subscriber: service_subscriber.clone(), + }; + + // Initialize Zebra-compatible cookie-based authentication if enabled. 
+ let (cookie, cookie_dir) = if server_config.cookie_dir.is_some() { + let cookie = Cookie::default(); + if let Some(dir) = &server_config.cookie_dir { + write_to_disk(&cookie, dir).map_err(|e| { + ServerError::ServerConfigError(format!("Failed to write cookie: {e}")) + })?; + } else { + return Err(ServerError::ServerConfigError( + "Cookie dir must be provided when auth is enabled".into(), + )); + } + (Some(cookie), server_config.cookie_dir) + } else { + (None, None) + }; + + // Set up Zebra HTTP request compatibility middleware (handles auth and content-type issues) + let http_middleware_layer = HttpRequestMiddlewareLayer::new(cookie); + + // Set up Zebra JSON-RPC call compatibility middleware (RPC version fixes) + let rpc_middleware = RpcServiceBuilder::new() + .rpc_logger(1024) + .layer_fn(FixRpcResponseMiddleware::new); + + // Build the JSON-RPC server with middleware integrated + let server = ServerBuilder::default() + .set_http_middleware(tower::ServiceBuilder::new().layer(http_middleware_layer)) + .set_rpc_middleware(rpc_middleware) + .build(server_config.json_rpc_listen_address) + .await + .map_err(|e| { + ServerError::ServerConfigError(format!("JSON-RPC server build error: {e}")) + })?; + + let server_handle = server.start(rpc_impl.into_rpc()); + + let shutdown_check_status = status.clone(); + let mut shutdown_check_interval = interval(Duration::from_millis(100)); + let shutdown_signal = async move { + loop { + shutdown_check_interval.tick().await; + if shutdown_check_status.load() == StatusType::Closing { + break; + } + } + }; + + let task_status = status.clone(); + let server_task_handle = tokio::task::spawn({ + let server_handle_clone = server_handle.clone(); + async move { + task_status.store(StatusType::Ready); + + tokio::select! 
{ + _ = shutdown_signal => { + let _ = server_handle_clone.stop(); + } + _ = server_handle.stopped() => {}, + } + + task_status.store(StatusType::Offline); + Ok(()) + } + }); + + Ok(JsonRpcServer { + status, + server_handle: Some(server_task_handle), + cookie_dir, + }) + } + + /// Sets the servers to close gracefully. + pub async fn close(&mut self) { + self.status.store(StatusType::Closing); + + if let Some(dir) = &self.cookie_dir { + if let Err(e) = remove_from_disk(dir) { + warn!("Error removing cookie: {e}"); + } + } + + if let Some(handle) = self.server_handle.take() { + let _ = handle.await; + } + } + + /// Returns the servers current status. + pub fn status(&self) -> StatusType { + self.status.load() + } +} + +impl Drop for JsonRpcServer { + fn drop(&mut self) { + if let Some(handle) = self.server_handle.take() { + handle.abort(); + warn!( + "Warning: JsonRpcServer dropped without explicit shutdown. Aborting server task." + ); + } + } +} diff --git a/zaino-serve/src/server/queue.rs b/zaino-serve/src/server/queue.rs deleted file mode 100644 index 3d7755014..000000000 --- a/zaino-serve/src/server/queue.rs +++ /dev/null @@ -1,155 +0,0 @@ -//! Zingo-Indexer queue implementation. - -use crate::server::error::QueueError; -use crossbeam_channel::{bounded, Receiver, Sender}; -use std::sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, -}; - -/// Queue with max length. -#[derive(Debug, Clone)] -pub(crate) struct Queue { - /// Max number of messages allowed in the queue. - max_length: usize, - /// Used to track current messages in the queue. - queue_status: Arc, - /// Queue sender. - queue_tx: QueueSender, - /// Queue receiver. - queue_rx: QueueReceiver, -} - -impl Queue { - /// Creates a new queue with a maximum size. 
- pub(crate) fn new(max_length: usize, queue_status: Arc) -> Self { - let (queue_tx, queue_rx) = bounded(max_length); - queue_status.store(0, Ordering::SeqCst); - Queue { - max_length, - queue_status: queue_status.clone(), - queue_tx: QueueSender { - inner: queue_tx, - queue_status: queue_status.clone(), - }, - queue_rx: QueueReceiver { - inner: queue_rx, - queue_status, - }, - } - } - - /// Returns a queue transmitter. - pub(crate) fn tx(&self) -> QueueSender { - self.queue_tx.clone() - } - - /// Returns a queue receiver. - pub(crate) fn rx(&self) -> QueueReceiver { - self.queue_rx.clone() - } - - /// Returns the max length of the queue. - pub(crate) fn max_length(&self) -> usize { - self.max_length - } - - /// Returns the current length of the queue. - pub(crate) fn queue_length(&self) -> usize { - self.queue_status.load(Ordering::SeqCst) - } -} - -/// Sends messages to a queue. -#[derive(Debug)] -pub(crate) struct QueueSender { - /// Crossbeam_Channel Sender. - inner: Sender, - /// Used to track current messages in the queue. - queue_status: Arc, -} - -impl Clone for QueueSender { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - queue_status: Arc::clone(&self.queue_status), - } - } -} - -impl QueueSender { - /// Tries to add a request to the queue, updating the queue size. - pub(crate) fn try_send(&self, message: T) -> Result<(), QueueError> { - match self.inner.try_send(message) { - Ok(_) => { - self.queue_status.fetch_add(1, Ordering::SeqCst); - Ok(()) - } - Err(crossbeam_channel::TrySendError::Full(t)) => Err(QueueError::QueueFull(t)), - Err(crossbeam_channel::TrySendError::Disconnected(_)) => Err(QueueError::QueueClosed), - } - } - - /// Returns the current length of the queue. - pub(crate) fn queue_length(&self) -> usize { - self.queue_status.load(Ordering::SeqCst) - } -} - -/// Receives messages from a queue. -#[derive(Debug)] -pub(crate) struct QueueReceiver { - /// Crossbeam_Channel Receiver. 
- inner: Receiver, - /// Used to track current messages in the queue. - queue_status: Arc, -} - -impl Clone for QueueReceiver { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - queue_status: Arc::clone(&self.queue_status), - } - } -} - -impl QueueReceiver { - /// Try to receive a request from the queue, updatig queue size. - pub(crate) fn try_recv(&self) -> Result> { - match self.inner.try_recv() { - Ok(message) => { - self.queue_status.fetch_sub(1, Ordering::SeqCst); - Ok(message) - } - Err(crossbeam_channel::TryRecvError::Empty) => Err(QueueError::QueueEmpty), - Err(crossbeam_channel::TryRecvError::Disconnected) => Err(QueueError::QueueClosed), - } - } - - /// Listens indefinately for an incoming message on the queue. Returns message if received or error if queue is closed. - pub(crate) async fn listen(&self) -> Result> { - // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. - let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); - loop { - match self.try_recv() { - Ok(message) => { - return Ok(message); - } - Err(QueueError::QueueEmpty) => { - interval.tick().await; - continue; - } - Err(e) => { - return Err(e); - } - } - } - } - - /// Returns the current length of the queue. - pub(crate) fn _queue_length(&self) -> usize { - self.queue_status.load(Ordering::SeqCst) - } -} diff --git a/zaino-serve/src/server/request.rs b/zaino-serve/src/server/request.rs deleted file mode 100644 index 0726236dd..000000000 --- a/zaino-serve/src/server/request.rs +++ /dev/null @@ -1,115 +0,0 @@ -//! Request types. - -use crate::server::error::RequestError; -use std::time::SystemTime; -use tokio::net::TcpStream; - -/// Requests queuing metadata. -#[derive(Debug, Clone)] -struct QueueData { - // / Exclusive request id. - // request_id: u64, // TODO: implement with request queue (implement exlusive request_id generator in queue object). - /// Time which the request was received. 
- #[allow(dead_code)] - time_received: SystemTime, - /// Number of times the request has been requeued. - #[allow(dead_code)] - requeue_attempts: u32, -} - -impl QueueData { - /// Returns a new instance of QueueData. - fn new() -> Self { - QueueData { - time_received: SystemTime::now(), - requeue_attempts: 0, - } - } - - /// Increases the requeue attempts for the request. - #[allow(dead_code)] - pub(crate) fn increase_requeues(&mut self) { - self.requeue_attempts += 1; - } - - /// Returns the duration sunce the request was received. - #[allow(dead_code)] - fn duration(&self) -> Result { - self.time_received.elapsed().map_err(RequestError::from) - } - - /// Returns the number of times the request has been requeued. - #[allow(dead_code)] - fn requeues(&self) -> u32 { - self.requeue_attempts - } -} - -/// TcpStream holing an incoming gRPC request. -#[derive(Debug)] -pub(crate) struct TcpRequest(TcpStream); - -impl TcpRequest { - /// Returns the underlying TcpStream help by the request - pub(crate) fn get_stream(self) -> TcpStream { - self.0 - } -} - -/// Requests originating from the Tcp server. -#[derive(Debug)] -pub struct TcpServerRequest { - #[allow(dead_code)] - queuedata: QueueData, - request: TcpRequest, -} - -impl TcpServerRequest { - /// Returns the underlying request. - pub(crate) fn get_request(self) -> TcpRequest { - self.request - } -} - -/// Zingo-Indexer request, used by request queue. -#[derive(Debug)] -pub enum ZingoIndexerRequest { - /// Requests originating from the gRPC server. - TcpServerRequest(TcpServerRequest), -} - -impl ZingoIndexerRequest { - /// Creates a ZingoIndexerRequest from a gRPC service call, recieved by the gRPC server. - /// - /// TODO: implement proper functionality along with queue. - pub(crate) fn new_from_grpc(stream: TcpStream) -> Self { - ZingoIndexerRequest::TcpServerRequest(TcpServerRequest { - queuedata: QueueData::new(), - request: TcpRequest(stream), - }) - } - - /// Increases the requeue attempts for the request. 
- #[allow(dead_code)] - pub(crate) fn increase_requeues(&mut self) { - match self { - ZingoIndexerRequest::TcpServerRequest(ref mut req) => req.queuedata.increase_requeues(), - } - } - - /// Returns the duration sunce the request was received. - #[allow(dead_code)] - pub(crate) fn duration(&self) -> Result { - match self { - ZingoIndexerRequest::TcpServerRequest(ref req) => req.queuedata.duration(), - } - } - - /// Returns the number of times the request has been requeued. - #[allow(dead_code)] - pub(crate) fn requeues(&self) -> u32 { - match self { - ZingoIndexerRequest::TcpServerRequest(ref req) => req.queuedata.requeues(), - } - } -} diff --git a/zaino-serve/src/server/worker.rs b/zaino-serve/src/server/worker.rs deleted file mode 100644 index 4c6db84ee..000000000 --- a/zaino-serve/src/server/worker.rs +++ /dev/null @@ -1,365 +0,0 @@ -//! Holds the server worker implementation. - -use std::sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, - Arc, -}; - -use http::Uri; -use tonic::transport::Server; - -use crate::{ - rpc::GrpcClient, - server::{ - error::WorkerError, - queue::{QueueReceiver, QueueSender}, - request::ZingoIndexerRequest, - AtomicStatus, - }, -}; - -use zaino_proto::proto::service::compact_tx_streamer_server::CompactTxStreamerServer; - -/// A queue working is the entity that takes requests from the queue and processes them. -/// -/// TODO: - Add JsonRpcConnector to worker and pass to underlying RPC services. -/// - Currently a new JsonRpcConnector is spawned for every new RPC serviced. -#[derive(Debug, Clone)] -pub(crate) struct Worker { - /// Worker ID. - _worker_id: usize, - /// Used to pop requests from the queue - queue: QueueReceiver, - /// Used to requeue requests. - requeue: QueueSender, - /// gRPC client used for processing requests received over http. - grpc_client: GrpcClient, - /// Thread safe worker status. - atomic_status: AtomicStatus, - /// Represents the Online status of the Worker. 
- pub online: Arc, -} - -impl Worker { - /// Creates a new queue worker. - #[allow(clippy::too_many_arguments)] - pub(crate) async fn spawn( - _worker_id: usize, - queue: QueueReceiver, - requeue: QueueSender, - lightwalletd_uri: Uri, - zebrad_uri: Uri, - atomic_status: AtomicStatus, - online: Arc, - ) -> Self { - let grpc_client = GrpcClient { - lightwalletd_uri, - zebrad_uri, - online: online.clone(), - }; - Worker { - _worker_id, - queue, - requeue, - grpc_client, - atomic_status, - online, - } - } - - /// Starts queue worker service routine. - /// - /// TODO: Add requeue logic for node errors. - pub(crate) async fn serve(self) -> tokio::task::JoinHandle> { - tokio::task::spawn(async move { - // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. - let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(100)); - let svc = CompactTxStreamerServer::new(self.grpc_client.clone()); - // TODO: create tonic server here for use within loop. - self.atomic_status.store(1); - loop { - tokio::select! { - _ = interval.tick() => { - if self.check_for_shutdown().await { - return Ok(()); - } - } - incoming = self.queue.listen() => { - match incoming { - Ok(request) => { - self.atomic_status.store(2); - match request { - ZingoIndexerRequest::TcpServerRequest(request) => { - Server::builder().add_service(svc.clone()) - .serve_with_incoming( async_stream::stream! { - yield Ok::<_, std::io::Error>( - request.get_request().get_stream() - ); - } - ) - .await?; - } - } - // NOTE: This may need to be removed for scale use. - if self.check_for_shutdown().await { - self.atomic_status.store(5); - return Ok(()); - } else { - self.atomic_status.store(1); - } - } - Err(_e) => { - self.atomic_status.store(5); - eprintln!("Queue closed, worker shutting down."); - // TODO: Handle queue closed error here. (return correct error / undate status to correct err code.) 
- return Ok(()); - } - } - } - } - } - }) - } - - /// Checks for closure signals. - /// - /// Checks AtomicStatus for closure signal. - /// Checks (online) AtomicBool for fatal error signal. - pub(crate) async fn check_for_shutdown(&self) -> bool { - if self.atomic_status() >= 4 { - return true; - } - if !self.check_online() { - return true; - } - false - } - - /// Sets the worker to close gracefully. - pub(crate) async fn shutdown(&mut self) { - self.atomic_status.store(4) - } - - /// Returns the worker's ID. - pub(crate) fn _id(&self) -> usize { - self._worker_id - } - - /// Loads the workers current atomic status. - pub(crate) fn atomic_status(&self) -> usize { - self.atomic_status.load() - } - - /// Check the online status on the server. - fn check_online(&self) -> bool { - self.online.load(Ordering::SeqCst) - } -} - -/// Holds the status of the worker pool and its workers. -#[derive(Debug, Clone)] -pub struct WorkerPoolStatus { - workers: Arc, - statuses: Vec, -} - -impl WorkerPoolStatus { - /// Creates a WorkerPoolStatus. - pub(crate) fn new(max_workers: u16) -> Self { - WorkerPoolStatus { - workers: Arc::new(AtomicUsize::new(0)), - statuses: vec![AtomicStatus::new(5); max_workers as usize], - } - } - - /// Returns the WorkerPoolStatus. - pub(crate) fn load(&self) -> WorkerPoolStatus { - self.workers.load(Ordering::SeqCst); - for i in 0..self.statuses.len() { - self.statuses[i].load(); - } - self.clone() - } -} - -/// Dynamically sized pool of workers. -#[derive(Debug, Clone)] -pub(crate) struct WorkerPool { - /// Maximun number of concurrent workers allowed. - max_size: u16, - /// Minimum number of workers kept running on stanby. - idle_size: u16, - /// Workers currently in the pool - workers: Vec, - /// Status of the workerpool and its workers. - status: WorkerPoolStatus, - /// Represents the Online status of the WorkerPool. - pub online: Arc, -} - -impl WorkerPool { - /// Creates a new worker pool containing [idle_workers] workers. 
- #[allow(clippy::too_many_arguments)] - pub(crate) async fn spawn( - max_size: u16, - idle_size: u16, - queue: QueueReceiver, - _requeue: QueueSender, - lightwalletd_uri: Uri, - zebrad_uri: Uri, - status: WorkerPoolStatus, - online: Arc, - ) -> Self { - let mut workers: Vec = Vec::with_capacity(max_size as usize); - for _ in 0..idle_size { - workers.push( - Worker::spawn( - workers.len(), - queue.clone(), - _requeue.clone(), - lightwalletd_uri.clone(), - zebrad_uri.clone(), - status.statuses[workers.len()].clone(), - online.clone(), - ) - .await, - ); - } - status.workers.store(idle_size as usize, Ordering::SeqCst); - WorkerPool { - max_size, - idle_size, - workers, - status, - online, - } - } - - /// Sets workers in the worker pool to start servicing the queue. - pub(crate) async fn serve(self) -> Vec>> { - let mut worker_handles = Vec::new(); - for worker in self.workers { - worker_handles.push(worker.serve().await); - } - worker_handles - } - - /// Adds a worker to the worker pool, returns error if the pool is already at max size. - pub(crate) async fn push_worker( - &mut self, - ) -> Result>, WorkerError> { - if self.workers.len() >= self.max_size as usize { - Err(WorkerError::WorkerPoolFull) - } else { - let worker_index = self.workers(); - self.workers.push( - Worker::spawn( - worker_index, - self.workers[0].queue.clone(), - self.workers[0].requeue.clone(), - self.workers[0].grpc_client.lightwalletd_uri.clone(), - self.workers[0].grpc_client.zebrad_uri.clone(), - self.status.statuses[worker_index].clone(), - self.online.clone(), - ) - .await, - ); - self.status.workers.fetch_add(1, Ordering::SeqCst); - Ok(self.workers[worker_index].clone().serve().await) - } - } - - /// Removes a worker from the worker pool, returns error if the pool is already at idle size. 
- pub(crate) async fn pop_worker( - &mut self, - worker_handle: tokio::task::JoinHandle>, - ) -> Result<(), WorkerError> { - if self.workers.len() <= self.idle_size as usize { - Err(WorkerError::WorkerPoolIdle) - } else { - let worker_index = self.workers.len() - 1; - self.workers[worker_index].shutdown().await; - match worker_handle.await { - Ok(worker) => match worker { - Ok(()) => { - self.status.statuses[worker_index].store(5); - self.workers.pop(); - self.status.workers.fetch_sub(1, Ordering::SeqCst); - Ok(()) - } - Err(e) => { - self.status.statuses[worker_index].store(6); - eprintln!("Worker returned error on shutdown: {}", e); - // TODO: Handle the inner WorkerError. Return error. - self.status.workers.fetch_sub(1, Ordering::SeqCst); - Ok(()) - } - }, - Err(e) => { - self.status.statuses[worker_index].store(6); - eprintln!("Worker returned error on shutdown: {}", e); - // TODO: Handle the JoinError. Return error. - self.status.workers.fetch_sub(1, Ordering::SeqCst); - Ok(()) - } - } - } - } - - /// Returns the max size of the pool - pub(crate) fn max_size(&self) -> u16 { - self.max_size - } - - /// Returns the idle size of the pool - pub(crate) fn idle_size(&self) -> u16 { - self.idle_size - } - - /// Returns the current number of workers in the pool. - pub(crate) fn workers(&self) -> usize { - self.workers.len() - } - - /// Fetches and returns the status of the workerpool and its workers. - pub(crate) fn status(&self) -> WorkerPoolStatus { - self.status.workers.load(Ordering::SeqCst); - for i in 0..self.workers() { - self.status.statuses[i].load(); - } - self.status.clone() - } - - /// Shuts down all the workers in the pool. 
- pub(crate) async fn shutdown( - &mut self, - worker_handles: &mut [Option>>], - ) { - for i in (0..self.workers.len()).rev() { - self.workers[i].shutdown().await; - if let Some(worker_handle) = worker_handles[i].take() { - match worker_handle.await { - Ok(worker) => match worker { - Ok(()) => { - self.status.statuses[i].store(5); - self.workers.pop(); - self.status.workers.fetch_sub(1, Ordering::SeqCst); - } - Err(e) => { - self.status.statuses[i].store(6); - eprintln!("Worker returned error on shutdown: {}", e); - // TODO: Handle the inner WorkerError - self.status.workers.fetch_sub(1, Ordering::SeqCst); - } - }, - Err(e) => { - self.status.statuses[i].store(6); - eprintln!("Worker returned error on shutdown: {}", e); - // TODO: Handle the JoinError - self.status.workers.fetch_sub(1, Ordering::SeqCst); - } - }; - } - } - } -} diff --git a/zaino-serve/src/utils.rs b/zaino-serve/src/utils.rs deleted file mode 100644 index fc51c11ce..000000000 --- a/zaino-serve/src/utils.rs +++ /dev/null @@ -1,26 +0,0 @@ -//! Utility functions for Zingo-RPC. - -/// Zingo-Indexer build info. -pub(crate) struct BuildInfo { - /// Git commit hash. - pub commit_hash: String, - /// Git Branch. - pub branch: String, - /// Build date. - pub build_date: String, - /// Build user. - pub build_user: String, - /// Zingo-Indexer version. - pub version: String, -} - -/// Returns build info for Zingo-Indexer. -pub(crate) fn get_build_info() -> BuildInfo { - BuildInfo { - commit_hash: env!("GIT_COMMIT").to_string(), - branch: env!("BRANCH").to_string(), - build_date: env!("BUILD_DATE").to_string(), - build_user: env!("BUILD_USER").to_string(), - version: env!("VERSION").to_string(), - } -} diff --git a/zaino-state/CHANGELOG.md b/zaino-state/CHANGELOG.md new file mode 100644 index 000000000..009f5ec4c --- /dev/null +++ b/zaino-state/CHANGELOG.md @@ -0,0 +1,40 @@ +# Changelog +All notable changes to this library will be documented in this file. 
+ +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this library adheres to Rust's notion of +[Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +### Added +- `rpc::grpc::service.rs`, `backends::fetch::get_taddress_transactions`: + - these functions implement the GetTaddressTransactions GRPC method of + lightclient-protocol v0.4.0 which replaces `GetTaddressTxids` +- `chain_index` + - `::finalised_state::db::v0::get_compact_block_stream` + - `::finalised_state::db::v1::get_compact_block_stream` + - `::types::db::legacy`: + - `compact_vin` + - `compact_vout` + - `to_compact`: returns a compactTx from TxInCompact +- `local_cache::compact_block_with_pool_types` +### Changed +- `get_mempool_tx` now takes `GetMempoolTxRequest` as parameter +- `chain_index::finalised_state` + - `::db` + - `::v0` + - `get_compact_block` now takes a `PoolTypeFilter` parameter + - `::v1` + - `get_compact_block` now takes a `PoolTypeFilter` parameter + - `::reader`: + - `get_compact_block` now takes a `PoolTypeFilter` parameter +- `chain_index::types::db::legacy`: + - `to_compact_block()`: now returns transparent data + +### Deprecated +- `GetTaddressTxids` is replaced by `GetTaddressTransactions` + +### Removed +- `Ping` for GRPC service +- `utils::blockid_to_hashorheight` moved to `zaino_proto::utils` diff --git a/zaino-state/Cargo.toml b/zaino-state/Cargo.toml index e0c1421f5..7e7e055ff 100644 --- a/zaino-state/Cargo.toml +++ b/zaino-state/Cargo.toml @@ -1,12 +1,89 @@ [package] name = "zaino-state" description = "A mempool and chain-fetching service built on top of zebra's ReadStateService and TrustedChainSync." 
-edition = { workspace = true } authors = { workspace = true } -license = { workspace = true } repository = { workspace = true } +homepage = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +version = { workspace = true } + +[features] +default = [] + +# test_dependencies - Exposes internal functionality for testing. +test_dependencies = [] + +# **Experimental and alpha features** +# Exposes the **complete** set of experimental / alpha features currently implemented in Zaino. +experimental_features = ["transparent_address_history_experimental"] + +# Activates transparent address history capability in zaino +# +# NOTE: currently this is only implemented in the finalised state. +transparent_address_history_experimental = [] [dependencies] +zaino-common = { workspace = true } +zaino-fetch = { workspace = true } +zaino-proto = { workspace = true } + +# LibRustZcash +zcash_address = { workspace = true } +zcash_keys = { workspace = true } +zcash_primitives = { workspace = true } +zcash_protocol = { workspace = true } +zcash_transparent = { workspace = true } +sapling-crypto = "0.5.0" + +# Zebra +zebra-chain = { workspace = true } +zebra-state = { workspace = true } +zebra-rpc = { workspace = true } + +# Tracing +tracing = { workspace = true } + +# Documentation +simple-mermaid = "0.2.0" + # Miscellaneous Workspace tokio = { workspace = true, features = ["full"] } thiserror = { workspace = true } +tower = { workspace = true, features = ["buffer", "util"] } +async-trait = { workspace = true } +chrono = { workspace = true } +indexmap = { workspace = true } +hex = { workspace = true, features = ["serde"] } +tokio-stream = { workspace = true } +futures = { workspace = true } +tonic = { workspace = true } +dashmap = { workspace = true } +lmdb = { workspace = true } +lmdb-sys = { workspace = true } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true, features = ["preserve_order"] } +prost = { workspace = true 
} +primitive-types = { workspace = true } +blake2 = { workspace = true } +sha2 = { workspace = true } +core2 = { workspace = true } +bs58 = { workspace = true } +nonempty = { workspace = true } +arc-swap = { workspace = true } +reqwest.workspace = true +bitflags = { workspace = true } +derive_more = { workspace = true, features = ["from"] } + +[dev-dependencies] +tempfile = { workspace = true } +tracing-subscriber = { workspace = true } +once_cell = { workspace = true } +zebra-chain = { workspace = true, features = ["proptest-impl"] } +proptest.workspace = true +incrementalmerkletree = "*" +rand = "0.8.5" + +[build-dependencies] +whoami = { workspace = true } +cargo-lock = { workspace = true } diff --git a/zaino-state/build.rs b/zaino-state/build.rs new file mode 100644 index 000000000..bf30769d2 --- /dev/null +++ b/zaino-state/build.rs @@ -0,0 +1,77 @@ +use std::env; +use std::fs; +use std::io; +use std::path::Path; +use std::process::Command; +use std::str::FromStr as _; +use std::time::{SystemTime, UNIX_EPOCH}; + +use cargo_lock::package::GitReference; +use cargo_lock::package::SourceKind; +use cargo_lock::Lockfile; + +fn main() -> io::Result<()> { + // Fetch the commit hash + let commit_hash = Command::new("git") + .args(["rev-parse", "HEAD"]) + .output() + .expect("Failed to get commit hash") + .stdout; + let commit_hash = String::from_utf8(commit_hash).expect("Invalid UTF-8 sequence"); + println!("cargo:rustc-env=GIT_COMMIT={}", commit_hash.trim()); + + // Fetch the current branch + let branch = Command::new("git") + .args(["rev-parse", "--abbrev-ref", "HEAD"]) + .output() + .expect("Failed to get branch") + .stdout; + let branch = String::from_utf8(branch).expect("Invalid UTF-8 sequence"); + println!("cargo:rustc-env=BRANCH={}", branch.trim()); + + // Set the build date + // SOURCE_DATE_EPOCH can be used to set system time to a desired value + // which is important for achieving determinism. 
More details can be found + // at https://reproducible-builds.org/docs/source-date-epoch/ + let build_date = match env::var("SOURCE_DATE_EPOCH") { + Ok(s) => s.trim().to_string(), + Err(_) => SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs() + .to_string(), + }; + + println!("cargo:rustc-env=BUILD_DATE={}", build_date); + + // Set the build user + let build_user = whoami::username(); + println!("cargo:rustc-env=BUILD_USER={build_user}"); + + // Set the version from Cargo.toml + let version = env::var("CARGO_PKG_VERSION").expect("Failed to get version from Cargo.toml"); + println!("cargo:rustc-env=VERSION={version}"); + let lockfile = Lockfile::load("../Cargo.lock").expect("build script cannot load lockfile"); + let maybe_zebra_rev = lockfile.packages.iter().find_map(|package| { + if package.name == cargo_lock::Name::from_str("zebra-chain").unwrap() { + package + .source + .as_ref() + .and_then(|source_id| match source_id.kind() { + SourceKind::Git(GitReference::Rev(rev)) => Some(rev), + _ => None, + }) + } else { + None + } + }); + let out_dir = env::var_os("OUT_DIR").unwrap(); + let dest_path = Path::new(&out_dir).join("zebraversion.rs"); + fs::write( + &dest_path, + format!("const ZEBRA_VERSION: Option<&'static str> = {maybe_zebra_rev:?};"), + ) + .unwrap(); + + Ok(()) +} diff --git a/zaino-state/src/backends.rs b/zaino-state/src/backends.rs new file mode 100644 index 000000000..a599b771e --- /dev/null +++ b/zaino-state/src/backends.rs @@ -0,0 +1,5 @@ +//! Zaino's chain fetch and tx submission backend services. + +pub mod fetch; + +pub mod state; diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs new file mode 100644 index 000000000..95c186546 --- /dev/null +++ b/zaino-state/src/backends/fetch.rs @@ -0,0 +1,1800 @@ +//! Zcash chain fetch and tx submission service backed by zcashds JsonRPC service. 
+ +use futures::StreamExt; +use hex::FromHex; +use std::{io::Cursor, time}; +use tokio::{sync::mpsc, time::timeout}; +use tonic::async_trait; +use tracing::{info, warn}; +use zebra_state::HashOrHeight; + +use zebra_chain::{ + block::Height, serialization::ZcashDeserialize as _, subtree::NoteCommitmentSubtreeIndex, +}; +use zebra_rpc::{ + client::{ + GetAddressBalanceRequest, GetSubtreesByIndexResponse, GetTreestateResponse, + ValidateAddressResponse, + }, + methods::{ + AddressBalance, GetAddressTxIdsRequest, GetAddressUtxos, GetBlock, GetBlockHashResponse, + GetBlockchainInfoResponse, GetInfo, GetRawTransaction, SentTransactionHash, + ValidateAddresses as _, + }, +}; + +use zaino_fetch::{ + chain::{transaction::FullTransaction, utils::ParseFromSlice}, + jsonrpsee::{ + connector::{JsonRpSeeConnector, RpcError}, + response::{ + address_deltas::{GetAddressDeltasParams, GetAddressDeltasResponse}, + block_deltas::BlockDeltas, + block_header::GetBlockHeader, + block_subsidy::GetBlockSubsidy, + mining_info::GetMiningInfoWire, + peer_info::GetPeerInfo, + GetMempoolInfoResponse, GetNetworkSolPsResponse, + }, + }, +}; + +use zaino_proto::proto::{ + compact_formats::CompactBlock, + service::{ + AddressList, Balance, BlockId, BlockRange, Duration, GetAddressUtxosArg, + GetAddressUtxosReply, GetAddressUtxosReplyList, GetMempoolTxRequest, LightdInfo, + PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, + TxFilter, + }, + utils::{ + blockid_to_hashorheight, compact_block_to_nullifiers, GetBlockRangeError, PoolTypeFilter, + ValidatedBlockRangeRequest, + }, +}; + +use crate::{ + ChainIndex, NodeBackedChainIndex, + NodeBackedChainIndexSubscriber, +}; +#[allow(deprecated)] +use crate::{ + chain_index::{source::ValidatorConnector, types}, + config::FetchServiceConfig, + error::FetchServiceError, + indexer::{ + handle_raw_transaction, IndexerSubscriber, LightWalletIndexer, ZcashIndexer, ZcashService, + }, + status::{Status, StatusType}, + stream::{ + 
AddressStream, CompactBlockStream, CompactTransactionStream, RawTransactionStream, + UtxoReplyStream, + }, + utils::{get_build_info, ServiceMetadata}, + BackendType, +}; + +/// Chain fetch service backed by Zcashd's JsonRPC engine. +/// +/// This service is a central service, [`FetchServiceSubscriber`] should be created to fetch data. +/// This is done to enable large numbers of concurrent subscribers without significant slowdowns. +/// +/// NOTE: We currently do not implement clone for chain fetch services as this service is responsible for maintaining and closing its child processes. +/// ServiceSubscribers are used to create separate chain fetch processes while allowing central state processes to be managed in a single place. +/// If we want the ability to clone Service all JoinHandle's should be converted to Arc\. +#[derive(Debug)] +#[deprecated = "Will be eventually replaced by `BlockchainSource`"] +pub struct FetchService { + /// JsonRPC Client. + fetcher: JsonRpSeeConnector, + /// Core indexer. + indexer: NodeBackedChainIndex, + /// Service metadata. + data: ServiceMetadata, + + /// StateService config data. + #[allow(deprecated)] + config: FetchServiceConfig, +} + +#[allow(deprecated)] +impl Status for FetchService { + fn status(&self) -> StatusType { + self.indexer.status() + } +} + +#[async_trait] +#[allow(deprecated)] +impl ZcashService for FetchService { + const BACKEND_TYPE: BackendType = BackendType::Fetch; + + type Subscriber = FetchServiceSubscriber; + type Config = FetchServiceConfig; + + /// Initializes a new FetchService instance and starts sync process. 
+ async fn spawn(config: FetchServiceConfig) -> Result { + info!("Launching Chain Fetch Service.."); + + let fetcher = JsonRpSeeConnector::new_from_config_parts( + &config.validator_rpc_address, + config.validator_rpc_user.clone(), + config.validator_rpc_password.clone(), + config.validator_cookie_path.clone(), + ) + .await?; + + let zebra_build_data = fetcher.get_info().await?; + let data = ServiceMetadata::new( + get_build_info(), + config.network.to_zebra_network(), + zebra_build_data.build, + zebra_build_data.subversion, + ); + info!("Using Zcash build: {}", data); + + let source = ValidatorConnector::Fetch(fetcher.clone()); + let indexer = NodeBackedChainIndex::new(source, config.clone().into()) + .await + .unwrap(); + + let fetch_service = Self { + fetcher, + indexer, + data, + config, + }; + + // wait for sync to complete, return error on sync fail. + loop { + match fetch_service.status() { + StatusType::Ready | StatusType::Closing => break, + StatusType::CriticalError => { + return Err(FetchServiceError::Critical( + "ChainIndex initial sync failed, check full log for details.".to_string(), + )); + } + _ => { + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + } + } + } + + Ok(fetch_service) + } + + /// Returns a [`FetchServiceSubscriber`]. + fn get_subscriber(&self) -> IndexerSubscriber { + IndexerSubscriber::new(FetchServiceSubscriber { + fetcher: self.fetcher.clone(), + indexer: self.indexer.subscriber(), + data: self.data.clone(), + config: self.config.clone(), + }) + } + + /// Shuts down the StateService. + fn close(&mut self) { + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { + let _ = self.indexer.shutdown().await; + }); + }); + } +} + +#[allow(deprecated)] +impl Drop for FetchService { + fn drop(&mut self) { + self.close() + } +} + +/// A fetch service subscriber. 
+/// +/// Subscribers should be +#[derive(Debug, Clone)] +#[allow(deprecated)] +pub struct FetchServiceSubscriber { + /// JsonRPC Client. + pub fetcher: JsonRpSeeConnector, + /// Core indexer. + pub indexer: NodeBackedChainIndexSubscriber, + /// Service metadata. + pub data: ServiceMetadata, + /// StateService config data. + #[allow(deprecated)] + config: FetchServiceConfig, +} + +impl Status for FetchServiceSubscriber { + fn status(&self) -> StatusType { + self.indexer.status() + } +} + +impl FetchServiceSubscriber { + /// Fetches the current status + #[deprecated(note = "Use the Status trait method instead")] + pub fn get_status(&self) -> StatusType { + self.indexer.status() + } + + /// Returns the network type running. + #[allow(deprecated)] + pub fn network(&self) -> zaino_common::Network { + self.config.network + } +} + +#[async_trait] +#[allow(deprecated)] +impl ZcashIndexer for FetchServiceSubscriber { + type Error = FetchServiceError; + + /// Returns information about all changes to the given transparent addresses within the given inclusive block-height range. + /// + /// Defaults: if start or end are not specified, they default to 0. + /// + /// ### Normalization + /// + /// - If start is greater than the latest block height (tip), start is set to the tip. + /// - If end is 0 or greater than the tip, end is set to the tip. + /// + /// ### Validation + /// + /// If the resulting start is greater than end, the call fails with an error. + /// (Thus, [tip, tip] is valid and returns only the tip block.) + /// + /// [Original zcashd implementation](https://github.com/zcash/zcash/blob/18238d90cd0b810f5b07d5aaa1338126aa128c06/src/rpc/misc.cpp#L881) + /// + /// zcashd reference: [`getaddressdeltas`](https://zcash.github.io/rpc/getaddressdeltas.html) + /// method: post + /// tags: address + async fn get_address_deltas( + &self, + params: GetAddressDeltasParams, + ) -> Result { + Ok(self.fetcher.get_address_deltas(params).await?) 
+ } + + /// Returns software information from the RPC server, as a [`GetInfo`] JSON struct. + /// + /// zcashd reference: [`getinfo`](https://zcash.github.io/rpc/getinfo.html) + /// method: post + /// tags: control + /// + /// # Notes + /// + /// [The zcashd reference](https://zcash.github.io/rpc/getinfo.html) might not show some fields + /// in Zebra's [`GetInfo`]. Zebra uses the field names and formats from the + /// [zcashd code](https://github.com/zcash/zcash/blob/v4.6.0-1/src/rpc/misc.cpp#L86-L87). + async fn get_info(&self) -> Result { + Ok(self.fetcher.get_info().await?.into()) + } + + /// Returns blockchain state information, as a [`GetBlockchainInfoResponse`] JSON struct. + /// + /// zcashd reference: [`getblockchaininfo`](https://zcash.github.io/rpc/getblockchaininfo.html) + /// method: post + /// tags: blockchain + /// + /// # Notes + /// + /// Some fields from the zcashd reference are missing from Zebra's [`GetBlockchainInfoResponse`]. It only contains the fields + /// [required for lightwalletd support.](https://github.com/zcash/lightwalletd/blob/v0.4.9/common/common.go#L72-L89) + async fn get_blockchain_info(&self) -> Result { + Ok(self + .fetcher + .get_blockchain_info() + .await? + .try_into() + .map_err(|_e| { + FetchServiceError::SerializationError( + zebra_chain::serialization::SerializationError::Parse( + "chainwork not hex-encoded integer", + ), + ) + })?) + } + + /// Returns details on the active state of the TX memory pool. + /// + /// online zcash rpc reference: [`getmempoolinfo`](https://zcash.github.io/rpc/getmempoolinfo.html) + /// method: post + /// tags: mempool + /// + /// Canonical source code implementation: [`getmempoolinfo`](https://github.com/zcash/zcash/blob/18238d90cd0b810f5b07d5aaa1338126aa128c06/src/rpc/blockchain.cpp#L1555) + /// + /// Zebra does not support this RPC call directly. 
+ async fn get_mempool_info(&self) -> Result { + Ok(self.indexer.get_mempool_info().await.into()) + } + + async fn get_peer_info(&self) -> Result { + Ok(self.fetcher.get_peer_info().await?) + } + + /// Returns the proof-of-work difficulty as a multiple of the minimum difficulty. + /// + /// zcashd reference: [`getdifficulty`](https://zcash.github.io/rpc/getdifficulty.html) + /// method: post + /// tags: blockchain + async fn get_difficulty(&self) -> Result { + Ok(self.fetcher.get_difficulty().await?.0) + } + + async fn get_block_subsidy(&self, height: u32) -> Result { + Ok(self.fetcher.get_block_subsidy(height).await?) + } + + /// Returns the total balance of a provided `addresses` in an [`AddressBalance`] instance. + /// + /// zcashd reference: [`getaddressbalance`](https://zcash.github.io/rpc/getaddressbalance.html) + /// method: post + /// tags: address + /// + /// # Parameters + /// + /// - `address_strings`: (object, example={"addresses": ["tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ"]}) A JSON map with a single entry + /// - `addresses`: (array of strings) A list of base-58 encoded addresses. + /// + /// # Notes + /// + /// zcashd also accepts a single string parameter instead of an array of strings, but Zebra + /// doesn't because lightwalletd always calls this RPC with an array of addresses. + /// + /// zcashd also returns the total amount of Zatoshis received by the addresses, but Zebra + /// doesn't because lightwalletd doesn't use that information. + /// + /// The RPC documentation says that the returned object has a string `balance` field, but + /// zcashd actually [returns an + /// integer](https://github.com/zcash/lightwalletd/blob/bdaac63f3ee0dbef62bde04f6817a9f90d483b00/common/common.go#L128-L130). 
+ async fn z_get_address_balance( + &self, + address_strings: GetAddressBalanceRequest, + ) -> Result { + Ok(self + .fetcher + .get_address_balance( + address_strings + .valid_addresses() + .map_err(|error| { + FetchServiceError::RpcError(RpcError { + code: error.code() as i64, + message: "Invalid address provided".to_string(), + data: None, + }) + })? + .into_iter() + .map(|address| address.to_string()) + .collect(), + ) + .await? + .into()) + } + + /// Sends the raw bytes of a signed transaction to the local node's mempool, if the transaction is valid. + /// Returns the [`SentTransactionHash`] for the transaction, as a JSON string. + /// + /// zcashd reference: [`sendrawtransaction`](https://zcash.github.io/rpc/sendrawtransaction.html) + /// method: post + /// tags: transaction + /// + /// # Parameters + /// + /// - `raw_transaction_hex`: (string, required, example="signedhex") The hex-encoded raw transaction bytes. + /// + /// # Notes + /// + /// zcashd accepts an optional `allowhighfees` parameter. Zebra doesn't support this parameter, + /// because lightwalletd doesn't use it. + async fn send_raw_transaction( + &self, + raw_transaction_hex: String, + ) -> Result { + Ok(self + .fetcher + .send_raw_transaction(raw_transaction_hex) + .await? + .into()) + } + + /// Returns the requested block by hash or height, as a [`GetBlock`] JSON string. + /// If the block is not in Zebra's state, returns + /// [error code `-8`.](https://github.com/zcash/zcash/issues/5758) if a height was + /// passed or -5 if a hash was passed. + /// + /// zcashd reference: [`getblock`](https://zcash.github.io/rpc/getblock.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `hash_or_height`: (string, required, example="1") The hash or height for the block to be returned. + /// - `verbosity`: (number, optional, default=1, example=1) 0 for hex encoded data, 1 for a json object, and 2 for json object with transaction data. 
+ /// + /// # Notes + /// + /// Zebra previously partially supported verbosity=1 by returning only the + /// fields required by lightwalletd ([`lightwalletd` only reads the `tx` + /// field of the result](https://github.com/zcash/lightwalletd/blob/dfac02093d85fb31fb9a8475b884dd6abca966c7/common/common.go#L152)). + /// That verbosity level was migrated to "3"; so while lightwalletd will + /// still work by using verbosity=1, it will sync faster if it is changed to + /// use verbosity=3. + /// + /// The undocumented `chainwork` field is not returned. + async fn z_get_block( + &self, + hash_or_height: String, + verbosity: Option, + ) -> Result { + Ok(self + .fetcher + .get_block(hash_or_height, verbosity) + .await? + .try_into()?) + } + + /// Returns information about the given block and its transactions. + /// + /// zcashd reference: [`getblockdeltas`](https://zcash.github.io/rpc/getblockdeltas.html) + /// method: post + /// tags: blockchain + /// + /// Note: This method has only been implemented in `zcashd`. Zebra has no intention of supporting it. + async fn get_block_deltas(&self, hash: String) -> Result { + Ok(self.fetcher.get_block_deltas(hash).await?) + } + + async fn get_block_header( + &self, + hash: String, + verbose: bool, + ) -> Result { + Ok(self.fetcher.get_block_header(hash, verbose).await?) + } + + async fn get_mining_info(&self) -> Result { + Ok(self.fetcher.get_mining_info().await?) + } + + /// Returns the hash of the best block (tip) of the longest chain. + /// online zcashd reference: [`getbestblockhash`](https://zcash.github.io/rpc/getbestblockhash.html) + /// The zcashd doc reference above says there are no parameters and the result is a "hex" (string) of the block hash hex encoded. + /// method: post + /// tags: blockchain + /// Return the hex encoded hash of the best (tip) block, in the longest block chain. + /// + /// # Parameters + /// + /// No request parameters. + /// + /// # Notes + /// + /// Return should be valid hex encoded. 
+ /// + /// The Zcash source code is considered canonical: + /// [In the rpc definition](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/common.h#L48) there are no required params, or optional params. + /// [The function in rpc/blockchain.cpp](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/blockchain.cpp#L325) + /// where `return chainActive.Tip()->GetBlockHash().GetHex();` is the [return expression](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/blockchain.cpp#L339)returning a `std::string` + async fn get_best_blockhash(&self) -> Result { + Ok(self.fetcher.get_best_blockhash().await?.into()) + } + + /// Returns the current block count in the best valid block chain. + /// + /// zcashd reference: [`getblockcount`](https://zcash.github.io/rpc/getblockcount.html) + /// method: post + /// tags: blockchain + async fn get_block_count(&self) -> Result { + Ok(self.fetcher.get_block_count().await?.into()) + } + + /// Return information about the given Zcash address. + /// + /// # Parameters + /// - `address`: (string, required, example="tmHMBeeYRuc2eVicLNfP15YLxbQsooCA6jb") The Zcash transparent address to validate. + /// + /// zcashd reference: [`validateaddress`](https://zcash.github.io/rpc/validateaddress.html) + /// method: post + /// tags: blockchain + async fn validate_address( + &self, + address: String, + ) -> Result { + Ok(self.fetcher.validate_address(address).await?) + } + + /// Returns all transaction ids in the memory pool, as a JSON array. + /// + /// zcashd reference: [`getrawmempool`](https://zcash.github.io/rpc/getrawmempool.html) + /// method: post + /// tags: blockchain + async fn get_raw_mempool(&self) -> Result, Self::Error> { + // Ok(self.fetcher.get_raw_mempool().await?.transactions) + Ok(self + .indexer + .get_mempool_txids() + .await? 
+ .iter() + .map(|txid| txid.to_string()) + .collect()) + } + + /// Returns information about the given block's Sapling & Orchard tree state. + /// + /// zcashd reference: [`z_gettreestate`](https://zcash.github.io/rpc/z_gettreestate.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `hash | height`: (string, required, example="00000000febc373a1da2bd9f887b105ad79ddc26ac26c2b28652d64e5207c5b5") The block hash or height. + /// + /// # Notes + /// + /// The zcashd doc reference above says that the parameter "`height` can be + /// negative where -1 is the last known valid block". On the other hand, + /// `lightwalletd` only uses positive heights, so Zebra does not support + /// negative heights. + async fn z_get_treestate( + &self, + hash_or_height: String, + ) -> Result { + Ok(self + .fetcher + .get_treestate(hash_or_height) + .await? + .try_into()?) + } + + /// Returns information about a range of Sapling or Orchard subtrees. + /// + /// zcashd reference: [`z_getsubtreesbyindex`](https://zcash.github.io/rpc/z_getsubtreesbyindex.html) - TODO: fix link + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `pool`: (string, required) The pool from which subtrees should be returned. Either "sapling" or "orchard". + /// - `start_index`: (number, required) The index of the first 2^16-leaf subtree to return. + /// - `limit`: (number, optional) The maximum number of subtree values to return. + /// + /// # Notes + /// + /// While Zebra is doing its initial subtree index rebuild, subtrees will become available + /// starting at the chain tip. This RPC will return an empty list if the `start_index` subtree + /// exists, but has not been rebuilt yet. This matches `zcashd`'s behaviour when subtrees aren't + /// available yet. (But `zcashd` does its rebuild before syncing any blocks.) 
+ async fn z_get_subtrees_by_index( + &self, + pool: String, + start_index: NoteCommitmentSubtreeIndex, + limit: Option, + ) -> Result { + Ok(self + .fetcher + .get_subtrees_by_index(pool, start_index.0, limit.map(|limit_index| limit_index.0)) + .await? + .into()) + } + + /// Returns the raw transaction data, as a [`GetRawTransaction`] JSON string or structure. + /// + /// zcashd reference: [`getrawtransaction`](https://zcash.github.io/rpc/getrawtransaction.html) + /// method: post + /// tags: transaction + /// + /// # Parameters + /// + /// - `txid`: (string, required, example="mytxid") The transaction ID of the transaction to be returned. + /// - `verbose`: (number, optional, default=0, example=1) If 0, return a string of hex-encoded data, otherwise return a JSON object. + /// + /// # Notes + /// + /// We don't currently support the `blockhash` parameter since lightwalletd does not + /// use it. + /// + /// In verbose mode, we only expose the `hex` and `height` fields since + /// lightwalletd uses only those: + /// + async fn get_raw_transaction( + &self, + txid_hex: String, + verbose: Option, + ) -> Result { + Ok(self + .fetcher + .get_raw_transaction(txid_hex, verbose) + .await? + .into()) + } + + async fn chain_height(&self) -> Result { + Ok(Height( + self.indexer.snapshot_nonfinalized_state().best_tip.height.0, + )) + } + /// Returns the transaction ids made by the provided transparent addresses. + /// + /// zcashd reference: [`getaddresstxids`](https://zcash.github.io/rpc/getaddresstxids.html) + /// method: post + /// tags: address + /// + /// # Parameters + /// + /// - `request`: (object, required, example={\"addresses\": [\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"], \"start\": 1000, \"end\": 2000}) A struct with the following named fields: + /// - `addresses`: (json array of string, required) The addresses to get transactions from. + /// - `start`: (numeric, required) The lower height to start looking for transactions (inclusive). 
+ /// - `end`: (numeric, required) The top height to stop looking for transactions (inclusive). + /// + /// # Notes + /// + /// Only the multi-argument format is used by lightwalletd and this is what we currently support: + /// + async fn get_address_tx_ids( + &self, + request: GetAddressTxIdsRequest, + ) -> Result, Self::Error> { + let (addresses, start, end) = request.into_parts(); + Ok(self + .fetcher + .get_address_txids(addresses, start, end) + .await? + .transactions) + } + + /// Returns all unspent outputs for a list of addresses. + /// + /// zcashd reference: [`getaddressutxos`](https://zcash.github.io/rpc/getaddressutxos.html) + /// method: post + /// tags: address + /// + /// # Parameters + /// + /// - `addresses`: (array, required, example={\"addresses\": [\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"]}) The addresses to get outputs from. + /// + /// # Notes + /// + /// lightwalletd always uses the multi-address request, without chaininfo: + /// + async fn z_get_address_utxos( + &self, + addresses: GetAddressBalanceRequest, + ) -> Result, Self::Error> { + Ok(self + .fetcher + .get_address_utxos( + addresses + .valid_addresses() + .map_err(|error| { + FetchServiceError::RpcError(RpcError { + code: error.code() as i64, + message: "Invalid address provided".to_string(), + data: None, + }) + })? + .into_iter() + .map(|address| address.to_string()) + .collect(), + ) + .await? + .into_iter() + .map(|utxos| utxos.into()) + .collect()) + } + + /// Returns the estimated network solutions per second based on the last n blocks. + /// + /// zcashd reference: [`getnetworksolps`](https://zcash.github.io/rpc/getnetworksolps.html) + /// method: post + /// tags: blockchain + /// + /// This RPC is implemented in the [mining.cpp](https://github.com/zcash/zcash/blob/d00fc6f4365048339c83f463874e4d6c240b63af/src/rpc/mining.cpp#L104) + /// file of the Zcash repository. 
The Zebra implementation can be found [here](https://github.com/ZcashFoundation/zebra/blob/19bca3f1159f9cb9344c9944f7e1cb8d6a82a07f/zebra-rpc/src/methods.rs#L2687). + /// + /// # Parameters + /// + /// - `blocks`: (number, optional, default=120) Number of blocks, or -1 for blocks over difficulty averaging window. + /// - `height`: (number, optional, default=-1) To estimate network speed at the time of a specific block height. + async fn get_network_sol_ps( + &self, + blocks: Option, + height: Option, + ) -> Result { + Ok(self.fetcher.get_network_sol_ps(blocks, height).await?) + } +} + +#[async_trait] +#[allow(deprecated)] +impl LightWalletIndexer for FetchServiceSubscriber { + /// Return the height of the tip of the best chain + async fn get_latest_block(&self) -> Result { + let tip = self.indexer.snapshot_nonfinalized_state().best_tip; + // dbg!(&tip); + + Ok(BlockId { + height: tip.height.0 as u64, + hash: tip.blockhash.0.to_vec(), + }) + } + + /// Return the compact block corresponding to the given block identifier + async fn get_block(&self, request: BlockId) -> Result { + let hash_or_height = blockid_to_hashorheight(request).ok_or( + FetchServiceError::TonicStatusError(tonic::Status::invalid_argument( + "Error: Invalid hash and/or height out of range. Failed to convert to u32.", + )), + )?; + + let snapshot = self.indexer.snapshot_nonfinalized_state(); + let height = match hash_or_height { + HashOrHeight::Height(height) => height.0, + HashOrHeight::Hash(hash) => { + match self.indexer.get_block_height(&snapshot, hash.into()).await { + Ok(Some(height)) => height.0, + Ok(None) => { + return Err(FetchServiceError::TonicStatusError(tonic::Status::invalid_argument( + "Error: Invalid hash and/or height out of range. 
Hash not founf in chain", + ))); + } + Err(_e) => { + return Err(FetchServiceError::TonicStatusError( + tonic::Status::internal("Error: Internal db error."), + )); + } + } + } + }; + + match self + .indexer + .get_compact_block(&snapshot, types::Height(height), PoolTypeFilter::default()) + .await + { + Ok(Some(block)) => Ok(block), + Ok(None) => { + let chain_height = snapshot.best_tip.height.0; + match hash_or_height { + HashOrHeight::Height(Height(height)) if height >= chain_height => Err( + FetchServiceError::TonicStatusError(tonic::Status::out_of_range(format!( + "Error: Height out of range [{hash_or_height}]. Height requested \ + is greater than the best chain tip [{chain_height}].", + ))), + ), + _otherwise => Err(FetchServiceError::TonicStatusError(tonic::Status::unknown( + "Error: Failed to retrieve block from state.", + ))), + } + } + Err(e) => { + let chain_height = snapshot.best_tip.height.0; + match hash_or_height { + HashOrHeight::Height(Height(height)) if height >= chain_height => Err( + FetchServiceError::TonicStatusError(tonic::Status::out_of_range(format!( + "Error: Height out of range [{hash_or_height}]. Height requested \ + is greater than the best chain tip [{chain_height}].", + ))), + ), + _otherwise => + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + { + Err(FetchServiceError::TonicStatusError(tonic::Status::unknown( + format!("Error: Failed to retrieve block from node. Server Error: {e}",), + ))) + } + } + } + } + } + + /// Same as GetBlock except actions contain only nullifiers + /// + /// NOTE: Currently this only returns Orchard nullifiers to follow Lightwalletd functionality but Sapling could be added if required by wallets. + async fn get_block_nullifiers(&self, request: BlockId) -> Result { + let hash_or_height = blockid_to_hashorheight(request).ok_or( + FetchServiceError::TonicStatusError(tonic::Status::invalid_argument( + "Error: Invalid hash and/or height out of range. 
Failed to convert to u32.", + )), + )?; + let snapshot = self.indexer.snapshot_nonfinalized_state(); + let height = match hash_or_height { + HashOrHeight::Height(height) => height.0, + HashOrHeight::Hash(hash) => { + match self.indexer.get_block_height(&snapshot, hash.into()).await { + Ok(Some(height)) => height.0, + Ok(None) => { + return Err(FetchServiceError::TonicStatusError(tonic::Status::invalid_argument( + "Error: Invalid hash and/or height out of range. Hash not founf in chain", + ))); + } + Err(_e) => { + return Err(FetchServiceError::TonicStatusError( + tonic::Status::internal("Error: Internal db error."), + )); + } + } + } + }; + match self + .indexer + .get_compact_block(&snapshot, types::Height(height), PoolTypeFilter::default()) + .await + { + Ok(Some(block)) => Ok(compact_block_to_nullifiers(block)), + Ok(None) => { + let chain_height = snapshot.best_tip.height.0; + match hash_or_height { + HashOrHeight::Height(Height(height)) if height >= chain_height => Err( + FetchServiceError::TonicStatusError(tonic::Status::out_of_range(format!( + "Error: Height out of range [{hash_or_height}]. Height requested \ + is greater than the best chain tip [{chain_height}].", + ))), + ), + HashOrHeight::Height(height) + if height > self.data.network().sapling_activation_height() => + { + Err(FetchServiceError::TonicStatusError( + tonic::Status::out_of_range(format!( + "Error: Height out of range [{hash_or_height}]. Height requested \ + is below sapling activation height [{chain_height}].", + )), + )) + } + _otherwise => Err(FetchServiceError::TonicStatusError(tonic::Status::unknown( + "Error: Failed to retrieve block from state.", + ))), + } + } + Err(e) => { + let chain_height = snapshot.best_tip.height.0; + match hash_or_height { + HashOrHeight::Height(Height(height)) if height >= chain_height => Err( + FetchServiceError::TonicStatusError(tonic::Status::out_of_range(format!( + "Error: Height out of range [{hash_or_height}]. 
Height requested \ + is greater than the best chain tip [{chain_height}].", + ))), + ), + HashOrHeight::Height(height) + if height > self.data.network().sapling_activation_height() => + { + Err(FetchServiceError::TonicStatusError( + tonic::Status::out_of_range(format!( + "Error: Height out of range [{hash_or_height}]. Height requested \ + is below sapling activation height [{chain_height}].", + )), + )) + } + _otherwise => + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + { + Err(FetchServiceError::TonicStatusError(tonic::Status::unknown( + format!("Error: Failed to retrieve block from node. Server Error: {e}",), + ))) + } + } + } + } + } + + /// Return a list of consecutive compact blocks + #[allow(deprecated)] + async fn get_block_range( + &self, + request: BlockRange, + ) -> Result { + let validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) + .map_err(FetchServiceError::from)?; + + let pool_type_filter = PoolTypeFilter::new_from_pool_types(&validated_request.pool_types()) + .map_err(GetBlockRangeError::PoolTypeArgumentError) + .map_err(FetchServiceError::from)?; + + // Note conversion here is safe due to the use of [`ValidatedBlockRangeRequest::new_from_block_range`] + let start = validated_request.start() as u32; + let end = validated_request.end() as u32; + + let fetch_service_clone = self.clone(); + let service_timeout = self.config.service.timeout; + let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + + tokio::spawn(async move { + let timeout_result = timeout( + time::Duration::from_secs((service_timeout * 4) as u64), + async { + let snapshot = fetch_service_clone.indexer.snapshot_nonfinalized_state(); + // Use the snapshot tip directly, as this function doesn't support passthrough + let chain_height = snapshot.best_tip.height.0; + + match fetch_service_clone + .indexer + .get_compact_block_stream( + &snapshot, + types::Height(start), + 
types::Height(end), + pool_type_filter.clone(), + ) + .await + { + Ok(Some(mut compact_block_stream)) => { + while let Some(stream_item) = compact_block_stream.next().await { + if channel_tx.send(stream_item).await.is_err() { + break; + } + } + } + Ok(None) => { + // Per `get_compact_block_stream` semantics: `None` means at least one bound is above the tip. + let offending_height = if start > chain_height { start } else { end }; + + match channel_tx + .send(Err(tonic::Status::out_of_range(format!( + "Error: Height out of range [{offending_height}]. Height requested is greater than the best chain tip [{chain_height}].", + )))) + .await + { + Ok(_) => {} + Err(e) => { + warn!("GetBlockRange channel closed unexpectedly: {}", e); + } + } + } + Err(e) => { + // Preserve previous behaviour: if the request is above tip, surface OutOfRange; + // otherwise return the error (currently exposed for dev). + if start > chain_height || end > chain_height { + let offending_height = if start > chain_height { start } else { end }; + + match channel_tx + .send(Err(tonic::Status::out_of_range(format!( + "Error: Height out of range [{offending_height}]. Height requested is greater than the best chain tip [{chain_height}].", + )))) + .await + { + Ok(_) => {} + Err(e) => { + warn!("GetBlockRange channel closed unexpectedly: {}", e); + } + } + } else { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. 
+ if channel_tx + .send(Err(tonic::Status::unknown(e.to_string()))) + .await + .is_err() + { + warn!("GetBlockRangeStream closed unexpectedly: {}", e); + } + } + } + } + }, + ) + .await; + + if timeout_result.is_err() { + channel_tx + .send(Err(tonic::Status::deadline_exceeded( + "Error: get_block_range gRPC request timed out.", + ))) + .await + .ok(); + } + }); + + Ok(CompactBlockStream::new(channel_rx)) + } + + /// Same as GetBlockRange except actions contain only nullifiers + /// + /// NOTE: Currently this only returns Orchard nullifiers to follow Lightwalletd functionality but Sapling could be added if required by wallets. + #[allow(deprecated)] + async fn get_block_range_nullifiers( + &self, + request: BlockRange, + ) -> Result { + let validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) + .map_err(FetchServiceError::from)?; + + let pool_type_filter = PoolTypeFilter::new_from_pool_types(&validated_request.pool_types()) + .map_err(GetBlockRangeError::PoolTypeArgumentError) + .map_err(FetchServiceError::from)?; + + // Note conversion here is safe due to the use of [`ValidatedBlockRangeRequest::new_from_block_range`] + let start = validated_request.start() as u32; + let end = validated_request.end() as u32; + + let fetch_service_clone = self.clone(); + let service_timeout = self.config.service.timeout; + let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + + tokio::spawn(async move { + let timeout_result = timeout( + time::Duration::from_secs((service_timeout * 4) as u64), + async { + let snapshot = fetch_service_clone.indexer.snapshot_nonfinalized_state(); + + // Use the snapshot tip directly, as this function doesn't support passthrough + let chain_height = snapshot.best_tip.height.0; + + match fetch_service_clone + .indexer + .get_compact_block_stream( + &snapshot, + types::Height(start), + types::Height(end), + pool_type_filter.clone(), + ) + .await + { + Ok(Some(mut compact_block_stream)) => { + 
while let Some(stream_item) = compact_block_stream.next().await { + match stream_item { + Ok(block) => { + if channel_tx + .send(Ok(compact_block_to_nullifiers(block))) + .await + .is_err() + { + break; + } + } + Err(status) => { + if channel_tx.send(Err(status)).await.is_err() { + break; + } + } + } + } + } + Ok(None) => { + // Per `get_compact_block_stream` semantics: `None` means at least one bound is above the tip. + let offending_height = if start > chain_height { start } else { end }; + + match channel_tx + .send(Err(tonic::Status::out_of_range(format!( + "Error: Height out of range [{offending_height}]. Height requested is greater than the best chain tip [{chain_height}].", + )))) + .await + { + Ok(_) => {} + Err(e) => { + warn!("GetBlockRange channel closed unexpectedly: {}", e); + } + } + } + Err(e) => { + // Preserve previous behaviour: if the request is above tip, surface OutOfRange; + // otherwise return the error (currently exposed for dev). + if start > chain_height || end > chain_height { + let offending_height = if start > chain_height { start } else { end }; + + match channel_tx + .send(Err(tonic::Status::out_of_range(format!( + "Error: Height out of range [{offending_height}]. Height requested is greater than the best chain tip [{chain_height}].", + )))) + .await + { + Ok(_) => {} + Err(e) => { + warn!("GetBlockRange channel closed unexpectedly: {}", e); + } + } + } else { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. 
+ if channel_tx + .send(Err(tonic::Status::unknown(e.to_string()))) + .await + .is_err() + { + warn!("GetBlockRangeStream closed unexpectedly: {}", e); + } + } + } + } + }, + ) + .await; + + if timeout_result.is_err() { + channel_tx + .send(Err(tonic::Status::deadline_exceeded( + "Error: get_block_range gRPC request timed out.", + ))) + .await + .ok(); + } + }); + + Ok(CompactBlockStream::new(channel_rx)) + } + + /// Return the requested full (not compact) transaction (as from zcashd) + async fn get_transaction(&self, request: TxFilter) -> Result { + let hash = request.hash; + if hash.len() == 32 { + let reversed_hash = hash.iter().rev().copied().collect::>(); + let hash_hex = hex::encode(reversed_hash); + let tx = self.get_raw_transaction(hash_hex, Some(1)).await?; + + let (hex, height) = if let GetRawTransaction::Object(tx_object) = tx { + (tx_object.hex().clone(), tx_object.height()) + } else { + return Err(FetchServiceError::TonicStatusError( + tonic::Status::not_found("Error: Transaction not received"), + )); + }; + let height: u64 = match height { + Some(h) => h as u64, + // Zebra returns None for mempool transactions, convert to `Mempool Height`. 
+ None => self.indexer.snapshot_nonfinalized_state().best_tip.height.0 as u64, + }; + + Ok(RawTransaction { + data: hex.as_ref().to_vec(), + height, + }) + } else { + Err(FetchServiceError::TonicStatusError( + tonic::Status::invalid_argument("Error: Transaction hash incorrect"), + )) + } + } + + /// Submit the given transaction to the Zcash network + async fn send_transaction(&self, request: RawTransaction) -> Result { + let hex_tx = hex::encode(request.data); + let tx_output = self.send_raw_transaction(hex_tx).await?; + + Ok(SendResponse { + error_code: 0, + error_message: tx_output.hash().to_string(), + }) + } + + // Return the transactions corresponding to the given t-address within the given block range + async fn get_taddress_transactions( + &self, + request: TransparentAddressBlockFilter, + ) -> Result { + let chain_height = self.chain_height().await?; + let txids = self.get_taddress_txids_helper(request).await?; + let fetch_service_clone = self.clone(); + let service_timeout = self.config.service.timeout; + let (transmitter, receiver) = mpsc::channel(self.config.service.channel_size as usize); + tokio::spawn(async move { + let timeout = timeout( + time::Duration::from_secs((service_timeout * 4) as u64), + async { + for txid in txids { + let transaction = + fetch_service_clone.get_raw_transaction(txid, Some(1)).await; + if handle_raw_transaction::( + chain_height.0 as u64, + transaction, + transmitter.clone(), + ) + .await + .is_err() + { + break; + } + } + }, + ) + .await; + match timeout { + Ok(_) => {} + Err(_) => { + transmitter + .send(Err(tonic::Status::internal( + "Error: get_taddress_txids gRPC request timed out", + ))) + .await + .ok(); + } + } + }); + Ok(RawTransactionStream::new(receiver)) + } + + /// Return the txids corresponding to the given t-address within the given block range + /// this function is deprecated: use `get_taddress_transactions` + #[allow(deprecated)] + async fn get_taddress_txids( + &self, + request: 
TransparentAddressBlockFilter, + ) -> Result { + self.get_taddress_transactions(request).await + } + + /// Returns the total balance for a list of taddrs + async fn get_taddress_balance(&self, request: AddressList) -> Result { + let taddrs = GetAddressBalanceRequest::new(request.addresses); + let balance = self.z_get_address_balance(taddrs).await?; + let checked_balance: i64 = match i64::try_from(balance.balance()) { + Ok(balance) => balance, + Err(_) => { + return Err(FetchServiceError::TonicStatusError(tonic::Status::unknown( + "Error: Error converting balance from u64 to i64.", + ))); + } + }; + Ok(Balance { + value_zat: checked_balance, + }) + } + + /// Returns the total balance for a list of taddrs + #[allow(deprecated)] + async fn get_taddress_balance_stream( + &self, + mut request: AddressStream, + ) -> Result { + let fetch_service_clone = self.clone(); + let service_timeout = self.config.service.timeout; + let (channel_tx, mut channel_rx) = + mpsc::channel::(self.config.service.channel_size as usize); + let fetcher_task_handle = tokio::spawn(async move { + let fetcher_timeout = timeout( + time::Duration::from_secs((service_timeout * 4) as u64), + async { + let mut total_balance: u64 = 0; + loop { + match channel_rx.recv().await { + Some(taddr) => { + let taddrs = GetAddressBalanceRequest::new(vec![taddr]); + let balance = + fetch_service_clone.z_get_address_balance(taddrs).await?; + total_balance += balance.balance(); + } + None => { + return Ok(total_balance); + } + } + } + }, + ) + .await; + match fetcher_timeout { + Ok(result) => result, + Err(_) => Err(tonic::Status::deadline_exceeded( + "Error: get_taddress_balance_stream request timed out.", + )), + } + }); + // NOTE: This timeout is so slow due to the blockcache not being implemented. This should be reduced to 30s once functionality is in place. 
+ // TODO: Make [rpc_timout] a configurable system variable with [default = 30s] and [mempool_rpc_timout = 4*rpc_timeout] + let addr_recv_timeout = timeout( + time::Duration::from_secs((service_timeout * 4) as u64), + async { + while let Some(address_result) = request.next().await { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + let address = address_result.map_err(|e| { + tonic::Status::unknown(format!("Failed to read from stream: {e}")) + })?; + if channel_tx.send(address.address).await.is_err() { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + return Err(tonic::Status::unknown( + "Error: Failed to send address to balance task.", + )); + } + } + drop(channel_tx); + Ok::<(), tonic::Status>(()) + }, + ) + .await; + match addr_recv_timeout { + Ok(Ok(())) => {} + Ok(Err(e)) => { + fetcher_task_handle.abort(); + return Err(FetchServiceError::TonicStatusError(e)); + } + Err(_) => { + fetcher_task_handle.abort(); + return Err(FetchServiceError::TonicStatusError( + tonic::Status::deadline_exceeded( + "Error: get_taddress_balance_stream request timed out in address loop.", + ), + )); + } + } + match fetcher_task_handle.await { + Ok(Ok(total_balance)) => { + let checked_balance: i64 = match i64::try_from(total_balance) { + Ok(balance) => balance, + Err(_) => { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + return Err(FetchServiceError::TonicStatusError(tonic::Status::unknown( + "Error: Error converting balance from u64 to i64.", + ))); + } + }; + Ok(Balance { + value_zat: checked_balance, + }) + } + Ok(Err(e)) => Err(FetchServiceError::TonicStatusError(e)), + // TODO: Hide server error from clients before release. Currently useful for dev purposes. 
+ Err(e) => Err(FetchServiceError::TonicStatusError(tonic::Status::unknown( + format!("Fetcher Task failed: {e}"), + ))), + } + } + + /// Returns a stream of the compact transaction representation for transactions + /// currently in the mempool. The results of this operation may be a few + /// seconds out of date. If the `exclude_txid_suffixes` list is empty, + /// return all transactions; otherwise return all *except* those in the + /// `exclude_txid_suffixes` list (if any); this allows the client to avoid + /// receiving transactions that it already has (from an earlier call to this + /// RPC). The transaction IDs in the `exclude_txid_suffixes` list can be + /// shortened to any number of bytes to make the request more + /// bandwidth-efficient; if two or more transactions in the mempool match a + /// txid suffix, none of the matching transactions are excluded. Txid + /// suffixes in the exclude list that don't match any transactions in the + /// mempool are ignored. + #[allow(deprecated)] + async fn get_mempool_tx( + &self, + request: GetMempoolTxRequest, + ) -> Result { + let mut exclude_txids: Vec = vec![]; + + for (i, excluded_id) in request.exclude_txid_suffixes.iter().enumerate() { + if excluded_id.len() > 32 { + return Err(FetchServiceError::TonicStatusError( + tonic::Status::invalid_argument(format!( + "Error: excluded txid {} is larger than 32 bytes", + i + )), + )); + } + + // NOTE: the TransactionHash methods cannot be used for this hex encoding as exclusions could be truncated to less than 32 bytes + let reversed_txid_bytes: Vec = excluded_id.iter().cloned().rev().collect(); + let hex_string_txid: String = hex::encode(&reversed_txid_bytes); + exclude_txids.push(hex_string_txid); + } + + let mempool = self.indexer.clone(); + let service_timeout = self.config.service.timeout; + let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + + tokio::spawn(async move { + let timeout = timeout( + 
time::Duration::from_secs((service_timeout * 4) as u64), + async { + match mempool.get_mempool_transactions(exclude_txids).await { + Ok(transactions) => { + for serialized_transaction_bytes in transactions { + // TODO: This implementation should be cleaned up to not use parse_from_slice. + // This could be done by implementing try_from zebra_chain::transaction::Transaction for CompactTxData, + // (which implements to_compact())letting us avoid double parsing of transaction bytes. + let transaction: zebra_chain::transaction::Transaction = + zebra_chain::transaction::Transaction::zcash_deserialize( + &mut Cursor::new(&serialized_transaction_bytes), + ) + .unwrap(); + // TODO: Check this is in the correct format and does not need hex decoding or reversing. + let txid = transaction.hash().0.to_vec(); + + match ::parse_from_slice( + &serialized_transaction_bytes, + Some(vec![txid]), + None, + ) { + Ok(transaction) => { + // ParseFromSlice returns any data left after the conversion to a + // FullTransaction, If the conversion has succeeded this should be empty. + if transaction.0.is_empty() { + if channel_tx + .send(transaction.1.to_compact(0).map_err(|e| { + tonic::Status::unknown(e.to_string()) + })) + .await + .is_err() + { + break; + } + } else { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + if channel_tx + .send(Err(tonic::Status::unknown("Error: "))) + .await + .is_err() + { + break; + } + } + } + Err(e) => { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. 
+ if channel_tx + .send(Err(tonic::Status::unknown(e.to_string()))) + .await + .is_err() + { + break; + } + } + } + } + } + Err(e) => { + channel_tx + .send(Err(tonic::Status::unknown(e.to_string()))) + .await + .ok(); + } + } + }, + ) + .await; + match timeout { + Ok(_) => {} + Err(_) => { + channel_tx + .send(Err(tonic::Status::internal( + "Error: get_mempool_tx gRPC request timed out", + ))) + .await + .ok(); + } + } + }); + Ok(CompactTransactionStream::new(channel_rx)) + } + + /// Return a stream of current Mempool transactions. This will keep the output stream open while + /// there are mempool transactions. It will close the returned stream when a new block is mined. + #[allow(deprecated)] + async fn get_mempool_stream(&self) -> Result { + let indexer = self.indexer.clone(); + let service_timeout = self.config.service.timeout; + let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + tokio::spawn(async move { + let timeout = timeout( + time::Duration::from_secs((service_timeout * 6) as u64), + async { + let mempool_height = indexer.snapshot_nonfinalized_state().best_tip.height.0; + match indexer.get_mempool_stream(None) { + Some(mut mempool_stream) => { + while let Some(result) = mempool_stream.next().await { + match result { + Ok(transaction_bytes) => { + if channel_tx + .send(Ok(RawTransaction { + data: transaction_bytes, + height: mempool_height as u64, + })) + .await + .is_err() + { + break; + } + } + Err(e) => { + channel_tx + .send(Err(tonic::Status::internal(format!( + "Error in mempool stream: {e:?}" + )))) + .await + .ok(); + break; + } + } + } + } + None => { + warn!("Error fetching stream from mempool, Incorrect chain tip!"); + channel_tx + .send(Err(tonic::Status::internal("Error getting mempool stream"))) + .await + .ok(); + } + }; + }, + ) + .await; + match timeout { + Ok(_) => {} + Err(_) => { + channel_tx + .send(Err(tonic::Status::internal( + "Error: get_mempool_stream gRPC request timed out", + ))) + .await 
+ .ok(); + } + } + }); + Ok(RawTransactionStream::new(channel_rx)) + } + + /// GetTreeState returns the note commitment tree state corresponding to the given block. + /// See section 3.7 of the Zcash protocol specification. It returns several other useful + /// values also (even though they can be obtained using GetBlock). + /// The block can be specified by either height or hash. + async fn get_tree_state(&self, request: BlockId) -> Result { + let chain_info = self.get_blockchain_info().await?; + let hash_or_height = if request.height != 0 { + match u32::try_from(request.height) { + Ok(height) => { + if height > chain_info.blocks().0 { + return Err(FetchServiceError::TonicStatusError(tonic::Status::out_of_range( + format!( + "Error: Height out of range [{}]. Height requested is greater than the best chain tip [{}].", + height, chain_info.blocks().0, + )) + )); + } else { + height.to_string() + } + } + Err(_) => { + return Err(FetchServiceError::TonicStatusError( + tonic::Status::invalid_argument( + "Error: Height out of range. Failed to convert to u32.", + ), + )); + } + } + } else { + hex::encode(request.hash) + }; + match self.z_get_treestate(hash_or_height).await { + Ok(state) => { + let (hash, height, time, sapling, orchard) = state.into_parts(); + Ok(TreeState { + network: chain_info.chain().clone(), + height: height.0 as u64, + hash: hash.to_string(), + time, + sapling_tree: sapling.map(hex::encode).unwrap_or_default(), + orchard_tree: orchard.map(hex::encode).unwrap_or_default(), + }) + } + Err(e) => { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + Err(FetchServiceError::TonicStatusError(tonic::Status::unknown( + format!("Error: Failed to retrieve treestate from node. Server Error: {e}",), + ))) + } + } + } + + /// GetLatestTreeState returns the note commitment tree state corresponding to the chain tip. 
+ async fn get_latest_tree_state(&self) -> Result { + let chain_info = self.get_blockchain_info().await?; + match self + .z_get_treestate(chain_info.blocks().0.to_string()) + .await + { + Ok(state) => { + let (hash, height, time, sapling, orchard) = state.into_parts(); + Ok(TreeState { + network: chain_info.chain().clone(), + height: height.0 as u64, + hash: hash.to_string(), + time, + sapling_tree: sapling.map(hex::encode).unwrap_or_default(), + orchard_tree: orchard.map(hex::encode).unwrap_or_default(), + }) + } + Err(e) => { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + Err(FetchServiceError::TonicStatusError(tonic::Status::unknown( + format!("Error: Failed to retrieve treestate from node. Server Error: {e}",), + ))) + } + } + } + + #[allow(deprecated)] + fn timeout_channel_size(&self) -> (u32, u32) { + ( + self.config.service.timeout, + self.config.service.channel_size, + ) + } + + /// Returns all unspent outputs for a list of addresses. + /// + /// Ignores all utxos below block height [GetAddressUtxosArg.start_height]. + /// Returns max [GetAddressUtxosArg.max_entries] utxos, or unrestricted if [GetAddressUtxosArg.max_entries] = 0. + /// Utxos are collected and returned as a single Vec. 
+ async fn get_address_utxos( + &self, + request: GetAddressUtxosArg, + ) -> Result { + let taddrs = GetAddressBalanceRequest::new(request.addresses); + let utxos = self.z_get_address_utxos(taddrs).await?; + let mut address_utxos: Vec = Vec::new(); + let mut entries: u32 = 0; + for utxo in utxos { + let (address, tx_hash, output_index, script, satoshis, height) = utxo.into_parts(); + if (height.0 as u64) < request.start_height { + continue; + } + entries += 1; + if request.max_entries > 0 && entries > request.max_entries { + break; + } + let checked_index = match i32::try_from(output_index.index()) { + Ok(index) => index, + Err(_) => { + return Err(FetchServiceError::TonicStatusError(tonic::Status::unknown( + "Error: Index out of range. Failed to convert to i32.", + ))); + } + }; + let checked_satoshis = match i64::try_from(satoshis) { + Ok(satoshis) => satoshis, + Err(_) => { + return Err(FetchServiceError::TonicStatusError(tonic::Status::unknown( + "Error: Satoshis out of range. Failed to convert to i64.", + ))); + } + }; + let utxo_reply = GetAddressUtxosReply { + address: address.to_string(), + txid: tx_hash.0.to_vec(), + index: checked_index, + script: script.as_raw_bytes().to_vec(), + value_zat: checked_satoshis, + height: height.0 as u64, + }; + address_utxos.push(utxo_reply) + } + Ok(GetAddressUtxosReplyList { address_utxos }) + } + + /// Returns all unspent outputs for a list of addresses. + /// + /// Ignores all utxos below block height [GetAddressUtxosArg.start_height]. + /// Returns max [GetAddressUtxosArg.max_entries] utxos, or unrestricted if [GetAddressUtxosArg.max_entries] = 0. + /// Utxos are returned in a stream. 
+ #[allow(deprecated)] + async fn get_address_utxos_stream( + &self, + request: GetAddressUtxosArg, + ) -> Result { + let taddrs = GetAddressBalanceRequest::new(request.addresses); + let utxos = self.z_get_address_utxos(taddrs).await?; + let service_timeout = self.config.service.timeout; + let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + tokio::spawn(async move { + let timeout = timeout( + time::Duration::from_secs((service_timeout * 4) as u64), + async { + let mut entries: u32 = 0; + for utxo in utxos { + let (address, tx_hash, output_index, script, satoshis, height) = + utxo.into_parts(); + if (height.0 as u64) < request.start_height { + continue; + } + entries += 1; + if request.max_entries > 0 && entries > request.max_entries { + break; + } + let checked_index = match i32::try_from(output_index.index()) { + Ok(index) => index, + Err(_) => { + let _ = channel_tx + .send(Err(tonic::Status::unknown( + "Error: Index out of range. Failed to convert to i32.", + ))) + .await; + return; + } + }; + let checked_satoshis = match i64::try_from(satoshis) { + Ok(satoshis) => satoshis, + Err(_) => { + let _ = channel_tx + .send(Err(tonic::Status::unknown( + "Error: Satoshis out of range. 
Failed to convert to i64.", + ))) + .await; + return; + } + }; + let utxo_reply = GetAddressUtxosReply { + address: address.to_string(), + txid: tx_hash.0.to_vec(), + index: checked_index, + script: script.as_raw_bytes().to_vec(), + value_zat: checked_satoshis, + height: height.0 as u64, + }; + if channel_tx.send(Ok(utxo_reply)).await.is_err() { + return; + } + } + }, + ) + .await; + match timeout { + Ok(_) => {} + Err(_) => { + channel_tx + .send(Err(tonic::Status::deadline_exceeded( + "Error: get_mempool_stream gRPC request timed out", + ))) + .await + .ok(); + } + } + }); + Ok(UtxoReplyStream::new(channel_rx)) + } + + /// Return information about this lightwalletd instance and the blockchain + async fn get_lightd_info(&self) -> Result { + let blockchain_info = self.get_blockchain_info().await?; + let sapling_id = zebra_rpc::methods::ConsensusBranchIdHex::new( + zebra_chain::parameters::ConsensusBranchId::from_hex("76b809bb") + .map_err(|_e| { + tonic::Status::internal( + "Internal Error - Consesnsus Branch ID hex conversion failed", + ) + })? 
+ .into(), + ); + + let sapling_activation_height = blockchain_info + .upgrades() + .get(&sapling_id) + .map_or(Height(1), |sapling_json| sapling_json.into_parts().1); + + let consensus_branch_id = zebra_chain::parameters::ConsensusBranchId::from( + blockchain_info.consensus().into_parts().0, + ) + .to_string(); + + let nu_info = blockchain_info + .upgrades() + .last() + .expect("Expected validator to have a consenus activated.") + .1 + .into_parts(); + + let nu_name = nu_info.0; + let nu_height = nu_info.1; + + Ok(LightdInfo { + version: self.data.build_info().version(), + vendor: "ZingoLabs ZainoD".to_string(), + taddr_support: true, + chain_name: blockchain_info.chain().clone(), + sapling_activation_height: sapling_activation_height.0 as u64, + consensus_branch_id, + block_height: blockchain_info.blocks().0 as u64, + git_commit: self.data.build_info().commit_hash(), + branch: self.data.build_info().branch(), + build_date: self.data.build_info().build_date(), + build_user: self.data.build_info().build_user(), + estimated_height: blockchain_info.estimated_height().0 as u64, + zcashd_build: self.data.zebra_build(), + zcashd_subversion: self.data.zebra_subversion(), + donation_address: "".to_string(), + upgrade_name: nu_name.to_string(), + upgrade_height: nu_height.0 as u64, + lightwallet_protocol_version: "v0.4.0".to_string(), + }) + } + + /// Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) + /// + /// NOTE: Currently unimplemented in Zaino. + async fn ping(&self, _request: Duration) -> Result { + Err(FetchServiceError::TonicStatusError(tonic::Status::unimplemented( + "Ping not yet implemented. If you require this RPC please open an issue or PR at the Zaino github (https://github.com/zingolabs/zaino.git)." + ))) + } +} diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs new file mode 100644 index 000000000..b1b4423a8 --- /dev/null +++ b/zaino-state/src/backends/state.rs @@ -0,0 +1,2711 @@ +//! 
Zcash chain fetch and tx submission service backed by Zebras [`ReadStateService`]. + +#[allow(deprecated)] +use crate::{ + chain_index::{ + mempool::{Mempool, MempoolSubscriber}, + source::ValidatorConnector, + types as chain_types, ChainIndex, + }, + config::StateServiceConfig, + error::ChainIndexError, + error::{BlockCacheError, StateServiceError}, + indexer::{ + handle_raw_transaction, IndexerSubscriber, LightWalletIndexer, ZcashIndexer, ZcashService, + }, + status::{AtomicStatus, Status, StatusType}, + stream::{ + AddressStream, CompactBlockStream, CompactTransactionStream, RawTransactionStream, + UtxoReplyStream, + }, + utils::{get_build_info, ServiceMetadata}, + BackendType, MempoolKey, NodeBackedChainIndex, NodeBackedChainIndexSubscriber, State, +}; +use tokio_stream::StreamExt as _; +use zaino_fetch::{ + chain::{transaction::FullTransaction, utils::ParseFromSlice}, + jsonrpsee::{ + connector::{JsonRpSeeConnector, RpcError}, + response::{ + address_deltas::{BlockInfo, GetAddressDeltasParams, GetAddressDeltasResponse}, + block_deltas::{BlockDelta, BlockDeltas, InputDelta, OutputDelta}, + block_header::GetBlockHeader, + block_subsidy::GetBlockSubsidy, + mining_info::GetMiningInfoWire, + peer_info::GetPeerInfo, + GetMempoolInfoResponse, GetNetworkSolPsResponse, GetSubtreesResponse, + }, + }, +}; +use zaino_proto::proto::utils::{ + blockid_to_hashorheight, compact_block_to_nullifiers, GetBlockRangeError, PoolTypeError, + PoolTypeFilter, ValidatedBlockRangeRequest, +}; +use zaino_proto::proto::{ + compact_formats::CompactBlock, + service::{ + AddressList, Balance, BlockId, BlockRange, GetAddressUtxosArg, GetAddressUtxosReply, + GetAddressUtxosReplyList, GetMempoolTxRequest, LightdInfo, PingResponse, RawTransaction, + SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, + }, +}; + +use zcash_protocol::consensus::NetworkType; +use zebra_chain::{ + amount::{Amount, NonNegative}, + block::{Header, Height, SerializedBlock}, + 
chain_tip::NetworkChainTipHeightEstimator, + parameters::{ConsensusBranchId, Network, NetworkKind, NetworkUpgrade}, + serialization::{BytesInDisplayOrder as _, ZcashSerialize}, + subtree::NoteCommitmentSubtreeIndex, +}; +use zebra_rpc::{ + client::{ + GetAddressBalanceRequest, GetBlockchainInfoBalance, GetSubtreesByIndexResponse, + GetTreestateResponse, HexData, Input, SubtreeRpcData, TransactionObject, + ValidateAddressResponse, + }, + methods::{ + chain_tip_difficulty, AddressBalance, ConsensusBranchIdHex, GetAddressTxIdsRequest, + GetAddressUtxos, GetBlock, GetBlockHash, GetBlockHeader as GetBlockHeaderZebra, + GetBlockHeaderObject, GetBlockTransaction, GetBlockTrees, GetBlockchainInfoResponse, + GetInfo, GetRawTransaction, NetworkUpgradeInfo, NetworkUpgradeStatus, SentTransactionHash, + TipConsensusBranch, ValidateAddresses as _, + }, + server::error::LegacyCode, + sync::init_read_state_with_syncer, +}; +use zebra_state::{ + FromDisk, HashOrHeight, OutputLocation, ReadRequest, ReadResponse, ReadStateService, + TransactionLocation, +}; + +use chrono::{DateTime, Utc}; +use futures::{TryFutureExt as _, TryStreamExt as _}; +use hex::{FromHex as _, ToHex}; +use indexmap::IndexMap; +use std::{collections::HashSet, error::Error, fmt, str::FromStr, sync::Arc}; +use tokio::{ + sync::mpsc, + time::{self, timeout}, +}; +use tonic::async_trait; +use tower::{Service, ServiceExt}; +use tracing::{info, warn}; + +macro_rules! expected_read_response { + ($response:ident, $expected_variant:ident) => { + match $response { + ReadResponse::$expected_variant(inner) => inner, + unexpected => { + unreachable!("Unexpected response from state service: {unexpected:?}") + } + } + }; +} + +/// Chain fetch service backed by Zebra's `ReadStateService` and `TrustedChainSync`. +/// +/// NOTE: We currently dop not implement clone for chain fetch services +/// as this service is responsible for maintaining and closing its child processes. 
+/// ServiceSubscribers are used to create separate chain fetch processes +/// while allowing central state processes to be managed in a single place. +/// If we want the ability to clone Service all JoinHandle's should be +/// converted to Arc\. +#[derive(Debug)] +// #[deprecated = "Will be eventually replaced by `BlockchainSource"] +pub struct StateService { + /// `ReadeStateService` from Zebra-State. + read_state_service: ReadStateService, + + /// Internal mempool. + mempool: Mempool, + + /// StateService config data. + #[allow(deprecated)] + config: StateServiceConfig, + + /// Listener for when the chain tip changes + chain_tip_change: zebra_state::ChainTipChange, + + /// Sync task handle. + sync_task_handle: Option>>, + + /// JsonRPC Client. + rpc_client: JsonRpSeeConnector, + + /// Core indexer. + indexer: NodeBackedChainIndex, + + /// Service metadata. + data: ServiceMetadata, + + /// Thread-safe status indicator. + status: AtomicStatus, +} + +impl StateService { + #[cfg(feature = "test_dependencies")] + /// Helper for tests + pub fn read_state_service(&self) -> &ReadStateService { + &self.read_state_service + } +} + +impl Status for StateService { + fn status(&self) -> StatusType { + let current_status = self.status.load(); + if current_status == StatusType::Closing { + current_status + } else { + self.indexer.status() + } + } +} + +#[async_trait] +// #[allow(deprecated)] +impl ZcashService for StateService { + const BACKEND_TYPE: BackendType = BackendType::State; + + type Subscriber = StateServiceSubscriber; + type Config = StateServiceConfig; + + /// Initializes a new StateService instance and starts sync process. 
+ async fn spawn(config: StateServiceConfig) -> Result { + info!("Spawning State Service.."); + + let rpc_client = JsonRpSeeConnector::new_from_config_parts( + &config.validator_rpc_address, + config.validator_rpc_user.clone(), + config.validator_rpc_password.clone(), + config.validator_cookie_path.clone(), + ) + .await?; + + let zebra_build_data = rpc_client.get_info().await?; + + // This const is optional, as the build script can only + // generate it from hash-based dependencies. + // in all other cases, this check will be skipped. + if let Some(expected_zebrad_version) = crate::ZEBRA_VERSION { + // this `+` indicates a git describe run + // i.e. the first seven characters of the commit hash + // have been appended. We match on those + if zebra_build_data.build.contains('+') { + if !zebra_build_data + .build + .contains(&expected_zebrad_version[0..7]) + { + return Err(StateServiceError::ZebradVersionMismatch { + expected_zebrad_version: expected_zebrad_version.to_string(), + connected_zebrad_version: zebra_build_data.build, + }); + } + } else { + // With no `+`, we expect a version number to be an exact match + if expected_zebrad_version != zebra_build_data.build { + return Err(StateServiceError::ZebradVersionMismatch { + expected_zebrad_version: expected_zebrad_version.to_string(), + connected_zebrad_version: zebra_build_data.build, + }); + } + } + }; + let data = ServiceMetadata::new( + get_build_info(), + config.network.to_zebra_network(), + zebra_build_data.build, + zebra_build_data.subversion, + ); + info!("Using Zcash build: {}", data); + + info!("Launching Chain Syncer.."); + info!("{}", config.validator_rpc_address); + let (mut read_state_service, _latest_chain_tip, chain_tip_change, sync_task_handle) = + init_read_state_with_syncer( + config.validator_state_config.clone(), + &config.network.to_zebra_network(), + config.validator_grpc_address, + ) + .await??; + + info!("chain syncer launched!"); + + // Wait for ReadStateService to catch up to primary 
database: + loop { + let server_height = rpc_client.get_blockchain_info().await?.blocks; + info!("got blockchain info!"); + + let syncer_response = read_state_service + .ready() + .and_then(|service| service.call(ReadRequest::Tip)) + .await?; + info!("got tip!"); + let (syncer_height, _) = expected_read_response!(syncer_response, Tip).ok_or( + RpcError::new_from_legacycode(LegacyCode::Misc, "no blocks in chain"), + )?; + + if server_height.0 == syncer_height.0 { + break; + } else { + info!(" - ReadStateService syncing with Zebra. Syncer chain height: {}, Validator chain height: {}", + &syncer_height.0, + &server_height.0 + ); + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + continue; + } + } + + let mempool_source = ValidatorConnector::State(crate::chain_index::source::State { + read_state_service: read_state_service.clone(), + mempool_fetcher: rpc_client.clone(), + network: config.network, + }); + + let mempool = Mempool::spawn(mempool_source, None).await?; + + let chain_index = NodeBackedChainIndex::new( + ValidatorConnector::State(State { + read_state_service: read_state_service.clone(), + mempool_fetcher: rpc_client.clone(), + network: config.network, + }), + config.clone().into(), + ) + .await + .unwrap(); + + let state_service = Self { + chain_tip_change, + read_state_service, + sync_task_handle: Some(Arc::new(sync_task_handle)), + rpc_client: rpc_client.clone(), + mempool, + indexer: chain_index, + data, + config, + status: AtomicStatus::new(StatusType::Spawning), + }; + + // wait for sync to complete, return error on sync fail. 
+ loop { + match state_service.status() { + StatusType::Ready => { + state_service.status.store(StatusType::Ready); + break; + } + StatusType::CriticalError => { + return Err(StateServiceError::Critical( + "Chain index sync failed".to_string(), + )); + } + StatusType::Closing => break, + _ => { + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + } + } + } + + Ok(state_service) + } + + fn get_subscriber(&self) -> IndexerSubscriber { + IndexerSubscriber::new(StateServiceSubscriber { + read_state_service: self.read_state_service.clone(), + rpc_client: self.rpc_client.clone(), + mempool: self.mempool.subscriber(), + indexer: self.indexer.subscriber(), + data: self.data.clone(), + config: self.config.clone(), + chain_tip_change: self.chain_tip_change.clone(), + }) + } + + /// Shuts down the StateService. + fn close(&mut self) { + if self.sync_task_handle.is_some() { + if let Some(handle) = self.sync_task_handle.take() { + handle.abort(); + } + } + } +} + +#[allow(deprecated)] +impl Drop for StateService { + fn drop(&mut self) { + self.close() + } +} + +/// A fetch service subscriber. +/// +/// Subscribers should be +#[derive(Debug, Clone)] +// #[deprecated] +pub struct StateServiceSubscriber { + /// Remote wrappper functionality for zebra's [`ReadStateService`]. + pub read_state_service: ReadStateService, + + /// Internal mempool. + pub mempool: MempoolSubscriber, + + /// StateService config data. + #[allow(deprecated)] + config: StateServiceConfig, + + /// Listener for when the chain tip changes + chain_tip_change: zebra_state::ChainTipChange, + + /// JsonRPC Client. + pub rpc_client: JsonRpSeeConnector, + + /// Core indexer. + pub indexer: NodeBackedChainIndexSubscriber, + + /// Service metadata. 
+ pub data: ServiceMetadata, +} + +impl Status for StateServiceSubscriber { + fn status(&self) -> StatusType { + self.indexer.status() + } +} + +/// A subscriber to any chaintip updates +#[derive(Clone)] +pub struct ChainTipSubscriber { + monitor: zebra_state::ChainTipChange, +} + +impl ChainTipSubscriber { + /// Waits until the tip hash has changed (relative to the last time this method + /// was called), then returns the best tip's block hash. + pub async fn next_tip_hash( + &mut self, + ) -> Result { + self.monitor + .wait_for_tip_change() + .await + .map(|tip| tip.best_tip_hash()) + } +} + +/// Private RPC methods, which are used as helper methods by the public ones +/// +/// These would be simple to add to the public interface if +/// needed, there are currently no plans to do so. +// #[allow(deprecated)] +impl StateServiceSubscriber { + /// Gets a Subscriber to any updates to the latest chain tip + pub fn chaintip_update_subscriber(&self) -> ChainTipSubscriber { + ChainTipSubscriber { + monitor: self.chain_tip_change.clone(), + } + } + /// Returns the requested block header by hash or height, as a [`GetBlockHeader`] JSON string. + /// If the block is not in Zebra's state, + /// returns [error code `-8`.](https://github.com/zcash/zcash/issues/5758) + /// if a height was passed or -5 if a hash was passed. + /// + /// zcashd reference: [`getblockheader`](https://zcash.github.io/rpc/getblockheader.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `hash_or_height`: (string, required, example="1") The hash or height + /// for the block to be returned. + /// - `verbose`: (bool, optional, default=false, example=true) false for hex encoded data, + /// true for a json object + /// + /// # Notes + /// + /// The undocumented `chainwork` field is not returned. + /// + /// This rpc is used by get_block(verbose), there is currently no + /// plan to offer this RPC publicly. 
+ async fn get_block_header_inner( + state: &ReadStateService, + network: &Network, + hash_or_height: HashOrHeight, + verbose: Option, + ) -> Result { + let mut state = state.clone(); + let verbose = verbose.unwrap_or(true); + let network = network.clone(); + + let zebra_state::ReadResponse::BlockHeader { + header, + hash, + height, + next_block_hash, + } = state + .ready() + .and_then(|service| service.call(zebra_state::ReadRequest::BlockHeader(hash_or_height))) + .await + .map_err(|_| { + StateServiceError::RpcError(RpcError { + // Compatibility with zcashd. Note that since this function + // is reused by getblock(), we return the errors expected + // by it (they differ whether a hash or a height was passed) + code: LegacyCode::InvalidParameter as i64, + message: "block height not in best chain".to_string(), + data: None, + }) + })? + else { + return Err(StateServiceError::Custom( + "Unexpected response to BlockHeader request".to_string(), + )); + }; + + let response = if !verbose { + GetBlockHeaderZebra::Raw(HexData(header.zcash_serialize_to_vec()?)) + } else { + let zebra_state::ReadResponse::SaplingTree(sapling_tree) = state + .ready() + .and_then(|service| { + service.call(zebra_state::ReadRequest::SaplingTree(hash_or_height)) + }) + .await? + else { + return Err(StateServiceError::Custom( + "Unexpected response to SaplingTree request".to_string(), + )); + }; + // This could be `None` if there's a chain reorg between state queries. + let sapling_tree = sapling_tree.ok_or_else(|| { + StateServiceError::RpcError(zaino_fetch::jsonrpsee::connector::RpcError { + code: LegacyCode::InvalidParameter as i64, + message: "missing sapling tree for block".to_string(), + data: None, + }) + })?; + + let zebra_state::ReadResponse::Depth(depth) = state + .ready() + .and_then(|service| service.call(zebra_state::ReadRequest::Depth(hash))) + .await? 
+ else { + return Err(StateServiceError::Custom( + "Unexpected response to Depth request".to_string(), + )); + }; + + // From + // TODO: Deduplicate const definition, consider + // refactoring this to avoid duplicate logic + const NOT_IN_BEST_CHAIN_CONFIRMATIONS: i64 = -1; + + // Confirmations are one more than the depth. + // Depth is limited by height, so it will never overflow an i64. + let confirmations = depth + .map(|depth| i64::from(depth) + 1) + .unwrap_or(NOT_IN_BEST_CHAIN_CONFIRMATIONS); + + let mut nonce = *header.nonce; + nonce.reverse(); + + let sapling_activation = NetworkUpgrade::Sapling.activation_height(&network); + let sapling_tree_size = sapling_tree.count(); + let final_sapling_root: [u8; 32] = + if sapling_activation.is_some() && height >= sapling_activation.unwrap() { + let mut root: [u8; 32] = sapling_tree.root().into(); + root.reverse(); + root + } else { + [0; 32] + }; + + let difficulty = header.difficulty_threshold.relative_to_network(&network); + let block_commitments = + header_to_block_commitments(&header, &network, height, final_sapling_root)?; + + let block_header = GetBlockHeaderObject::new( + hash, + confirmations, + height, + header.version, + header.merkle_root, + block_commitments, + final_sapling_root, + sapling_tree_size, + header.time.timestamp(), + nonce, + header.solution, + header.difficulty_threshold, + difficulty, + header.previous_block_hash, + next_block_hash, + ); + + GetBlockHeaderZebra::Object(Box::new(block_header)) + }; + + Ok(response) + } + + /// Return a list of consecutive compact blocks. 
+ #[allow(dead_code, deprecated)] + async fn get_block_range_inner( + &self, + request: BlockRange, + nullifiers_only: bool, + ) -> Result { + let validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) + .map_err(StateServiceError::from)?; + + let pool_type_filter = PoolTypeFilter::new_from_pool_types(&validated_request.pool_types()) + .map_err(GetBlockRangeError::PoolTypeArgumentError) + .map_err(StateServiceError::from)?; + + // Note conversion here is safe due to the use of [`ValidatedBlockRangeRequest::new_from_block_range`] + let start = validated_request.start() as u32; + let end = validated_request.end() as u32; + + let state_service_clone = self.clone(); + let service_timeout = self.config.service.timeout; + let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + + tokio::spawn(async move { + let timeout_result = timeout( + time::Duration::from_secs((service_timeout * 4) as u64), + async { + let snapshot = state_service_clone.indexer.snapshot_nonfinalized_state(); + let chain_height = snapshot.best_tip.height.0; + + match state_service_clone + .indexer + .get_compact_block_stream( + &snapshot, + chain_types::Height(start), + chain_types::Height(end), + pool_type_filter.clone(), + ) + .await + { + Ok(Some(mut compact_block_stream)) => { + if nullifiers_only { + while let Some(stream_item) = compact_block_stream.next().await { + match stream_item { + Ok(block) => { + if channel_tx + .send(Ok(compact_block_to_nullifiers(block))) + .await + .is_err() + { + break; + } + } + Err(status) => { + if channel_tx.send(Err(status)).await.is_err() { + break; + } + } + } + } + } else { + while let Some(stream_item) = compact_block_stream.next().await { + if channel_tx.send(stream_item).await.is_err() { + break; + } + } + } + } + Ok(None) => { + // Per `get_compact_block_stream` semantics: `None` means at least one bound is above the tip. 
+ let offending_height = if start > chain_height { start } else { end }; + + match channel_tx + .send(Err(tonic::Status::out_of_range(format!( + "Error: Height out of range [{offending_height}]. Height requested is greater than the best chain tip [{chain_height}].", + )))) + .await + { + Ok(_) => {} + Err(e) => { + warn!("GetBlockRange channel closed unexpectedly: {}", e); + } + } + } + Err(e) => { + // Preserve previous behaviour: if the request is above tip, surface OutOfRange; + // otherwise return the error (currently exposed for dev). + if start > chain_height || end > chain_height { + let offending_height = if start > chain_height { start } else { end }; + + match channel_tx + .send(Err(tonic::Status::out_of_range(format!( + "Error: Height out of range [{offending_height}]. Height requested is greater than the best chain tip [{chain_height}].", + )))) + .await + { + Ok(_) => {} + Err(e) => { + warn!("GetBlockRange channel closed unexpectedly: {}", e); + } + } + } else { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + if channel_tx + .send(Err(tonic::Status::unknown(e.to_string()))) + .await + .is_err() + { + warn!("GetBlockRangeStream closed unexpectedly: {}", e); + } + } + } + } + }, + ) + .await; + + if timeout_result.is_err() { + channel_tx + .send(Err(tonic::Status::deadline_exceeded( + "Error: get_block_range gRPC request timed out.", + ))) + .await + .ok(); + } + }); + + Ok(CompactBlockStream::new(channel_rx)) + } + + async fn error_get_block( + &self, + e: BlockCacheError, + height: u32, + ) -> Result { + let snapshot = self.indexer.snapshot_nonfinalized_state(); + let chain_height = snapshot.best_tip.height.0; + Err(if height >= chain_height { + StateServiceError::TonicStatusError(tonic::Status::out_of_range(format!( + "Error: Height out of range [{height}]. 
Height requested \ + is greater than the best chain tip [{chain_height}].", + ))) + } else { + // TODO: Hide server error from clients before release. + // Currently useful for dev purposes. + StateServiceError::TonicStatusError(tonic::Status::unknown(format!( + "Error: Failed to retrieve block from node. Server Error: {e}", + ))) + }) + } + + pub(crate) async fn get_block_inner( + state: &ReadStateService, + network: &Network, + hash_or_height: HashOrHeight, + verbosity: Option, + ) -> Result { + let mut state_1 = state.clone(); + + let verbosity = verbosity.unwrap_or(1); + match verbosity { + 0 => { + let request = ReadRequest::Block(hash_or_height); + let response = state_1 + .ready() + .and_then(|service| service.call(request)) + .await?; + let block = expected_read_response!(response, Block); + block.map(SerializedBlock::from).map(GetBlock::Raw).ok_or( + StateServiceError::RpcError(RpcError::new_from_legacycode( + LegacyCode::InvalidParameter, + "block not found", + )), + ) + } + 1 | 2 => { + let state_2 = state.clone(); + let state_3 = state.clone(); + let state_4 = state.clone(); + + let blockandsize_future = { + let req = ReadRequest::BlockAndSize(hash_or_height); + async move { state_1.ready().and_then(|service| service.call(req)).await } + }; + let orchard_future = { + let req = ReadRequest::OrchardTree(hash_or_height); + async move { + state_2 + .clone() + .ready() + .and_then(|service| service.call(req)) + .await + } + }; + + let block_info_future = { + let req = ReadRequest::BlockInfo(hash_or_height); + async move { + state_4 + .clone() + .ready() + .and_then(|service| service.call(req)) + .await + } + }; + let (fullblock, orchard_tree_response, header, block_info) = futures::join!( + blockandsize_future, + orchard_future, + StateServiceSubscriber::get_block_header_inner( + &state_3, + network, + hash_or_height, + Some(true) + ), + block_info_future + ); + + let header_obj = match header? 
{ + GetBlockHeaderZebra::Raw(_hex_data) => unreachable!( + "`true` was passed to get_block_header, an object should be returned" + ), + GetBlockHeaderZebra::Object(get_block_header_object) => get_block_header_object, + }; + + let (transactions_response, size, block_info): (Vec, _, _) = + match (fullblock, block_info) { + ( + Ok(ReadResponse::BlockAndSize(Some((block, size)))), + Ok(ReadResponse::BlockInfo(Some(block_info))), + ) => Ok(( + block + .transactions + .iter() + .map(|transaction| { + match verbosity { + 1 => GetBlockTransaction::Hash(transaction.hash()), + 2 => GetBlockTransaction::Object(Box::new( + TransactionObject::from_transaction( + transaction.clone(), + Some(header_obj.height()), + Some(header_obj.confirmations() as u32), + network, + DateTime::::from_timestamp( + header_obj.time(), + 0, + ), + Some(header_obj.hash()), + // block header has a non-optional height, which indicates + // a mainchain block. It is implied this method cannot return sidechain + // data, at least for now. 
This is subject to change: TODO + // return Some(true/false) after this assumption is resolved + None, + transaction.hash(), + ), + )), + _ => unreachable!("verbosity known to be 1 or 2"), + } + }) + .collect(), + size, + block_info, + )), + (Ok(ReadResponse::Block(None)), Ok(ReadResponse::BlockInfo(None))) => { + Err(StateServiceError::RpcError(RpcError::new_from_legacycode( + LegacyCode::InvalidParameter, + "block not found", + ))) + } + (Ok(unexpected), Ok(unexpected2)) => { + unreachable!("Unexpected responses from state service: {unexpected:?} {unexpected2:?}") + } + (Err(e), _) | (_, Err(e)) => Err(e.into()), + }?; + + let orchard_tree_response = orchard_tree_response?; + let orchard_tree = expected_read_response!(orchard_tree_response, OrchardTree) + .ok_or(StateServiceError::RpcError(RpcError::new_from_legacycode( + LegacyCode::Misc, + "missing orchard tree", + )))?; + + let final_orchard_root = match NetworkUpgrade::Nu5.activation_height(network) { + Some(activation_height) if header_obj.height() >= activation_height => { + Some(orchard_tree.root().into()) + } + _otherwise => None, + }; + + let trees = + GetBlockTrees::new(header_obj.sapling_tree_size(), orchard_tree.count()); + + let (chain_supply, value_pools) = ( + GetBlockchainInfoBalance::chain_supply(*block_info.value_pools()), + GetBlockchainInfoBalance::value_pools(*block_info.value_pools(), None), + ); + + Ok(GetBlock::Object(Box::new( + zebra_rpc::client::BlockObject::new( + header_obj.hash(), + header_obj.confirmations(), + Some(size as i64), + Some(header_obj.height()), + Some(header_obj.version()), + Some(header_obj.merkle_root()), + Some(header_obj.block_commitments()), + Some(header_obj.final_sapling_root()), + final_orchard_root, + transactions_response, + Some(header_obj.time()), + Some(header_obj.nonce()), + Some(header_obj.solution()), + Some(header_obj.bits()), + Some(header_obj.difficulty()), + Some(chain_supply), + Some(value_pools), + trees, + Some(header_obj.previous_block_hash()), 
+ header_obj.next_block_hash(), + ), + ))) + } + more_than_two => Err(StateServiceError::RpcError(RpcError::new_from_legacycode( + LegacyCode::InvalidParameter, + format!("invalid verbosity of {more_than_two}"), + ))), + } + } + + /// Fetches transaction objects for addresses within a given block range. + /// This method takes addresses and a block range and returns full transaction objects. + /// Uses parallel async calls for efficient transaction fetching. + /// + /// If `fail_fast` is true, fails immediately when any transaction fetch fails. + /// Otherwise, it continues and returns partial results, filtering out failed fetches. + async fn get_taddress_txs( + &self, + addresses: Vec, + start: u32, + end: u32, + fail_fast: bool, + ) -> Result>, StateServiceError> { + // Convert to GetAddressTxIdsRequest for compatibility with existing helper + let tx_ids_request = GetAddressTxIdsRequest::new(addresses, Some(start), Some(end)); + + // Get transaction IDs using existing method + let txids = self.get_address_tx_ids(tx_ids_request).await?; + + // Fetch all transactions in parallel + let results = futures::future::join_all( + txids + .into_iter() + .map(|txid| async { self.clone().get_raw_transaction(txid, Some(1)).await }), + ) + .await; + + let transactions = results + .into_iter() + .filter_map(|result| { + match (fail_fast, result) { + // Fail-fast mode: propagate errors + (true, Err(e)) => Some(Err(e)), + (true, Ok(tx)) => Some(Ok(tx)), + // Filter mode: skip errors + (false, Err(_)) => None, + (false, Ok(tx)) => Some(Ok(tx)), + } + }) + .collect::, _>>()? + .into_iter() + .filter_map(|tx| match tx { + GetRawTransaction::Object(transaction_obj) => Some(transaction_obj), + GetRawTransaction::Raw(_) => None, + }) + .collect(); + + Ok(transactions) + } + + /// Creates a BlockInfo from a block height using direct state service calls. 
+ async fn block_info_from_height(&self, height: Height) -> Result { + use zebra_state::{HashOrHeight, ReadRequest}; + + let hash_or_height = HashOrHeight::Height(height); + + let response = self + .read_state_service + .clone() + .ready() + .await? + .call(ReadRequest::BlockHeader(hash_or_height)) + .await?; + + match response { + ReadResponse::BlockHeader { hash, .. } => Ok(BlockInfo::new( + hex::encode(hash.bytes_in_display_order()), + height.0, + )), + _ => Err(StateServiceError::RpcError(RpcError::new_from_legacycode( + LegacyCode::InvalidParameter, + format!("Block not found at height {}", height.0), + ))), + } + } + + /// Returns the network type running. + #[allow(deprecated)] + pub fn network(&self) -> zaino_common::Network { + self.config.network + } + + /// Returns the median time of the last 11 blocks. + async fn median_time_past( + &self, + start: &zebra_rpc::client::BlockObject, + ) -> Result { + const MEDIAN_TIME_PAST_WINDOW: usize = 11; + + let mut times = Vec::with_capacity(MEDIAN_TIME_PAST_WINDOW); + + let start_hash = start.hash().to_string(); + let time_0 = start + .time() + .ok_or_else(|| MedianTimePast::StartMissingTime { + hash: start_hash.clone(), + })?; + times.push(time_0); + + let mut prev = start.previous_block_hash(); + + for _ in 0..(MEDIAN_TIME_PAST_WINDOW - 1) { + let hash = match prev { + Some(h) => h.to_string(), + None => break, // genesis + }; + + match self.z_get_block(hash.clone(), Some(1)).await { + Ok(GetBlock::Object(obj)) => { + if let Some(t) = obj.time() { + times.push(t); + } + prev = obj.previous_block_hash(); + } + Ok(GetBlock::Raw(_)) => { + return Err(MedianTimePast::UnexpectedRaw { hash }); + } + Err(_e) => { + // Use values up to this point + break; + } + } + } + + if times.is_empty() { + return Err(MedianTimePast::EmptyWindow); + } + + times.sort_unstable(); + Ok(times[times.len() / 2]) + } +} + +#[async_trait] +// #[allow(deprecated)] +impl ZcashIndexer for StateServiceSubscriber { + type Error = 
StateServiceError; + + async fn get_info(&self) -> Result { + // A number of these fields are difficult to access from the state service + // TODO: Fix this + self.rpc_client + .get_info() + .await + .map(GetInfo::from) + .map_err(|e| StateServiceError::Custom(e.to_string())) + } + + /// Returns all changes for an address. + /// + /// Returns information about all changes to the given transparent addresses within the given (inclusive) + /// + /// block height range, default is the full blockchain. + /// If start or end are not specified, they default to zero. + /// If start is greater than the latest block height, it's interpreted as that height. + /// + /// If end is zero, it's interpreted as the latest block height. + /// + /// [Original zcashd implementation](https://github.com/zcash/zcash/blob/18238d90cd0b810f5b07d5aaa1338126aa128c06/src/rpc/misc.cpp#L881) + /// + /// zcashd reference: [`getaddressdeltas`](https://zcash.github.io/rpc/getaddressdeltas.html) + /// method: post + /// tags: address + async fn get_address_deltas( + &self, + params: GetAddressDeltasParams, + ) -> Result { + let (addresses, start_raw, end_raw, chain_info) = match ¶ms { + GetAddressDeltasParams::Filtered { + addresses, + start, + end, + chain_info, + } => (addresses.clone(), *start, *end, *chain_info), + GetAddressDeltasParams::Address(a) => (vec![a.clone()], 0, 0, false), + }; + + let tip = self.chain_height().await?; + let mut start = Height(start_raw); + let mut end = Height(end_raw); + if end == Height(0) || end > tip { + end = tip; + } + if start > tip { + start = tip; + } + + let transactions = self + .get_taddress_txs(addresses.clone(), start.0, end.0, true) + .await?; + + // Ordered deltas + let deltas = + GetAddressDeltasResponse::process_transactions_to_deltas(&transactions, &addresses); + + if chain_info && start > Height(0) && end > Height(0) { + let start_info = self.block_info_from_height(start).await?; + let end_info = self.block_info_from_height(end).await?; + + 
Ok(GetAddressDeltasResponse::WithChainInfo { + deltas, + start: start_info, + end: end_info, + }) + } else { + // Otherwise return the array form + Ok(GetAddressDeltasResponse::Simple(deltas)) + } + } + + async fn get_difficulty(&self) -> Result { + chain_tip_difficulty( + self.config.network.to_zebra_network(), + self.read_state_service.clone(), + false, + ) + .await + .map_err(|e| { + StateServiceError::RpcError(RpcError::new_from_errorobject( + e, + "failed to get difficulty", + )) + }) + } + + async fn get_block_subsidy(&self, height: u32) -> Result { + self.rpc_client + .get_block_subsidy(height) + .await + .map_err(|e| StateServiceError::Custom(e.to_string())) + } + + async fn get_blockchain_info(&self) -> Result { + let mut state = self.read_state_service.clone(); + + let response = state + .ready() + .and_then(|service| service.call(ReadRequest::TipPoolValues)) + .await?; + let (height, hash, balance) = match response { + ReadResponse::TipPoolValues { + tip_height, + tip_hash, + value_balance, + } => (tip_height, tip_hash, value_balance), + unexpected => { + unreachable!("Unexpected response from state service: {unexpected:?}") + } + }; + + let usage_response = state + .ready() + .and_then(|service| service.call(ReadRequest::UsageInfo)) + .await?; + let size_on_disk = expected_read_response!(usage_response, UsageInfo); + + let request = zebra_state::ReadRequest::BlockHeader(hash.into()); + let response = state + .ready() + .and_then(|service| service.call(request)) + .await?; + let header = match response { + ReadResponse::BlockHeader { header, .. 
} => header, + unexpected => { + unreachable!("Unexpected response from state service: {unexpected:?}") + } + }; + + let now = Utc::now(); + let zebra_estimated_height = + NetworkChainTipHeightEstimator::new(header.time, height, &self.config.network.into()) + .estimate_height_at(now); + let estimated_height = if header.time > now || zebra_estimated_height < height { + height + } else { + zebra_estimated_height + }; + + let upgrades = IndexMap::from_iter( + self.config + .network + .to_zebra_network() + .full_activation_list() + .into_iter() + .filter_map(|(activation_height, network_upgrade)| { + // Zebra defines network upgrades based on incompatible consensus rule changes, + // but zcashd defines them based on ZIPs. + // + // All the network upgrades with a consensus branch ID + // are the same in Zebra and zcashd. + network_upgrade.branch_id().map(|branch_id| { + // zcashd's RPC seems to ignore Disabled network upgrades, + // so Zebra does too. + let status = if height >= activation_height { + NetworkUpgradeStatus::Active + } else { + NetworkUpgradeStatus::Pending + }; + + ( + ConsensusBranchIdHex::new(branch_id.into()), + NetworkUpgradeInfo::from_parts( + network_upgrade, + activation_height, + status, + ), + ) + }) + }), + ); + + let next_block_height = + (height + 1).expect("valid chain tips are a lot less than Height::MAX"); + let consensus = TipConsensusBranch::from_parts( + ConsensusBranchIdHex::new( + NetworkUpgrade::current(&self.config.network.into(), height) + .branch_id() + .unwrap_or(ConsensusBranchId::RPC_MISSING_ID) + .into(), + ) + .inner(), + ConsensusBranchIdHex::new( + NetworkUpgrade::current(&self.config.network.into(), next_block_height) + .branch_id() + .unwrap_or(ConsensusBranchId::RPC_MISSING_ID) + .into(), + ) + .inner(), + ); + + // TODO: Remove unwrap() + let difficulty = chain_tip_difficulty( + self.config.network.to_zebra_network(), + self.read_state_service.clone(), + false, + ) + .await + .unwrap(); + + let verification_progress = 
f64::from(height.0) / f64::from(zebra_estimated_height.0); + + Ok(GetBlockchainInfoResponse::new( + self.config.network.to_zebra_network().bip70_network_name(), + height, + hash, + estimated_height, + zebra_rpc::client::GetBlockchainInfoBalance::chain_supply(balance), + // TODO: account for new delta_pools arg? + zebra_rpc::client::GetBlockchainInfoBalance::value_pools(balance, None), + upgrades, + consensus, + height, + difficulty, + verification_progress, + // TODO: store work in the finalized state for each height + // see https://github.com/ZcashFoundation/zebra/issues/7109 + 0, + false, + size_on_disk, + // TODO (copied from zebra): Investigate whether this needs to + // be implemented (it's sprout-only in zcashd) + 0, + )) + } + + /// Returns details on the active state of the TX memory pool. + /// In Zaino, this RPC call information is gathered from the local Zaino state instead of directly reflecting the full node's mempool. This state is populated from a gRPC stream, sourced from the full node. + /// There are no request parameters. + /// The Zcash source code is considered canonical: + /// [from the rpc definition](), [this function is called to produce the return value](>). + /// There are no required or optional parameters. + /// the `size` field is called by [this line of code](), and returns an int64. + /// `size` represents the number of transactions currently in the mempool. + /// the `bytes` field is called by [this line of code](), and returns an int64 from [this variable](). + /// `bytes` is the sum memory size in bytes of all transactions in the mempool: the sum of all transaction byte sizes. + /// the `usage` field is called by [this line of code](), and returns an int64 derived from the return of this function(), which includes a number of elements. + /// `usage` is the total memory usage for the mempool, in bytes. + /// the [optional `fullyNotified` field](), is only utilized for zcashd regtests, is deprecated, and is not included. 
+ async fn get_mempool_info(&self) -> Result { + Ok(self.indexer.get_mempool_info().await.into()) + } + + async fn get_peer_info(&self) -> Result { + Ok(self.rpc_client.get_peer_info().await?) + } + + async fn z_get_address_balance( + &self, + address_strings: GetAddressBalanceRequest, + ) -> Result { + let mut state = self.read_state_service.clone(); + + let strings_set = address_strings + .valid_addresses() + .map_err(|e| RpcError::new_from_errorobject(e, "invalid taddrs provided"))?; + let response = state + .ready() + .and_then(|service| service.call(ReadRequest::AddressBalance(strings_set))) + .await?; + let (balance, received) = match response { + ReadResponse::AddressBalance { balance, received } => (balance, received), + unexpected => { + unreachable!("Unexpected response from state service: {unexpected:?}") + } + }; + + Ok(AddressBalance::new(balance.into(), received)) + } + + async fn send_raw_transaction( + &self, + raw_transaction_hex: String, + ) -> Result { + // Offload to the json rpc connector, as ReadStateService + // doesn't yet interface with the mempool + self.rpc_client + .send_raw_transaction(raw_transaction_hex) + .await + .map(SentTransactionHash::from) + .map_err(Into::into) + } + + async fn get_block_header( + &self, + hash: String, + verbose: bool, + ) -> Result { + self.rpc_client + .get_block_header(hash, verbose) + .await + .map_err(|e| StateServiceError::Custom(e.to_string())) + } + + async fn z_get_block( + &self, + hash_or_height_string: String, + verbosity: Option, + ) -> Result { + let hash_or_height = HashOrHeight::from_str(&hash_or_height_string); + + StateServiceSubscriber::get_block_inner( + &self.read_state_service.clone(), + &self.data.network(), + hash_or_height?, + verbosity, + ) + .await + } + + async fn get_block_deltas(&self, hash: String) -> Result { + // Get the block WITH the transaction data + let zblock = self.z_get_block(hash, Some(2)).await?; + + match zblock { + GetBlock::Object(boxed_block) => { + let deltas = 
boxed_block + .tx() + .iter() + .enumerate() + .map(|(tx_index, tx)| match tx { + GetBlockTransaction::Object(txo) => { + let txid = txo.txid().to_string(); + + let inputs: Vec = txo + .inputs() + .iter() + .enumerate() + .filter_map(|(i, vin)| match vin { + Input::Coinbase { .. } => None, + Input::NonCoinbase { + txid: prevtxid, + vout: prevout, + value, + value_zat, + address, + .. + } => { + let zats = if let Some(z) = value_zat { + *z + } else if let Some(v) = value { + (v * 100_000_000.0).round() as i64 + } else { + return None; + }; + + let addr = match address { + Some(a) => a.clone(), + None => return None, + }; + + let input_amt: Amount = match (-zats).try_into() { + Ok(a) => a, + Err(_) => return None, + }; + + Some(InputDelta { + address: addr, + satoshis: input_amt, + index: i as u32, + prevtxid: prevtxid.clone(), + prevout: *prevout, + }) + } + }) + .collect::>(); + + let outputs: Vec = + txo.outputs() + .iter() + .filter_map(|vout| { + let addr_opt = + vout.script_pub_key().addresses().as_ref().and_then( + |v| if v.len() == 1 { v.first() } else { None }, + ); + + let addr = addr_opt?.clone(); + + let output_amt: Amount = + match vout.value_zat().try_into() { + Ok(a) => a, + Err(_) => return None, + }; + + Some(OutputDelta { + address: addr, + satoshis: output_amt, + index: vout.n(), + }) + }) + .collect::>(); + + Ok::<_, Self::Error>(BlockDelta { + txid, + index: tx_index as u32, + inputs, + outputs, + }) + } + GetBlockTransaction::Hash(_) => Err(StateServiceError::Custom( + "Unexpected hash when expecting object".to_string(), + )), + }) + .collect::, _>>()?; + + Ok(BlockDeltas { + hash: boxed_block.hash().to_string(), + confirmations: boxed_block.confirmations(), + size: boxed_block.size().expect("size should be present"), + height: boxed_block.height().expect("height should be present").0, + version: boxed_block.version().expect("version should be present"), + merkle_root: boxed_block + .merkle_root() + .expect("merkle root should be present") + 
.encode_hex::(), + deltas, + time: boxed_block.time().expect("time should be present"), + + median_time: self.median_time_past(&boxed_block).await.unwrap(), + nonce: hex::encode(boxed_block.nonce().unwrap()), + bits: boxed_block + .bits() + .expect("bits should be present") + .to_string(), + difficulty: boxed_block + .difficulty() + .expect("difficulty should be present"), + previous_block_hash: boxed_block + .previous_block_hash() + .map(|hash| hash.to_string()), + next_block_hash: boxed_block.next_block_hash().map(|h| h.to_string()), + }) + } + GetBlock::Raw(_serialized_block) => Err(StateServiceError::Custom( + "Unexpected raw block".to_string(), + )), + } + } + + async fn get_raw_mempool(&self) -> Result, Self::Error> { + Ok(self + .indexer + .get_mempool_txids() + .await? + .into_iter() + .map(|txid| txid.to_string()) + .collect()) + } + + async fn z_get_treestate( + &self, + hash_or_height: String, + ) -> Result { + let mut state = self.read_state_service.clone(); + + let hash_or_height = HashOrHeight::from_str(&hash_or_height)?; + let block_header_response = state + .ready() + .and_then(|service| service.call(ReadRequest::BlockHeader(hash_or_height))) + .await?; + let (header, hash, height) = match block_header_response { + ReadResponse::BlockHeader { + header, + hash, + height, + .. 
+ } => (header, hash, height), + unexpected => { + unreachable!("Unexpected response from state service: {unexpected:?}") + } + }; + + let sapling = + match NetworkUpgrade::Sapling.activation_height(&self.config.network.into()) { + Some(activation_height) if height >= activation_height => Some( + state + .ready() + .and_then(|service| service.call(ReadRequest::SaplingTree(hash_or_height))) + .await?, + ), + _ => None, + } + .and_then(|sap_response| { + expected_read_response!(sap_response, SaplingTree).map(|tree| tree.to_rpc_bytes()) + }); + + let orchard = match NetworkUpgrade::Nu5.activation_height(&self.config.network.into()) { + Some(activation_height) if height >= activation_height => Some( + state + .ready() + .and_then(|service| service.call(ReadRequest::OrchardTree(hash_or_height))) + .await?, + ), + _ => None, + } + .and_then(|orch_response| { + expected_read_response!(orch_response, OrchardTree).map(|tree| tree.to_rpc_bytes()) + }); + + #[allow(deprecated)] + Ok(GetTreestateResponse::from_parts( + hash, + height, + // If the timestamp is pre-unix epoch, something has gone terribly wrong + u32::try_from(header.time.timestamp()).unwrap(), + sapling, + orchard, + )) + } + + async fn get_mining_info(&self) -> Result { + Ok(self.rpc_client.get_mining_info().await?) + } + + // No request parameters. + /// Return the hex encoded hash of the best (tip) block, in the longest block chain. + /// The Zcash source code is considered canonical: + /// [In the rpc definition](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/common.h#L48) there are no required params, or optional params. 
+ /// [The function in rpc/blockchain.cpp](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/blockchain.cpp#L325) + /// where `return chainActive.Tip()->GetBlockHash().GetHex();` is the [return expression](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/blockchain.cpp#L339)returning a `std::string` + async fn get_best_blockhash(&self) -> Result { + // return should be valid hex encoded. + // Hash from zebra says: + // Return the hash bytes in big-endian byte-order suitable for printing out byte by byte. + // + // Zebra displays transaction and block hashes in big-endian byte-order, + // following the u256 convention set by Bitcoin and zcashd. + match self.read_state_service.best_tip() { + Some(x) => return Ok(GetBlockHash::new(x.1)), + None => { + // try RPC if state read fails: + Ok(self.rpc_client.get_best_blockhash().await?.into()) + } + } + } + + /// Returns the current block count in the best valid block chain. + /// + /// zcashd reference: [`getblockcount`](https://zcash.github.io/rpc/getblockcount.html) + /// method: post + /// tags: blockchain + async fn get_block_count(&self) -> Result { + let nfs_snapshot = self.indexer.snapshot_nonfinalized_state(); + let h = nfs_snapshot.best_tip.height; + Ok(h.into()) + } + + async fn validate_address( + &self, + raw_address: String, + ) -> Result { + use zcash_keys::address::Address; + use zcash_transparent::address::TransparentAddress; + + let Ok(address) = raw_address.parse::() else { + return Ok(ValidateAddressResponse::invalid()); + }; + + let address = match address.convert_if_network::
( + match self.config.network.to_zebra_network().kind() { + NetworkKind::Mainnet => NetworkType::Main, + NetworkKind::Testnet => NetworkType::Test, + NetworkKind::Regtest => NetworkType::Regtest, + }, + ) { + Ok(address) => address, + Err(err) => { + tracing::debug!(?err, "conversion error"); + return Ok(ValidateAddressResponse::invalid()); + } + }; + + // we want to match zcashd's behaviour + Ok(match address { + Address::Transparent(taddr) => ValidateAddressResponse::new( + true, + Some(raw_address), + Some(matches!(taddr, TransparentAddress::ScriptHash(_))), + ), + _ => ValidateAddressResponse::invalid(), + }) + } + + async fn z_get_subtrees_by_index( + &self, + pool: String, + start_index: NoteCommitmentSubtreeIndex, + limit: Option, + ) -> Result { + let mut state = self.read_state_service.clone(); + + match pool.as_str() { + "sapling" => { + let request = zebra_state::ReadRequest::SaplingSubtrees { start_index, limit }; + let response = state + .ready() + .and_then(|service| service.call(request)) + .await?; + let sapling_subtrees = expected_read_response!(response, SaplingSubtrees); + let subtrees = sapling_subtrees + .values() + .map(|subtree| { + SubtreeRpcData { + root: subtree.root.to_bytes().encode_hex(), + end_height: subtree.end_height, + } + .into() + }) + .collect(); + + Ok(GetSubtreesResponse { + pool, + start_index, + subtrees, + } + .into()) + } + "orchard" => { + let request = zebra_state::ReadRequest::OrchardSubtrees { start_index, limit }; + let response = state + .ready() + .and_then(|service| service.call(request)) + .await?; + let orchard_subtrees = expected_read_response!(response, OrchardSubtrees); + let subtrees = orchard_subtrees + .values() + .map(|subtree| { + SubtreeRpcData { + root: subtree.root.encode_hex(), + end_height: subtree.end_height, + } + .into() + }) + .collect(); + + Ok(GetSubtreesResponse { + pool, + start_index, + subtrees, + } + .into()) + } + otherwise => Err(StateServiceError::RpcError(RpcError::new_from_legacycode( 
+ LegacyCode::Misc, + format!("invalid pool name \"{otherwise}\", must be \"sapling\" or \"orchard\""), + ))), + } + } + + async fn get_raw_transaction( + &self, + txid_hex: String, + verbose: Option, + ) -> Result { + let mut state = self.read_state_service.clone(); + + let txid = zebra_chain::transaction::Hash::from_hex(txid_hex).map_err(|e| { + RpcError::new_from_legacycode(LegacyCode::InvalidAddressOrKey, e.to_string()) + })?; + + let not_found_error = || { + StateServiceError::RpcError(RpcError::new_from_legacycode( + LegacyCode::InvalidAddressOrKey, + "No such mempool or main chain transaction", + )) + }; + + // First check if transaction is in mempool as this is quick. + match self + .mempool + .contains_txid(&MempoolKey { + txid: txid.to_string(), + }) + .await + { + // Fetch trasaction from mempool. + true => { + match self + .mempool + .get_transaction(&MempoolKey { + txid: txid.to_string(), + }) + .await + { + Some(tx) => { + let serialized = tx.as_ref().serialized_tx.as_ref().clone(); + + match verbose { + // Return an object view, matching the chain path semantics. + Some(_verbosity) => { + let parsed_tx: zebra_chain::transaction::Transaction = + zebra_chain::serialization::ZcashDeserialize::zcash_deserialize( + serialized.as_ref(), + ) + .map_err(|_| not_found_error())?; + + Ok(GetRawTransaction::Object(Box::new( + TransactionObject::from_transaction( + parsed_tx.into(), + None, // best_chain_height + Some(0), // confirmations + &self.config.network.into(), // network + None, // block_time + None, // block_hash + Some(false), // in_best_chain + txid, // txid + ), + ))) + } + // Return raw bytes when not verbose. + None => Ok(GetRawTransaction::Raw(serialized)), + } + } + None => Err(not_found_error()), + } + } + // Fetch transaction from state. + false => { + // + match state + .ready() + .and_then(|service| service.call(zebra_state::ReadRequest::Transaction(txid))) + .await + .map_err(|_| not_found_error())? 
+ { + zebra_state::ReadResponse::Transaction(Some(tx)) => Ok(match verbose { + Some(_verbosity) => { + // This should be None for sidechain transactions, + // which currently aren't returned by ReadResponse::Transaction + let best_chain_height = Some(tx.height); + let snapshot = self.indexer.snapshot_nonfinalized_state(); + let compact_block = self + .indexer + .get_compact_block( + &snapshot, + chain_types::Height(tx.height.0), + PoolTypeFilter::includes_all(), + ) + .await? + .ok_or_else(|| ChainIndexError::database_hole(tx.height.0, None))?; + let tx_object = TransactionObject::from_transaction( + tx.tx.clone(), + best_chain_height, + Some(tx.confirmations), + &self.config.network.into(), + Some(tx.block_time), + Some(zebra_chain::block::Hash::from_bytes(compact_block.hash)), + Some(best_chain_height.is_some()), + tx.tx.hash(), + ); + GetRawTransaction::Object(Box::new(tx_object)) + } + None => GetRawTransaction::Raw(tx.tx.into()), + }), + zebra_state::ReadResponse::Transaction(None) => Err(not_found_error()), + + _ => unreachable!("unmatched response to a `Transaction` read request"), + } + } + } + } + + async fn get_address_tx_ids( + &self, + request: GetAddressTxIdsRequest, + ) -> Result, Self::Error> { + let mut state = self.read_state_service.clone(); + + let (addresses, start, end) = request.into_parts(); + let response = state + .ready() + .and_then(|service| service.call(ReadRequest::Tip)) + .await?; + let (chain_height, _chain_hash) = expected_read_response!(response, Tip).ok_or( + RpcError::new_from_legacycode(LegacyCode::Misc, "no blocks in chain"), + )?; + + let mut error_string = None; + if start > end { + error_string = Some(format!( + "start {start:?} must be less than or equal to end {end:?}" + )); + } + if Height(start) > chain_height || Height(end) > chain_height { + error_string = Some(format!( + "start {start:?} and end {end:?} must both be less than or \ + equal to the chain tip {chain_height:?}" + )); + } + if let Some(e) = error_string { + 
return Err(StateServiceError::RpcError(RpcError::new_from_legacycode( + LegacyCode::InvalidParameter, + e, + ))); + } + + let request = ReadRequest::TransactionIdsByAddresses { + addresses: GetAddressBalanceRequest::new(addresses) + .valid_addresses() + .map_err(|e| RpcError::new_from_errorobject(e, "invalid adddress"))?, + + height_range: Height(start)..=Height(end), + }; + let response = state + .ready() + .and_then(|service| service.call(request)) + .await?; + let hashes = expected_read_response!(response, AddressesTransactionIds); + + let mut last_tx_location = TransactionLocation::from_usize(Height(0), 0); + + Ok(hashes + .iter() + .map(|(tx_loc, tx_id)| { + // Check that the returned transactions are in chain order. + assert!( + *tx_loc > last_tx_location, + "Transactions were not in chain order:\n\ + {tx_loc:?} {tx_id:?} was after:\n\ + {last_tx_location:?}", + ); + + last_tx_location = *tx_loc; + + tx_id.to_string() + }) + .collect()) + } + + async fn z_get_address_utxos( + &self, + address_strings: GetAddressBalanceRequest, + ) -> Result, Self::Error> { + let mut state = self.read_state_service.clone(); + + let valid_addresses = address_strings + .valid_addresses() + .map_err(|e| RpcError::new_from_errorobject(e, "invalid address"))?; + let request = ReadRequest::UtxosByAddresses(valid_addresses); + let response = state + .ready() + .and_then(|service| service.call(request)) + .await?; + let utxos = expected_read_response!(response, AddressUtxos); + let mut last_output_location = OutputLocation::from_usize(Height(0), 0, 0); + + Ok(utxos + .utxos() + .map( + |(utxo_address, utxo_hash, utxo_output_location, utxo_transparent_output)| { + assert!(utxo_output_location > &last_output_location); + last_output_location = *utxo_output_location; + GetAddressUtxos::new( + utxo_address, + *utxo_hash, + utxo_output_location.output_index(), + utxo_transparent_output.lock_script.clone(), + u64::from(utxo_transparent_output.value()), + utxo_output_location.height(), + ) + 
}, + ) + .collect()) + } + + /// Returns the estimated network solutions per second based on the last n blocks. + /// + /// zcashd reference: [`getnetworksolps`](https://zcash.github.io/rpc/getnetworksolps.html) + /// method: post + /// tags: blockchain + /// + /// This RPC is implemented in the [mining.cpp](https://github.com/zcash/zcash/blob/d00fc6f4365048339c83f463874e4d6c240b63af/src/rpc/mining.cpp#L104) + /// file of the Zcash repository. The Zebra implementation can be found [here](https://github.com/ZcashFoundation/zebra/blob/19bca3f1159f9cb9344c9944f7e1cb8d6a82a07f/zebra-rpc/src/methods.rs#L2687). + /// + /// # Parameters + /// + /// - `blocks`: (number, optional, default=120) Number of blocks, or -1 for blocks over difficulty averaging window. + /// - `height`: (number, optional, default=-1) To estimate network speed at the time of a specific block height. + async fn get_network_sol_ps( + &self, + blocks: Option, + height: Option, + ) -> Result { + self.rpc_client + .get_network_sol_ps(blocks, height) + .await + .map_err(|e| StateServiceError::Custom(e.to_string())) + } + + // Helper function, to get the chain height in rpc implementations + async fn chain_height(&self) -> Result { + let mut state = self.read_state_service.clone(); + let response = state + .ready() + .and_then(|service| service.call(ReadRequest::Tip)) + .await?; + let (chain_height, _chain_hash) = expected_read_response!(response, Tip).ok_or( + RpcError::new_from_legacycode(LegacyCode::Misc, "no blocks in chain"), + )?; + Ok(chain_height) + } +} + +#[async_trait] +// #[allow(deprecated)] +impl LightWalletIndexer for StateServiceSubscriber { + /// Return the height of the tip of the best chain + async fn get_latest_block(&self) -> Result { + let tip = self.indexer.snapshot_nonfinalized_state().best_tip; + Ok(BlockId { + height: tip.height.0 as u64, + hash: tip.blockhash.0.to_vec(), + }) + } + + /// Return the compact block corresponding to the given block identifier + async fn 
get_block(&self, request: BlockId) -> Result { + let hash_or_height = blockid_to_hashorheight(request).ok_or( + StateServiceError::TonicStatusError(tonic::Status::invalid_argument( + "Error: Invalid hash and/or height out of range. Failed to convert to u32.", + )), + )?; + + let snapshot = self.indexer.snapshot_nonfinalized_state(); + + // Convert HashOrHeight to chain_types::Height + let block_height = match hash_or_height { + HashOrHeight::Height(h) => chain_types::Height(h.0), + HashOrHeight::Hash(h) => self + .indexer + .get_block_height(&snapshot, chain_types::BlockHash(h.0)) + .await + .map_err(StateServiceError::ChainIndexError)? + .ok_or_else(|| { + StateServiceError::TonicStatusError(tonic::Status::not_found( + "Error: Block not found for given hash.", + )) + })?, + }; + + match self + .indexer + .get_compact_block(&snapshot, block_height, PoolTypeFilter::default()) + .await + { + Ok(Some(block)) => Ok(block), + Ok(None) => { + let chain_height = snapshot.best_tip.height.0; + match hash_or_height { + HashOrHeight::Height(Height(height)) if height >= chain_height => Err( + StateServiceError::TonicStatusError(tonic::Status::out_of_range(format!( + "Error: Height out of range [{hash_or_height}]. Height requested \ + is greater than the best chain tip [{chain_height}].", + ))), + ), + _otherwise => Err(StateServiceError::TonicStatusError(tonic::Status::unknown( + "Error: Failed to retrieve block from state.", + ))), + } + } + Err(e) => { + let chain_height = snapshot.best_tip.height.0; + match hash_or_height { + HashOrHeight::Height(Height(height)) if height >= chain_height => Err( + StateServiceError::TonicStatusError(tonic::Status::out_of_range(format!( + "Error: Height out of range [{hash_or_height}]. Height requested \ + is greater than the best chain tip [{chain_height}].", + ))), + ), + _otherwise => + // TODO: Hide server error from clients before release. Currently useful for dev purposes. 
+ { + Err(StateServiceError::TonicStatusError(tonic::Status::unknown( + format!("Error: Failed to retrieve block from node. Server Error: {e}",), + ))) + } + } + } + } + } + + /// Same as GetBlock except actions contain only nullifiers, + /// and saling outputs are not returned (Sapling spends still are) + async fn get_block_nullifiers(&self, request: BlockId) -> Result { + let height: u32 = request.height.try_into().map_err(|_| { + StateServiceError::TonicStatusError(tonic::Status::invalid_argument( + "Error: Height out of range. Failed to convert to u32.", + )) + })?; + + let snapshot = self.indexer.snapshot_nonfinalized_state(); + let block_height = chain_types::Height(height); + + match self + .indexer + .get_compact_block(&snapshot, block_height, PoolTypeFilter::default()) + .await + { + Ok(Some(block)) => Ok(compact_block_to_nullifiers(block)), + Ok(None) => { + self.error_get_block( + BlockCacheError::Custom("Block not found".to_string()), + height, + ) + .await + } + Err(e) => Err(StateServiceError::ChainIndexError(e)), + } + } + + /// Return a list of consecutive compact blocks + async fn get_block_range( + &self, + blockrange: BlockRange, + ) -> Result { + self.get_block_range_inner(blockrange, false).await + } + /// Same as GetBlockRange except actions contain only nullifiers + async fn get_block_range_nullifiers( + &self, + request: BlockRange, + ) -> Result { + self.get_block_range_inner(request, true).await + } + + /// Return the requested full (not compact) transaction (as from zcashd) + async fn get_transaction(&self, request: TxFilter) -> Result { + let hash = zebra_chain::transaction::Hash::from( + <[u8; 32]>::try_from(request.hash).map_err(|_| { + StateServiceError::TonicStatusError(tonic::Status::invalid_argument( + "Error: Transaction hash incorrect", + )) + })?, + ); + let hex = hash.encode_hex(); + + // explicit over method call syntax to make it clear where this method is coming from + ::get_raw_transaction(self, hex, Some(1)) + .await + 
.and_then(|grt| match grt { + GetRawTransaction::Raw(_serialized_transaction) => Err(StateServiceError::Custom( + "unreachable, verbose transaction expected".to_string(), + )), + GetRawTransaction::Object(transaction_object) => Ok(RawTransaction { + data: transaction_object.hex().as_ref().to_vec(), + height: transaction_object.height().unwrap_or(0) as u64, + }), + }) + } + + /// Submit the given transaction to the Zcash network + async fn send_transaction(&self, request: RawTransaction) -> Result { + let hex_tx = hex::encode(request.data); + let tx_output = self.send_raw_transaction(hex_tx).await?; + + Ok(SendResponse { + error_code: 0, + error_message: tx_output.hash().to_string(), + }) + } + + /// Return the transactions corresponding to the given t-address within the given block range + async fn get_taddress_transactions( + &self, + request: TransparentAddressBlockFilter, + ) -> Result { + let txids = self.get_taddress_txids_helper(request).await?; + let chain_height = self.chain_height().await?; + let (transmitter, receiver) = mpsc::channel(self.config.service.channel_size as usize); + let service_timeout = self.config.service.timeout; + let service_clone = self.clone(); + tokio::spawn(async move { + let timeout = timeout( + std::time::Duration::from_secs((service_timeout * 4) as u64), + async { + for txid in txids { + let transaction = service_clone.get_raw_transaction(txid, Some(1)).await; + if handle_raw_transaction::( + chain_height.0 as u64, + transaction, + transmitter.clone(), + ) + .await + .is_err() + { + break; + } + } + }, + ) + .await; + match timeout { + Ok(_) => {} + Err(_) => { + transmitter + .send(Err(tonic::Status::deadline_exceeded( + "Error: get_taddredd_txids_stream gRPC request timed out", + ))) + .await + .ok(); + } + } + }); + Ok(RawTransactionStream::new(receiver)) + } + + /// Return the txids corresponding to the given t-address within the given block range + /// This function is deprecated. Use `get_taddress_transactions`. 
+ async fn get_taddress_txids( + &self, + request: TransparentAddressBlockFilter, + ) -> Result { + self.get_taddress_transactions(request).await + } + + /// Returns the total balance for a list of taddrs + async fn get_taddress_balance( + &self, + request: AddressList, + ) -> Result { + let taddrs = GetAddressBalanceRequest::new(request.addresses); + let balance = self.z_get_address_balance(taddrs).await?; + let checked_balance: i64 = match i64::try_from(balance.balance()) { + Ok(balance) => balance, + Err(_) => { + return Err(Self::Error::TonicStatusError(tonic::Status::unknown( + "Error: Error converting balance from u64 to i64.", + ))); + } + }; + Ok(zaino_proto::proto::service::Balance { + value_zat: checked_balance, + }) + } + /// Returns the total balance for a list of taddrs + /// + /// TODO: This is taken from fetch.rs, we could / probably should reconfigure into a trait implementation. + async fn get_taddress_balance_stream( + &self, + mut request: AddressStream, + ) -> Result { + let fetch_service_clone = self.clone(); + let service_timeout = self.config.service.timeout; + let (channel_tx, mut channel_rx) = + mpsc::channel::(self.config.service.channel_size as usize); + let fetcher_task_handle = tokio::spawn(async move { + let fetcher_timeout = timeout( + time::Duration::from_secs((service_timeout * 4) as u64), + async { + let mut total_balance: u64 = 0; + loop { + match channel_rx.recv().await { + Some(taddr) => { + let taddrs = GetAddressBalanceRequest::new(vec![taddr]); + let balance = + fetch_service_clone.z_get_address_balance(taddrs).await?; + total_balance += balance.balance(); + } + None => { + return Ok(total_balance); + } + } + } + }, + ) + .await; + match fetcher_timeout { + Ok(result) => result, + Err(_) => Err(tonic::Status::deadline_exceeded( + "Error: get_taddress_balance_stream request timed out.", + )), + } + }); + // NOTE: This timeout is so slow due to the blockcache not being implemented. 
This should be reduced to 30s once functionality is in place. + // TODO: Make [rpc_timout] a configurable system variable with [default = 30s] and [mempool_rpc_timout = 4*rpc_timeout] + let addr_recv_timeout = timeout( + time::Duration::from_secs((service_timeout * 4) as u64), + async { + while let Some(address_result) = request.next().await { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + let address = address_result.map_err(|e| { + tonic::Status::unknown(format!("Failed to read from stream: {e}")) + })?; + if channel_tx.send(address.address).await.is_err() { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + return Err(tonic::Status::unknown( + "Error: Failed to send address to balance task.", + )); + } + } + drop(channel_tx); + Ok::<(), tonic::Status>(()) + }, + ) + .await; + match addr_recv_timeout { + Ok(Ok(())) => {} + Ok(Err(e)) => { + fetcher_task_handle.abort(); + return Err(StateServiceError::TonicStatusError(e)); + } + Err(_) => { + fetcher_task_handle.abort(); + return Err(StateServiceError::TonicStatusError( + tonic::Status::deadline_exceeded( + "Error: get_taddress_balance_stream request timed out in address loop.", + ), + )); + } + } + match fetcher_task_handle.await { + Ok(Ok(total_balance)) => { + let checked_balance: i64 = match i64::try_from(total_balance) { + Ok(balance) => balance, + Err(_) => { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + return Err(StateServiceError::TonicStatusError(tonic::Status::unknown( + "Error: Error converting balance from u64 to i64.", + ))); + } + }; + Ok(Balance { + value_zat: checked_balance, + }) + } + Ok(Err(e)) => Err(StateServiceError::TonicStatusError(e)), + // TODO: Hide server error from clients before release. Currently useful for dev purposes. 
+ Err(e) => Err(StateServiceError::TonicStatusError(tonic::Status::unknown( + format!("Fetcher Task failed: {e}"), + ))), + } + } + + /// Return the compact transactions currently in the mempool; the results + /// can be a few seconds out of date. If the Exclude list is empty, return + /// all transactions; otherwise return all *except* those in the Exclude list + /// (if any); this allows the client to avoid receiving transactions that it + /// already has (from an earlier call to this rpc). The transaction IDs in the + /// Exclude list can be shortened to any number of bytes to make the request + /// more bandwidth-efficient; if two or more transactions in the mempool + /// match a shortened txid, they are all sent (none is excluded). Transactions + /// in the exclude list that don't exist in the mempool are ignored. + async fn get_mempool_tx( + &self, + request: GetMempoolTxRequest, + ) -> Result { + let mut exclude_txids: Vec = vec![]; + + for (i, excluded_id) in request.exclude_txid_suffixes.iter().enumerate() { + if excluded_id.len() > 32 { + return Err(StateServiceError::TonicStatusError( + tonic::Status::invalid_argument(format!( + "Error: excluded txid {} is larger than 32 bytes", + i + )), + )); + } + + // NOTE: the TransactionHash methods cannot be used for this hex encoding as exclusions could be truncated to less than 32 bytes + let reversed_txid_bytes: Vec = excluded_id.iter().cloned().rev().collect(); + let hex_string_txid: String = hex::encode(&reversed_txid_bytes); + exclude_txids.push(hex_string_txid); + } + + let pool_types = match PoolTypeFilter::new_from_slice(&request.pool_types) { + Ok(pool_type_filter) => pool_type_filter, + Err(PoolTypeError::InvalidPoolType) => { + return Err(StateServiceError::TonicStatusError( + tonic::Status::invalid_argument( + "Error: An invalid `PoolType' was found".to_string(), + ), + )) + } + Err(PoolTypeError::UnknownPoolType(unknown_pool_type)) => { + return Err(StateServiceError::TonicStatusError( + 
tonic::Status::invalid_argument(format!( + "Error: Unknown `PoolType' {} was found", + unknown_pool_type + )), + )) + } + }; + + let mempool = self.mempool.clone(); + let service_timeout = self.config.service.timeout; + let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + tokio::spawn(async move { + let timeout = timeout( + time::Duration::from_secs((service_timeout * 4) as u64), + async { + for (mempool_key, mempool_value) in + mempool.get_filtered_mempool(exclude_txids).await + { + let txid_bytes = match hex::decode(mempool_key.txid) { + Ok(bytes) => bytes, + Err(error) => { + if channel_tx + .send(Err(tonic::Status::unknown(error.to_string()))) + .await + .is_err() + { + break; + } else { + continue; + } + } + }; + match ::parse_from_slice( + mempool_value.serialized_tx.as_ref().as_ref(), + Some(vec![txid_bytes]), + None, + ) { + Ok(transaction) => { + // ParseFromSlice returns any data left after the conversion to a + // FullTransaction, If the conversion has succeeded this should be empty. + if transaction.0.is_empty() { + if channel_tx + .send( + transaction + .1 + .to_compact_tx(None, &pool_types) + .map_err(|e| tonic::Status::unknown(e.to_string())), + ) + .await + .is_err() + { + break; + } + } else { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + if channel_tx + .send(Err(tonic::Status::unknown("Error: "))) + .await + .is_err() + { + break; + } + } + } + Err(e) => { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. 
+ if channel_tx + .send(Err(tonic::Status::unknown(e.to_string()))) + .await + .is_err() + { + break; + } + } + } + } + }, + ) + .await; + match timeout { + Ok(_) => {} + Err(_) => { + channel_tx + .send(Err(tonic::Status::internal( + "Error: get_mempool_tx gRPC request timed out", + ))) + .await + .ok(); + } + } + }); + + Ok(CompactTransactionStream::new(channel_rx)) + } + + /// Return a stream of current Mempool transactions. This will keep the output stream open while + /// there are mempool transactions. It will close the returned stream when a new block is mined. + async fn get_mempool_stream(&self) -> Result { + let mut mempool = self.mempool.clone(); + let service_timeout = self.config.service.timeout; + let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + let snapshot = self.indexer.snapshot_nonfinalized_state(); + let mempool_height = snapshot.best_tip.height.0; + tokio::spawn(async move { + let timeout = timeout( + time::Duration::from_secs((service_timeout * 6) as u64), + async { + let (mut mempool_stream, _mempool_handle) = match mempool + .get_mempool_stream(None) + .await + { + Ok(stream) => stream, + Err(e) => { + warn!("Error fetching stream from mempool: {:?}", e); + channel_tx + .send(Err(tonic::Status::internal("Error getting mempool stream"))) + .await + .ok(); + return; + } + }; + while let Some(result) = mempool_stream.recv().await { + match result { + Ok((_mempool_key, mempool_value)) => { + if channel_tx + .send(Ok(RawTransaction { + data: mempool_value + .serialized_tx + .as_ref() + .as_ref() + .to_vec(), + height: mempool_height as u64, + })) + .await + .is_err() + { + break; + } + } + Err(e) => { + channel_tx + .send(Err(tonic::Status::internal(format!( + "Error in mempool stream: {e:?}" + )))) + .await + .ok(); + break; + } + } + } + }, + ) + .await; + match timeout { + Ok(_) => {} + Err(_) => { + channel_tx + .send(Err(tonic::Status::internal( + "Error: get_mempool_stream gRPC request timed out", + 
))) + .await + .ok(); + } + } + }); + + Ok(RawTransactionStream::new(channel_rx)) + } + + /// GetTreeState returns the note commitment tree state corresponding to the given block. + /// See section 3.7 of the Zcash protocol specification. It returns several other useful + /// values also (even though they can be obtained using GetBlock). + /// The block can be specified by either height or hash. + async fn get_tree_state(&self, request: BlockId) -> Result { + let hash_or_height = blockid_to_hashorheight(request).ok_or( + crate::error::StateServiceError::TonicStatusError(tonic::Status::invalid_argument( + "Invalid hash or height", + )), + )?; + #[allow(deprecated)] + let (hash, height, time, sapling, orchard) = + ::z_get_treestate( + self, + hash_or_height.to_string(), + ) + .await? + .into_parts(); + Ok(TreeState { + network: self.config.network.to_zebra_network().bip70_network_name(), + height: height.0 as u64, + hash: hash.to_string(), + time, + sapling_tree: sapling.map(hex::encode).unwrap_or_default(), + orchard_tree: orchard.map(hex::encode).unwrap_or_default(), + }) + } + + /// GetLatestTreeState returns the note commitment tree state corresponding to the chain tip. + async fn get_latest_tree_state(&self) -> Result { + let latest_block = self.chain_height().await?; + self.get_tree_state(BlockId { + height: latest_block.0 as u64, + hash: vec![], + }) + .await + } + + fn timeout_channel_size(&self) -> (u32, u32) { + ( + self.config.service.timeout, + self.config.service.channel_size, + ) + } + + /// Returns all unspent outputs for a list of addresses. + /// + /// Ignores all utxos below block height [GetAddressUtxosArg.start_height]. + /// Returns max [GetAddressUtxosArg.max_entries] utxos, or unrestricted if + /// [GetAddressUtxosArg.max_entries] = 0. + /// Utxos are collected and returned as a single Vec. + async fn get_address_utxos( + &self, + request: GetAddressUtxosArg, + ) -> Result { + self.get_address_utxos_stream(request) + .await? 
+ .try_collect::>() + .await + .map(|address_utxos| GetAddressUtxosReplyList { address_utxos }) + .map_err(Self::Error::from) + } + + /// Returns all unspent outputs for a list of addresses. + /// + /// Ignores all utxos below block height [GetAddressUtxosArg.start_height]. + /// Returns max [GetAddressUtxosArg.max_entries] utxos, or unrestricted if + /// [GetAddressUtxosArg.max_entries] = 0. + /// Utxos are returned in a stream. + async fn get_address_utxos_stream( + &self, + request: GetAddressUtxosArg, + ) -> Result { + let mut state = self.read_state_service.clone(); + let mut address_set = HashSet::new(); + for address in request.addresses { + address_set.insert(zebra_chain::transparent::Address::from_str( + address.as_ref(), + )?); + } + + let address_utxos_response = state + .ready() + .and_then(|service| service.call(ReadRequest::UtxosByAddresses(address_set))) + .await?; + let utxos = expected_read_response!(address_utxos_response, AddressUtxos); + let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + tokio::spawn(async move { + for utxo in utxos + .utxos() + .filter_map(|(address, hash, location, output)| { + if location.height().0 as u64 >= request.start_height { + Some(GetAddressUtxosReply { + address: address.to_string(), + txid: hash.0.to_vec(), + index: location.output_index().index() as i32, + script: output.lock_script.as_raw_bytes().to_vec(), + value_zat: output.value.zatoshis(), + height: location.height().0 as u64, + }) + } else { + None + } + }) + .take(match usize::try_from(request.max_entries) { + Ok(0) | Err(_) => usize::MAX, + Ok(non_zero) => non_zero, + }) + { + if channel_tx.send(Ok(utxo)).await.is_err() { + return; + } + } + }); + Ok(UtxoReplyStream::new(channel_rx)) + } + + /// Return information about this lightwalletd instance and the blockchain + /// + /// TODO: This could be made more efficient by fetching data directly (not using self.get_blockchain_info()) + async fn get_lightd_info(&self) -> 
Result { + let blockchain_info = self.get_blockchain_info().await?; + let sapling_id = zebra_rpc::methods::ConsensusBranchIdHex::new( + zebra_chain::parameters::ConsensusBranchId::from_hex("76b809bb") + .map_err(|_e| { + tonic::Status::internal( + "Internal Error - Consesnsus Branch ID hex conversion failed", + ) + })? + .into(), + ); + let sapling_activation_height = blockchain_info + .upgrades() + .get(&sapling_id) + .map_or(Height(1), |sapling_json| sapling_json.into_parts().1); + + let consensus_branch_id = zebra_chain::parameters::ConsensusBranchId::from( + blockchain_info.consensus().into_parts().0, + ) + .to_string(); + + let nu_info = blockchain_info + .upgrades() + .last() + .expect("Expected validator to have a consenus activated.") + .1 + .into_parts(); + + let nu_name = nu_info.0; + let nu_height = nu_info.1; + + Ok(LightdInfo { + version: self.data.build_info().version(), + vendor: "ZingoLabs ZainoD".to_string(), + taddr_support: true, + chain_name: blockchain_info.chain().clone(), + sapling_activation_height: sapling_activation_height.0 as u64, + consensus_branch_id, + block_height: blockchain_info.blocks().0 as u64, + git_commit: self.data.build_info().commit_hash(), + branch: self.data.build_info().branch(), + build_date: self.data.build_info().build_date(), + build_user: self.data.build_info().build_user(), + estimated_height: blockchain_info.estimated_height().0 as u64, + zcashd_build: self.data.zebra_build(), + zcashd_subversion: self.data.zebra_subversion(), + donation_address: "".to_string(), + upgrade_name: nu_name.to_string(), + upgrade_height: nu_height.0 as u64, + lightwallet_protocol_version: "v0.4.0".to_string(), + }) + } + + /// Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) + /// + /// NOTE: Currently unimplemented in Zaino. 
+    async fn ping(
+        &self,
+        _request: zaino_proto::proto::service::Duration,
+    ) -> Result {
+        Err(crate::error::StateServiceError::TonicStatusError(
+            tonic::Status::unimplemented(
+                "Ping not yet implemented. If you require this RPC please open an \
+                issue or PR at the Zaino github (https://github.com/zingolabs/zaino.git).",
+            ),
+        ))
+    }
+}
+
+#[allow(clippy::result_large_err, deprecated)]
+fn header_to_block_commitments(
+    header: &Header,
+    network: &Network,
+    height: Height,
+    final_sapling_root: [u8; 32],
+) -> Result<[u8; 32], StateServiceError> {
+    let hash = match header.commitment(network, height).map_err(|e| {
+        StateServiceError::SerializationError(
+            zebra_chain::serialization::SerializationError::Parse(
+                // For some reason this error type takes a
+                // &'static str, and the only way to create one
+                // dynamically is to leak a String. This shouldn't
+                // be a concern, as this error case should
+                // never occur when communing with a zebrad, which
+                // validates this field before serializing it
+                e.to_string().leak(),
+            ),
+        )
+    })? {
+        zebra_chain::block::Commitment::PreSaplingReserved(bytes) => bytes,
+        zebra_chain::block::Commitment::FinalSaplingRoot(_root) => final_sapling_root,
+        zebra_chain::block::Commitment::ChainHistoryActivationReserved => [0; 32],
+        zebra_chain::block::Commitment::ChainHistoryRoot(root) => root.bytes_in_display_order(),
+        zebra_chain::block::Commitment::ChainHistoryBlockTxAuthCommitment(hash) => {
+            hash.bytes_in_display_order()
+        }
+    };
+    Ok(hash)
+}
+
+/// An error type for median time past calculation errors
+#[derive(Debug, Clone)]
+pub enum MedianTimePast {
+    /// The start block has no `time`.
+    StartMissingTime { hash: String },
+
+    /// Ignored verbosity.
+    UnexpectedRaw { hash: String },
+
+    /// No timestamps collected at all.
+ EmptyWindow, +} + +impl fmt::Display for MedianTimePast { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + MedianTimePast::StartMissingTime { hash } => { + write!(f, "start block {hash} is missing `time`") + } + MedianTimePast::UnexpectedRaw { hash } => { + write!(f, "unexpected raw payload for block {hash}") + } + MedianTimePast::EmptyWindow => { + write!(f, "no timestamps collected (empty MTP window)") + } + } + } +} + +impl Error for MedianTimePast {} diff --git a/zaino-state/src/broadcast.rs b/zaino-state/src/broadcast.rs new file mode 100644 index 000000000..521a4c067 --- /dev/null +++ b/zaino-state/src/broadcast.rs @@ -0,0 +1,249 @@ +//! Holds zaino-state::Broadcast, a thread safe broadcaster used by the mempool and non-finalised state. + +use dashmap::DashMap; +use std::{collections::HashSet, hash::Hash, sync::Arc}; +use tokio::sync::watch; +use tracing::debug; + +use crate::status::StatusType; + +/// A generic, thread-safe broadcaster that manages mutable state and notifies clients of updates. +#[derive(Clone)] +pub(crate) struct Broadcast { + state: Arc>, + notifier: watch::Sender, +} + +impl Broadcast { + /// Creates a new [`Broadcast`], optionally exposes dashmap spec. + pub(crate) fn new(capacity: Option, shard_amount: Option) -> Self { + let (notifier, _) = watch::channel(StatusType::Spawning); + let state = match (capacity, shard_amount) { + (Some(capacity), Some(shard_amount)) => Arc::new( + DashMap::with_capacity_and_shard_amount(capacity, shard_amount), + ), + (Some(capacity), None) => Arc::new(DashMap::with_capacity(capacity)), + (None, Some(shard_amount)) => Arc::new(DashMap::with_shard_amount(shard_amount)), + (None, None) => Arc::new(DashMap::new()), + }; + + Self { state, notifier } + } + + /// Inserts or updates an entry in the state and optionally broadcasts an update. 
+ #[allow(dead_code)] + pub(crate) fn insert(&self, key: K, value: V, status: Option) { + self.state.insert(key, value); + if let Some(status) = status { + let _ = self.notifier.send(status); + } + } + + /// Inserts or updates an entry in the state and broadcasts an update. + #[allow(dead_code)] + pub(crate) fn insert_set(&self, set: Vec<(K, V)>, status: StatusType) { + for (key, value) in set { + self.state.insert(key, value); + } + let _ = self.notifier.send(status); + } + + /// Inserts only new entries from the set into the state and broadcasts an update. + pub(crate) fn insert_filtered_set(&self, set: Vec<(K, V)>, status: StatusType) { + for (key, value) in set { + // Check if the key is already in the map + if self.state.get(&key).is_none() { + self.state.insert(key, value); + } + } + let _ = self.notifier.send(status); + } + + /// Removes an entry from the state and broadcasts an update. + #[allow(dead_code)] + pub(crate) fn remove(&self, key: &K, status: Option) { + self.state.remove(key); + if let Some(status) = status { + let _ = self.notifier.send(status); + } + } + + /// Retrieves a value from the state by key. + #[allow(dead_code)] + pub(crate) fn get(&self, key: &K) -> Option> { + self.state + .get(key) + .map(|entry| Arc::new((*entry.value()).clone())) + } + + /// Retrieves a set of values from the state by a list of keys. + #[allow(dead_code)] + pub(crate) fn get_set(&self, keys: &[K]) -> Vec<(K, Arc)> { + keys.iter() + .filter_map(|key| { + self.state + .get(key) + .map(|entry| (key.clone(), Arc::new((*entry.value()).clone()))) + }) + .collect() + } + + /// Checks if a key exists in the state. + #[allow(dead_code)] + pub(crate) fn contains_key(&self, key: &K) -> bool { + self.state.contains_key(key) + } + + /// Returns a receiver to listen for state update notifications. + pub(crate) fn subscribe(&self) -> watch::Receiver { + self.notifier.subscribe() + } + + /// Returns a [`BroadcastSubscriber`] to the [`Broadcast`]. 
+ pub(crate) fn subscriber(&self) -> BroadcastSubscriber { + BroadcastSubscriber { + state: self.get_state(), + notifier: self.subscribe(), + } + } + + /// Provides read access to the internal state. + pub(crate) fn get_state(&self) -> Arc> { + Arc::clone(&self.state) + } + + /// Returns the whole state excluding keys in the ignore list. + #[allow(dead_code)] + pub(crate) fn get_filtered_state(&self, ignore_list: &HashSet) -> Vec<(K, V)> { + self.state + .iter() + .filter(|entry| !ignore_list.contains(entry.key())) + .map(|entry| (entry.key().clone(), entry.value().clone())) + .collect() + } + + /// Clears all entries from the state. + pub(crate) fn clear(&self) { + self.state.clear(); + } + + /// Returns the number of entries in the state. + #[allow(dead_code)] + pub(crate) fn len(&self) -> usize { + self.state.len() + } + + /// Returns true if the state is empty. + #[allow(dead_code)] + pub(crate) fn is_empty(&self) -> bool { + self.state.is_empty() + } + + /// Broadcasts an update. + pub(crate) fn notify(&self, status: StatusType) { + if self.notifier.send(status).is_err() { + debug!("No subscribers are currently listening for updates."); + } + } +} + +impl Default for Broadcast { + fn default() -> Self { + Self::new(None, None) + } +} + +impl std::fmt::Debug + for Broadcast +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state_contents: Vec<_> = self + .state + .iter() + .map(|entry| (entry.key().clone(), entry.value().clone())) + .collect(); + f.debug_struct("Broadcast") + .field("state", &state_contents) + .field("notifier", &"watch::Sender") + .finish() + } +} + +/// A generic, thread-safe broadcaster that manages mutable state and notifies clients of updates. +#[derive(Clone)] +pub(crate) struct BroadcastSubscriber { + state: Arc>, + notifier: watch::Receiver, +} + +impl BroadcastSubscriber { + /// Waits on notifier update and returns StatusType. 
+ pub(crate) async fn wait_on_notifier(&mut self) -> Result { + self.notifier.changed().await?; + let status = *self.notifier.borrow(); + Ok(status) + } + + /// Retrieves a value from the state by key. + #[allow(dead_code)] + pub(crate) fn get(&self, key: &K) -> Option> { + self.state + .get(key) + .map(|entry| Arc::new((*entry.value()).clone())) + } + + /// Retrieves a set of values from the state by a list of keys. + #[allow(dead_code)] + pub(crate) fn get_set(&self, keys: &[K]) -> Vec<(K, Arc)> { + keys.iter() + .filter_map(|key| { + self.state + .get(key) + .map(|entry| (key.clone(), Arc::new((*entry.value()).clone()))) + }) + .collect() + } + + /// Checks if a key exists in the state. + #[allow(dead_code)] + pub(crate) fn contains_key(&self, key: &K) -> bool { + self.state.contains_key(key) + } + + /// Returns the whole state excluding keys in the ignore list. + pub(crate) fn get_filtered_state(&self, ignore_list: &HashSet) -> Vec<(K, V)> { + self.state + .iter() + .filter(|entry| !ignore_list.contains(entry.key())) + .map(|entry| (entry.key().clone(), entry.value().clone())) + .collect() + } + + /// Returns the number of entries in the state. + #[allow(dead_code)] + pub(crate) fn len(&self) -> usize { + self.state.len() + } + + /// Returns true if the state is empty. + #[allow(dead_code)] + pub(crate) fn is_empty(&self) -> bool { + self.state.is_empty() + } +} + +impl std::fmt::Debug + for BroadcastSubscriber +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state_contents: Vec<_> = self + .state + .iter() + .map(|entry| (entry.key().clone(), entry.value().clone())) + .collect(); + f.debug_struct("Broadcast") + .field("state", &state_contents) + .field("notifier", &"watch::Sender") + .finish() + } +} diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs new file mode 100644 index 000000000..cba7ac69f --- /dev/null +++ b/zaino-state/src/chain_index.rs @@ -0,0 +1,1558 @@ +//! Holds Zaino's local chain index. 
+//!
+//! Components:
+//! - Mempool: Holds mempool transactions
+//! - NonFinalisedState: Holds block data for the top 100 blocks of all chains.
+//! - FinalisedState: Holds block data for the remainder of the best chain.
+//!
+//! - Chain: Holds chain / block structs used internally by the ChainIndex.
+//! - Holds fields required to:
+//! - a. Serve CompactBlock data directly.
+//! - b. Build transparent tx indexes efficiently
+//! - NOTE: Full transaction and block data is served from the backend finalizer.
+
+use crate::chain_index::non_finalised_state::BestTip;
+use crate::chain_index::source::GetTransactionLocation;
+use crate::chain_index::types::db::metadata::MempoolInfo;
+use crate::chain_index::types::{BestChainLocation, NonBestChainLocation};
+use crate::error::{ChainIndexError, ChainIndexErrorKind, FinalisedStateError};
+use crate::status::Status;
+use crate::{AtomicStatus, CompactBlockStream, NodeConnectionError, StatusType, SyncError};
+use crate::{IndexedBlock, TransactionHash};
+use std::collections::HashSet;
+use std::{sync::Arc, time::Duration};
+
+use futures::{FutureExt, Stream};
+use hex::FromHex as _;
+use non_finalised_state::NonfinalizedBlockCacheSnapshot;
+use source::{BlockchainSource, ValidatorConnector};
+use tokio_stream::StreamExt;
+use tracing::info;
+use zaino_proto::proto::utils::{compact_block_with_pool_types, PoolTypeFilter};
+use zebra_chain::parameters::ConsensusBranchId;
+pub use zebra_chain::parameters::Network as ZebraNetwork;
+use zebra_chain::serialization::ZcashSerialize;
+use zebra_state::HashOrHeight;
+
+pub mod encoding;
+/// All state at least 100 blocks old
+pub mod finalised_state;
+/// State in the mempool, not yet on-chain
+pub mod mempool;
+/// State less than 100 blocks old, stored separately as it may be reorged
+pub mod non_finalised_state;
+/// BlockchainSource
+pub mod source;
+/// Common types used by the rest of this module
+pub mod types;
+
+#[cfg(test)]
+mod tests;
+
+/// The interface to the chain index.
+/// +/// `ChainIndex` provides a unified interface for querying blockchain data from different +/// backend sources. It combines access to both finalized state (older than 100 blocks) and +/// non-finalized state (recent blocks that may still be reorganized). +/// +/// # Implementation +/// +/// The primary implementation is [`NodeBackedChainIndex`], which can be backed by either: +/// - Direct read access to a zebrad database via `ReadStateService` (preferred) +/// - A JSON-RPC connection to a validator node (zcashd, zebrad, or another zainod) +/// +/// # Example with ReadStateService (Preferred) +/// +/// ```no_run +/// # async fn example() -> Result<(), Box> { +/// use zaino_state::{ChainIndex, NodeBackedChainIndex, ValidatorConnector, BlockCacheConfig}; +/// use zaino_fetch::jsonrpsee::connector::JsonRpSeeConnector; +/// use zebra_state::{ReadStateService, Config as ZebraConfig}; +/// use std::path::PathBuf; +/// +/// // Create a ReadStateService for direct database access +/// let zebra_config = ZebraConfig::default(); +/// let read_state_service = ReadStateService::new(&zebra_config).await?; +/// +/// // Create a JSON-RPC connector for mempool access (temporary requirement) +/// let mempool_connector = JsonRpSeeConnector::new_from_config_parts( +/// false, // no cookie auth +/// "127.0.0.1:8232".parse()?, +/// "user".to_string(), +/// "password".to_string(), +/// None, // no cookie path +/// ).await?; +/// +/// // Create the State source combining both services +/// let source = ValidatorConnector::State(zaino_state::chain_index::source::State { +/// read_state_service, +/// mempool_fetcher: mempool_connector, +/// }); +/// +/// // Configure the block cache +/// let config = BlockCacheConfig::new( +/// None, // map capacity +/// None, // shard amount +/// 1, // db version +/// PathBuf::from("/path/to/cache"), +/// None, // db size +/// zebra_chain::parameters::Network::Mainnet, +/// false, // sync enabled +/// false, // db enabled +/// ); +/// +/// // Create 
the chain index and get a subscriber for queries +/// let chain_index = NodeBackedChainIndex::new(source, config).await?; +/// let subscriber = chain_index.subscriber().await; +/// +/// // Take a snapshot for consistent queries +/// let snapshot = subscriber.snapshot_nonfinalized_state(); +/// +/// // Query blocks in a range using the subscriber +/// if let Some(stream) = subscriber.get_block_range( +/// &snapshot, +/// zaino_state::Height(100000), +/// Some(zaino_state::Height(100010)) +/// ) { +/// // Process the block stream... +/// } +/// # Ok(()) +/// # } +/// ``` +/// +/// # Example with JSON-RPC Only (Fallback) +/// +/// ```no_run +/// # async fn example() -> Result<(), Box> { +/// use zaino_state::{ChainIndex, NodeBackedChainIndex, ValidatorConnector, BlockCacheConfig}; +/// use zaino_fetch::jsonrpsee::connector::JsonRpSeeConnector; +/// use std::path::PathBuf; +/// +/// // Create a JSON-RPC connector to your validator node +/// let connector = JsonRpSeeConnector::new_from_config_parts( +/// false, // no cookie auth +/// "127.0.0.1:8232".parse()?, +/// "user".to_string(), +/// "password".to_string(), +/// None, // no cookie path +/// ).await?; +/// +/// // Wrap the connector for use with ChainIndex +/// let source = ValidatorConnector::Fetch(connector); +/// +/// // Configure the block cache (same as above) +/// let config = BlockCacheConfig::new( +/// None, // map capacity +/// None, // shard amount +/// 1, // db version +/// PathBuf::from("/path/to/cache"), +/// None, // db size +/// zebra_chain::parameters::Network::Mainnet, +/// false, // sync enabled +/// false, // db enabled +/// ); +/// +/// // Create the chain index and get a subscriber for queries +/// let chain_index = NodeBackedChainIndex::new(source, config).await?; +/// let subscriber = chain_index.subscriber().await; +/// +/// // Use the subscriber to access ChainIndex trait methods +/// let snapshot = subscriber.snapshot_nonfinalized_state(); +/// # Ok(()) +/// # } +/// ``` +/// +/// # 
Migrating from FetchService or StateService +/// +/// If you were previously using `FetchService::spawn()` or `StateService::spawn()`: +/// 1. Extract the relevant fields from your service config into a `BlockCacheConfig` +/// 2. Create the appropriate `ValidatorConnector` variant (State or Fetch) +/// 3. Call `NodeBackedChainIndex::new(source, config).await` +/// +/// When a call asks for info (e.g. a block), Zaino selects sources in this order: +#[doc = simple_mermaid::mermaid!("chain_index_passthrough.mmd")] +pub trait ChainIndex { + /// A snapshot of the nonfinalized state, needed for atomic access + type Snapshot: NonFinalizedSnapshot; + + /// How it can fail + type Error; + + /// Takes a snapshot of the non_finalized state. All NFS-interfacing query + /// methods take a snapshot. The query will check the index + /// it existed at the moment the snapshot was taken. + fn snapshot_nonfinalized_state(&self) -> Self::Snapshot; + + /// Returns Some(Height) for the given block hash *if* it is currently in the best chain. + /// + /// Returns None if the specified block is not in the best chain or is not found. + fn get_block_height( + &self, + snapshot: &Self::Snapshot, + hash: types::BlockHash, + ) -> impl std::future::Future, Self::Error>>; + + /// Given inclusive start and end heights, stream all blocks + /// between the given heights. + /// Returns None if the specified end height + /// is greater than the snapshot's tip + // TO-TEST + #[allow(clippy::type_complexity)] + fn get_block_range( + &self, + snapshot: &Self::Snapshot, + start: types::Height, + end: Option, + ) -> Option, Self::Error>>>; + + /// Returns the *compact* block for the given height. + /// + /// Returns `None` if the specified `height` is greater than the snapshot's tip. + /// + /// ## Pool filtering + /// + /// - `pool_types` controls which per-transaction components are populated. + /// - Transactions that contain no elements in any requested pool are omitted from `vtx`. 
+ /// The original transaction index is preserved in `CompactTx.index`. + /// - `PoolTypeFilter::default()` preserves the legacy behaviour (only Sapling and Orchard + /// components are populated). + #[allow(clippy::type_complexity)] + fn get_compact_block( + &self, + nonfinalized_snapshot: &Self::Snapshot, + height: types::Height, + pool_types: PoolTypeFilter, + ) -> impl std::future::Future< + Output = Result, Self::Error>, + >; + + /// Streams *compact* blocks for an inclusive height range. + /// + /// Returns `None` if the requested range is entirely above the snapshot's tip. + /// + /// - The stream covers `[start_height, end_height]` (inclusive). + /// - If `start_height <= end_height` the stream is ascending; otherwise it is descending. + /// + /// ## Pool filtering + /// + /// - `pool_types` controls which per-transaction components are populated. + /// - Transactions that contain no elements in any requested pool are omitted from `vtx`. + /// The original transaction index is preserved in `CompactTx.index`. + /// - `PoolTypeFilter::default()` preserves the legacy behaviour (only Sapling and Orchard + /// components are populated). + #[allow(clippy::type_complexity)] + fn get_compact_block_stream( + &self, + nonfinalized_snapshot: &Self::Snapshot, + start_height: types::Height, + end_height: types::Height, + pool_types: PoolTypeFilter, + ) -> impl std::future::Future, Self::Error>>; + + /// Finds the newest ancestor of the given block on the main + /// chain, or the block itself if it is on the main chain. + fn find_fork_point( + &self, + snapshot: &Self::Snapshot, + hash: &types::BlockHash, + ) -> impl std::future::Future, Self::Error>>; + + /// Returns the block commitment tree data by hash + #[allow(clippy::type_complexity)] + fn get_treestate( + &self, + // snapshot: &Self::Snapshot, + // currently not implemented internally, fetches data from validator. + // + // NOTE: Should this check blockhash exists in snapshot and db before proxying call? 
+    hash: &types::BlockHash,
+    ) -> impl std::future::Future>, Option>), Self::Error>>;
+
+    /// given a transaction id, returns the transaction, along with
+    /// its consensus branch ID if available
+    #[allow(clippy::type_complexity)]
+    fn get_raw_transaction(
+        &self,
+        snapshot: &Self::Snapshot,
+        txid: &types::TransactionHash,
+    ) -> impl std::future::Future, Option)>, Self::Error>>;
+
+    /// Given a transaction ID, returns all known hashes and heights of blocks
+    /// containing that transaction.
+    ///
+    /// Also returns if the transaction is in the mempool (and whether that mempool is
+    /// in-sync with the provided snapshot)
+    #[allow(clippy::type_complexity)]
+    fn get_transaction_status(
+        &self,
+        snapshot: &Self::Snapshot,
+        txid: &types::TransactionHash,
+    ) -> impl std::future::Future<
+        Output = Result<(Option, HashSet), Self::Error>,
+    >;
+
+    /// Returns all txids currently in the mempool.
+    fn get_mempool_txids(
+        &self,
+    ) -> impl std::future::Future, Self::Error>>;
+
+    /// Returns all transactions currently in the mempool, filtered by `exclude_list`.
+    ///
+    /// The `exclude_list` may contain shortened transaction ID hex prefixes (client-endian).
+    fn get_mempool_transactions(
+        &self,
+        exclude_list: Vec,
+    ) -> impl std::future::Future>, Self::Error>>;
+
+    /// Returns a stream of mempool transactions, ending the stream when the chain tip block hash
+    /// changes (a new block is mined or a reorg occurs).
+    ///
+    /// If a snapshot is given and the chain tip has changed from the given snapshot, returns None.
+ #[allow(clippy::type_complexity)] + fn get_mempool_stream( + &self, + snapshot: Option<&Self::Snapshot>, + ) -> Option, Self::Error>>>; + + /// Returns Information about the mempool state: + /// - size: Current tx count + /// - bytes: Sum of all tx sizes + /// - usage: Total memory usage for the mempool + fn get_mempool_info(&self) -> impl std::future::Future; + + /// Get the tip of the best chain, according to the snapshot + fn best_chaintip( + &self, + nonfinalized_snapshot: &Self::Snapshot, + ) -> impl std::future::Future>; +} + +/// The combined index. Contains a view of the mempool, and the full +/// chain state, both finalized and non-finalized, to allow queries over +/// the entire chain at once. +/// +/// This is the primary implementation backing [`ChainIndex`] and replaces the functionality +/// previously provided by `FetchService` and `StateService`. It can be backed by either: +/// - A zebra `ReadStateService` for direct database access (preferred for performance) +/// - A JSON-RPC connection to any validator node (zcashd, zebrad, or another zainod) +/// +/// To use the [`ChainIndex`] trait methods, call [`subscriber()`](NodeBackedChainIndex::subscriber) +/// to get a [`NodeBackedChainIndexSubscriber`] which implements the trait. 
+/// +/// # Construction +/// +/// Use [`NodeBackedChainIndex::new()`] with: +/// - A [`ValidatorConnector`] source (State variant preferred, Fetch as fallback) +/// - A [`crate::config::BlockCacheConfig`] containing cache and database settings +/// +/// # Example with StateService (Preferred) +/// +/// ```no_run +/// # async fn example() -> Result<(), Box> { +/// use zaino_state::{NodeBackedChainIndex, ValidatorConnector, BlockCacheConfig}; +/// use zaino_state::chain_index::source::State; +/// use zaino_fetch::jsonrpsee::connector::JsonRpSeeConnector; +/// use zebra_state::{ReadStateService, Config as ZebraConfig}; +/// use std::path::PathBuf; +/// +/// // Create ReadStateService for direct database access +/// let zebra_config = ZebraConfig::default(); +/// let read_state_service = ReadStateService::new(&zebra_config).await?; +/// +/// // Temporary: Create JSON-RPC connector for mempool access +/// let mempool_connector = JsonRpSeeConnector::new_from_config_parts( +/// false, +/// "127.0.0.1:8232".parse()?, +/// "user".to_string(), +/// "password".to_string(), +/// None, +/// ).await?; +/// +/// let source = ValidatorConnector::State(State { +/// read_state_service, +/// mempool_fetcher: mempool_connector, +/// }); +/// +/// // Configure the cache (extract these from your previous StateServiceConfig) +/// let config = BlockCacheConfig { +/// map_capacity: Some(1000), +/// map_shard_amount: Some(16), +/// db_version: 1, +/// db_path: PathBuf::from("/path/to/cache"), +/// db_size: Some(10), // GB +/// network: zebra_chain::parameters::Network::Mainnet, +/// no_sync: false, +/// no_db: false, +/// }; +/// +/// let chain_index = NodeBackedChainIndex::new(source, config).await?; +/// let subscriber = chain_index.subscriber().await; +/// +/// // Use the subscriber to access ChainIndex trait methods +/// let snapshot = subscriber.snapshot_nonfinalized_state(); +/// # Ok(()) +/// # } +/// ``` +/// +/// # Example with JSON-RPC Only (Fallback) +/// +/// ```no_run +/// # 
async fn example() -> Result<(), Box> { +/// use zaino_state::{NodeBackedChainIndex, ValidatorConnector, BlockCacheConfig}; +/// use zaino_fetch::jsonrpsee::connector::JsonRpSeeConnector; +/// use std::path::PathBuf; +/// +/// // For JSON-RPC backend (replaces FetchService::spawn) +/// let connector = JsonRpSeeConnector::new_from_config_parts( +/// false, +/// "127.0.0.1:8232".parse()?, +/// "user".to_string(), +/// "password".to_string(), +/// None, +/// ).await?; +/// let source = ValidatorConnector::Fetch(connector); +/// +/// // Configure the cache (extract these from your previous FetchServiceConfig) +/// let config = BlockCacheConfig { +/// map_capacity: Some(1000), +/// map_shard_amount: Some(16), +/// db_version: 1, +/// db_path: PathBuf::from("/path/to/cache"), +/// db_size: Some(10), // GB +/// network: zebra_chain::parameters::Network::Mainnet, +/// no_sync: false, +/// no_db: false, +/// }; +/// +/// let chain_index = NodeBackedChainIndex::new(source, config).await?; +/// let subscriber = chain_index.subscriber().await; +/// +/// // Use the subscriber to access ChainIndex trait methods +/// # Ok(()) +/// # } +/// ``` +/// +/// # Migration from StateService/FetchService +/// +/// If migrating from `StateService::spawn(config)`: +/// 1. Create a `ReadStateService` and temporary JSON-RPC connector for mempool +/// 2. Convert config to `BlockCacheConfig` (or use `From` impl) +/// 3. Call `NodeBackedChainIndex::new(ValidatorConnector::State(...), block_config)` +/// +/// If migrating from `FetchService::spawn(config)`: +/// 1. Create a `JsonRpSeeConnector` using the RPC fields from your `FetchServiceConfig` +/// 2. Convert remaining config fields to `BlockCacheConfig` (or use `From` impl) +/// 3. 
Call `NodeBackedChainIndex::new(ValidatorConnector::Fetch(connector), block_config)` +/// +/// # Current Features +/// +/// - Full mempool support including streaming and filtering +/// - Unified access to finalized and non-finalized blockchain state +/// - Automatic synchronization between state layers +/// - Snapshot-based consistency for queries +#[derive(Debug)] +pub struct NodeBackedChainIndex { + #[allow(dead_code)] + mempool: std::sync::Arc>, + non_finalized_state: std::sync::Arc>, + finalized_db: std::sync::Arc, + sync_loop_handle: Option>>, + status: AtomicStatus, +} + +impl NodeBackedChainIndex { + /// Creates a new chainindex from a connection to a validator + /// Currently this is a ReadStateService or JsonRpSeeConnector + pub async fn new( + source: Source, + config: crate::config::BlockCacheConfig, + ) -> Result { + use futures::TryFutureExt as _; + + let finalized_db = + Arc::new(finalised_state::ZainoDB::spawn(config.clone(), source.clone()).await?); + let mempool_state = mempool::Mempool::spawn(source.clone(), None) + .map_err(crate::InitError::MempoolInitialzationError) + .await?; + + let reader = finalized_db.to_reader(); + let top_of_finalized = if let Some(height) = reader.db_height().await? { + reader.get_chain_block(height).await? + } else { + None + }; + + let non_finalized_state = crate::NonFinalizedState::initialize( + source.clone(), + config.network.to_zebra_network(), + top_of_finalized, + ) + .await?; + + let mut chain_index = Self { + mempool: std::sync::Arc::new(mempool_state), + non_finalized_state: std::sync::Arc::new(non_finalized_state), + finalized_db, + sync_loop_handle: None, + status: AtomicStatus::new(StatusType::Spawning), + }; + chain_index.sync_loop_handle = Some(chain_index.start_sync_loop()); + + Ok(chain_index) + } + + /// Creates a [`NodeBackedChainIndexSubscriber`] from self, + /// a clone-safe, drop-safe, read-only view onto the running indexer. 
+ pub fn subscriber(&self) -> NodeBackedChainIndexSubscriber { + NodeBackedChainIndexSubscriber { + mempool: self.mempool.subscriber(), + non_finalized_state: self.non_finalized_state.clone(), + finalized_state: self.finalized_db.to_reader(), + status: self.status.clone(), + } + } + + /// Shut down the sync process, for a cleaner drop + /// an error indicates a failure to cleanly shutdown. Dropping the + /// chain index should still stop everything + pub async fn shutdown(&self) -> Result<(), FinalisedStateError> { + self.status.store(StatusType::Closing); + self.finalized_db.shutdown().await?; + self.mempool.close(); + Ok(()) + } + + /// Displays the status of the chain_index + pub fn status(&self) -> StatusType { + let finalized_status = self.finalized_db.status(); + let mempool_status = self.mempool.status(); + let combined_status = self + .status + .load() + .combine(finalized_status) + .combine(mempool_status); + self.status.store(combined_status); + combined_status + } + + pub(super) fn start_sync_loop(&self) -> tokio::task::JoinHandle> { + info!("Starting ChainIndex sync."); + let nfs = self.non_finalized_state.clone(); + let fs = self.finalized_db.clone(); + let status = self.status.clone(); + let source = self.non_finalized_state.source.clone(); + + tokio::task::spawn(async move { + let result: Result<(), SyncError> = async { + loop { + if status.load() == StatusType::Closing { + break; + } + + status.store(StatusType::Syncing); + + // Sync fs to chain tip - 100. + let chain_height = source + .clone() + .get_best_block_height() + .await + .map_err(|error| { + SyncError::ValidatorConnectionError( + NodeConnectionError::UnrecoverableError(Box::new(error)), + ) + })? 
+ .ok_or_else(|| { + SyncError::ValidatorConnectionError( + NodeConnectionError::UnrecoverableError(Box::new( + std::io::Error::other("node returned no best block height"), + )), + ) + })?; + let finalised_height = crate::Height(chain_height.0.saturating_sub(100)); + + // TODO / FIX: Improve error handling here, fix and rebuild db on error. + fs.sync_to_height(finalised_height, &source) + .await + .map_err(|error| { + SyncError::ValidatorConnectionError( + NodeConnectionError::UnrecoverableError(Box::new(error)), + ) + })?; + + // Sync nfs to chain tip, trimming blocks to finalized tip. + nfs.sync(fs.clone()).await?; + + status.store(StatusType::Ready); + // TODO: configure sleep duration? + tokio::time::sleep(Duration::from_millis(500)).await + // TODO: Check for shutdown signal. + } + Ok(()) + } + .await; + + // If the sync loop exited unexpectedly with an error, set CriticalError + // so that liveness checks can detect the failure. + if let Err(e) = result { + tracing::error!("Sync loop exited with error: {e:?}"); + status.store(StatusType::CriticalError); + + return Err(e); + } + + result + }) + } +} + +/// A clone-safe *read-only* view onto a running [`NodeBackedChainIndex`]. +/// +/// Designed for concurrent efficiency. +/// +/// [`NodeBackedChainIndexSubscriber`] can safely be cloned and dropped freely. +#[derive(Clone, Debug)] +pub struct NodeBackedChainIndexSubscriber { + mempool: mempool::MempoolSubscriber, + non_finalized_state: std::sync::Arc>, + finalized_state: finalised_state::reader::DbReader, + status: AtomicStatus, +} + +impl NodeBackedChainIndexSubscriber { + fn source(&self) -> &Source { + &self.non_finalized_state.source + } + + /// Returns the combined status of all chain index components. 
+ pub fn combined_status(&self) -> StatusType { + let finalized_status = self.finalized_state.status(); + let mempool_status = self.mempool.status(); + let combined_status = self + .status + .load() + .combine(finalized_status) + .combine(mempool_status); + self.status.store(combined_status); + combined_status + } + + async fn get_fullblock_bytes_from_node( + &self, + id: HashOrHeight, + ) -> Result>, ChainIndexError> { + self.source() + .get_block(id) + .await + .map_err(ChainIndexError::backing_validator)? + .map(|bk| { + bk.zcash_serialize_to_vec() + .map_err(ChainIndexError::backing_validator) + }) + .transpose() + } + + async fn get_indexed_block_height( + &self, + snapshot: &NonfinalizedBlockCacheSnapshot, + hash: types::BlockHash, + ) -> Result, ChainIndexError> { + // ChainIndex step 2: + match snapshot.blocks.get(&hash).cloned() { + Some(block) => Ok(snapshot + // ChainIndex step 3: + .heights_to_hashes + .values() + .find(|h| **h == hash) + // Canonical height is None for blocks not on the best chain + .map(|_| block.index().height())), + None => self + // ChainIndex step 4: + .finalized_state + .get_block_height(hash) + .await + .map_err(|e| ChainIndexError::database_hole(hash, Some(Box::new(e)))), + } + } + + /** + Searches finalized and non-finalized chains for any blocks containing the transaction. + Ordered with finalized blocks first. + + WARNING: there might be multiple chains, each containing a block with the transaction. + */ + async fn blocks_containing_transaction<'snapshot, 'self_lt, 'iter>( + &'self_lt self, + snapshot: &'snapshot NonfinalizedBlockCacheSnapshot, + txid: [u8; 32], + ) -> Result + use<'iter, Source>, FinalisedStateError> + where + 'snapshot: 'iter, + 'self_lt: 'iter, + { + let finalized_blocks_containing_transaction = match self + .finalized_state + .get_tx_location(&types::TransactionHash(txid)) + .await? + { + Some(tx_location) => { + self.finalized_state + .get_chain_block(crate::Height(tx_location.block_height())) + .await? 
+ } + + None => None, + } + .into_iter(); + let non_finalized_blocks_containing_transaction = + snapshot.blocks.values().filter_map(move |block| { + block.transactions().iter().find_map(|transaction| { + if transaction.txid().0 == txid { + Some(block.clone()) + } else { + None + } + }) + }); + Ok(finalized_blocks_containing_transaction + .chain(non_finalized_blocks_containing_transaction)) + } + + async fn get_block_height_passthrough( + &self, + snapshot: &NonfinalizedBlockCacheSnapshot, + hash: types::BlockHash, + ) -> Result, ChainIndexError> { + //ChainIndex step 5: + match self + .source() + .get_block(HashOrHeight::Hash(hash.into())) + .await + { + Ok(Some(block)) => { + // At this point, we know that + // the block is in the VALIDATOR. + match block.coinbase_height() { + None => { + // the block is in the VALIDATOR. but doesnt have a height. That would imply a bug. + Err(ChainIndexError::validator_data_error_block_coinbase_height_missing()) + } + Some(height) => { + // The VALIDATOR returned a block with a height. 
+ // However, there is as of yet no guaranteed the Block is FINALIZED + if height <= snapshot.validator_finalized_height { + Ok(Some(types::Height::from(height))) + } else { + // non-finalized block + // no passthrough + Ok(None) + } + } + } + } + Ok(None) => { + // the block is neither in the INDEXER nor VALIDATOR + Ok(None) + } + Err(e) => Err(ChainIndexError::backing_validator(e)), + } + } + + // Get the height of the mempool + fn get_mempool_height( + &self, + snapshot: &NonfinalizedBlockCacheSnapshot, + ) -> Option { + snapshot + .blocks + .iter() + .find(|(hash, _block)| **hash == self.mempool.mempool_chain_tip()) + .map(|(_hash, block)| block.height()) + } + + fn mempool_branch_id(&self, snapshot: &NonfinalizedBlockCacheSnapshot) -> Option { + self.get_mempool_height(snapshot).and_then(|height| { + ConsensusBranchId::current( + &self.non_finalized_state.network, + zebra_chain::block::Height::from(height + 1), + ) + .map(u32::from) + }) + } +} + +impl Status for NodeBackedChainIndexSubscriber { + fn status(&self) -> StatusType { + self.combined_status() + } +} + +impl ChainIndex for NodeBackedChainIndexSubscriber { + type Snapshot = Arc; + type Error = ChainIndexError; + + /// Takes a snapshot of the non_finalized state. All NFS-interfacing query + /// methods take a snapshot. The query will check the index + /// it existed at the moment the snapshot was taken. + fn snapshot_nonfinalized_state(&self) -> Self::Snapshot { + self.non_finalized_state.get_snapshot() + } + + /// Returns Some(Height) for the given block hash *if* it is currently in the best chain. + /// + /// Returns None if the specified block is not in the best chain or is not found. + /// + /// Used for hash based block lookup (random access). 
+ async fn get_block_height( + &self, + snapshot: &Self::Snapshot, + hash: types::BlockHash, + ) -> Result, Self::Error> { + // ChainIndex step 1: Skip + // mempool blocks have no canon height + // todo: possible efficiency boost by checking mempool for a negative? + + // ChainIndex steps 2-4: + match self.get_indexed_block_height(snapshot, hash).await? { + Some(h) => Ok(Some(h)), + None => self.get_block_height_passthrough(snapshot, hash).await, // ChainIndex step 5 + } + } + + /// Given inclusive start and end heights, stream all blocks + /// between the given heights. + /// Returns None if the specified start height + /// is greater than the snapshot's tip and greater + /// than the validator's finalized height (100 blocks below tip) + fn get_block_range( + &self, + snapshot: &Self::Snapshot, + start: types::Height, + end: std::option::Option, + ) -> Option, Self::Error>>> { + // ChainIndex step 1: Skip + // mempool blocks have no canon height + + // We can serve blocks above where the validator has finalized + // only if we have those blocks in our nonfinalized snapshot + let max_servable_height = snapshot + .validator_finalized_height + .max(snapshot.best_tip.height); + // The lower of the end of the provided range, and the highest block we can serve + let end = end.unwrap_or(max_servable_height).min(max_servable_height); + // Serve as high as we can, or to the provided end if it's lower + if start <= max_servable_height.min(end) { + Some( + futures::stream::iter((start.0)..=(end.0)).then(move |height| async move { + // For blocks above validator_finalized_height, it's not reorg-safe to get blocks by height. It is reorg-safe to get blocks by hash. What we need to do in this case is use our snapshot index to look up the hash at a given height, and then get that hash from the validator. + // This is why we now look in the index. 
+ match self + .finalized_state + .get_block_hash(types::Height(height)) + .await + { + Ok(Some(hash)) => { + return self + .get_fullblock_bytes_from_node(HashOrHeight::Hash(hash.into())) + .await? + .ok_or(ChainIndexError::database_hole(hash, None)) + } + Err(e) => Err(ChainIndexError { + kind: ChainIndexErrorKind::InternalServerError, + message: "".to_string(), + source: Some(Box::new(e)), + }), + Ok(None) => { + match snapshot.get_chainblock_by_height(&types::Height(height)) { + Some(block) => { + return self + .get_fullblock_bytes_from_node(HashOrHeight::Hash( + (*block.hash()).into(), + )) + .await? + .ok_or(ChainIndexError::database_hole(block.hash(), None)) + } + None => self + // usually getting by height is not reorg-safe, but here, height is known to be below or equal to validator_finalized_height. + .get_fullblock_bytes_from_node(HashOrHeight::Height( + zebra_chain::block::Height(height), + )) + .await? + .ok_or(ChainIndexError::database_hole(height, None)), + } + } + } + }), + ) + } else { + None + } + } + + /// Returns the *compact* block for the given height. + /// + /// Returns `None` if the specified `height` is greater than the snapshot's tip. + /// + /// ## Pool filtering + /// + /// - `pool_types` controls which per-transaction components are populated. + /// - Transactions that contain no elements in any requested pool are omitted from `vtx`. + /// The original transaction index is preserved in `CompactTx.index`. + /// - `PoolTypeFilter::default()` preserves the legacy behaviour (only Sapling and Orchard + /// components are populated). 
+ /// + /// Returns None if the specified height + /// is greater than the snapshot's tip + async fn get_compact_block( + &self, + nonfinalized_snapshot: &Self::Snapshot, + height: types::Height, + pool_types: PoolTypeFilter, + ) -> Result, Self::Error> { + if height <= nonfinalized_snapshot.best_tip.height { + Ok(Some( + match nonfinalized_snapshot.get_chainblock_by_height(&height) { + Some(block) => compact_block_with_pool_types( + block.to_compact_block(), + &pool_types.to_pool_types_vector(), + ), + None => match self + .finalized_state + .get_compact_block(height, pool_types) + .await + { + Ok(block) => block, + Err(e) => { + return Err(ChainIndexError::database_hole(height, Some(Box::new(e)))) + } + }, + }, + )) + } else { + Ok(None) + } + } + + /// Streams *compact* blocks for an inclusive height range. + /// + /// Returns `None` if either requested height is greater than the snapshot's tip. + /// + /// - The stream covers `[start_height, end_height]` (inclusive). + /// - If `start_height <= end_height` the stream is ascending; otherwise it is descending. + /// + /// ## Pool filtering + /// + /// - `pool_types` controls which per-transaction components are populated. + /// - Transactions that contain no elements in any requested pool are omitted from `vtx`. + /// The original transaction index is preserved in `CompactTx.index`. + /// - `PoolTypeFilter::default()` preserves the legacy behaviour (only Sapling and Orchard + /// components are populated). 
+ #[allow(clippy::type_complexity)] + async fn get_compact_block_stream( + &self, + nonfinalized_snapshot: &Self::Snapshot, + start_height: types::Height, + end_height: types::Height, + pool_types: PoolTypeFilter, + ) -> Result, Self::Error> { + let chain_tip_height = self.best_chaintip(nonfinalized_snapshot).await?.height; + + if start_height > chain_tip_height || end_height > chain_tip_height { + return Ok(None); + } + + // The nonfinalized cache holds the tip block plus the previous 99 blocks (100 total), + // so the lowest possible cached height is `tip - 99` (saturating at 0). + let lowest_nonfinalized_height = types::Height(chain_tip_height.0.saturating_sub(99)); + + let is_ascending = start_height <= end_height; + + let pool_types_vector = pool_types.to_pool_types_vector(); + + // Pre-create any finalized-state stream(s) we will need so that errors are returned + // from this method (not deferred into the spawned task). + let finalized_stream: Option = if is_ascending { + if start_height < lowest_nonfinalized_height { + let finalized_end_height = types::Height(std::cmp::min( + end_height.0, + lowest_nonfinalized_height.0.saturating_sub(1), + )); + + if start_height <= finalized_end_height { + Some( + self.finalized_state + .get_compact_block_stream( + start_height, + finalized_end_height, + pool_types.clone(), + ) + .await + .map_err(ChainIndexError::from)?, + ) + } else { + None + } + } else { + None + } + // Serve in reverse order. 
+ } else if end_height < lowest_nonfinalized_height { + let finalized_start_height = if start_height < lowest_nonfinalized_height { + start_height + } else { + types::Height(lowest_nonfinalized_height.0.saturating_sub(1)) + }; + + Some( + self.finalized_state + .get_compact_block_stream( + finalized_start_height, + end_height, + pool_types.clone(), + ) + .await + .map_err(ChainIndexError::from)?, + ) + } else { + None + }; + + let nonfinalized_snapshot = nonfinalized_snapshot.clone(); + // TODO: Investigate whether channel size should be changed, added to config, or set dynamically base on resources. + let (channel_sender, channel_receiver) = tokio::sync::mpsc::channel(128); + + tokio::spawn(async move { + if is_ascending { + // 1) Finalized segment (if any), ascending. + if let Some(mut finalized_stream) = finalized_stream { + while let Some(stream_item) = finalized_stream.next().await { + if channel_sender.send(stream_item).await.is_err() { + return; + } + } + } + + // 2) Nonfinalized segment, ascending. + let nonfinalized_start_height = + types::Height(std::cmp::max(start_height.0, lowest_nonfinalized_height.0)); + + for height_value in nonfinalized_start_height.0..=end_height.0 { + let Some(indexed_block) = nonfinalized_snapshot + .get_chainblock_by_height(&types::Height(height_value)) + else { + let _ = channel_sender + .send(Err(tonic::Status::internal(format!( + "Internal error, missing nonfinalized block at height [{height_value}].", + )))) + .await; + return; + }; + let compact_block = compact_block_with_pool_types( + indexed_block.to_compact_block(), + &pool_types_vector, + ); + if channel_sender.send(Ok(compact_block)).await.is_err() { + return; + } + } + } else { + // 1) Nonfinalized segment, descending. 
+ if start_height >= lowest_nonfinalized_height { + let nonfinalized_end_height = + types::Height(std::cmp::max(end_height.0, lowest_nonfinalized_height.0)); + + for height_value in (nonfinalized_end_height.0..=start_height.0).rev() { + let Some(indexed_block) = nonfinalized_snapshot + .get_chainblock_by_height(&types::Height(height_value)) + else { + let _ = channel_sender + .send(Err(tonic::Status::internal(format!( + "Internal error, missing nonfinalized block at height [{height_value}].", + )))) + .await; + return; + }; + let compact_block = compact_block_with_pool_types( + indexed_block.to_compact_block(), + &pool_types_vector, + ); + if channel_sender.send(Ok(compact_block)).await.is_err() { + return; + } + } + } + + // 2) Finalized segment (if any), descending. + if let Some(mut finalized_stream) = finalized_stream { + while let Some(stream_item) = finalized_stream.next().await { + if channel_sender.send(stream_item).await.is_err() { + return; + } + } + } + } + }); + + Ok(Some(CompactBlockStream::new(channel_receiver))) + } + + /// For a given block, + /// find its newest main-chain ancestor, + /// or the block itself if it is on the main-chain. + /// Returns Ok(None) if no fork point found. This is not an error, + /// as zaino does not guarentee knowledge of all sidechain data. + async fn find_fork_point( + &self, + snapshot: &Self::Snapshot, + hash: &types::BlockHash, + ) -> Result, Self::Error> { + // ChainIndex step 1: Skip + // mempool blocks have no canon height, guaranteed to return None + // todo: possible efficiency boost by checking mempool for a negative? + + // ChainIndex step 2: + match snapshot.as_ref().get_chainblock_by_hash(hash) { + Some(block) => { + // At this point, we know that + // The block is non-FINALIZED in the INDEXER + // ChainIndex step 3: + if snapshot.heights_to_hashes.get(&block.height()) == Some(block.hash()) { + // The block is in the best chain. 
+ Ok(Some((*block.hash(), block.height()))) + } else { + // Otherwise, it's non-best chain! Grab its parent, and recurse + Box::pin(self.find_fork_point(snapshot, block.index().parent_hash())).await + // gotta pin recursive async functions to prevent infinite-sized + // Future-implementing types + } + } + None => { + // At this point, we know that + // the block is NOT non-FINALIZED in the INDEXER. + // ChainIndex step 4 + match self.finalized_state.get_block_height(*hash).await { + Ok(Some(height)) => { + // the block is FINALIZED in the INDEXER + Ok(Some((*hash, height))) + } + Err(e) => Err(ChainIndexError::database_hole(hash, Some(Box::new(e)))), + Ok(None) => { + // At this point, we know that + // the block is NOT FINALIZED in the INDEXER + // (NEITHER is it non-FINALIZED in the INDEXER) + + // Now, we ask the VALIDATOR. + // ChainIndex step 5 + match self + .source() + .get_block(HashOrHeight::Hash(zebra_chain::block::Hash::from(*hash))) + .await + { + Ok(Some(block)) => { + // At this point, we know that + // the block is in the VALIDATOR. + match block.coinbase_height() { + None => { + // the block is in the VALIDATOR. but doesnt have a height. That would imply a bug. + Err(ChainIndexError::validator_data_error_block_coinbase_height_missing()) + } + Some(height) => { + // The VALIDATOR returned a block with a height. + // However, there is as of yet no guaranteed the Block is FINALIZED + if height <= snapshot.validator_finalized_height { + Ok(Some(( + types::BlockHash::from(block.hash()), + types::Height::from(height), + ))) + } else { + // non-finalized block + // no passthrough + Ok(None) + } + } + } + } + + Ok(None) => { + // At this point, we know that + // the block is NOT FINALIZED in the VALIDATOR. + // Return Ok(None) = no block found. 
+ Ok(None) + } + Err(e) => Err(ChainIndexError::backing_validator(e)), + } + } + } + } + } + } + + /// Returns the block commitment tree data by hash + async fn get_treestate( + &self, + // currently not implemented internally, fetches data from validator. + // as this looks up the block by hash, and cares not if the + // block is on the main chain or not, this is safe to pass through + // even if the target block is non-finalized + hash: &types::BlockHash, + ) -> Result<(Option>, Option>), Self::Error> { + match self.source().get_treestate(*hash).await { + Ok(resp) => Ok(resp), + Err(e) => Err(ChainIndexError { + kind: ChainIndexErrorKind::InternalServerError, + message: "failed to fetch treestate from validator".to_string(), + source: Some(Box::new(e)), + }), + } + } + + /// given a transaction id, returns the transaction + /// and the consensus branch ID for the block the transaction + /// is in + async fn get_raw_transaction( + &self, + snapshot: &Self::Snapshot, + txid: &types::TransactionHash, + ) -> Result, Option)>, Self::Error> { + // ChainIndex step 1 + if let Some(mempool_tx) = self + .mempool + .get_transaction(&mempool::MempoolKey { + txid: txid.to_string(), + }) + .await + { + let bytes = mempool_tx.serialized_tx.as_ref().as_ref().to_vec(); + let mempool_branch_id = self.mempool_branch_id(snapshot); + + return Ok(Some((bytes, mempool_branch_id))); + } + + let Some((transaction, location)) = self + .source() + .get_transaction(*txid) + .await + .map_err(ChainIndexError::backing_validator)? + else { + return Ok(None); + }; + // as the reorg process cannot modify a transaction + // it's safe to serve nonfinalized state directly here + let height = match location { + GetTransactionLocation::BestChain(height) => height, + GetTransactionLocation::NonbestChain => { + // if the tranasction isn't on the best chain + // check our indexes. 
We need to find out the height from our index + // to determine the consensus branch ID + match self + .blocks_containing_transaction(snapshot, txid.0) + .await? + .next() + { + Some(block) => block.index.height.into(), + // If we don't have a block containing the transaction + // locally and the transaction's not on the validator's + // best chain, we can't determine its consensus branch ID + None => return Ok(None), + } + } + // We've already checked the mempool. Should be unreachable? + // todo: error here? + GetTransactionLocation::Mempool => return Ok(None), + }; + + Ok(Some(( + zebra_chain::transaction::SerializedTransaction::from(transaction) + .as_ref() + .to_vec(), + ConsensusBranchId::current(&self.non_finalized_state.network, height).map(u32::from), + ))) + } + + /// Given a transaction ID, returns all known blocks containing this transaction + /// + /// If the transaction is in the mempool, it will be in the `BestChainLocation` + /// if the mempool and snapshot are up-to-date, and the `NonBestChainLocation` set + /// if the snapshot is out-of-date compared to the mempool + async fn get_transaction_status( + &self, + snapshot: &Self::Snapshot, + txid: &types::TransactionHash, + ) -> Result<(Option, HashSet), ChainIndexError> { + let blocks_containing_transaction = self + .blocks_containing_transaction(snapshot, txid.0) + .await? + .collect::>(); + let Some(start_of_nonfinalized) = snapshot.heights_to_hashes.keys().min() else { + return Err(ChainIndexError::database_hole("no blocks", None)); + }; + let mut best_chain_block = blocks_containing_transaction + .iter() + .find(|block| { + snapshot.heights_to_hashes.get(&block.height()) == Some(block.hash()) + || block.height() < *start_of_nonfinalized + // this block is either in the best chain ``heights_to_hashes`` or finalized. 
+ }) + .map(|block| BestChainLocation::Block(*block.hash(), block.height())); + let mut non_best_chain_blocks: HashSet = + blocks_containing_transaction + .iter() + .filter(|block| { + snapshot.heights_to_hashes.get(&block.height()) != Some(block.hash()) + && block.height() >= *start_of_nonfinalized + }) + .map(|block| NonBestChainLocation::Block(*block.hash(), block.height())) + .collect(); + let in_mempool = self + .mempool + .contains_txid(&mempool::MempoolKey { + txid: txid.to_string(), + }) + .await; + if in_mempool { + let mempool_tip_hash = self.mempool.mempool_chain_tip(); + if mempool_tip_hash == snapshot.best_tip.blockhash { + if best_chain_block.is_some() { + return Err(ChainIndexError { + kind: ChainIndexErrorKind::InvalidSnapshot, + message: + "Best chain and up-to-date mempool both contain the same transaction" + .to_string(), + source: None, + }); + } else { + best_chain_block = + Some(BestChainLocation::Mempool(snapshot.best_tip.height + 1)); + } + } else { + // the best chain and the mempool have divergent tip hashes + // get a new snapshot and use it to find the height of the mempool + let target_height = self + .non_finalized_state + .get_snapshot() + .blocks + .iter() + .find_map(|(hash, block)| { + if *hash == mempool_tip_hash { + Some(block.height() + 1) + // found the block that is the tip that the mempool is hanging on to + } else { + None + } + }); + non_best_chain_blocks.insert(NonBestChainLocation::Mempool(target_height)); + } + } + + // If we haven't found a block on the best chain, + // try passthrough + if best_chain_block.is_none() { + if let Some((_transaction, GetTransactionLocation::BestChain(height))) = self + .source() + .get_transaction(*txid) + .await + .map_err(ChainIndexError::backing_validator)? + { + if height <= snapshot.validator_finalized_height { + if let Some(block) = self + .source() + .get_block(HashOrHeight::Height(height)) + .await + .map_err(ChainIndexError::backing_validator)? 
+ { + best_chain_block = + Some(BestChainLocation::Block(block.hash().into(), height.into())); + } + } + } + } + + Ok((best_chain_block, non_best_chain_blocks)) + } + + /// Returns all txids currently in the mempool. + async fn get_mempool_txids(&self) -> Result, Self::Error> { + self.mempool + .get_mempool() + .await + .into_iter() + .map(|(txid_key, _)| { + TransactionHash::from_hex(&txid_key.txid) + .map_err(ChainIndexError::backing_validator) + }) + .collect::>() + } + + /// Returns all transactions currently in the mempool, filtered by `exclude_list`. + /// + /// The `exclude_list` may contain shortened transaction ID hex prefixes (client-endian). + /// The transaction IDs in the Exclude list can be shortened to any number of bytes to make the request + /// more bandwidth-efficient; if two or more transactions in the mempool + /// match a shortened txid, they are all sent (none is excluded). Transactions + /// in the exclude list that don't exist in the mempool are ignored. + async fn get_mempool_transactions( + &self, + exclude_list: Vec, + ) -> Result>, Self::Error> { + // Use the mempool's own filtering (it already handles client-endian shortened prefixes). + let pairs: Vec<(mempool::MempoolKey, mempool::MempoolValue)> = + self.mempool.get_filtered_mempool(exclude_list).await; + + // Transform to the Vec> that the trait requires. + let bytes: Vec> = pairs + .into_iter() + .map(|(_, v)| v.serialized_tx.as_ref().as_ref().to_vec()) + .collect(); + + Ok(bytes) + } + + /// Returns a stream of mempool transactions, ending the stream when the chain tip block hash + /// changes (a new block is mined or a reorg occurs). + /// + /// If a snapshot is given and the chain tip has changed from the given spanshot, returns None. 
+ fn get_mempool_stream( + &self, + snapshot: Option<&Self::Snapshot>, + ) -> Option, Self::Error>>> { + let expected_chain_tip = snapshot.map(|snapshot| snapshot.best_tip.blockhash); + let mut subscriber = self.mempool.clone(); + + match subscriber + .get_mempool_stream(expected_chain_tip) + .now_or_never() + { + Some(Ok((in_rx, _handle))) => { + let (out_tx, out_rx) = + tokio::sync::mpsc::channel::, ChainIndexError>>(32); + + tokio::spawn(async move { + let mut in_stream = tokio_stream::wrappers::ReceiverStream::new(in_rx); + while let Some(item) = in_stream.next().await { + match item { + Ok((_key, value)) => { + let _ = out_tx + .send(Ok(value.serialized_tx.as_ref().as_ref().to_vec())) + .await; + } + Err(e) => { + let _ = out_tx + .send(Err(ChainIndexError::child_process_status_error( + "mempool", e, + ))) + .await; + break; + } + } + } + }); + + Some(tokio_stream::wrappers::ReceiverStream::new(out_rx)) + } + Some(Err(crate::error::MempoolError::IncorrectChainTip { .. })) => None, + Some(Err(e)) => { + let (out_tx, out_rx) = + tokio::sync::mpsc::channel::, ChainIndexError>>(1); + let _ = out_tx.try_send(Err(e.into())); + Some(tokio_stream::wrappers::ReceiverStream::new(out_rx)) + } + None => { + // Should not happen because the inner tip check is synchronous, but fail safe. 
+ let (out_tx, out_rx) = + tokio::sync::mpsc::channel::, ChainIndexError>>(1); + let _ = out_tx.try_send(Err(ChainIndexError::child_process_status_error( + "mempool", + crate::error::StatusError { + server_status: crate::StatusType::RecoverableError, + }, + ))); + Some(tokio_stream::wrappers::ReceiverStream::new(out_rx)) + } + } + } + + /// Returns Information about the mempool state: + /// - size: Current tx count + /// - bytes: Sum of all tx sizes + /// - usage: Total memory usage for the mempool + async fn get_mempool_info(&self) -> MempoolInfo { + self.mempool.get_mempool_info().await + } + + async fn best_chaintip( + &self, + nonfinalized_snapshot: &Self::Snapshot, + ) -> Result { + Ok( + if nonfinalized_snapshot.validator_finalized_height + > nonfinalized_snapshot.best_tip.height + { + BestTip { + height: nonfinalized_snapshot.validator_finalized_height, + blockhash: self + .source() + // TODO: do something more efficient than getting the whole block + .get_block(HashOrHeight::Height( + nonfinalized_snapshot.validator_finalized_height.into(), + )) + .await + .map_err(|e| { + ChainIndexError::database_hole( + nonfinalized_snapshot.validator_finalized_height, + Some(Box::new(e)), + ) + })? + .ok_or(ChainIndexError::database_hole( + nonfinalized_snapshot.validator_finalized_height, + None, + ))? 
+ .hash() + .into(), + } + } else { + nonfinalized_snapshot.best_tip + }, + ) + } +} + +impl NonFinalizedSnapshot for Arc +where + T: NonFinalizedSnapshot, +{ + fn get_chainblock_by_hash(&self, target_hash: &types::BlockHash) -> Option<&IndexedBlock> { + self.as_ref().get_chainblock_by_hash(target_hash) + } + + fn get_chainblock_by_height(&self, target_height: &types::Height) -> Option<&IndexedBlock> { + self.as_ref().get_chainblock_by_height(target_height) + } +} + +/// A snapshot of the non-finalized state, for consistent queries +pub trait NonFinalizedSnapshot { + /// Hash -> block + fn get_chainblock_by_hash(&self, target_hash: &types::BlockHash) -> Option<&IndexedBlock>; + /// Height -> block + fn get_chainblock_by_height(&self, target_height: &types::Height) -> Option<&IndexedBlock>; +} + +impl NonFinalizedSnapshot for NonfinalizedBlockCacheSnapshot { + fn get_chainblock_by_hash(&self, target_hash: &types::BlockHash) -> Option<&IndexedBlock> { + self.blocks.iter().find_map(|(hash, chainblock)| { + if hash == target_hash { + Some(chainblock) + } else { + None + } + }) + } + fn get_chainblock_by_height(&self, target_height: &types::Height) -> Option<&IndexedBlock> { + self.heights_to_hashes.iter().find_map(|(height, hash)| { + if height == target_height { + self.get_chainblock_by_hash(hash) + } else { + None + } + }) + } +} diff --git a/zaino-state/src/chain_index/encoding.rs b/zaino-state/src/chain_index/encoding.rs new file mode 100644 index 000000000..621b8947d --- /dev/null +++ b/zaino-state/src/chain_index/encoding.rs @@ -0,0 +1,501 @@ +//! Holds traits and primitive functions for Zaino's Serialisation schema. +#![allow(dead_code)] + +use core::iter::FromIterator; +use core2::io::{self, Read, Write}; + +/// Wire-format version tags. +pub mod version { + /// Tag byte for data encoded with *v1* layout. + pub const V1: u8 = 1; + + /// Tag byte for data encoded with *v2* layout. + pub const V2: u8 = 2; + + // Add new versions as required. 
+ // pub const V3: u8 = 3; +} + +/* ────────────────────────── Zaino Serialiser Traits ─────────────────────────── */ +/// # Zaino wire-format: one-byte version tag +/// +/// ## Quick summary +/// +/// ┌─ byte 0 ─┬──────────── body depends on that tag ────────────┐ +/// │ version │ (little-endian by default) │ +/// └──────────┴──────────────────────────────────────────────────┘ +/// +/// * `Self::VERSION` = the tag **this build *writes***. +/// * On **read**, we peek at the tag: +/// * if it equals `Self::VERSION` call `decode_latest`; +/// * otherwise fall back to the relevant `decode_vN` helper +/// (defaults to “unsupported” unless overwritten). +/// +/// ## Update guide. +/// +/// ### Initial release (`VERSION = 1`) +/// 1. `pub struct TxV1 { … }` // layout frozen forever +/// 2. `impl ZainoVersionedSerde for TxV1` +/// * `const VERSION = 1` +/// * `encode_body` – **v1** layout +/// * `decode_v1` – parses **v1** bytes +/// * `decode_latest` - wrapper for `Self::decode_v1` +/// +/// ### Bump to v2 +/// 1. `pub struct TxV2 { … }` // new “current” layout +/// 2. `impl From for TxV2` // loss-less upgrade path +/// 3. `impl ZainoVersionedSerde for TxV2` +/// * `const VERSION = 2` +/// * `encode_body` – **v2** layout +/// * `decode_v1` – `TxV1::decode_latest(r).map(Self::from)` +/// * `decode_v2` – parses **v2** bytes +/// * `decode_latest` - wrapper for `Self::decode_v2` +/// +/// ### Next bumps (v3, v4, …, vN) +/// * Create struct for new version. +/// * Set `const VERSION = N`. +/// * Add the `decode_vN` trait method and extend the `match` table inside **this trait** when a brand-new tag first appears. +/// * Implement `decode_vN` for N’s layout. +/// * Update `decode_latest` to wrap `decode_vN`. +/// * Implement `decode_v(N-1)`, `decode_v(N-2)`, ..., `decode_v(N-K)` for all previous versions. 
+/// +/// ## Mandatory items per implementation +/// * `const VERSION` +/// * `encode_body` +/// * `decode_vN` — **must** parse bytes for version N, where N = [`Self::VERSION`]. +/// * `decode_latest` — **must** parse `Self::VERSION` bytes. +/// +/// Historical helpers (`decode_v1`, `decode_v2`, …) must be implemented +/// for compatibility with historical versions +pub trait ZainoVersionedSerde: Sized { + /// Tag this build writes. + const VERSION: u8; + + /*──────────── encoding ────────────*/ + + /// Encode **only** the body (no tag). + fn encode_body(&self, w: &mut W) -> io::Result<()>; + + /*──────────── mandatory decoder for *this* version ────────────*/ + + /// Parses a body whose tag equals `Self::VERSION`. + /// + /// The trait implementation must wrap `decode_vN` where N = [`Self::VERSION`] + fn decode_latest(r: &mut R) -> io::Result; + + /*──────────── version decoders ────────────*/ + // Add more versions here when required. + + #[inline(always)] + #[allow(unused)] + /// Decode an older v1 version + fn decode_v1(r: &mut R) -> io::Result { + Err(io::Error::new(io::ErrorKind::InvalidData, "v1 unsupported")) + } + #[inline(always)] + #[allow(unused)] + /// Decode an older v2 version + fn decode_v2(r: &mut R) -> io::Result { + Err(io::Error::new(io::ErrorKind::InvalidData, "v2 unsupported")) + } + + /*──────────── router ────────────*/ + + #[inline] + /// Decode the body, dispatcing to the appropriate decode_vx function + fn decode_body(r: &mut R, version_tag: u8) -> io::Result { + if version_tag == Self::VERSION { + Self::decode_latest(r) + } else { + match version_tag { + version::V1 => Self::decode_v1(r), + version::V2 => Self::decode_v2(r), + _ => Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("unsupported Zaino version tag {version_tag}"), + )), + } + } + } + + /*──────────── User entry points ────────────*/ + + #[inline] + /// The expected start point. 
Read the version tag, then decode the rest + fn serialize(&self, mut w: W) -> io::Result<()> { + w.write_all(&[Self::VERSION])?; + self.encode_body(&mut w) + } + + #[inline] + /// Deserialises struct. + fn deserialize(mut r: R) -> io::Result { + let mut tag = [0u8; 1]; + r.read_exact(&mut tag)?; + Self::decode_body(&mut r, tag[0]) + } + + /// Serialize into a `Vec` (tag + body). + #[inline] + fn to_bytes(&self) -> io::Result> { + let mut buf = Vec::new(); + self.serialize(&mut buf)?; + Ok(buf) + } + + /// Reconstruct from a `&[u8]` (expects tag + body). + #[inline] + fn from_bytes(data: &[u8]) -> io::Result { + let mut cursor = core2::io::Cursor::new(data); + Self::deserialize(&mut cursor) + } +} + +/// Defines the fixed encoded length of a database record. +pub trait FixedEncodedLen { + /// the fixed encoded length of a database record *not* incuding the version byte. + const ENCODED_LEN: usize; + + /// Length of version tag in bytes. + const VERSION_TAG_LEN: usize = 1; + + /// the fixed encoded length of a database record *incuding* the version byte. + const VERSIONED_LEN: usize = Self::ENCODED_LEN + Self::VERSION_TAG_LEN; +} + +/* ──────────────────────────── CompactSize helpers ────────────────────────────── */ +/// A zcash/bitcoin CompactSize, a form of variable-length integer +pub struct CompactSize; + +/// The largest value representable as a CompactSize +pub const MAX_COMPACT_SIZE: u32 = 0x0200_0000; + +impl CompactSize { + /// Reads an integer encoded in compact form. 
+ pub fn read(mut reader: R) -> io::Result { + let mut flag_bytes = [0; 1]; + reader.read_exact(&mut flag_bytes)?; + let flag = flag_bytes[0]; + + let result = if flag < 253 { + Ok(flag as u64) + } else if flag == 253 { + let mut bytes = [0; 2]; + reader.read_exact(&mut bytes)?; + match u16::from_le_bytes(bytes) { + n if n < 253 => Err(io::Error::new( + io::ErrorKind::InvalidInput, + "non-canonical CompactSize", + )), + n => Ok(n as u64), + } + } else if flag == 254 { + let mut bytes = [0; 4]; + reader.read_exact(&mut bytes)?; + match u32::from_le_bytes(bytes) { + n if n < 0x10000 => Err(io::Error::new( + io::ErrorKind::InvalidInput, + "non-canonical CompactSize", + )), + n => Ok(n as u64), + } + } else { + let mut bytes = [0; 8]; + reader.read_exact(&mut bytes)?; + match u64::from_le_bytes(bytes) { + n if n < 0x100000000 => Err(io::Error::new( + io::ErrorKind::InvalidInput, + "non-canonical CompactSize", + )), + n => Ok(n), + } + }?; + + match result { + s if s > ::from(MAX_COMPACT_SIZE) => Err(io::Error::new( + io::ErrorKind::InvalidInput, + "CompactSize too large", + )), + s => Ok(s), + } + } + + /// Reads an integer encoded in compact form and performs checked conversion + /// to the target type. + pub fn read_t>(mut reader: R) -> io::Result { + let n = Self::read(&mut reader)?; + ::try_from(n).map_err(|_| { + io::Error::new( + io::ErrorKind::InvalidInput, + "CompactSize value exceeds range of target type.", + ) + }) + } + + /// Writes the provided `usize` value to the provided Writer in compact form. 
+ pub fn write(mut writer: W, size: usize) -> io::Result<()> { + match size { + s if s < 253 => writer.write_all(&[s as u8]), + s if s <= 0xFFFF => { + writer.write_all(&[253])?; + writer.write_all(&(s as u16).to_le_bytes()) + } + s if s <= 0xFFFFFFFF => { + writer.write_all(&[254])?; + writer.write_all(&(s as u32).to_le_bytes()) + } + s => { + writer.write_all(&[255])?; + writer.write_all(&(s as u64).to_le_bytes()) + } + } + } + + /// Returns the number of bytes needed to encode the given size in compact form. + pub fn serialized_size(size: usize) -> usize { + match size { + s if s < 253 => 1, + s if s <= 0xFFFF => 3, + s if s <= 0xFFFFFFFF => 5, + _ => 9, + } + } +} + +/* ───────────────────────────── integer helpers ───────────────────────────── */ + +/// Reads a u8. +#[inline] +pub fn read_u8(mut r: R) -> io::Result { + let mut buf = [0u8; 1]; + r.read_exact(&mut buf)?; + Ok(buf[0]) +} + +/// Writes a u8. +#[inline] +pub fn write_u8(mut w: W, v: u8) -> io::Result<()> { + w.write_all(&[v]) +} + +/// Reads a u16 in LE format. +#[inline] +pub fn read_u16_le(mut r: R) -> io::Result { + let mut buf = [0u8; 2]; + r.read_exact(&mut buf)?; + Ok(u16::from_le_bytes(buf)) +} + +/// Reads a u16 in BE format. +#[inline] +pub fn read_u16_be(mut r: R) -> io::Result { + let mut buf = [0u8; 2]; + r.read_exact(&mut buf)?; + Ok(u16::from_be_bytes(buf)) +} + +/// Writes a u16 in LE format. +#[inline] +pub fn write_u16_le(mut w: W, v: u16) -> io::Result<()> { + w.write_all(&v.to_le_bytes()) +} + +/// Writes a u16 in BE format. +#[inline] +pub fn write_u16_be(mut w: W, v: u16) -> io::Result<()> { + w.write_all(&v.to_be_bytes()) +} + +/// Reads a u32 in LE format. +#[inline] +pub fn read_u32_le(mut r: R) -> io::Result { + let mut buf = [0u8; 4]; + r.read_exact(&mut buf)?; + Ok(u32::from_le_bytes(buf)) +} + +/// Reads a u32 in BE format. 
+#[inline] +pub fn read_u32_be(mut r: R) -> io::Result { + let mut buf = [0u8; 4]; + r.read_exact(&mut buf)?; + Ok(u32::from_be_bytes(buf)) +} + +/// Writes a u32 in LE format. +#[inline] +pub fn write_u32_le(mut w: W, v: u32) -> io::Result<()> { + w.write_all(&v.to_le_bytes()) +} + +/// Writes a u32 in BE format. +#[inline] +pub fn write_u32_be(mut w: W, v: u32) -> io::Result<()> { + w.write_all(&v.to_be_bytes()) +} + +/// Reads a u64 in LE format. +#[inline] +pub fn read_u64_le(mut r: R) -> io::Result { + let mut buf = [0u8; 8]; + r.read_exact(&mut buf)?; + Ok(u64::from_le_bytes(buf)) +} + +/// Reads a u64 in BE format. +#[inline] +pub fn read_u64_be(mut r: R) -> io::Result { + let mut buf = [0u8; 8]; + r.read_exact(&mut buf)?; + Ok(u64::from_be_bytes(buf)) +} + +/// Writes a u64 in LE format. +#[inline] +pub fn write_u64_le(mut w: W, v: u64) -> io::Result<()> { + w.write_all(&v.to_le_bytes()) +} + +/// Writes a u64 in BE format. +#[inline] +pub fn write_u64_be(mut w: W, v: u64) -> io::Result<()> { + w.write_all(&v.to_be_bytes()) +} + +/// Reads an i64 in LE format. +#[inline] +pub fn read_i64_le(mut r: R) -> io::Result { + let mut buf = [0u8; 8]; + r.read_exact(&mut buf)?; + Ok(i64::from_le_bytes(buf)) +} + +/// Reads an i64 in BE format. +#[inline] +pub fn read_i64_be(mut r: R) -> io::Result { + let mut buf = [0u8; 8]; + r.read_exact(&mut buf)?; + Ok(i64::from_be_bytes(buf)) +} + +/// Writes an i64 in LE format. +#[inline] +pub fn write_i64_le(mut w: W, v: i64) -> io::Result<()> { + w.write_all(&v.to_le_bytes()) +} + +/// Writes an i64 in BE format. +#[inline] +pub fn write_i64_be(mut w: W, v: i64) -> io::Result<()> { + w.write_all(&v.to_be_bytes()) +} + +/* ───────────────────────────── fixed-array helpers ───────────────────────── */ + +/// Read exactly `N` bytes **as-is** (little-endian / “native order”). 
+#[inline] +pub fn read_fixed_le(mut r: R) -> io::Result<[u8; N]> { + let mut buf = [0u8; N]; + r.read_exact(&mut buf)?; + Ok(buf) +} + +/// Write an `[u8; N]` **as-is** (little-endian / “native order”). +#[inline] +pub fn write_fixed_le(mut w: W, bytes: &[u8; N]) -> io::Result<()> { + w.write_all(bytes) +} + +/// Read exactly `N` bytes from the stream and **reverse** them so the caller +/// receives little-endian/internal order while the wire sees big-endian. +#[inline] +pub fn read_fixed_be(mut r: R) -> io::Result<[u8; N]> { + let mut buf = [0u8; N]; + r.read_exact(&mut buf)?; + buf.reverse(); + Ok(buf) +} + +/// Take an internal little-endian `[u8; N]`, reverse it, and write big-endian +/// order to the stream. +#[inline] +pub fn write_fixed_be(mut w: W, bytes: &[u8; N]) -> io::Result<()> { + let mut tmp = *bytes; + tmp.reverse(); + w.write_all(&tmp) +} + +/* ─────────────────────────── Option helpers ──────────────────────────── */ + +/// 0 = None, 1 = Some. +pub fn write_option(mut w: W, value: &Option, mut f: F) -> io::Result<()> +where + W: Write, + F: FnMut(&mut W, &T) -> io::Result<()>, +{ + match value { + None => w.write_all(&[0]), + Some(val) => { + w.write_all(&[1])?; + f(&mut w, val) + } + } +} + +/// Reads an option based on option tag byte. +pub fn read_option(mut r: R, mut f: F) -> io::Result> +where + R: Read, + F: FnMut(&mut R) -> io::Result, +{ + let mut flag = [0u8; 1]; + r.read_exact(&mut flag)?; + match flag[0] { + 0 => Ok(None), + 1 => f(&mut r).map(Some), + _ => Err(io::Error::new( + io::ErrorKind::InvalidData, + "non-canonical Option tag", + )), + } +} + +/* ──────────────────────────── Vec helpers ────────────────────────────── */ +/// Writes a vec of structs, preceded by number of items (compactsize). +pub fn write_vec(mut w: W, vec: &[T], mut f: F) -> io::Result<()> +where + W: Write, + F: FnMut(&mut W, &T) -> io::Result<()>, +{ + CompactSize::write(&mut w, vec.len())?; + for item in vec { + f(&mut w, item)? 
+ } + Ok(()) +} + +/// Reads a vec of structs, preceded by number of items (compactsize). +pub fn read_vec(mut r: R, mut f: F) -> io::Result> +where + R: Read, + F: FnMut(&mut R) -> io::Result, +{ + let len = CompactSize::read(&mut r)? as usize; + let mut v = Vec::with_capacity(len); + for _ in 0..len { + v.push(f(&mut r)?); + } + Ok(v) +} + +/// Same as `read_vec` but collects straight into any container that +/// implements `FromIterator`. +pub fn read_vec_into(mut r: R, mut f: F) -> io::Result +where + R: Read, + F: FnMut(&mut R) -> io::Result, + C: FromIterator, +{ + let len = CompactSize::read(&mut r)? as usize; + (0..len).map(|_| f(&mut r)).collect() +} diff --git a/zaino-state/src/chain_index/finalised_state.rs b/zaino-state/src/chain_index/finalised_state.rs new file mode 100644 index 000000000..d9a444b2b --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state.rs @@ -0,0 +1,756 @@ +//! Finalised ChainIndex database (ZainoDB) +//! +//! This module provides `ZainoDB`, the **on-disk** backing store for the *finalised* portion of the +//! chain index. +//! +//! “Finalised” in this context means: All but the top 100 blocks in the blockchain. This follows +//! Zebra's model where a reorg of depth greater than 100 would require a complete network restart. +//! +//! `ZainoDB` is a facade around a versioned LMDB-backed database implementation. It is responsible +//! for: +//! - opening or creating the correct on-disk database version, +//! - coordinating **database version migrations** when the on-disk version is older than the configured +//! target, +//! - exposing a small set of core read/write operations to the rest of `chain_index`, +//! - and providing a read-only handle (`DbReader`) that should be used for all chain fetches. +//! +//! # Code layout (submodules) +//! +//! The finalised-state subsystem is split into the following files: +//! +//! - [`capability`] +//! 
- Defines the *capability model* used to represent which features a given DB version supports. +//! - Defines the core DB traits (`DbRead`, `DbWrite`, `DbCore`) and extension traits +//! (`BlockCoreExt`, `TransparentHistExt`, etc.). +//! - Defines versioned metadata (`DbMetadata`, `DbVersion`, `MigrationStatus`) persisted on disk. +//! +//! - [`db`] +//! - Houses concrete DB implementations by **major** version (`db::v0`, `db::v1`) and the +//! version-erased facade enum [`db::DbBackend`] that implements the capability traits. +//! +//! - [`router`] +//! - Implements [`router::Router`], a capability router that can direct calls to either the +//! primary DB or a shadow DB during major migrations. +//! +//! - [`migrations`] +//! - Implements migration orchestration (`MigrationManager`) and concrete migration steps. +//! +//! - [`reader`] +//! - Defines [`reader::DbReader`], a read-only view that routes each query through the router +//! using the appropriate capability request. +//! +//! - [`entry`] +//! - Defines integrity-preserving wrappers (`StoredEntryFixed`, `StoredEntryVar`) used by +//! versioned DB implementations for checksummed key/value storage. +//! +//! # Architecture overview +//! +//! At runtime the layering is: +//! +//! ```text +//! ZainoDB (facade; owns config; exposes simple methods) +//! └─ Router (capability-based routing; primary + optional shadow) +//! └─ DbBackend (enum; V0 / V1; implements core + extension traits) +//! ├─ db::v0::DbV0 (legacy schema; compact-block streamer) +//! └─ db::v1::DbV1 (current schema; full indices incl. transparent history indexing) +//! ``` +//! +//! Consumers should avoid depending on the concrete DB version; they should prefer `DbReader`, +//! which automatically routes each read to a backend that actually supports the requested feature. +//! +//! # Database types and serialization strategy +//! +//! The finalised database stores **only** types that are explicitly designed for persistence. +//! 
Concretely, values written into LMDB are composed from the database-serializable types in +//! [`crate::chain_index::types::db`] (re-exported via [`crate::chain_index::types`]). +//! +//! All persisted types implement [`crate::chain_index::encoding::ZainoVersionedSerde`], which +//! defines Zaino’s on-disk wire format: +//! - a **one-byte version tag** (`encoding::version::V1`, `V2`, …), +//! - followed by a version-specific body (little-endian unless stated otherwise). +//! +//! This “version-tagged value” model allows individual record layouts to evolve while keeping +//! backward compatibility via `decode_vN` implementations. Any incompatible change to persisted +//! types must be coordinated with the database schema versioning in this module (see +//! [`capability::DbVersion`]) and, where required, accompanied by a migration (see [`migrations`]). +//! +//! Database implementations additionally use the integrity wrappers in [`entry`] to store values +//! with a BLAKE2b-256 checksum bound to the encoded key (`key || encoded_value`), providing early +//! detection of corruption or key/value mismatches. +//! +//! # On-disk layout and version detection +//! +//! Database discovery is intentionally conservative: `try_find_current_db_version` returns the +//! **oldest** detected version, because the process may have been terminated mid-migration, leaving +//! multiple version directories on disk. +//! +//! The current logic recognises two layouts: +//! +//! - **Legacy v0 layout:** network directories `live/`, `test/`, `local/` containing LMDB +//! `data.mdb` + `lock.mdb`. +//! - **Versioned v1+ layout:** network directories `mainnet/`, `testnet/`, `regtest/` containing +//! version subdirectories enumerated by [`db::VERSION_DIRS`] (e.g. `v1/`). +//! +//! # Versioning and migration strategy +//! +//! `ZainoDB::spawn` selects a **target version** from `BlockCacheConfig::db_version` and compares it +//! against the **current on-disk version** read from `DbMetadata`. +//! 
+//! - If no database exists, a new DB is created at the configured target version. +//! - If a database exists and `current_version < target_version`, the [`migrations::MigrationManager`] +//! is invoked to migrate the database. +//! +//! Major migrations are designed to be low-downtime and disk-conscious: +//! - a *shadow* DB of the new version is built in parallel, +//! - the router continues serving from the primary DB until the shadow is complete, +//! - then the shadow is promoted to primary, and the old DB is deleted once all handles are dropped. +//! +//! Migration progress is tracked via `DbMetadata::migration_status` (see [`capability::MigrationStatus`]) +//! to support resumption after crashes. +//! +//! **Downgrades are not supported.** If a higher version exists on disk than the configured target, +//! the code currently opens the on-disk DB as-is; do not rely on “forcing” an older version via +//! config. +//! +//! # Core API and invariants +//! +//! `ZainoDB` provides: +//! +//! - Lifecycle: +//! - [`ZainoDB::spawn`], [`ZainoDB::shutdown`], [`ZainoDB::status`], [`ZainoDB::wait_until_ready`] +//! +//! - Writes: +//! - [`ZainoDB::write_block`]: append-only; **must** write `db_tip + 1` +//! - [`ZainoDB::delete_block_at_height`]/[`ZainoDB::delete_block`]: pop-only; **must** delete tip +//! - [`ZainoDB::sync_to_height`]: convenience sync loop that fetches blocks from a `BlockchainSource` +//! +//! - Reads: +//! - `db_height`, `get_block_height`, `get_block_hash`, `get_metadata` +//! +//! **Write invariants** matter for correctness across all DB versions: +//! - `write_block` must be called in strictly increasing height order and must not skip heights. +//! - `delete_block*` must only remove the current tip, and must keep all secondary indices consistent. +//! +//! # Usage (recommended pattern) +//! +//! - Construct the DB once at startup. +//! - Await readiness. +//! - Hand out `DbReader` handles for all read/query operations. +//! +//! 
```rust,no_run +//! use std::sync::Arc; +//! +//! let db = Arc::new(crate::chain_index::finalised_state::ZainoDB::spawn(cfg, source).await?); +//! db.wait_until_ready().await; +//! +//! let reader = db.to_reader(); +//! let tip = reader.db_height().await?; +//! ``` +//! +//! # Development: extending the finalised DB safely +//! +//! Common tasks and where they belong: +//! +//! - **Add a new query/index:** implement it in the latest DB version (e.g. `db::v1`), then expose it +//! via a capability extension trait in [`capability`], route it via [`reader`], and gate it via +//! `Capability` / `DbVersion::capability`. +//! +//! - **Add a new DB major version (v2):** +//! 1. Add `db::v2` module and `DbV2` implementation. +//! 2. Extend [`db::DbBackend`] with a `V2(DbV2)` variant and delegate trait impls. +//! 3. Append `"v2"` to [`db::VERSION_DIRS`] (no gaps; order matters for discovery). +//! 4. Extend `ZainoDB::spawn` config mapping to accept `cfg.db_version == 2`. +//! 5. Update [`capability::DbVersion::capability`] for `(2, 0)`. +//! 6. Add a migration step in [`migrations`] and register it in `MigrationManager::get_migration`. +//! +//! - **Change an on-disk encoding:** treat it as a schema change. Either implement a migration or +//! bump the DB major version and rebuild in shadow. +//! + +// TODO / FIX - REMOVE THIS ONCE CHAININDEX LANDS! 
+#![allow(dead_code)] + +pub(crate) mod capability; +pub(crate) mod db; +pub(crate) mod entry; +pub(crate) mod migrations; +pub(crate) mod reader; +pub(crate) mod router; + +use capability::*; +use db::{DbBackend, VERSION_DIRS}; +use migrations::MigrationManager; +use reader::*; +use router::Router; +use tracing::info; +use zebra_chain::parameters::NetworkKind; + +use crate::{ + chain_index::{source::BlockchainSourceError, types::GENESIS_HEIGHT}, + config::BlockCacheConfig, + error::FinalisedStateError, + BlockHash, BlockMetadata, BlockWithMetadata, ChainWork, Height, IndexedBlock, StatusType, +}; + +use std::{ + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Duration, +}; +use tokio::{ + sync::watch, + time::{interval, MissedTickBehavior}, +}; + +use super::source::BlockchainSource; + +#[derive(Debug)] +/// Handle to the finalised on-disk chain index. +/// +/// `ZainoDB` is the owner-facing facade for the finalised portion of the ChainIndex: +/// - it opens or creates the appropriate on-disk database version, +/// - it coordinates migrations when `current_version < target_version`, +/// - and it exposes a small set of lifecycle, write, and core read methods. +/// +/// ## Concurrency model +/// Internally, `ZainoDB` holds an [`Arc`] to a [`Router`]. The router provides lock-free routing +/// between a primary database and (during major migrations) an optional shadow database. +/// +/// Query paths should not call `ZainoDB` methods directly. Instead, construct a [`DbReader`] using +/// [`ZainoDB::to_reader`] and perform all reads via that read-only API. This ensures capability- +/// correct routing (especially during migrations). +/// +/// ## Configuration +/// `ZainoDB` stores the [`BlockCacheConfig`] used to: +/// - determine network-specific on-disk paths, +/// - select a target database version (`cfg.db_version`), +/// - and compute per-block metadata (e.g., network selection for `BlockMetadata`). 
+pub(crate) struct ZainoDB { + // Capability router for the active database backend(s). + /// + /// - In steady state, all requests route to the primary backend. + /// - During a major migration, some or all capabilities may route to a shadow backend until + /// promotion completes. + db: Arc, + + /// Immutable configuration snapshot used for sync and metadata construction. + cfg: BlockCacheConfig, +} + +/// Lifecycle, migration control, and core read/write API for the finalised database. +/// +/// This `impl` intentionally stays small and policy heavy: +/// - version selection and migration orchestration lives in [`ZainoDB::spawn`], +/// - the storage engine details are encapsulated behind [`DbBackend`] and the capability traits, +/// - higher-level query routing is provided by [`DbReader`]. +impl ZainoDB { + // ***** DB control ***** + + /// Spawns a `ZainoDB` instance. + /// + /// This method: + /// 1. Detects the on-disk database version (if any) using [`ZainoDB::try_find_current_db_version`]. + /// 2. Selects a target schema version from `cfg.db_version`. + /// 3. Opens the existing database at the detected version, or creates a new database at the + /// target version. + /// 4. If an existing database is older than the target (`current_version < target_version`), + /// runs migrations using [`migrations::MigrationManager`]. + /// + /// ## Version selection rules + /// - `cfg.db_version == 0` targets `DbVersion { 0, 0, 0 }` (legacy layout). + /// - `cfg.db_version == 1` targets `DbVersion { 1, 0, 0 }` (current layout). + /// - Any other value returns an error. + /// + /// ## Migrations + /// Migrations are invoked only when a database already exists on disk and the opened database + /// reports a lower version than the configured target. + /// + /// Migrations may require access to chain data to rebuild indices. For that reason, a + /// [`BlockchainSource`] is provided here and passed into the migration manager. 
+ /// + /// ## Errors + /// Returns [`FinalisedStateError`] if: + /// - the configured target version is unsupported, + /// - the on-disk database version is unsupported, + /// - opening or creating the database fails, + /// - or any migration step fails. + pub(crate) async fn spawn( + cfg: BlockCacheConfig, + source: T, + ) -> Result + where + T: BlockchainSource, + { + let version_opt = Self::try_find_current_db_version(&cfg).await; + + let target_version = match cfg.db_version { + 0 => DbVersion { + major: 0, + minor: 0, + patch: 0, + }, + 1 => DbVersion { + major: 1, + minor: 0, + patch: 0, + }, + x => { + return Err(FinalisedStateError::Custom(format!( + "unsupported database version: DbV{x}" + ))); + } + }; + + let backend = match version_opt { + Some(version) => { + info!("Opening ZainoDBv{} from file.", version); + match version { + 0 => DbBackend::spawn_v0(&cfg).await?, + 1 => DbBackend::spawn_v1(&cfg).await?, + _ => { + return Err(FinalisedStateError::Custom(format!( + "unsupported database version: DbV{version}" + ))); + } + } + } + None => { + info!("Creating new ZainoDBv{}.", target_version); + match target_version.major() { + 0 => DbBackend::spawn_v0(&cfg).await?, + 1 => DbBackend::spawn_v1(&cfg).await?, + _ => { + return Err(FinalisedStateError::Custom(format!( + "unsupported database version: DbV{target_version}" + ))); + } + } + } + }; + let current_version = backend.get_metadata().await?.version(); + + let router = Arc::new(Router::new(Arc::new(backend))); + + if version_opt.is_some() && current_version < target_version { + info!( + "Starting ZainoDB migration manager, migratiing database from v{} to v{}.", + current_version, target_version + ); + let mut migration_manager = MigrationManager { + router: Arc::clone(&router), + cfg: cfg.clone(), + current_version, + target_version, + source, + }; + migration_manager.migrate().await?; + } + + Ok(Self { db: router, cfg }) + } + + /// Gracefully shuts down the running database backend(s). 
+ /// + /// This delegates to the router, which shuts down: + /// - the primary backend, and + /// - any shadow backend currently present (during migrations). + /// + /// After this call returns `Ok(())`, database files may still remain on disk; shutdown does not + /// delete data. (Deletion of old versions is handled by migrations when applicable.) + pub(crate) async fn shutdown(&self) -> Result<(), FinalisedStateError> { + self.db.shutdown().await + } + + /// Returns the runtime status of the serving database. + /// + /// This status is provided by the backend implementing [`capability::DbCore::status`]. During + /// migrations, the router determines which backend serves `READ_CORE`, and the status reflects + /// that routing decision. + pub(crate) fn status(&self) -> StatusType { + self.db.status() + } + + /// Waits until the database reports [`StatusType::Ready`]. + /// + /// This polls the router at a fixed interval (100ms) using a Tokio timer. The polling loop uses + /// `MissedTickBehavior::Delay` to avoid catch-up bursts under load or when the runtime is + /// stalled. + /// + /// Call this after [`ZainoDB::spawn`] if downstream services require the database to be fully + /// initialised before handling requests. + pub(crate) async fn wait_until_ready(&self) { + let mut ticker = interval(Duration::from_millis(100)); + ticker.set_missed_tick_behavior(MissedTickBehavior::Delay); + loop { + ticker.tick().await; + if self.db.status() == StatusType::Ready { + break; + } + } + } + + /// Creates a read-only view onto the running database. + /// + /// All chain fetches should be performed through [`DbReader`] rather than calling read methods + /// directly on `ZainoDB`. + pub(crate) fn to_reader(self: &Arc) -> DbReader { + DbReader { + inner: Arc::clone(self), + } + } + + /// Attempts to detect the current on-disk database version from the filesystem layout. 
+ /// + /// The detection is intentionally conservative: it returns the **oldest** detected version, + /// because the process may have been terminated mid-migration, leaving both an older primary + /// and a newer shadow directory on disk. + /// + /// ## Recognised layouts + /// + /// - **Legacy v0 layout** + /// - Network directories: `live/`, `test/`, `local/` + /// - Presence check: both `data.mdb` and `lock.mdb` exist + /// - Reported version: `Some(0)` + /// + /// - **Versioned v1+ layout** + /// - Network directories: `mainnet/`, `testnet/`, `regtest/` + /// - Version subdirectories: enumerated by [`db::VERSION_DIRS`] (e.g. `"v1"`) + /// - Presence check: both `data.mdb` and `lock.mdb` exist within a version directory + /// - Reported version: `Some(i + 1)` where `i` is the index in `VERSION_DIRS` + /// + /// Returns: + /// - `Some(version)` if a compatible database directory is found, + /// - `None` if no database is detected (fresh DB creation case). + async fn try_find_current_db_version(cfg: &BlockCacheConfig) -> Option { + let legacy_dir = match cfg.network.to_zebra_network().kind() { + NetworkKind::Mainnet => "live", + NetworkKind::Testnet => "test", + NetworkKind::Regtest => "local", + }; + let legacy_path = cfg.storage.database.path.join(legacy_dir); + if legacy_path.join("data.mdb").exists() && legacy_path.join("lock.mdb").exists() { + return Some(0); + } + + let net_dir = match cfg.network.to_zebra_network().kind() { + NetworkKind::Mainnet => "mainnet", + NetworkKind::Testnet => "testnet", + NetworkKind::Regtest => "regtest", + }; + let net_path = cfg.storage.database.path.join(net_dir); + if net_path.exists() && net_path.is_dir() { + for (i, version_dir) in VERSION_DIRS.iter().enumerate() { + let db_path = net_path.join(version_dir); + let data_file = db_path.join("data.mdb"); + let lock_file = db_path.join("lock.mdb"); + if data_file.exists() && lock_file.exists() { + let version = (i + 1) as u32; + return Some(version); + } + } + } + + None + } 
+ + /// Returns the database backend that should serve the requested capability. + /// + /// This is used by [`DbReader`] to route calls to the correct database during major migrations. + /// The router may return either the primary or shadow backend depending on the current routing + /// masks. + /// + /// ## Errors + /// Returns [`FinalisedStateError::FeatureUnavailable`] if neither backend currently serves the + /// requested capability. + #[inline] + pub(crate) fn backend_for_cap( + &self, + cap: CapabilityRequest, + ) -> Result, FinalisedStateError> { + self.db.backend(cap) + } + + // ***** Db Core Write ***** + + /// Sync the database up to and including `height` using a [`BlockchainSource`]. + /// + /// This method is a convenience ingestion loop that: + /// - determines the current database tip height, + /// - fetches each missing block from the source, + /// - fetches Sapling and Orchard commitment tree roots for each block, + /// - constructs [`BlockMetadata`] and an [`IndexedBlock`], + /// - and appends the block via [`ZainoDB::write_block`]. + /// + /// ## Chainwork handling + /// For database versions that expose [`capability::BlockCoreExt`], chainwork is retrieved from + /// stored header data and threaded through `BlockMetadata`. + /// + /// Legacy v0 databases do not expose header/chainwork APIs; in that case, chainwork is set to + /// zero. This is safe only insofar as v0 consumers do not rely on chainwork-dependent features. + /// + /// ## Invariants + /// - Blocks are written strictly in height order. + /// - This method assumes the source provides consistent block and commitment tree data. + /// + /// ## Errors + /// Returns [`FinalisedStateError`] if: + /// - a block is missing from the source at a required height, + /// - commitment tree roots are missing for Sapling or Orchard, + /// - constructing an [`IndexedBlock`] fails, + /// - or any underlying database write fails. 
+ pub(crate) async fn sync_to_height( + &self, + height: Height, + source: &T, + ) -> Result<(), FinalisedStateError> + where + T: BlockchainSource, + { + let network = self.cfg.network; + let db_height_opt = self.db_height().await?; + let mut db_height = db_height_opt.unwrap_or(GENESIS_HEIGHT); + + let zebra_network = network.to_zebra_network(); + let sapling_activation_height = zebra_chain::parameters::NetworkUpgrade::Sapling + .activation_height(&zebra_network) + .expect("Sapling activation height must be set"); + let nu5_activation_height = zebra_chain::parameters::NetworkUpgrade::Nu5 + .activation_height(&zebra_network) + .expect("NU5 activation height must be set"); + + let mut parent_chainwork = if db_height_opt.is_none() { + ChainWork::from_u256(0.into()) + } else { + db_height.0 += 1; + match self + .db + .backend(CapabilityRequest::BlockCoreExt)? + .get_block_header(height) + .await + { + Ok(header) => *header.index().chainwork(), + // V0 does not hold or use chainwork, and does not serve header data, + // can we handle this better? + // + // can we get this data from zebra blocks? + Err(_) => ChainWork::from_u256(0.into()), + } + }; + + // Track last time we emitted an info log so we only print every 10s. + let current_height = Arc::new(AtomicU64::new(db_height.0 as u64)); + let target_height = height.0 as u64; + + // Shutdown signal for the reporter task. + let (shutdown_tx, shutdown_rx) = watch::channel(()); + // Spawn reporter task that logs every 10 seconds, even while write_block() is running. + let reporter_current = Arc::clone(¤t_height); + let reporter_network = network; + let mut reporter_shutdown = shutdown_rx.clone(); + let reporter_handle = tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_secs(10)); + loop { + tokio::select! 
{ + _ = interval.tick() => { + let cur = reporter_current.load(Ordering::Relaxed); + tracing::info!( + "sync_to_height: syncing height {current} / {target} on network = {:?}", + reporter_network, + current = cur, + target = target_height + ); + } + // stop when we receive a shutdown signal + _ = reporter_shutdown.changed() => { + break; + } + } + } + }); + + // Run the main sync logic inside an inner async block so we always get + // a chance to shutdown the reporter task regardless of how this block exits. + let result: Result<(), FinalisedStateError> = (async { + for height_int in (db_height.0)..=height.0 { + // Update the shared progress value as soon as we start processing this height. + current_height.store(height_int as u64, Ordering::Relaxed); + + let block = match source + .get_block(zebra_state::HashOrHeight::Height( + zebra_chain::block::Height(height_int), + )) + .await? + { + Some(block) => block, + None => { + return Err(FinalisedStateError::BlockchainSourceError( + BlockchainSourceError::Unrecoverable(format!( + "error fetching block at height {} from validator", + height.0 + )), + )); + } + }; + + let block_hash = BlockHash::from(block.hash().0); + + // Fetch sapling / orchard commitment tree data if above relevant network upgrade. + let (sapling_opt, orchard_opt) = + source.get_commitment_tree_roots(block_hash).await?; + let is_sapling_active = height_int >= sapling_activation_height.0; + let is_orchard_active = height_int >= nu5_activation_height.0; + let (sapling_root, sapling_size) = if is_sapling_active { + sapling_opt.ok_or_else(|| { + FinalisedStateError::BlockchainSourceError( + BlockchainSourceError::Unrecoverable(format!( + "missing Sapling commitment tree root for block {block_hash}" + )), + ) + })? 
+ } else { + (zebra_chain::sapling::tree::Root::default(), 0) + }; + + let (orchard_root, orchard_size) = if is_orchard_active { + orchard_opt.ok_or_else(|| { + FinalisedStateError::BlockchainSourceError( + BlockchainSourceError::Unrecoverable(format!( + "missing Orchard commitment tree root for block {block_hash}" + )), + ) + })? + } else { + (zebra_chain::orchard::tree::Root::default(), 0) + }; + + let metadata = BlockMetadata::new( + sapling_root, + sapling_size as u32, + orchard_root, + orchard_size as u32, + parent_chainwork, + network.to_zebra_network(), + ); + + let block_with_metadata = BlockWithMetadata::new(block.as_ref(), metadata); + let chain_block = match IndexedBlock::try_from(block_with_metadata) { + Ok(block) => block, + Err(_) => { + return Err(FinalisedStateError::BlockchainSourceError( + BlockchainSourceError::Unrecoverable(format!( + "error building block data at height {}", + height.0 + )), + )); + } + }; + parent_chainwork = *chain_block.index().chainwork(); + + self.write_block(chain_block).await?; + } + + Ok(()) + }) + .await; + + // Signal the reporter to shut down and wait for it to finish. + // Ignore send error if receiver already dropped. + let _ = shutdown_tx.send(()); + // Await the reporter to ensure clean shutdown; ignore errors if it panicked/was aborted. + let _ = reporter_handle.await; + + result + } + + /// Appends a single fully constructed [`IndexedBlock`] to the database. + /// + /// This **must** be the next block after the current database tip (`db_tip_height + 1`). + /// Database implementations may assume append-only semantics to maintain secondary index + /// consistency. + /// + /// For reorg handling, callers should delete tip blocks using [`ZainoDB::delete_block_at_height`] + /// or [`ZainoDB::delete_block`] before re-appending. + pub(crate) async fn write_block(&self, b: IndexedBlock) -> Result<(), FinalisedStateError> { + self.db.write_block(b).await + } + + /// Deletes the block at height `h` from the database. 
+ /// + /// This **must** be the current database tip. Deleting non-tip blocks is not supported because + /// it would require re-writing dependent indices for all higher blocks. + /// + /// This method delegates to the backend’s `delete_block_at_height` implementation. If that + /// deletion cannot be completed correctly (for example, if the backend cannot reconstruct all + /// derived index entries needed for deletion), callers must fall back to [`ZainoDB::delete_block`] + /// using an [`IndexedBlock`] fetched from the validator/source to ensure a complete wipe. + pub(crate) async fn delete_block_at_height( + &self, + h: Height, + ) -> Result<(), FinalisedStateError> { + self.db.delete_block_at_height(h).await + } + + /// Deletes the provided block from the database. + /// + /// This **must** be the current database tip. The provided [`IndexedBlock`] is used to ensure + /// all derived indices created by that block can be removed deterministically. + /// + /// Prefer [`ZainoDB::delete_block_at_height`] when possible; use this method when the backend + /// requires full block contents to correctly reverse all indices. + pub(crate) async fn delete_block(&self, b: &IndexedBlock) -> Result<(), FinalisedStateError> { + self.db.delete_block(b).await + } + + // ***** DB Core Read ***** + + /// Returns the highest block height stored in the finalised database. + /// + /// Returns: + /// - `Ok(Some(height))` if at least one block is present, + /// - `Ok(None)` if the database is empty. + pub(crate) async fn db_height(&self) -> Result, FinalisedStateError> { + self.db.db_height().await + } + + /// Returns the main-chain height for `hash` if the block is present in the finalised database. + /// + /// Returns: + /// - `Ok(Some(height))` if the hash is indexed, + /// - `Ok(None)` if the hash is not present (not an error). 
+ pub(crate) async fn get_block_height( + &self, + hash: BlockHash, + ) -> Result, FinalisedStateError> { + self.db.get_block_height(hash).await + } + + /// Returns the main-chain block hash for `height` if the block is present in the finalised database. + /// + /// Returns: + /// - `Ok(Some(hash))` if the height is indexed, + /// - `Ok(None)` if the height is not present (not an error). + pub(crate) async fn get_block_hash( + &self, + height: Height, + ) -> Result, FinalisedStateError> { + self.db.get_block_hash(height).await + } + + /// Returns the persisted database metadata. + /// + /// See [`capability::DbMetadata`] for the precise fields and on-disk encoding. + pub(crate) async fn get_metadata(&self) -> Result { + self.db.get_metadata().await + } + + /// Returns the internal router (test-only). + /// + /// This is intended for unit/integration tests that need to observe or manipulate routing state + /// during migrations. Production code should not depend on the router directly. + #[cfg(test)] + pub(crate) fn router(&self) -> &Router { + &self.db + } +} diff --git a/zaino-state/src/chain_index/finalised_state/CHANGELOG.md b/zaino-state/src/chain_index/finalised_state/CHANGELOG.md new file mode 100644 index 000000000..93cd3fdbe --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/CHANGELOG.md @@ -0,0 +1,150 @@ +Zaino Finalised-State Database Changelog +======================================= + +Format +------ +One entry per database version bump (major / minor / patch). Keep entries concise and factual. 
+
+Entry template:
+
+--------------------------------------------------------------------------------
+DB VERSION vX.Y.Z (from vA.B.C)
+Date: YYYY-MM-DD
+--------------------------------------------------------------------------------
+
+Summary
+- <1–3 bullets describing intent of the change>
+
+On-disk schema
+- Layout:
+  - <...>
+- Tables:
+  - Added: <...>
+  - Removed: <...>
+  - Renamed: <old -> new>
+- Encoding:
+  - Keys: <...>
+  - Values: <...>
+  - Checksums / validation: <...>
+- Invariants:
+  - <...>
+
+API / capabilities
+- Capability changes:
+  - Added: <...>
+  - Removed: <...>
+  - Changed: <...>
+- Public surface changes:
+  - Added: <...>
+  - Removed: <...>
+  - Changed: <...>
+
+Migration
+- Strategy: <...>
+- Backfill: <...>
+- Completion criteria: <...>
+- Failure handling: <...>
+
+Bug Fixes / Optimisations
+
+--------------------------------------------------------------------------------
+DB VERSION v1.0.0 (from v0.0.0)
+Date: 2025-08-13
+--------------------------------------------------------------------------------
+
+Summary
+- Replace legacy v0 schema with versioned v1 schema and expanded indices / query surface.
+- Introduce stronger integrity checks and on-demand validation for v1 read paths.
+- Keep compact block retrieval available (compatibility surface).
+
+On-disk schema
+- Layout:
+  - Move to per-network version directory layout: <db_root>/<network>/v1/
+  - VERSION_DIRS begins at ["v1"] (new versions append, no gaps).
+- Tables:
+  - Added (v1): headers, txids, transparent, sapling, orchard, commitment_tree_data, heights (hash->height),
+    plus v1 indices for tx locations, spent outpoints, and transparent address history.
+  - Removed / superseded (v0): legacy compact-block-streamer oriented storage layout.
+- Encoding:
+  - v1 values are stored as checksum-protected `StoredEntryVar` / `StoredEntryFixed` entries.
+  - Canonical key bytes are used for checksum verification via `verify(key)`.
+- Invariants (v1 validation enforces):
+  - Per-table checksum verification for all per-block tables.
+ - Chain continuity: header parent hash at height h matches stored hash at h-1. + - Merkle consistency: header merkle root matches computed root from stored txid list. + - Index consistency: + - hash->height mapping must match the queried height. + - spent + addr history records must exist and match for transparent inputs/outputs. + +API / capabilities +- Capability changes: + - v0: READ_CORE | WRITE_CORE | COMPACT_BLOCK_EXT + - v1: Capability::LATEST (block core/transparent/shielded, indexed block, transparent history, etc.) +- Public surface changes: + - Added (v1-only; FeatureUnavailable on v0): + - BlockCoreExt: header/txids/range fetch, txid<->location lookup + - BlockTransparentExt: per-tx and per-block transparent access + ranges + - BlockShieldedExt: sapling/orchard per-tx and per-block access + ranges, commitment tree data (+ ranges) + - IndexedBlockExt: indexed block retrieval + - TransparentHistExt: addr records, range queries, balance/utxos, outpoint spender(s) + - Preserved: + - CompactBlockExt remains available for both v0 and v1. + +Migration +- Strategy: shadow build + promotion (no in-place transformation of v0). +- Backfill: rebuild all v1 tables/indices by ingesting chain data. +- Completion criteria: + - metadata indicates migrated/ready, and required tables exist through the tip. + - validation succeeds for the contiguous best chain range as built. +- Failure handling: + - do not promote partially built v1; continue using v0 if present; rebuild v1 on retry. + +Bug Fixes / Optimisations +- Complete DB rework +-------------------------------------------------------------------------------- +DB VERSION v1.0.0 (RC Bug Fixes) +-------------------------------------------------------------------------------- + +Summary +- Minor version bump to reflect updated compact block API contract (streaming + pool filtering semantics). +- No schema or encoding changes; metadata-only migration updates persisted DB version marker. 
+ +On-disk schema +- Layout: + - No changes. +- Tables: + - Added: None. + - Removed: None. + - Renamed: None. +- Encoding: + - Keys: No changes. + - Values: No changes. + - Checksums / validation: No changes. +- Invariants: + - No changes. + +API / capabilities +- Capability changes: + - Added: None. + - Removed: None. + - Changed: + - COMPACT_BLOCK_EXT contract updated for v1 backends: + - get_compact_block(...) now takes a PoolTypeFilter, which selects which pool data is materialized into the returned compact block. + - get_compact_block_stream(...) added. + +- Public surface changes: + - Added: + - CompactBlockExt::get_compact_block_stream(start_height, end_height, pool_types: PoolTypeFilter). + - Removed: None. + - Changed: + - CompactBlockExt::get_compact_block(height, pool_types: PoolTypeFilter) signature updated. + - Compact block contents are now filtered by PoolTypeFilter, and may include transparent transaction data (vin/vout) when selected. + +Bug Fixes / Optimisations +- Added safety check for idempotent DB writes +- Updated 'fix_addr_hist_records_by_addr_and_index_blocking' to take and reuse an lmdb ro transaction, improving initial sync performance. + + +-------------------------------------------------------------------------------- +(append new entries below) +-------------------------------------------------------------------------------- diff --git a/zaino-state/src/chain_index/finalised_state/capability.rs b/zaino-state/src/chain_index/finalised_state/capability.rs new file mode 100644 index 000000000..703f1543f --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/capability.rs @@ -0,0 +1,1014 @@ +//! Capability model, versioned metadata, and DB trait surface +//! +//! This file defines the **capability- and version-aware interface** that all `ZainoDB` database +//! implementations must conform to. +//! +//! The core idea is: +//! - Each concrete DB major version (e.g. `DbV0`, `DbV1`) implements a common set of traits. +//! 
- A `Capability` bitmap declares which parts of that trait surface are actually supported. +//! - The router (`Router`) and reader (`DbReader`) use *single-feature* requests +//! (`CapabilityRequest`) to route a call to a backend that is guaranteed to support it. +//! +//! This design enables: +//! - running mixed-version configurations during major migrations (primary + shadow), +//! - serving old data while building new indices, +//! - and gating API features cleanly when a backend does not support an extension. +//! +//! # What’s in this file +//! +//! ## Capability / routing types +//! - [`Capability`]: bitflags describing what an *open* database instance can serve. +//! - [`CapabilityRequest`]: a single-feature request (non-composite) used for routing. +//! +//! ## Versioned metadata +//! - [`DbVersion`]: schema version triple (major/minor/patch) plus a mapping to supported capabilities. +//! - [`DbMetadata`]: persisted singleton stored under the fixed key `"metadata"` in the LMDB +//! metadata database; includes: +//! - `version: DbVersion` +//! - `schema_hash: [u8; 32]` (BLAKE2b-256 of schema definition/contract) +//! - `migration_status: MigrationStatus` +//! - [`MigrationStatus`]: persisted migration progress marker to support resuming after shutdown. +//! +//! All metadata types in this file implement `ZainoVersionedSerde` and therefore have explicit +//! on-disk encoding versions. +//! +//! ## Trait surface +//! This file defines: +//! +//! - **Core traits** implemented by every DB version: +//! - [`DbRead`], [`DbWrite`], and [`DbCore`] +//! +//! - **Extension traits** implemented by *some* versions: +//! - [`BlockCoreExt`], [`BlockTransparentExt`], [`BlockShieldedExt`] +//! - [`CompactBlockExt`] +//! - [`IndexedBlockExt`] +//! - [`TransparentHistExt`] +//! +//! Extension traits must be capability-gated: if a DB does not advertise the corresponding capability +//! bit, routing must not hand that backend out for that request. +//! +//! 
# Versioning strategy (practical guidance) +//! +//! - `DbVersion::major` is the primary compatibility boundary: +//! - v0 is a legacy compact-block streamer. +//! - v1 adds richer indices (chain block data + transparent history). +//! +//! - `minor`/`patch` can be used for additive or compatible changes, but only if on-disk encodings +//! remain readable and all invariants remain satisfied. +//! +//! - `DbVersion::capability()` must remain conservative: +//! - only advertise capabilities that are fully correct for that on-disk schema. +//! +//! # Development: adding or changing features safely +//! +//! When adding a new feature/query that requires new persistent data: +//! +//! 1. Add a new capability bit to [`Capability`]. +//! 2. Add a corresponding variant to [`CapabilityRequest`] and map it in: +//! - `as_capability()` +//! - `name()` +//! 3. Add a new extension trait (or extend an existing one) that expresses the required operations. +//! 4. Implement the extension trait for the latest DB version(s). +//! 5. Update `DbVersion::capability()` for the version(s) that support it. +//! 6. Route it through `DbReader` by requesting the new `CapabilityRequest`. +//! +//! When changing persisted metadata formats, bump the `ZainoVersionedSerde::VERSION` for that type +//! and provide a decoding path in `decode_latest()`. 
+ +use core::fmt; + +use crate::{ + chain_index::types::TransactionHash, error::FinalisedStateError, read_fixed_le, read_u32_le, + read_u8, version, write_fixed_le, write_u32_le, write_u8, BlockHash, BlockHeaderData, + CommitmentTreeData, CompactBlockStream, FixedEncodedLen, Height, IndexedBlock, + OrchardCompactTx, OrchardTxList, SaplingCompactTx, SaplingTxList, StatusType, + TransparentCompactTx, TransparentTxList, TxLocation, TxidList, ZainoVersionedSerde, +}; + +#[cfg(feature = "transparent_address_history_experimental")] +use crate::{chain_index::types::AddrEventBytes, AddrScript, Outpoint}; + +use async_trait::async_trait; +use bitflags::bitflags; +use core2::io::{self, Read, Write}; +use zaino_proto::proto::utils::PoolTypeFilter; + +// ***** Capability definition structs ***** + +bitflags! { + /// Capability bitmap describing what an **open** database instance can serve. + /// + /// A capability is an *implementation promise*: if a backend advertises a capability bit, then + /// the corresponding trait surface must be fully and correctly implemented for that backend’s + /// on-disk schema. + /// + /// ## How capabilities are used + /// - [`DbVersion::capability`] maps a persisted schema version to a conservative capability set. + /// - [`crate::chain_index::finalised_state::router::Router`] holds a primary and optional shadow + /// backend and uses masks to decide which backend may serve a given feature. + /// - [`crate::chain_index::finalised_state::reader::DbReader`] requests capabilities via + /// [`CapabilityRequest`] (single-feature requests) and therefore obtains a backend that is + /// guaranteed to support the requested operation. + /// + /// ## Extension trait mapping + /// Each bit corresponds 1-for-1 with a trait surface: + /// - `READ_CORE` / `WRITE_CORE` correspond to [`DbRead`] / [`DbWrite`] + /// - all other bits correspond to extension traits (e.g. 
[`BlockCoreExt`], [`TransparentHistExt`]) + #[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Hash, Default)] + pub(crate) struct Capability: u32 { + /* ------ core database functionality ------ */ + + /// Backend implements [`DbRead`]. + /// + /// This includes: + /// - tip height (`db_height`) + /// - hash↔height lookups + /// - reading the persisted metadata singleton. + const READ_CORE = 0b0000_0001; + + /// Backend implements [`DbWrite`]. + /// + /// This includes: + /// - appending tip blocks, + /// - deleting tip blocks, + /// - and updating the metadata singleton. + const WRITE_CORE = 0b0000_0010; + + /* ---------- database extensions ---------- */ + + /// Backend implements [`BlockCoreExt`] (header/txid and tx-index lookups). + const BLOCK_CORE_EXT = 0b0000_0100; + + /// Backend implements [`BlockTransparentExt`] (transparent per-block/per-tx data). + const BLOCK_TRANSPARENT_EXT = 0b0000_1000; + + /// Backend implements [`BlockShieldedExt`] (sapling/orchard per-block/per-tx data). + const BLOCK_SHIELDED_EXT = 0b0001_0000; + + /// Backend implements [`CompactBlockExt`] (CompactBlock materialization). + const COMPACT_BLOCK_EXT = 0b0010_0000; + + /// Backend implements [`IndexedBlockExt`] (full `IndexedBlock` materialization). + const CHAIN_BLOCK_EXT = 0b0100_0000; + + /// Backend implements [`TransparentHistExt`] (transparent address history indices). + #[cfg(feature = "transparent_address_history_experimental")] + const TRANSPARENT_HIST_EXT = 0b1000_0000; + } +} + +impl Capability { + /// Capability set supported by a **fresh** database at the latest major schema supported by this build. + /// + /// This value is used as the “expected modern baseline” for new DB instances. It must remain in + /// sync with: + /// - the latest on-disk schema (`DbV1` today, `DbV2` in the future), + /// - and [`DbVersion::capability`] for that schema. 
+ pub(crate) const LATEST: Capability = { + let base = Capability::READ_CORE + .union(Capability::WRITE_CORE) + .union(Capability::BLOCK_CORE_EXT) + .union(Capability::BLOCK_TRANSPARENT_EXT) + .union(Capability::BLOCK_SHIELDED_EXT) + .union(Capability::COMPACT_BLOCK_EXT) + .union(Capability::CHAIN_BLOCK_EXT); + + #[cfg(feature = "transparent_address_history_experimental")] + { + base.union(Capability::TRANSPARENT_HIST_EXT) + } + #[cfg(not(feature = "transparent_address_history_experimental"))] + { + base + } + }; + + /// Returns `true` if `self` includes **all** bits from `other`. + /// + /// This is primarily used for feature gating and routing assertions. + #[inline] + pub(crate) const fn has(self, other: Capability) -> bool { + self.contains(other) + } +} + +/// A *single-feature* capability request used for routing. +/// +/// `CapabilityRequest` values are intentionally non-composite: each variant maps to exactly one +/// [`Capability`] bit. This keeps routing and error reporting unambiguous. +/// +/// The router uses the request to select a backend that advertises the requested capability. +/// If no backend advertises the capability, the call must fail with +/// [`FinalisedStateError::FeatureUnavailable`]. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub(crate) enum CapabilityRequest { + /// Request the [`DbRead`] core surface. + ReadCore, + + /// Request the [`DbWrite`] core surface. + WriteCore, + + /// Request the [`BlockCoreExt`] extension surface. + BlockCoreExt, + + /// Request the [`BlockTransparentExt`] extension surface. + BlockTransparentExt, + + /// Request the [`BlockShieldedExt`] extension surface. + BlockShieldedExt, + + /// Request the [`CompactBlockExt`] extension surface. + CompactBlockExt, + + /// Request the [`IndexedBlockExt`] extension surface. + IndexedBlockExt, + + /// Request the [`TransparentHistExt`] extension surface. 
+ #[cfg(feature = "transparent_address_history_experimental")] + TransparentHistExt, +} + +impl CapabilityRequest { + /// Maps this request to the corresponding single-bit [`Capability`]. + /// + /// This mapping must remain 1-for-1 with: + /// - the definitions in [`Capability`], and + /// - the human-readable names returned by [`CapabilityRequest::name`]. + #[inline] + pub(crate) const fn as_capability(self) -> Capability { + match self { + CapabilityRequest::ReadCore => Capability::READ_CORE, + CapabilityRequest::WriteCore => Capability::WRITE_CORE, + CapabilityRequest::BlockCoreExt => Capability::BLOCK_CORE_EXT, + CapabilityRequest::BlockTransparentExt => Capability::BLOCK_TRANSPARENT_EXT, + CapabilityRequest::BlockShieldedExt => Capability::BLOCK_SHIELDED_EXT, + CapabilityRequest::CompactBlockExt => Capability::COMPACT_BLOCK_EXT, + CapabilityRequest::IndexedBlockExt => Capability::CHAIN_BLOCK_EXT, + #[cfg(feature = "transparent_address_history_experimental")] + CapabilityRequest::TransparentHistExt => Capability::TRANSPARENT_HIST_EXT, + } + } + + /// Returns a stable human-friendly feature name for errors and logs. + /// + /// This value is used in [`FinalisedStateError::FeatureUnavailable`] and must remain stable + /// across refactors to avoid confusing diagnostics. + #[inline] + pub(crate) const fn name(self) -> &'static str { + match self { + CapabilityRequest::ReadCore => "READ_CORE", + CapabilityRequest::WriteCore => "WRITE_CORE", + CapabilityRequest::BlockCoreExt => "BLOCK_CORE_EXT", + CapabilityRequest::BlockTransparentExt => "BLOCK_TRANSPARENT_EXT", + CapabilityRequest::BlockShieldedExt => "BLOCK_SHIELDED_EXT", + CapabilityRequest::CompactBlockExt => "COMPACT_BLOCK_EXT", + CapabilityRequest::IndexedBlockExt => "CHAIN_BLOCK_EXT", + #[cfg(feature = "transparent_address_history_experimental")] + CapabilityRequest::TransparentHistExt => "TRANSPARENT_HIST_EXT", + } + } +} + +/// Convenience conversion from a routing request to its single-bit capability. 
+impl From for Capability { + #[inline] + fn from(req: CapabilityRequest) -> Self { + req.as_capability() + } +} + +// ***** Database metadata structs ***** + +/// Persisted database metadata singleton. +/// +/// This record is stored under the fixed key `"metadata"` in the LMDB metadata database and is used to: +/// - identify the schema version currently on disk, +/// - bind the database to an explicit schema contract (`schema_hash`), +/// - and persist migration progress (`migration_status`) for crash-safe resumption. +/// +/// ## Encoding +/// `DbMetadata` implements [`ZainoVersionedSerde`]. The encoded body is: +/// - one versioned [`DbVersion`], +/// - a fixed 32-byte schema hash, +/// - one versioned [`MigrationStatus`]. +#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Hash, Default)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub(crate) struct DbMetadata { + /// Schema version triple for the on-disk database. + pub(crate) version: DbVersion, + + /// BLAKE2b-256 hash of the schema definition/contract. + /// + /// This hash is intended to detect accidental schema drift (layout/type changes) across builds. + /// It is not a security boundary; it is a correctness and operator-safety signal. + pub(crate) schema_hash: [u8; 32], + + /// Persisted migration state, used to resume safely after shutdown/crash. + /// + /// Outside of migrations this should be [`MigrationStatus::Empty`]. + pub(crate) migration_status: MigrationStatus, +} + +impl DbMetadata { + /// Constructs a new metadata record. + /// + /// Callers should ensure `schema_hash` matches the schema contract for `version`, and that + /// `migration_status` is set conservatively (typically `Empty` unless actively migrating). + pub(crate) fn new( + version: DbVersion, + schema_hash: [u8; 32], + migration_status: MigrationStatus, + ) -> Self { + Self { + version, + schema_hash, + migration_status, + } + } + + /// Returns the persisted schema version. 
+ pub(crate) fn version(&self) -> DbVersion { + self.version + } + + /// Returns the schema contract hash. + pub(crate) fn schema(&self) -> [u8; 32] { + self.schema_hash + } + + /// Returns the persisted migration status. + pub(crate) fn migration_status(&self) -> MigrationStatus { + self.migration_status + } +} + +/// Versioned on-disk encoding for the metadata singleton. +/// +/// Body layout (after the `ZainoVersionedSerde` tag byte): +/// 1. `DbVersion` (versioned, includes its own tag) +/// 2. `[u8; 32]` schema hash +/// 3. `MigrationStatus` (versioned, includes its own tag) +impl ZainoVersionedSerde for DbMetadata { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + self.version.serialize(&mut *w)?; + write_fixed_le::<32, _>(&mut *w, &self.schema_hash)?; + self.migration_status.serialize(&mut *w) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let version = DbVersion::deserialize(&mut *r)?; + let schema_hash = read_fixed_le::<32, _>(&mut *r)?; + let migration_status = MigrationStatus::deserialize(&mut *r)?; + Ok(DbMetadata { + version, + schema_hash, + migration_status, + }) + } +} + +/// `DbMetadata` has a fixed encoded body length. +/// +/// Body length = `DbVersion::VERSIONED_LEN` (12 + 1) + 32-byte schema hash +/// + `MigrationStatus::VERSIONED_LEN` (1 + 1) = 47 bytes. +impl FixedEncodedLen for DbMetadata { + const ENCODED_LEN: usize = DbVersion::VERSIONED_LEN + 32 + MigrationStatus::VERSIONED_LEN; +} + +/// Human-readable summary for logs. +/// +/// The schema hash is abbreviated to the first 4 bytes for readability. 
+impl core::fmt::Display for DbMetadata { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "DbMetadata {{ version: {}.{}.{} , schema_hash: 0x", + self.version.major(), + self.version.minor(), + self.version.patch() + )?; + + for byte in &self.schema_hash[..4] { + write!(f, "{byte:02x}")?; + } + + write!(f, "… }}") + } +} + +/// Database schema version triple. +/// +/// The version is interpreted as `{major}.{minor}.{patch}` and is used to: +/// - select a database backend implementation, +/// - determine supported capabilities for routing, +/// - and enforce safe upgrades via migrations. +/// +/// ## Compatibility model +/// - `major` is the primary compatibility boundary (schema family). +/// - `minor` and `patch` may be used for compatible changes, but only if all persisted record +/// encodings remain readable and correctness invariants are preserved. +/// +/// The authoritative capability mapping is provided by [`DbVersion::capability`], and must remain +/// conservative: only advertise features that are correct for the given on-disk schema. +#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Hash, Default)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub(crate) struct DbVersion { + /// Major version tag. + pub(crate) major: u32, + /// Minor version tag. + pub(crate) minor: u32, + /// Patch tag. + pub(crate) patch: u32, +} + +impl DbVersion { + /// Construct a new DbVersion. + pub(crate) fn new(major: u32, minor: u32, patch: u32) -> Self { + Self { + major, + minor, + patch, + } + } + + /// Returns the major version tag. + pub(crate) fn major(&self) -> u32 { + self.major + } + + /// Returns the minor version tag. + pub(crate) fn minor(&self) -> u32 { + self.minor + } + + /// Returns the patch tag. + pub(crate) fn patch(&self) -> u32 { + self.patch + } + + /// Returns the conservative capability set for this schema version. 
+ /// + /// Routing relies on this mapping for safety: if a capability is not included here, callers + /// must not assume the corresponding trait surface is available. + /// + /// If a schema version is unknown to this build, this returns [`Capability::empty`], ensuring + /// the router will reject feature requests rather than serving incorrect data. + pub(crate) fn capability(&self) -> Capability { + match (self.major, self.minor) { + // V0: legacy compact block streamer. + (0, _) => { + Capability::READ_CORE | Capability::WRITE_CORE | Capability::COMPACT_BLOCK_EXT + } + + // V1: Adds chainblockv1 and transparent transaction history data. + (1, 0) => { + let base = Capability::READ_CORE + | Capability::WRITE_CORE + | Capability::BLOCK_CORE_EXT + | Capability::BLOCK_TRANSPARENT_EXT + | Capability::BLOCK_SHIELDED_EXT + | Capability::COMPACT_BLOCK_EXT + | Capability::CHAIN_BLOCK_EXT; + + #[cfg(feature = "transparent_address_history_experimental")] + { + base | Capability::TRANSPARENT_HIST_EXT + } + #[cfg(not(feature = "transparent_address_history_experimental"))] + { + base + } + } + + // Unknown / unsupported + _ => Capability::empty(), + } + } +} + +/// Versioned on-disk encoding for database versions. +/// +/// Body layout (after the tag byte): three little-endian `u32` values: +/// `major`, `minor`, `patch`. 
+impl ZainoVersionedSerde for DbVersion { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + write_u32_le(&mut *w, self.major)?; + write_u32_le(&mut *w, self.minor)?; + write_u32_le(&mut *w, self.patch) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let major = read_u32_le(&mut *r)?; + let minor = read_u32_le(&mut *r)?; + let patch = read_u32_le(&mut *r)?; + Ok(DbVersion { + major, + minor, + patch, + }) + } +} + +// DbVersion: body = 3*(4-byte u32) - 12 bytes +impl FixedEncodedLen for DbVersion { + const ENCODED_LEN: usize = 4 + 4 + 4; +} + +/// Formats as `{major}.{minor}.{patch}` for logs and diagnostics. +impl core::fmt::Display for DbVersion { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{}.{}.{}", self.major, self.minor, self.patch) + } +} + +/// Persisted migration progress marker. +/// +/// This value exists to make migrations crash-resumable. A migration may: +/// - build a shadow database incrementally, +/// - optionally perform partial rebuild phases to limit disk amplification, +/// - and finally promote the shadow to primary. +/// +/// Database implementations and the migration manager must treat this value conservatively: +/// if the process is interrupted, the next startup should be able to determine the correct +/// resumption behavior from this status and the on-disk state. +#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Hash)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +#[derive(Default)] +pub(crate) enum MigrationStatus { + /// No migration is in progress. + #[default] + Empty, + + /// A partial build phase is currently in progress. + /// + /// Some migrations split work into phases to limit disk usage (for example, deleting the old + /// database before rebuilding the new one in full). 
+ PartialBuidInProgress, + + /// The partial build phase completed successfully. + PartialBuildComplete, + + /// The final build phase is currently in progress. + FinalBuildInProgress, + + /// Migration work is complete and the database is ready for promotion/steady-state operation. + Complete, +} + +/// Human-readable migration status for logs and diagnostics. +impl fmt::Display for MigrationStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let status_str = match self { + MigrationStatus::Empty => "Empty", + MigrationStatus::PartialBuidInProgress => "Partial build in progress", + MigrationStatus::PartialBuildComplete => "Partial build complete", + MigrationStatus::FinalBuildInProgress => "Final build in progress", + MigrationStatus::Complete => "Complete", + }; + write!(f, "{status_str}") + } +} + +/// Versioned on-disk encoding for migration status. +/// +/// Body layout (after the tag byte): one `u8` discriminator. +/// Unknown tags must fail decoding. +impl ZainoVersionedSerde for MigrationStatus { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let tag = match self { + MigrationStatus::Empty => 0, + MigrationStatus::PartialBuidInProgress => 1, + MigrationStatus::PartialBuildComplete => 2, + MigrationStatus::FinalBuildInProgress => 3, + MigrationStatus::Complete => 4, + }; + write_u8(w, tag) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + match read_u8(r)? { + 0 => Ok(MigrationStatus::Empty), + 1 => Ok(MigrationStatus::PartialBuidInProgress), + 2 => Ok(MigrationStatus::PartialBuildComplete), + 3 => Ok(MigrationStatus::FinalBuildInProgress), + 4 => Ok(MigrationStatus::Complete), + other => Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("invalid MigrationStatus tag: {other}"), + )), + } + } +} + +/// `MigrationStatus` has a fixed 1-byte encoded body (discriminator). 
+impl FixedEncodedLen for MigrationStatus { + const ENCODED_LEN: usize = 1; +} + +// ***** Core Database functionality ***** + +/// Core read-only operations that *every* database schema version must support. +/// +/// These operations form the minimum required surface for: +/// - determining the chain tip stored on disk, +/// - mapping hashes to heights and vice versa, +/// - and reading the persisted schema metadata. +/// +/// All methods must be consistent with the database’s *finalised* chain view. +#[async_trait] +pub trait DbRead: Send + Sync { + /// Returns the highest block height stored, or `None` if the database is empty. + /// + /// Implementations must treat the stored height as the authoritative tip for all other core + /// lookups. + async fn db_height(&self) -> Result, FinalisedStateError>; + + /// Returns the height for `hash` if present. + /// + /// Returns: + /// - `Ok(Some(height))` if indexed, + /// - `Ok(None)` if not present (not an error). + async fn get_block_height( + &self, + hash: BlockHash, + ) -> Result, FinalisedStateError>; + + /// Returns the hash for `height` if present. + /// + /// Returns: + /// - `Ok(Some(hash))` if indexed, + /// - `Ok(None)` if not present (not an error). + async fn get_block_hash( + &self, + height: Height, + ) -> Result, FinalisedStateError>; + + /// Returns the persisted metadata singleton. + /// + /// This must reflect the schema actually used by the backend instance. + async fn get_metadata(&self) -> Result; +} + +/// Core write operations that *every* database schema version must support. +/// +/// The finalised database is updated using *stack semantics*: +/// - blocks are appended at the tip (`write_block`), +/// - and removed only from the tip (`delete_block_at_height` / `delete_block`). +/// +/// Implementations must keep all secondary indices internally consistent with these operations. +#[async_trait] +pub trait DbWrite: Send + Sync { + /// Appends a fully-validated block to the database. 
+ /// + /// Invariant: `block` must be the next height after the current tip (no gaps, no rewrites). + async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError>; + + /// Deletes the tip block identified by `height` from every finalised table. + /// + /// Invariant: `height` must be the current database tip height. + async fn delete_block_at_height(&self, height: Height) -> Result<(), FinalisedStateError>; + + /// Deletes the provided tip block from every finalised table. + /// + /// This is the “full-information” deletion path: it takes an [`IndexedBlock`] so the backend + /// can deterministically remove all derived index entries even if reconstructing them from + /// height alone is not possible. + /// + /// Invariant: `block` must be the current database tip block. + async fn delete_block(&self, block: &IndexedBlock) -> Result<(), FinalisedStateError>; + + /// Replaces the persisted metadata singleton with `metadata`. + /// + /// Implementations must ensure this update is atomic with respect to readers (within the + /// backend’s concurrency model). + async fn update_metadata(&self, metadata: DbMetadata) -> Result<(), FinalisedStateError>; +} + +/// Core runtime surface implemented by every backend instance. +/// +/// This trait binds together: +/// - the core read/write operations, and +/// - lifecycle and status reporting for background tasks. +/// +/// In practice, [`crate::chain_index::finalised_state::router::Router`] implements this by +/// delegating to the currently routed core backend(s). +#[async_trait] +pub trait DbCore: DbRead + DbWrite + Send + Sync { + /// Returns the current runtime status (`Starting`, `Syncing`, `Ready`, …). + fn status(&self) -> StatusType; + + /// Initiates a graceful shutdown of background tasks and closes database resources. + async fn shutdown(&self) -> Result<(), FinalisedStateError>; +} + +// ***** Database Extension traits ***** + +/// Core block indexing extension. 
+/// +/// This extension covers header and txid range fetches plus transaction indexing by [`TxLocation`]. +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise [`Capability::BLOCK_CORE_EXT`]. +#[async_trait] +pub trait BlockCoreExt: Send + Sync { + /// Return block header data by height. + async fn get_block_header( + &self, + height: Height, + ) -> Result; + + /// Returns block headers for the inclusive range `[start, end]`. + /// + /// Callers should ensure `start <= end`. + async fn get_block_range_headers( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError>; + + /// Return block txids by height. + async fn get_block_txids(&self, height: Height) -> Result; + + /// Return block txids for the given height range. + /// + /// Callers should ensure `start <= end`. + async fn get_block_range_txids( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError>; + + /// Returns the transaction hash for the given [`TxLocation`]. + /// + /// `TxLocation` is the internal transaction index key used by the database. + async fn get_txid( + &self, + tx_location: TxLocation, + ) -> Result; + + /// Returns the [`TxLocation`] for `txid` if the transaction is indexed. + /// + /// Returns: + /// - `Ok(Some(location))` if indexed, + /// - `Ok(None)` if not present (not an error). + /// + /// NOTE: transaction data is indexed by TxLocation internally. + async fn get_tx_location( + &self, + txid: &TransactionHash, + ) -> Result, FinalisedStateError>; +} + +/// Transparent transaction indexing extension. +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise +/// [`Capability::BLOCK_TRANSPARENT_EXT`]. +#[async_trait] +pub trait BlockTransparentExt: Send + Sync { + /// Returns the serialized [`TransparentCompactTx`] for `tx_location`, if present. + /// + /// Returns: + /// - `Ok(Some(tx))` if present, + /// - `Ok(None)` if not present (not an error). 
+ async fn get_transparent( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError>; + + /// Fetch block transparent transaction data for given block height. + async fn get_block_transparent( + &self, + height: Height, + ) -> Result; + + /// Returns transparent transaction tx data for the inclusive block height range `[start, end]`. + async fn get_block_range_transparent( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError>; +} + +/// Shielded transaction indexing extension (Sapling + Orchard + commitment tree data). +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise +/// [`Capability::BLOCK_SHIELDED_EXT`]. +#[async_trait] +pub trait BlockShieldedExt: Send + Sync { + /// Fetch the serialized SaplingCompactTx for the given TxLocation, if present. + async fn get_sapling( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError>; + + /// Fetch block sapling transaction data by height. + async fn get_block_sapling(&self, height: Height) + -> Result; + + /// Fetches block sapling tx data for the given (inclusive) height range. + async fn get_block_range_sapling( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError>; + + /// Fetch the serialized OrchardCompactTx for the given TxLocation, if present. + async fn get_orchard( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError>; + + /// Fetch block orchard transaction data by height. + async fn get_block_orchard(&self, height: Height) + -> Result; + + /// Fetches block orchard tx data for the given (inclusive) height range. + async fn get_block_range_orchard( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError>; + + /// Fetch block commitment tree data by height. + async fn get_block_commitment_tree_data( + &self, + height: Height, + ) -> Result; + + /// Fetches block commitment tree data for the given (inclusive) height range. 
+ async fn get_block_range_commitment_tree_data( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError>; +} + +/// CompactBlock materialization extension. +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise +/// [`Capability::COMPACT_BLOCK_EXT`]. +#[async_trait] +pub trait CompactBlockExt: Send + Sync { + /// Returns the CompactBlock for the given Height. + async fn get_compact_block( + &self, + height: Height, + pool_types: PoolTypeFilter, + ) -> Result; + + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result; +} + +/// `IndexedBlock` materialization extension. +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise +/// [`Capability::CHAIN_BLOCK_EXT`]. +#[async_trait] +pub trait IndexedBlockExt: Send + Sync { + /// Returns the [`IndexedBlock`] for `height`, if present. + /// + /// Returns: + /// - `Ok(Some(block))` if present, + /// - `Ok(None)` if not present (not an error). + /// + /// TODO: Add separate range fetch method as this method is slow for fetching large ranges! + async fn get_chain_block( + &self, + height: Height, + ) -> Result, FinalisedStateError>; +} + +/// Transparent address history indexing extension. +/// +/// This extension provides address-scoped queries backed by persisted indices built from the +/// transparent transaction graph (outputs, spends, and derived address events). +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise +/// [`Capability::TRANSPARENT_HIST_EXT`]. 
+/// +/// Range semantics: +/// - Methods that accept `start_height` and `end_height` interpret the range as inclusive: +/// `[start_height, end_height]` +#[cfg(feature = "transparent_address_history_experimental")] +#[async_trait] +pub trait TransparentHistExt: Send + Sync { + /// Fetch all address history records for a given transparent address. + /// + /// Returns: + /// - `Ok(Some(records))` if one or more valid records exist, + /// - `Ok(None)` if no records exist (not an error), + /// - `Err(...)` if any decoding or DB error occurs. + async fn addr_records( + &self, + addr_script: AddrScript, + ) -> Result>, FinalisedStateError>; + + /// Fetch all address history records for a given address and TxLocation. + /// + /// Returns: + /// - `Ok(Some(records))` if one or more matching records are found at that index, + /// - `Ok(None)` if no matching records exist (not an error), + /// - `Err(...)` on decode or DB failure. + async fn addr_and_index_records( + &self, + addr_script: AddrScript, + tx_location: TxLocation, + ) -> Result>, FinalisedStateError>; + + /// Fetch all distinct `TxLocation` values for `addr_script` within the + /// height range `[start_height, end_height]` (inclusive). + /// + /// Returns: + /// - `Ok(Some(vec))` if one or more matching records are found, + /// - `Ok(None)` if no matches found (not an error), + /// - `Err(...)` on decode or DB failure. + async fn addr_tx_locations_by_range( + &self, + addr_script: AddrScript, + start_height: Height, + end_height: Height, + ) -> Result>, FinalisedStateError>; + + /// Fetch all UTXOs (unspent mined outputs) for `addr_script` within the + /// height range `[start_height, end_height]` (inclusive). + /// + /// Each entry is `(TxLocation, vout, value)`. + /// + /// Returns: + /// - `Ok(Some(vec))` if one or more UTXOs are found, + /// - `Ok(None)` if none found (not an error), + /// - `Err(...)` on decode or DB failure. 
+ async fn addr_utxos_by_range( + &self, + addr_script: AddrScript, + start_height: Height, + end_height: Height, + ) -> Result>, FinalisedStateError>; + + /// Computes the transparent balance change for `addr_script` over the + /// height range `[start_height, end_height]` (inclusive). + /// + /// Includes: + /// - `+value` for mined outputs + /// - `−value` for spent inputs + /// + /// Returns the signed net value as `i64`, or error on failure. + async fn addr_balance_by_range( + &self, + addr_script: AddrScript, + start_height: Height, + end_height: Height, + ) -> Result; + + // TODO: Add addr_deltas_by_range method! + + /// Fetch the `TxLocation` that spent a given outpoint, if any. + /// + /// Returns: + /// - `Ok(Some(TxLocation))` if the outpoint is spent. + /// - `Ok(None)` if no entry exists (not spent or not known). + /// - `Err(...)` on deserialization or DB error. + async fn get_outpoint_spender( + &self, + outpoint: Outpoint, + ) -> Result, FinalisedStateError>; + + /// Fetch the `TxLocation` entries for a batch of outpoints. + /// + /// For each input: + /// - Returns `Some(TxLocation)` if spent, + /// - `None` if not found, + /// - or returns `Err` immediately if any DB or decode error occurs. + async fn get_outpoint_spenders( + &self, + outpoints: Vec, + ) -> Result>, FinalisedStateError>; +} diff --git a/zaino-state/src/chain_index/finalised_state/db.rs b/zaino-state/src/chain_index/finalised_state/db.rs new file mode 100644 index 000000000..7a6bdd47b --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/db.rs @@ -0,0 +1,628 @@ +//! Versioned database backends (DbBackend) and major-version dispatch +//! +//! This file defines the major-version split for the on-disk finalised database and provides +//! [`DbBackend`], a version-erased enum used throughout the finalised-state subsystem. +//! +//! Concrete database implementations live in: +//! - [`v0`]: legacy schema (compact-block streamer) +//! 
- [`v1`]: current schema (expanded indices and query surface) +//! +//! `DbBackend` delegates the core DB traits (`DbCore`, `DbRead`, `DbWrite`) and all extension traits +//! to the appropriate concrete implementation. +//! +//! # Capability model integration +//! +//! Each `DbBackend` instance declares its supported [`Capability`] set via `DbBackend::capability()`. +//! This must remain consistent with: +//! - [`capability::DbVersion::capability()`] (schema version → capability mapping), and +//! - the extension trait impls in this file (unsupported methods must return `FeatureUnavailable`). +//! +//! In particular: +//! - v0 supports READ/WRITE core + `CompactBlockExt`. +//! - v1 supports the full current capability set (`Capability::LATEST`), including: +//! - block header/txid/location indexing, +//! - transparent + shielded compact tx access, +//! - indexed block retrieval, +//! - transparent address history indices. +//! +//! # On-disk directory layout (v1+) +//! +//! [`VERSION_DIRS`] enumerates the version subdirectory names used for versioned layouts under the +//! per-network directory (`mainnet/`, `testnet/`, `regtest/`). +//! +//! **Important:** new versions must be appended to `VERSION_DIRS` in order, with no gaps, because +//! discovery code assumes index+1 corresponds to the version number. +//! +//! # Adding a new major version (v2) — checklist +//! +//! 1. Create `db::v2` and implement `DbV2::spawn(cfg)`. +//! 2. Add `V2(DbV2)` variant to [`DbBackend`]. +//! 3. Add `spawn_v2` constructor. +//! 4. Append `"v2"` to [`VERSION_DIRS`]. +//! 5. Extend all trait delegation `match` arms in this file. +//! 6. Update `DbBackend::capability()` and `DbVersion::capability()` for the new version. +//! 7. Add a migration step in `migrations.rs` and register it with `MigrationManager`. +//! +//! # Development: adding new indices/queries +//! +//! Prefer implementing new indices in the latest DB version first (e.g. `v1`) and exposing them via: +//! 
- a capability bit + extension trait in `capability.rs`, +//! - routing via `DbReader` and `Router`, +//! - and a migration/rebuild plan if the index requires historical backfill. +//! +//! Keep unsupported methods explicit: if a DB version does not provide a feature, return +//! `FinalisedStateError::FeatureUnavailable(...)` rather than silently degrading semantics. + +pub(crate) mod v0; +pub(crate) mod v1; + +use v0::DbV0; +use v1::DbV1; +use zaino_proto::proto::utils::PoolTypeFilter; + +use crate::{ + chain_index::{ + finalised_state::capability::{ + BlockCoreExt, BlockShieldedExt, BlockTransparentExt, CompactBlockExt, DbCore, + DbMetadata, DbRead, DbWrite, IndexedBlockExt, + }, + types::TransactionHash, + }, + config::BlockCacheConfig, + error::FinalisedStateError, + BlockHash, BlockHeaderData, CommitmentTreeData, CompactBlockStream, Height, IndexedBlock, + OrchardCompactTx, OrchardTxList, SaplingCompactTx, SaplingTxList, StatusType, + TransparentCompactTx, TransparentTxList, TxLocation, TxidList, +}; + +#[cfg(feature = "transparent_address_history_experimental")] +use crate::{chain_index::finalised_state::capability::TransparentHistExt, AddrScript, Outpoint}; + +use async_trait::async_trait; +use std::time::Duration; +use tokio::time::{interval, MissedTickBehavior}; + +use super::capability::Capability; + +/// Version subdirectory names for versioned on-disk layouts. +/// +/// This list defines the supported major-version directory names under a per-network directory. +/// For example, a v1 database is stored under `/v1/`. +/// +/// Invariants: +/// - New versions must be appended to this list in order. +/// - There must be no missing versions between entries. +/// - Discovery code assumes `VERSION_DIRS[index]` corresponds to major version `index + 1`. +pub(super) const VERSION_DIRS: [&str; 1] = ["v1"]; + +#[derive(Debug)] +/// All concrete database implementations. +/// Version-erased database backend. 
+/// +/// This enum is the central dispatch point for the finalised-state database: +/// - It is constructed by spawning a concrete backend (for example, v0 or v1). +/// - It implements the core database traits (`DbCore`, `DbRead`, `DbWrite`). +/// - It implements capability extension traits by delegating to the concrete implementation, or by +/// returning [`FinalisedStateError::FeatureUnavailable`] when unsupported. +/// +/// Capability reporting is provided by [`DbBackend::capability`] and must match the methods that +/// successfully dispatch in the extension trait implementations below. +pub(crate) enum DbBackend { + /// Legacy schema backend. + V0(DbV0), + + /// Current schema backend. + V1(DbV1), +} + +// ***** Core database functionality ***** + +impl DbBackend { + /// Spawn a v0 database backend. + /// + /// This constructs and initializes the legacy schema implementation and returns it wrapped in + /// [`DbBackend::V0`]. + pub(crate) async fn spawn_v0(cfg: &BlockCacheConfig) -> Result { + Ok(Self::V0(DbV0::spawn(cfg).await?)) + } + + /// Spawn a v1 database backend. + /// + /// This constructs and initializes the current schema implementation and returns it wrapped in + /// [`DbBackend::V1`]. + pub(crate) async fn spawn_v1(cfg: &BlockCacheConfig) -> Result { + Ok(Self::V1(DbV1::spawn(cfg).await?)) + } + + /// Wait until the database backend reports [`StatusType::Ready`]. + /// + /// This polls `DbCore::status()` on a fixed interval. It is intended for startup sequencing in + /// components that require the database to be fully initialized before accepting requests. + /// + /// Notes: + /// - This method does not return an error. If the database never becomes ready, it will loop. + /// - The polling interval is intentionally small and uses `MissedTickBehavior::Delay` to avoid + /// burst catch-up behavior under load. 
+ pub(crate) async fn wait_until_ready(&self) { + let mut ticker = interval(Duration::from_millis(100)); + ticker.set_missed_tick_behavior(MissedTickBehavior::Delay); + + loop { + ticker.tick().await; + if self.status() == StatusType::Ready { + break; + } + } + } + + /// Return the capabilities supported by this database instance. + /// + /// This is the authoritative runtime capability set for this backend and must remain consistent + /// with the dispatch behavior in the extension trait implementations below. + pub(crate) fn capability(&self) -> Capability { + match self { + Self::V0(_) => { + Capability::READ_CORE | Capability::WRITE_CORE | Capability::COMPACT_BLOCK_EXT + } + Self::V1(_) => Capability::LATEST, + } + } +} + +impl From for DbBackend { + /// Wrap an already-constructed v0 database backend. + fn from(value: DbV0) -> Self { + Self::V0(value) + } +} + +impl From for DbBackend { + /// Wrap an already-constructed v1 database backend. + fn from(value: DbV1) -> Self { + Self::V1(value) + } +} + +#[async_trait] +impl DbCore for DbBackend { + /// Return the current status of the backend. + /// + /// This is a thin delegation wrapper over the concrete implementation. + fn status(&self) -> StatusType { + match self { + Self::V0(db) => db.status(), + Self::V1(db) => db.status(), + } + } + + /// Shut down the backend and release associated resources. + /// + /// This is a thin delegation wrapper over the concrete implementation. + async fn shutdown(&self) -> Result<(), FinalisedStateError> { + match self { + Self::V0(db) => db.shutdown().await, + Self::V1(db) => db.shutdown().await, + } + } +} + +#[async_trait] +impl DbRead for DbBackend { + /// Return the highest stored height in the database, if present. + /// + /// This is a thin delegation wrapper over the concrete implementation. 
+ async fn db_height(&self) -> Result, FinalisedStateError> { + match self { + Self::V0(db) => db.db_height().await, + Self::V1(db) => db.db_height().await, + } + } + + /// Resolve a block hash to its stored height, if present. + /// + /// This is a thin delegation wrapper over the concrete implementation. + async fn get_block_height( + &self, + hash: BlockHash, + ) -> Result, FinalisedStateError> { + match self { + Self::V0(db) => db.get_block_height(hash).await, + Self::V1(db) => db.get_block_height(hash).await, + } + } + + /// Resolve a block height to its stored block hash, if present. + /// + /// This is a thin delegation wrapper over the concrete implementation. + async fn get_block_hash( + &self, + height: Height, + ) -> Result, FinalisedStateError> { + match self { + Self::V0(db) => db.get_block_hash(height).await, + Self::V1(db) => db.get_block_hash(height).await, + } + } + + /// Read the database metadata record. + /// + /// This includes versioning and migration status and is used by the migration manager and + /// compatibility checks. + async fn get_metadata(&self) -> Result { + match self { + Self::V0(db) => db.get_metadata().await, + Self::V1(db) => db.get_metadata().await, + } + } +} + +#[async_trait] +impl DbWrite for DbBackend { + /// Write a fully-indexed block into the database. + /// + /// This is a thin delegation wrapper over the concrete implementation. + async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError> { + match self { + Self::V0(db) => db.write_block(block).await, + Self::V1(db) => db.write_block(block).await, + } + } + + /// Delete the block at a given height, if present. + /// + /// This is a thin delegation wrapper over the concrete implementation. 
+ async fn delete_block_at_height(&self, height: Height) -> Result<(), FinalisedStateError> { + match self { + Self::V0(db) => db.delete_block_at_height(height).await, + Self::V1(db) => db.delete_block_at_height(height).await, + } + } + + /// Delete a specific indexed block from the database. + /// + /// This is a thin delegation wrapper over the concrete implementation. + async fn delete_block(&self, block: &IndexedBlock) -> Result<(), FinalisedStateError> { + match self { + Self::V0(db) => db.delete_block(block).await, + Self::V1(db) => db.delete_block(block).await, + } + } + + /// Update the database metadata record. + /// + /// This is used by migrations and schema management logic. + async fn update_metadata(&self, metadata: DbMetadata) -> Result<(), FinalisedStateError> { + match self { + Self::V0(db) => db.update_metadata(metadata).await, + Self::V1(db) => db.update_metadata(metadata).await, + } + } +} + +// ***** Database capability extension traits ***** +// +// Each extension trait corresponds to a distinct capability group. The dispatch rules are: +// - If the backend supports the capability, delegate to the concrete implementation. +// - If unsupported, return `FinalisedStateError::FeatureUnavailable("")`. +// +// These names must remain consistent with the capability wiring in `capability.rs`. 
+ +#[async_trait] +impl BlockCoreExt for DbBackend { + async fn get_block_header( + &self, + height: Height, + ) -> Result { + match self { + Self::V1(db) => db.get_block_header(height).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_core")), + } + } + + async fn get_block_range_headers( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + match self { + Self::V1(db) => db.get_block_range_headers(start, end).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_core")), + } + } + + async fn get_block_txids(&self, height: Height) -> Result { + match self { + Self::V1(db) => db.get_block_txids(height).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_core")), + } + } + + async fn get_block_range_txids( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + match self { + Self::V1(db) => db.get_block_range_txids(start, end).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_core")), + } + } + + async fn get_txid( + &self, + tx_location: TxLocation, + ) -> Result { + match self { + Self::V1(db) => db.get_txid(tx_location).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_core")), + } + } + + async fn get_tx_location( + &self, + txid: &TransactionHash, + ) -> Result, FinalisedStateError> { + match self { + Self::V1(db) => db.get_tx_location(txid).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_core")), + } + } +} + +#[async_trait] +impl BlockTransparentExt for DbBackend { + async fn get_transparent( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError> { + match self { + Self::V1(db) => db.get_transparent(tx_location).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_transparent")), + } + } + + async fn get_block_transparent( + &self, + height: Height, + ) -> Result { + match self { + Self::V1(db) => db.get_block_transparent(height).await, + _ => 
Err(FinalisedStateError::FeatureUnavailable("block_transparent")), + } + } + + async fn get_block_range_transparent( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + match self { + Self::V1(db) => db.get_block_range_transparent(start, end).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_transparent")), + } + } +} + +#[async_trait] +impl BlockShieldedExt for DbBackend { + async fn get_sapling( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError> { + match self { + Self::V1(db) => db.get_sapling(tx_location).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_shielded")), + } + } + + async fn get_block_sapling(&self, h: Height) -> Result { + match self { + Self::V1(db) => db.get_block_sapling(h).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_shielded")), + } + } + + async fn get_block_range_sapling( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + match self { + Self::V1(db) => db.get_block_range_sapling(start, end).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_shielded")), + } + } + + async fn get_orchard( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError> { + match self { + Self::V1(db) => db.get_orchard(tx_location).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_shielded")), + } + } + + async fn get_block_orchard(&self, h: Height) -> Result { + match self { + Self::V1(db) => db.get_block_orchard(h).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_shielded")), + } + } + + async fn get_block_range_orchard( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + match self { + Self::V1(db) => db.get_block_range_orchard(start, end).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_shielded")), + } + } + + async fn get_block_commitment_tree_data( + &self, + height: Height, + ) -> Result { + match self { + Self::V1(db) => 
db.get_block_commitment_tree_data(height).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_shielded")), + } + } + + async fn get_block_range_commitment_tree_data( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + match self { + Self::V1(db) => db.get_block_range_commitment_tree_data(start, end).await, + _ => Err(FinalisedStateError::FeatureUnavailable("block_shielded")), + } + } +} + +#[async_trait] +impl CompactBlockExt for DbBackend { + async fn get_compact_block( + &self, + height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + #[allow(unreachable_patterns)] + match self { + Self::V0(db) => db.get_compact_block(height, pool_types).await, + Self::V1(db) => db.get_compact_block(height, pool_types).await, + _ => Err(FinalisedStateError::FeatureUnavailable("compact_block")), + } + } + + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + #[allow(unreachable_patterns)] + match self { + Self::V0(db) => { + db.get_compact_block_stream(start_height, end_height, pool_types) + .await + } + Self::V1(db) => { + db.get_compact_block_stream(start_height, end_height, pool_types) + .await + } + _ => Err(FinalisedStateError::FeatureUnavailable("compact_block")), + } + } +} + +#[async_trait] +impl IndexedBlockExt for DbBackend { + async fn get_chain_block( + &self, + height: Height, + ) -> Result, FinalisedStateError> { + match self { + Self::V1(db) => db.get_chain_block(height).await, + _ => Err(FinalisedStateError::FeatureUnavailable("chain_block")), + } + } +} + +#[cfg(feature = "transparent_address_history_experimental")] +#[async_trait] +impl TransparentHistExt for DbBackend { + async fn addr_records( + &self, + script: AddrScript, + ) -> Result>, FinalisedStateError> { + match self { + Self::V1(db) => db.addr_records(script).await, + _ => Err(FinalisedStateError::FeatureUnavailable( + "transparent_history", + )), + } + } + + async fn 
addr_and_index_records( + &self, + script: AddrScript, + tx_location: TxLocation, + ) -> Result>, FinalisedStateError> { + match self { + Self::V1(db) => db.addr_and_index_records(script, tx_location).await, + _ => Err(FinalisedStateError::FeatureUnavailable( + "transparent_history", + )), + } + } + + async fn addr_tx_locations_by_range( + &self, + script: AddrScript, + start: Height, + end: Height, + ) -> Result>, FinalisedStateError> { + match self { + Self::V1(db) => db.addr_tx_locations_by_range(script, start, end).await, + _ => Err(FinalisedStateError::FeatureUnavailable( + "transparent_history", + )), + } + } + + async fn addr_utxos_by_range( + &self, + script: AddrScript, + start: Height, + end: Height, + ) -> Result>, FinalisedStateError> { + match self { + Self::V1(db) => db.addr_utxos_by_range(script, start, end).await, + _ => Err(FinalisedStateError::FeatureUnavailable( + "transparent_history", + )), + } + } + + async fn addr_balance_by_range( + &self, + script: AddrScript, + start: Height, + end: Height, + ) -> Result { + match self { + Self::V1(db) => db.addr_balance_by_range(script, start, end).await, + _ => Err(FinalisedStateError::FeatureUnavailable( + "transparent_history", + )), + } + } + + async fn get_outpoint_spender( + &self, + outpoint: Outpoint, + ) -> Result, FinalisedStateError> { + match self { + Self::V1(db) => db.get_outpoint_spender(outpoint).await, + _ => Err(FinalisedStateError::FeatureUnavailable( + "transparent_history", + )), + } + } + + async fn get_outpoint_spenders( + &self, + outpoints: Vec, + ) -> Result>, FinalisedStateError> { + match self { + Self::V1(db) => db.get_outpoint_spenders(outpoints).await, + _ => Err(FinalisedStateError::FeatureUnavailable( + "transparent_history", + )), + } + } +} diff --git a/zaino-state/src/chain_index/finalised_state/db/db_schema_v1_0.txt b/zaino-state/src/chain_index/finalised_state/db/db_schema_v1_0.txt new file mode 100644 index 000000000..ca8b539df --- /dev/null +++ 
b/zaino-state/src/chain_index/finalised_state/db/db_schema_v1_0.txt @@ -0,0 +1,107 @@ +# ──────────────────────────────────────────────────────────────────────────────── +# Zaino – Finalised State Database, on-disk layout (Schema v1, 2025-08-05) +# ──────────────────────────────────────────────────────────────────────────────── +# +# Any change to this file is a **breaking** change. Bump the schema version, +# place the new description in a *new* file (`db_schema_v{N}.txt`), compute the +# new 32-byte BLAKE2b digest and update the constant in `schema.rs`. +# +# Abbreviations +# ───────────── +# H = block Height (u32) | B = [u8;32] block/tx hash +# LE(uN) = unsigned integer of N bits | BE = big-endian +# CS = CompactSize length prefix +# U256 = 32-byte little-endian unsigned integer +# +# Conventions +# ─────────── +# • All records are encoded with Zaino’s versioned-serialize framework: +# +# • All fixed-width integral fields are little-endian unless explicitly noted. +# • Variable-length collections are length-prefixed with Bitcoin/Zcash +# CompactSize. +# • `Option` is encoded as: 0x00 (None) | 0x01 (Some) . +# • `StoredEntryFixed` -> fixed length metadata wrapper around `T` +# V1 body = < [u8; 32] check_hash> +# • `StoredEntryVar` -> variable metadata wrapper around `T` +# V1 body = < [u8; 32] check_hash> +# +# ─────────────────────────── Logical databases ──────────────────────────────── +# +# 1. headers ― H -> StoredEntryVar +# Key : BE height +# Val : 0x01 + BlockHeaderData +# BlockHeaderData V1 body = +# BlockIndex + BlockData +# BlockIndex = B hash B parent_hash U256 chain_work +# Option height +# BlockData = LE(u32) version +# LE(i64) unix_time +# B merkle_root +# B block_commitments +# LE(u32) bits +# [32] nonce +# +# 2. txids ― H -> StoredEntryVar +# Val : 0x01 + CS count + count × B txid +# +# 3. transparent ― H -> StoredEntryVar> +# Val : 0x01 + CS blk_tx_count + each TransparentTxList +# TransparentTxList = CS tx_count + tx_count × Option +# +# 4. 
sapling ― H -> StoredEntryVar +# Key : BE height +# Val : 0x01 + CS tx_count + tx_count × Option +# SaplingCompactTx = +# Option value_balance +# spends = CS n + n × CompactSaplingSpend +# CompactSaplingSpend = 32-byte nullifier +# outputs = CS m + m × CompactSaplingOutput +# CompactSaplingOutput = 32-byte cmu 32-byte epk 52-byte ciphertext +# +# 5. orchard ― H -> StoredEntryVar +# Key : BE height +# Val : 0x01 + CS tx_count + tx_count × Option +# OrchardCompactTx = +# Option value_balance +# actions = CS n + n × CompactOrchardAction +# CompactOrchardAction = 32-byte nullifier 32-byte cmx 32-byte epk 52-byte ciphertext +# +# 6. commitment_tree_data ― H -> StoredEntryFixed> +# Key : BE height +# Val : 0x01 + CS n + n × CommitmentTreeData +# CommitmentTreeData V1 body = +# CommitmentTreeRoots + CommitmentTreeSizes +# CommitmentTreeRoots = 32-byte sapling_root 32-byte orchard_root +# CommitmentTreeSizes = LE(u32) sapling_total LE(u32) orchard_total +# +# 7. heights ― B (block hash) -> StoredEntryFixed +# Key : 32-byte block hash (internal byte order) +# Val : 0x01 + BE height +# +# 8. spent ― Outpoint -> StoredEntryFixed> +# Key : 0x01 + Outpoint +# Outpoint = B prev_txid LE(u32) prev_index +# Val : 0x01 + CS n + n × TxLocation +# TxLocation = LE(u32) height LE(u32) tx_index +# +# 9. address_history ― AddrScript -> StoredEntryFixed +# Key : 0x01 + AddrScript +# AddrScript = [20] hash u8 script_type +# Val : 0x01 + CS len + raw event bytes (multiple entries may share the key) +# +# 10. 
metadata ― "metadata" (ASCII) -> StoredEntryFixed (singleton) +# DbMetadata V1 body = +# [32] schema_hash +# LE(i64) created_unix_ts +# LE(u32) pruned_tip +# LE(u32) network (Zcash main = 0, test = 1, regtest = 2) +# +# ─────────────────────────── Environment settings ───────────────────────────── +# LMDB page-size: platform default +# max_dbs: 10 (see list above) +# Flags: MDB_NOTLS | MDB_NORDAHEAD +# +# All Databases are append-only and indexed by height -> LMDB default +# +# ───────────────────────────── END OF FILE ──────────────────────────────────── diff --git a/zaino-state/src/chain_index/finalised_state/db/v0.rs b/zaino-state/src/chain_index/finalised_state/db/v0.rs new file mode 100644 index 000000000..525b0c9ee --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/db/v0.rs @@ -0,0 +1,1058 @@ +//! ZainoDB V0 Implementation +//! +//! WARNING: This is a legacy development database and should not be used in production environments. +//! +//! This module implements the original “v0” finalised-state database backend. It exists primarily +//! for backward compatibility and for development/testing scenarios where the historical v0 +//! on-disk layout must be opened. +//! +//! ## Important constraints +//! +//! - **Not schema-versioned in the modern sense:** this database version predates Zaino’s +//! `ZainoVersionedSerde` wire format, therefore it does not store version-tagged records and does +//! not participate in fine-grained schema evolution. +//! - **Legacy encoding strategy:** +//! - keys and values are stored as JSON via `serde_json` for most types, +//! - `CompactBlock` values are encoded as raw Prost bytes via a custom `Serialize`/`Deserialize` +//! wrapper (`DbCompactBlock`) so they can still flow through `serde_json`. +//! - **Limited feature surface:** v0 only supports the core height/hash mapping and compact block +//! retrieval. It does not provide the richer indices introduced in v1 (header data, transaction +//! 
locations, transparent history indexing, etc.). +//! +//! ## On-disk layout +//! +//! The v0 database uses the legacy network directory names: +//! - mainnet: `live/` +//! - testnet: `test/` +//! - regtest: `local/` +//! +//! Each network directory contains an LMDB environment with (at minimum) these tables: +//! - `heights_to_hashes`: `` +//! - `hashes_to_blocks`: `` (where the compact block is stored +//! as raw Prost bytes wrapped by JSON) +//! +//! ## Runtime model +//! +//! `DbV0` spawns a lightweight background maintenance task that: +//! - publishes `StatusType::Ready` once spawned, +//! - periodically calls `clean_trailing()` to reclaim stale LMDB reader slots. +//! +//! This backend uses `tokio::task::block_in_place` / `tokio::task::spawn_blocking` around LMDB +//! operations to avoid blocking the async runtime. + +use crate::{ + chain_index::{ + finalised_state::capability::{ + CompactBlockExt, DbCore, DbMetadata, DbRead, DbVersion, DbWrite, + }, + types::GENESIS_HEIGHT, + }, + config::BlockCacheConfig, + error::FinalisedStateError, + status::{AtomicStatus, StatusType}, + CompactBlockStream, Height, IndexedBlock, +}; + +use zaino_proto::proto::{ + compact_formats::CompactBlock, + service::PoolType, + utils::{compact_block_with_pool_types, PoolTypeFilter}, +}; + +use zebra_chain::{ + block::{Hash as ZebraHash, Height as ZebraHeight}, + parameters::NetworkKind, +}; + +use async_trait::async_trait; +use lmdb::{Cursor, Database, DatabaseFlags, Environment, EnvironmentFlags, Transaction}; +use prost::Message; +use serde::{Deserialize, Serialize}; +use std::{fs, sync::Arc, time::Duration}; +use tokio::time::{interval, MissedTickBehavior}; +use tracing::{info, warn}; + +// ───────────────────────── ZainoDb v0 Capabilities ───────────────────────── + +/// `DbRead` implementation for the legacy v0 backend. +/// +/// Note: v0 exposes only a minimal read surface. Missing data is mapped to `Ok(None)` where the +/// core trait expects optional results. 
+#[async_trait] +impl DbRead for DbV0 { + /// Returns the database tip height (`None` if empty). + async fn db_height(&self) -> Result, FinalisedStateError> { + self.tip_height().await + } + + /// Returns the block height for a given block hash, if known. + /// + /// For v0, absence is represented as either `DataUnavailable` or `FeatureUnavailable` from the + /// legacy helper; both are mapped to `Ok(None)` here. + async fn get_block_height( + &self, + hash: crate::BlockHash, + ) -> Result, FinalisedStateError> { + match self.get_block_height_by_hash(hash).await { + Ok(height) => Ok(Some(height)), + Err( + FinalisedStateError::DataUnavailable(_) + | FinalisedStateError::FeatureUnavailable(_), + ) => Ok(None), + Err(other) => Err(other), + } + } + + /// Returns the block hash for a given block height, if known. + /// + /// For v0, absence is represented as either `DataUnavailable` or `FeatureUnavailable` from the + /// legacy helper; both are mapped to `Ok(None)` here. + async fn get_block_hash( + &self, + height: crate::Height, + ) -> Result, FinalisedStateError> { + match self.get_block_hash_by_height(height).await { + Ok(hash) => Ok(Some(hash)), + Err( + FinalisedStateError::DataUnavailable(_) + | FinalisedStateError::FeatureUnavailable(_), + ) => Ok(None), + Err(other) => Err(other), + } + } + + /// Returns synthetic metadata for v0. + /// + /// v0 does not persist `DbMetadata` on disk; this returns a constructed value describing + /// version `0.0.0` and a default schema hash. + async fn get_metadata(&self) -> Result { + self.get_metadata().await + } +} + +/// `DbWrite` implementation for the legacy v0 backend. +/// +/// v0 supports append-only writes and pop-only deletes at the tip, enforced by explicit checks in +/// the legacy methods. +#[async_trait] +impl DbWrite for DbV0 { + /// Writes a fully-validated finalised block, enforcing strict height monotonicity. 
+ async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError> { + self.write_block(block).await + } + + /// Deletes a block at the given height, enforcing that it is the current tip. + async fn delete_block_at_height( + &self, + height: crate::Height, + ) -> Result<(), FinalisedStateError> { + self.delete_block_at_height(height).await + } + + /// Deletes a block by explicit content. + /// + /// This is a fallback path used when tip-based deletion cannot safely determine the full set of + /// keys to delete (for example, when corruption is suspected). + async fn delete_block(&self, block: &IndexedBlock) -> Result<(), FinalisedStateError> { + self.delete_block(block).await + } + + /// Updates the metadata singleton. + /// + /// NOTE: v0 does not persist metadata on disk; this is a no-op to satisfy the trait. + async fn update_metadata(&self, _metadata: DbMetadata) -> Result<(), FinalisedStateError> { + Ok(()) + } +} + +/// `DbCore` implementation for the legacy v0 backend. +/// +/// The core lifecycle API is implemented in terms of a status flag and a lightweight background +/// maintenance task. +#[async_trait] +impl DbCore for DbV0 { + /// Returns the current runtime status published by this backend. + fn status(&self) -> StatusType { + self.status.load() + } + + /// Requests shutdown of background tasks and syncs the LMDB environment before returning. + /// + /// This method is best-effort: background tasks are aborted after a timeout and the LMDB + /// environment is fsync’d before exit. 
+ async fn shutdown(&self) -> Result<(), FinalisedStateError> { + self.status.store(StatusType::Closing); + + if let Some(handle) = &self.db_handler { + let timeout = tokio::time::sleep(Duration::from_secs(5)); + timeout.await; + // TODO: Check if handle is returned else abort + handle.abort(); + } + let _ = self.clean_trailing().await; + if let Err(e) = self.env.sync(true) { + warn!("LMDB fsync before close failed: {e}"); + } + Ok(()) + } +} + +/// [`CompactBlockExt`] capability implementation for [`DbV0`]. +/// +/// Exposes `zcash_client_backend`-compatible compact blocks derived from stored header + +/// transaction data. +#[async_trait] +impl CompactBlockExt for DbV0 { + async fn get_compact_block( + &self, + height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + self.get_compact_block(height, pool_types).await + } + + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + self.get_compact_block_stream(start_height, end_height, pool_types) + .await + } +} + +/// Finalised part of the chain, held in an LMDB database (legacy v0). +/// +/// `DbV0` maintains two simple indices: +/// - height → hash +/// - hash → compact block +/// +/// It does **not** implement the richer v1 indices (header data, tx location maps, address history, +/// commitment tree tables, etc.). +#[derive(Debug)] +pub struct DbV0 { + /// LMDB database environment handle. + /// + /// The environment is shared between tasks using `Arc` and is configured for high read + /// concurrency (`max_readers`) and reduced I/O overhead (`NO_READAHEAD`). + env: Arc, + + /// LMDB database containing ``. + /// + /// Heights are stored as 4-byte big-endian keys for correct lexicographic ordering. + heights_to_hashes: Database, + + /// LMDB database containing ``. + /// + /// The compact block is stored via the `DbCompactBlock` wrapper: raw Prost bytes embedded in a + /// JSON payload. 
+ hashes_to_blocks: Database, + + /// Background maintenance task handle. + /// + /// This task periodically performs housekeeping (currently reader-slot cleanup). + db_handler: Option>, + + /// Backend lifecycle status. + status: AtomicStatus, + + /// Configuration snapshot used for path/network selection and sizing parameters. + config: BlockCacheConfig, +} + +impl DbV0 { + /// Spawns a new [`DbV0`] backend. + /// + /// This: + /// - derives the v0 network directory name (`live` / `test` / `local`), + /// - opens or creates the LMDB environment and required databases, + /// - configures LMDB reader concurrency based on CPU count, + /// - spawns a background maintenance task, + /// - and returns the opened backend. + /// + /// # Errors + /// Returns `FinalisedStateError` on any filesystem, LMDB, or task-spawn failure. + pub(crate) async fn spawn(config: &BlockCacheConfig) -> Result { + info!("Launching ZainoDB"); + + // Prepare database details and path. + let db_size_bytes = config.storage.database.size.to_byte_count(); + let db_path_dir = match config.network.to_zebra_network().kind() { + NetworkKind::Mainnet => "live", + NetworkKind::Testnet => "test", + NetworkKind::Regtest => "local", + }; + let db_path = config.storage.database.path.join(db_path_dir); + if !db_path.exists() { + fs::create_dir_all(&db_path)?; + } + + // Check system resources to set max db readers, clamped between 512 and 4096. + let cpu_cnt = std::thread::available_parallelism() + .map(|n| n.get()) + .unwrap_or(4); + + // Sets LMDB max_readers based on CPU count (cpu * 32), clamped between 512 and 4096. + // Allows high async read concurrency while keeping memory use low (~192B per slot). + // The 512 min ensures reasonable capacity even on low-core systems. + let max_readers = u32::try_from((cpu_cnt * 32).clamp(512, 4096)) + .expect("max_readers was clamped to fit in u32"); + + // Open LMDB environment and set environmental details. 
+ let env = Environment::new() + .set_max_dbs(12) + .set_map_size(db_size_bytes) + .set_max_readers(max_readers) + .set_flags(EnvironmentFlags::NO_TLS | EnvironmentFlags::NO_READAHEAD) + .open(&db_path)?; + + // Open individual LMDB DBs. + let heights_to_hashes = + Self::open_or_create_db(&env, "heights_to_hashes", DatabaseFlags::empty()).await?; + let hashes_to_blocks = + Self::open_or_create_db(&env, "hashes_to_blocks", DatabaseFlags::empty()).await?; + + // Create ZainoDB + let mut zaino_db = Self { + env: Arc::new(env), + heights_to_hashes, + hashes_to_blocks, + db_handler: None, + status: AtomicStatus::new(StatusType::Spawning), + config: config.clone(), + }; + + // Spawn handler task to perform background validation and trailing tx cleanup. + zaino_db.spawn_handler().await?; + + Ok(zaino_db) + } + + /// Attempts a graceful shutdown and falls back to aborting the maintenance task after a timeout. + /// + /// This is a legacy lifecycle method retained for v0 compatibility. Newer backends should + /// implement shutdown via the `DbCore` trait. + /// + /// # Errors + /// Returns `FinalisedStateError` if LMDB cleanup or sync fails. + pub(crate) async fn close(&mut self) -> Result<(), FinalisedStateError> { + self.status.store(StatusType::Closing); + + if let Some(mut handle) = self.db_handler.take() { + let timeout = tokio::time::sleep(Duration::from_secs(5)); + tokio::pin!(timeout); + + tokio::select! { + res = &mut handle => { + match res { + Ok(_) => {} + Err(e) if e.is_cancelled() => {} + Err(e) => warn!("background task ended with error: {e:?}"), + } + } + _ = &mut timeout => { + warn!("background task didn’t exit in time – aborting"); + handle.abort(); + } + } + } + + let _ = self.clean_trailing().await; + if let Err(e) = self.env.sync(true) { + warn!("LMDB fsync before close failed: {e}"); + } + Ok(()) + } + + /// Returns the current backend status. 
+ pub(crate) fn status(&self) -> StatusType { + self.status.load() + } + + /// Blocks until the backend reports `StatusType::Ready`. + /// + /// This is primarily used during startup sequencing so callers do not issue reads before the + /// backend is ready to serve queries. + pub(crate) async fn wait_until_ready(&self) { + let mut ticker = interval(Duration::from_millis(100)); + ticker.set_missed_tick_behavior(MissedTickBehavior::Delay); + + loop { + ticker.tick().await; + if self.status.load() == StatusType::Ready { + break; + } + } + } + + // *** Internal Control Methods *** + + /// Spawns the background maintenance task. + /// + /// The v0 maintenance task is intentionally minimal: + /// - publishes `StatusType::Ready` after spawning, + /// - periodically calls `clean_trailing()` to purge stale LMDB reader slots, + /// - exits when status transitions to `StatusType::Closing`. + /// + /// Note: historical comments refer to validation passes; the current implementation only + /// performs maintenance and does not validate chain contents. + async fn spawn_handler(&mut self) -> Result<(), FinalisedStateError> { + // Clone everything the task needs so we can move it into the async block. + let zaino_db = Self { + env: Arc::clone(&self.env), + heights_to_hashes: self.heights_to_hashes, + hashes_to_blocks: self.hashes_to_blocks, + db_handler: None, + status: self.status.clone(), + config: self.config.clone(), + }; + + let handle = tokio::spawn({ + let zaino_db = zaino_db; + async move { + zaino_db.status.store(StatusType::Ready); + + // *** steady-state loop *** + let mut maintenance = interval(Duration::from_secs(60)); + + loop { + // Check for closing status. + if zaino_db.status.load() == StatusType::Closing { + break; + } + + zaino_db.zaino_db_handler_sleep(&mut maintenance).await; + } + } + }); + + self.db_handler = Some(handle); + Ok(()) + } + + /// Helper method to wait for the next loop iteration or perform maintenance. 
+ /// + /// This selects between: + /// - a short sleep (steady-state pacing), and + /// - the maintenance tick (currently reader-slot cleanup). + async fn zaino_db_handler_sleep(&self, maintenance: &mut tokio::time::Interval) { + tokio::select! { + _ = tokio::time::sleep(Duration::from_secs(5)) => {}, + _ = maintenance.tick() => { + if let Err(e) = self.clean_trailing().await { + warn!("clean_trailing failed: {}", e); + } + } + } + } + + /// Clears stale LMDB reader slots by opening and closing a read transaction. + /// + /// LMDB only reclaims reader slots when transactions are closed; this method is a cheap and safe + /// way to encourage reclamation in long-running services. + async fn clean_trailing(&self) -> Result<(), FinalisedStateError> { + let txn = self.env.begin_ro_txn()?; + drop(txn); + Ok(()) + } + + /// Opens an LMDB database if present, otherwise creates it. + /// + /// v0 uses this helper for all tables to make environment creation idempotent across restarts. + async fn open_or_create_db( + env: &Environment, + name: &str, + flags: DatabaseFlags, + ) -> Result { + match env.open_db(Some(name)) { + Ok(db) => Ok(db), + Err(lmdb::Error::NotFound) => env + .create_db(Some(name), flags) + .map_err(FinalisedStateError::LmdbError), + Err(e) => Err(FinalisedStateError::LmdbError(e)), + } + } + + // *** DB write / delete methods *** + // These should only ever be used in a single DB control task. + + /// Writes a given (finalised) [`IndexedBlock`] to the v0 database. + /// + /// This method enforces the v0 write invariant: + /// - if the database is non-empty, the new block height must equal `current_tip + 1`, + /// - if the database is empty, the first write must be genesis (`GENESIS_HEIGHT`). 
+ /// + /// The following records are written atomically in a single LMDB write transaction: + /// - `heights_to_hashes[height_be] = hash_json` + /// - `hashes_to_blocks[hash_json] = compact_block_json` + /// + /// On failure, the method attempts to delete the partially-written block (best effort) and + /// returns an `InvalidBlock` error that includes the height/hash context. + pub(crate) async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError> { + self.status.store(StatusType::Syncing); + + let compact_block: CompactBlock = block.to_compact_block(); + let zebra_height: ZebraHeight = block.index().height().into(); + let zebra_hash: ZebraHash = zebra_chain::block::Hash::from(*block.index().hash()); + + let height_key = DbHeight(zebra_height).to_be_bytes(); + let hash_key = serde_json::to_vec(&DbHash(zebra_hash))?; + let block_value = serde_json::to_vec(&DbCompactBlock(compact_block))?; + + // check this is the *next* block in the chain. + let block_height = block.index().height().0; + + tokio::task::block_in_place(|| { + let ro = self.env.begin_ro_txn()?; + let cur = ro.open_ro_cursor(self.heights_to_hashes)?; + + // Position the cursor at the last header we currently have + match cur.get(None, None, lmdb_sys::MDB_LAST) { + // Database already has blocks + Ok((last_height_bytes, _last_hash_bytes)) => { + let block_height = block.index().height().0; + + let last_height = DbHeight::from_be_bytes( + last_height_bytes.expect("Height is always some in the finalised state"), + )? + .0 + .0; + + // Height must be exactly +1 over the current tip + if block_height != last_height + 1 { + return Err(FinalisedStateError::Custom(format!( + "cannot write block at height {block_height:?}; \ + current tip is {last_height:?}" + ))); + } + } + // no block in db, this must be genesis block. 
+ Err(lmdb::Error::NotFound) => { + if block_height != GENESIS_HEIGHT.0 { + return Err(FinalisedStateError::Custom(format!( + "first block must be height 0, got {block_height:?}" + ))); + } + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + } + Ok::<_, FinalisedStateError>(()) + })?; + + // if any database writes fail, or block validation fails, remove block from database and return err. + let zaino_db = Self { + env: Arc::clone(&self.env), + heights_to_hashes: self.heights_to_hashes, + hashes_to_blocks: self.hashes_to_blocks, + db_handler: None, + status: self.status.clone(), + config: self.config.clone(), + }; + let post_result = tokio::task::spawn_blocking(move || { + // let post_result: Result<(), FinalisedStateError> = (async { + // Write block to ZainoDB + let mut txn = zaino_db.env.begin_rw_txn()?; + + txn.put( + zaino_db.heights_to_hashes, + &height_key, + &hash_key, + lmdb::WriteFlags::NO_OVERWRITE, + )?; + + txn.put( + zaino_db.hashes_to_blocks, + &hash_key, + &block_value, + lmdb::WriteFlags::NO_OVERWRITE, + )?; + + txn.commit()?; + + Ok::<_, FinalisedStateError>(()) + }) + .await + .map_err(|e| FinalisedStateError::Custom(format!("Tokio task error: {e}")))?; + + match post_result { + Ok(_) => { + tokio::task::block_in_place(|| self.env.sync(true)) + .map_err(|e| FinalisedStateError::Custom(format!("LMDB sync failed: {e}")))?; + self.status.store(StatusType::Ready); + Ok(()) + } + Err(e) => { + let _ = self.delete_block(&block).await; + tokio::task::block_in_place(|| self.env.sync(true)) + .map_err(|e| FinalisedStateError::Custom(format!("LMDB sync failed: {e}")))?; + self.status.store(StatusType::RecoverableError); + Err(FinalisedStateError::InvalidBlock { + height: block_height, + hash: *block.index().hash(), + reason: e.to_string(), + }) + } + } + } + + /// Deletes the block at `height` from every v0 table. + /// + /// This method enforces the v0 delete invariant: + /// - the requested height must equal the current database tip. 
+ /// + /// The method determines the tip hash from `heights_to_hashes`, then deletes: + /// - `heights_to_hashes[height_be]` + /// - `hashes_to_blocks[hash_json]` + pub(crate) async fn delete_block_at_height( + &self, + height: crate::Height, + ) -> Result<(), FinalisedStateError> { + let block_height = height.0; + let height_key = DbHeight(zebra_chain::block::Height(block_height)).to_be_bytes(); + + // check this is the *next* block in the chain and return the hash. + let zebra_block_hash: zebra_chain::block::Hash = tokio::task::block_in_place(|| { + let ro = self.env.begin_ro_txn()?; + let cur = ro.open_ro_cursor(self.heights_to_hashes)?; + + // Position the cursor at the last header we currently have + match cur.get(None, None, lmdb_sys::MDB_LAST) { + // Database already has blocks + Ok((last_height_bytes, last_hash_bytes)) => { + let last_height = DbHeight::from_be_bytes( + last_height_bytes.expect("Height is always some in the finalised state"), + )? + .0 + .0; + + // Check this is the block at the top of the database. + if block_height != last_height { + return Err(FinalisedStateError::Custom(format!( + "cannot delete block at height {block_height:?}; \ + current tip is {last_height:?}" + ))); + } + + // Deserialize the hash + let db_hash: DbHash = serde_json::from_slice(last_hash_bytes)?; + + Ok(db_hash.0) + } + // no block in db, this must be genesis block. 
+ Err(lmdb::Error::NotFound) => Err(FinalisedStateError::Custom(format!( + "first block must be height 1, got {block_height:?}" + ))), + Err(e) => Err(FinalisedStateError::LmdbError(e)), + } + })?; + let hash_key = serde_json::to_vec(&DbHash(zebra_block_hash))?; + + // Delete block data + let zaino_db = Self { + env: Arc::clone(&self.env), + heights_to_hashes: self.heights_to_hashes, + hashes_to_blocks: self.hashes_to_blocks, + db_handler: None, + status: self.status.clone(), + config: self.config.clone(), + }; + tokio::task::block_in_place(|| { + let mut txn = zaino_db.env.begin_rw_txn()?; + + txn.del(zaino_db.heights_to_hashes, &height_key, None)?; + + txn.del(zaino_db.hashes_to_blocks, &hash_key, None)?; + + let _ = txn.commit(); + + self.env + .sync(true) + .map_err(|e| FinalisedStateError::Custom(format!("LMDB sync failed: {e}")))?; + Ok::<_, FinalisedStateError>(()) + })?; + + Ok(()) + } + + /// Deletes the provided block’s entries from every v0 table. + /// + /// This is used as a backup when `delete_block_at_height` fails. + /// + /// Takes an IndexedBlock as input and ensures all data from this block is wiped from the database. + /// + /// WARNING: No checks are made that this block is at the top of the finalised state, and validated tip is not updated. + /// This enables use for correcting corrupt data within the database but it is left to the user to ensure safe use. + /// Where possible delete_block_at_height should be used instead. + /// + /// NOTE: LMDB database errors are propagated as these show serious database errors, + /// all other errors are returned as `IncorrectBlock`. If this error is returned the block requested + /// should be fetched from the validator and this method called with the correct data. 
+ pub(crate) async fn delete_block( + &self, + block: &IndexedBlock, + ) -> Result<(), FinalisedStateError> { + let zebra_height: ZebraHeight = block.index().height().into(); + let zebra_hash: ZebraHash = zebra_chain::block::Hash::from(*block.index().hash()); + + let height_key = DbHeight(zebra_height).to_be_bytes(); + let hash_key = serde_json::to_vec(&DbHash(zebra_hash))?; + + // Delete all block data from db. + let zaino_db = Self { + env: Arc::clone(&self.env), + heights_to_hashes: self.heights_to_hashes, + hashes_to_blocks: self.hashes_to_blocks, + db_handler: None, + status: self.status.clone(), + config: self.config.clone(), + }; + tokio::task::spawn_blocking(move || { + // Delete block data + let mut txn = zaino_db.env.begin_rw_txn()?; + + txn.del(zaino_db.heights_to_hashes, &height_key, None)?; + + txn.del(zaino_db.hashes_to_blocks, &hash_key, None)?; + + let _ = txn.commit(); + + zaino_db + .env + .sync(true) + .map_err(|e| FinalisedStateError::Custom(format!("LMDB sync failed: {e}")))?; + + Ok::<_, FinalisedStateError>(()) + }) + .await + .map_err(|e| FinalisedStateError::Custom(format!("Tokio task error: {e}")))??; + Ok(()) + } + + // ***** DB fetch methods ***** + + /// Returns the greatest `Height` stored in `heights_to_hashes` (`None` if empty). + /// + /// Heights are stored as big-endian keys, so the LMDB `MDB_LAST` cursor position corresponds to + /// the maximum height. + pub(crate) async fn tip_height(&self) -> Result, FinalisedStateError> { + tokio::task::block_in_place(|| { + let ro = self.env.begin_ro_txn()?; + let cur = ro.open_ro_cursor(self.heights_to_hashes)?; + + match cur.get(None, None, lmdb_sys::MDB_LAST) { + Ok((height_bytes, _hash_bytes)) => { + let tip_height = crate::Height( + DbHeight::from_be_bytes( + height_bytes.expect("Height is always some in the finalised state"), + )? 
+ .0 + .0, + ); + Ok(Some(tip_height)) + } + Err(lmdb::Error::NotFound) => Ok(None), + Err(e) => Err(FinalisedStateError::LmdbError(e)), + } + }) + } + + /// Fetches the block height for a given block hash. + /// + /// v0 resolves hash → compact block via `hashes_to_blocks` and then reads the embedded height + /// from the compact block message. + async fn get_block_height_by_hash( + &self, + hash: crate::BlockHash, + ) -> Result { + let zebra_hash: ZebraHash = zebra_chain::block::Hash::from(hash); + let hash_key = serde_json::to_vec(&DbHash(zebra_hash))?; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + let block_bytes: &[u8] = txn.get(self.hashes_to_blocks, &hash_key)?; + let block: DbCompactBlock = serde_json::from_slice(block_bytes)?; + let block_height = block.0.height as u32; + + Ok(crate::Height(block_height)) + }) + } + + /// Fetches the block hash for a given block height. + /// + /// v0 resolves height → hash via `heights_to_hashes`. + async fn get_block_hash_by_height( + &self, + height: crate::Height, + ) -> Result { + let zebra_height: ZebraHeight = height.into(); + let height_key = DbHeight(zebra_height).to_be_bytes(); + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + let hash_bytes: &[u8] = txn.get(self.heights_to_hashes, &height_key)?; + let db_hash: DbHash = serde_json::from_slice(hash_bytes)?; + + Ok(crate::BlockHash::from(db_hash.0)) + }) + } + + /// Returns constructed metadata for v0. + /// + /// v0 does not persist real metadata. This method returns: + /// - version `0.0.0`, + /// - a zero schema hash, + /// - `MigrationStatus::Complete` (v0 does not participate in resumable migrations). 
+ async fn get_metadata(&self) -> Result { + Ok(DbMetadata { + version: DbVersion { + major: 0, + minor: 0, + patch: 0, + }, + schema_hash: [0u8; 32], + migration_status: + crate::chain_index::finalised_state::capability::MigrationStatus::Complete, + }) + } + + /// Fetches the compact block for a given height. + /// + /// This resolves height → hash via `heights_to_hashes`, then hash → compact block via + /// `hashes_to_blocks`. + async fn get_compact_block( + &self, + height: crate::Height, + pool_types: PoolTypeFilter, + ) -> Result { + let zebra_hash = + zebra_chain::block::Hash::from(self.get_block_hash_by_height(height).await?); + let hash_key = serde_json::to_vec(&DbHash(zebra_hash))?; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + let block_bytes: &[u8] = txn.get(self.hashes_to_blocks, &hash_key)?; + let block: DbCompactBlock = serde_json::from_slice(block_bytes)?; + Ok(compact_block_with_pool_types( + block.0, + &pool_types.to_pool_types_vector(), + )) + }) + } + + /// Streams `CompactBlock` messages for an inclusive height range. + /// + /// Legacy implementation for backwards compatibility. + /// + /// Behaviour: + /// - The stream covers the inclusive range `[start_height, end_height]`. + /// - If `start_height <= end_height` the stream is ascending; otherwise it is descending. + /// - Blocks are fetched one-by-one by calling `get_compact_block(height, pool_types)` for + /// each height in the range. + /// + /// Pool filtering: + /// - `pool_types` controls which per-transaction components are populated. + /// - Transactions that have no elements in any requested pool type are omitted from `vtx`, + /// and `CompactTx.index` preserves the original transaction index within the block. + /// + /// Notes: + /// - This is intentionally not optimised (no LMDB cursor walk, no batch/range reads). + /// - Any fetch/deserialize error terminates the stream after emitting a single `tonic::Status`. 
+ async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + let is_ascending: bool = start_height <= end_height; + + let (sender, receiver) = + tokio::sync::mpsc::channel::>(128); + + let env = self.env.clone(); + let heights_to_hashes_database: lmdb::Database = self.heights_to_hashes; + let hashes_to_blocks_database: lmdb::Database = self.hashes_to_blocks; + + let pool_types_vector: Vec = pool_types.to_pool_types_vector(); + + tokio::task::spawn_blocking(move || { + fn lmdb_get_status( + database_name: &'static str, + height: Height, + error: lmdb::Error, + ) -> tonic::Status { + match error { + lmdb::Error::NotFound => tonic::Status::not_found(format!( + "missing db entry in {database_name} at height {}", + height.0 + )), + other_error => tonic::Status::internal(format!( + "lmdb get({database_name}) failed at height {}: {other_error}", + height.0 + )), + } + } + + let mut current_height: Height = start_height; + + loop { + let result: Result = (|| { + let txn = env.begin_ro_txn().map_err(|error| { + tonic::Status::internal(format!("lmdb begin_ro_txn failed: {error}")) + })?; + + // height -> hash (heights_to_hashes) + let zebra_height: ZebraHeight = current_height.into(); + let height_key: [u8; 4] = DbHeight(zebra_height).to_be_bytes(); + + let hash_bytes: &[u8] = txn + .get(heights_to_hashes_database, &height_key) + .map_err(|error| { + lmdb_get_status("heights_to_hashes", current_height, error) + })?; + + let db_hash: DbHash = serde_json::from_slice(hash_bytes).map_err(|error| { + tonic::Status::internal(format!( + "height->hash decode failed at height {}: {error}", + current_height.0 + )) + })?; + + // hash -> block (hashes_to_blocks) + let hash_key: Vec = + serde_json::to_vec(&DbHash(db_hash.0)).map_err(|error| { + tonic::Status::internal(format!( + "hash key encode failed at height {}: {error}", + current_height.0 + )) + })?; + + let block_bytes: &[u8] = txn + 
.get(hashes_to_blocks_database, &hash_key) + .map_err(|error| { + lmdb_get_status("hashes_to_blocks", current_height, error) + })?; + + let db_compact_block: DbCompactBlock = serde_json::from_slice(block_bytes) + .map_err(|error| { + tonic::Status::internal(format!( + "block decode failed at height {}: {error}", + current_height.0 + )) + })?; + + Ok(compact_block_with_pool_types( + db_compact_block.0, + &pool_types_vector, + )) + })(); + + if sender.blocking_send(result).is_err() { + return; + } + + if current_height == end_height { + return; + } + + if is_ascending { + let next_value = match current_height.0.checked_add(1) { + Some(value) => value, + None => { + let _ = sender.blocking_send(Err(tonic::Status::internal( + "height overflow while iterating ascending".to_string(), + ))); + return; + } + }; + current_height = Height(next_value); + } else { + let next_value = match current_height.0.checked_sub(1) { + Some(value) => value, + None => { + let _ = sender.blocking_send(Err(tonic::Status::internal( + "height underflow while iterating descending".to_string(), + ))); + return; + } + }; + current_height = Height(next_value); + } + } + }); + + Ok(CompactBlockStream::new(receiver)) + } +} + +/// Wrapper for `ZebraHeight` used for key encoding. +/// +/// v0 stores heights as 4-byte **big-endian** keys to preserve numeric ordering under LMDB’s +/// lexicographic key ordering. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +struct DbHeight(pub ZebraHeight); + +impl DbHeight { + /// Converts this height to 4-byte **big-endian** bytes. + /// + /// This is used when storing heights as LMDB keys so that increasing heights sort correctly. + fn to_be_bytes(self) -> [u8; 4] { + self.0 .0.to_be_bytes() + } + + /// Parses a 4-byte **big-endian** key into a `DbHeight`. + /// + /// # Errors + /// Returns an error if the key is not exactly 4 bytes long. 
+ fn from_be_bytes(bytes: &[u8]) -> Result { + let arr: [u8; 4] = bytes + .try_into() + .map_err(|_| FinalisedStateError::Custom("Invalid height key length".to_string()))?; + Ok(DbHeight(ZebraHeight(u32::from_be_bytes(arr)))) + } +} + +/// Wrapper for `ZebraHash` so it can be JSON-serialized as an LMDB value/key payload. +/// +/// v0 stores hashes using `serde_json` rather than Zaino’s versioned binary encoding. +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +struct DbHash(pub ZebraHash); + +/// Wrapper for `CompactBlock` for JSON storage. +/// +/// `CompactBlock` is a Prost message; v0 stores it by encoding to raw bytes and embedding those +/// bytes inside a serde payload. +#[derive(Debug, Clone, PartialEq)] +struct DbCompactBlock(pub CompactBlock); + +/// Custom `Serialize` implementation using Prost's `encode_to_vec()`. +/// +/// This serializes the compact block as raw bytes so it can be stored via `serde_json` as a byte +/// array payload. +impl Serialize for DbCompactBlock { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let bytes = self.0.encode_to_vec(); + serializer.serialize_bytes(&bytes) + } +} + +/// Custom `Deserialize` implementation using Prost's `decode()`. +/// +/// This reverses the `Serialize` strategy by decoding the stored raw bytes into a `CompactBlock`. +impl<'de> Deserialize<'de> for DbCompactBlock { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let bytes: Vec = serde::de::Deserialize::deserialize(deserializer)?; + CompactBlock::decode(&*bytes) + .map(DbCompactBlock) + .map_err(serde::de::Error::custom) + } +} diff --git a/zaino-state/src/chain_index/finalised_state/db/v1.rs b/zaino-state/src/chain_index/finalised_state/db/v1.rs new file mode 100644 index 000000000..25abad116 --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/db/v1.rs @@ -0,0 +1,709 @@ +//! ZainoDB Finalised State (Schema V1) +//! +//! 
This module provides the **V1** implementation of Zaino’s LMDB-backed finalised-state database. +//! It stores a validated, append-only view of the best chain and exposes a set of capability traits +//! (read, write, metadata, block-range fetchers, compact-block generation, and transparent history). +//! +//! ## On-disk layout +//! The V1 on-disk layout is described by an ASCII schema file that is embedded into the binary at +//! compile time (`db_schema_v1_0.txt`). A fixed 32-byte BLAKE2b checksum of that schema description +//! is stored in / compared against the database metadata to detect accidental schema drift. +//! +//! ## Validation model +//! The database maintains a monotonically increasing **validated tip** (`validated_tip`) and a set +//! of validated heights above that tip (`validated_set`) to support out-of-order validation. Reads +//! that require correctness use `resolve_validated_hash_or_height()` to ensure the requested height +//! is validated (performing on-demand validation if required). +//! +//! A background task performs: +//! - an initial full scan of the stored data for checksum / structural correctness, then +//! - steady-state incremental validation of newly appended blocks. +//! +//! ## Concurrency model +//! LMDB supports many concurrent readers and a single writer per environment. This implementation +//! uses `tokio::task::block_in_place` / `spawn_blocking` for LMDB operations to avoid blocking the +//! async runtime, and configures `max_readers` to support high read concurrency. 
+ +use crate::{ + chain_index::{ + finalised_state::{ + capability::{ + BlockCoreExt, BlockShieldedExt, BlockTransparentExt, CompactBlockExt, DbCore, + DbMetadata, DbRead, DbVersion, DbWrite, IndexedBlockExt, MigrationStatus, + }, + entry::{StoredEntryFixed, StoredEntryVar}, + }, + types::{TransactionHash, GENESIS_HEIGHT}, + }, + config::BlockCacheConfig, + error::FinalisedStateError, + AtomicStatus, BlockHash, BlockHeaderData, CommitmentTreeData, CompactBlockStream, + CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactSize, CompactTxData, + FixedEncodedLen as _, Height, IndexedBlock, OrchardCompactTx, OrchardTxList, SaplingCompactTx, + SaplingTxList, StatusType, TransparentCompactTx, TransparentTxList, TxInCompact, TxLocation, + TxOutCompact, TxidList, ZainoVersionedSerde as _, +}; + +#[cfg(feature = "transparent_address_history_experimental")] +use crate::{ + chain_index::{finalised_state::capability::TransparentHistExt, types::AddrEventBytes}, + AddrHistRecord, AddrScript, Outpoint, +}; + +use zaino_proto::proto::{compact_formats::CompactBlock, utils::PoolTypeFilter}; +use zebra_chain::parameters::NetworkKind; +use zebra_state::HashOrHeight; + +use async_trait::async_trait; +use core2::io::{self, Read}; +use dashmap::DashSet; +use lmdb::{ + Cursor, Database, DatabaseFlags, Environment, EnvironmentFlags, Transaction as _, WriteFlags, +}; +use sha2::{Digest, Sha256}; +use std::{ + collections::HashSet, + fs, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, + }, + time::Duration, +}; +use tokio::time::{interval, MissedTickBehavior}; +use tracing::{error, info, warn}; + +#[cfg(feature = "transparent_address_history_experimental")] +use std::collections::HashMap; + +pub(crate) mod validation; + +pub(crate) mod read_core; +pub(crate) mod write_core; + +pub(crate) mod block_core; +pub(crate) mod block_shielded; +pub(crate) mod block_transparent; + +pub(crate) mod compact_block; +pub(crate) mod indexed_block; + +#[cfg(feature = 
"transparent_address_history_experimental")] +pub(crate) mod transparent_address_history; + +// ───────────────────────── Schema v1 constants ───────────────────────── + +/// Full V1 schema text file. +/// +/// This is the exact ASCII description of the V1 on-disk layout embedded into the binary at +/// compile-time. The path is relative to this source file. +/// +/// 1. Bring the *exact* ASCII description of the on-disk layout into the binary at compile-time. +pub(crate) const DB_SCHEMA_V1_TEXT: &str = include_str!("db_schema_v1_0.txt"); + +/* +2. Compute the checksum once, outside the code: + + $ cd zaino-state/src/chain_index/finalised_state/db + $ b2sum -l 256 db_schema_v1_0.txt + bc135247b46bb46a4a971e4c2707826f8095e662b6919d28872c71b6bd676593 db_schema_v1_0.txt + + Optional helper if you don’t have `b2sum`: + + $ python - <<'PY' + > import hashlib, pathlib, binascii + > data = pathlib.Path("db_schema_v1.txt").read_bytes() + > print(hashlib.blake2b(data, digest_size=32).hexdigest()) + > PY + +3. Turn those 64 hex digits into a Rust `[u8; 32]` literal: + + echo bc135247b46bb46a4a971e4c2707826f8095e662b6919d28872c71b6bd676593 \ + | sed 's/../0x&, /g' | fold -s -w48 + +*/ + +/// *Current* database V1 schema hash, used for version validation. +/// +/// This value is compared against the schema hash stored in the metadata record to detect schema +/// drift without a corresponding version bump. +pub(crate) const DB_SCHEMA_V1_HASH: [u8; 32] = [ + 0xbc, 0x13, 0x52, 0x47, 0xb4, 0x6b, 0xb4, 0x6a, 0x4a, 0x97, 0x1e, 0x4c, 0x27, 0x07, 0x82, 0x6f, + 0x80, 0x95, 0xe6, 0x62, 0xb6, 0x91, 0x9d, 0x28, 0x87, 0x2c, 0x71, 0xb6, 0xbd, 0x67, 0x65, 0x93, +]; + +/// *Current* database V1 version. +pub(crate) const DB_VERSION_V1: DbVersion = DbVersion { + major: 1, + minor: 0, + patch: 0, +}; + +/// [`DbCore`] capability implementation for [`DbV1`]. +/// +/// This trait exposes lifecycle operations and a high-level status indicator. 
+#[async_trait] +impl DbCore for DbV1 { + fn status(&self) -> StatusType { + self.status() + } + + async fn shutdown(&self) -> Result<(), FinalisedStateError> { + self.status.store(StatusType::Closing); + + if let Some(handle) = &self.db_handler { + let timeout = tokio::time::sleep(Duration::from_secs(5)); + timeout.await; + // TODO: Check if handle is returned else abort + handle.abort(); + } + let _ = self.clean_trailing().await; + if let Err(e) = self.env.sync(true) { + warn!("LMDB fsync before close failed: {e}"); + } + Ok(()) + } +} + +/// Zaino’s Finalised State database V1. +/// +/// This type owns an LMDB [`Environment`] and a fixed set of named databases representing the V1 +/// schema. It implements the capability traits used by the rest of the chain indexer. +/// +/// Data is stored per-height in “best chain” order and is validated (checksums and continuity) +/// before being treated as reliable for downstream reads. +#[derive(Debug)] +pub(crate) struct DbV1 { + /// Shared LMDB environment. + env: Arc, + + /// Block headers: `Height` -> `StoredEntryVar` + /// + /// Stored per-block, in order. + headers: Database, + + /// Txids: `Height` -> `StoredEntryVar` + /// + /// Stored per-block, in order. + txids: Database, + + /// Transparent: `Height` -> `StoredEntryVar>` + /// + /// Stored per-block, in order. + transparent: Database, + + /// Sapling: `Height` -> `StoredEntryVar>` + /// + /// Stored per-block, in order. + sapling: Database, + + /// Orchard: `Height` -> `StoredEntryVar>` + /// + /// Stored per-block, in order. + orchard: Database, + + /// Block commitment tree data: `Height` -> `StoredEntryFixed>` + /// + /// Stored per-block, in order. + commitment_tree_data: Database, + + /// Heights: `Hash` -> `StoredEntryFixed` + /// + /// Used for hash based fetch of the best chain (and random access). 
+ heights: Database, + + /// Spent outpoints: `Outpoint` -> `StoredEntryFixed>` + /// + /// Used to check spent status of given outpoints, retuning spending tx. + #[cfg(feature = "transparent_address_history_experimental")] + spent: Database, + + /// Transparent address history: `AddrScript` -> duplicate values of `StoredEntryFixed`. + /// + /// Stored as an LMDB `DUP_SORT | DUP_FIXED` database keyed by address script bytes. Each duplicate + /// value is a fixed-size entry encoding one address event (mined output or spending input), + /// including flags and checksum. + /// + /// Used to search all transparent address indexes (txids, utxos, balances, deltas) + #[cfg(feature = "transparent_address_history_experimental")] + address_history: Database, + + /// Metadata: singleton entry "metadata" -> `StoredEntryFixed` + metadata: Database, + + /// Contiguous **water-mark**: every height ≤ `validated_tip` is known-good. + /// + /// Wrapped in an `Arc` so the background validator and any foreground tasks + /// all see (and update) the **same** atomic. + validated_tip: Arc, + + /// Heights **above** the tip that have also been validated. + /// + /// Whenever the next consecutive height is inserted we pop it + /// out of this set and bump `validated_tip`, so the map never + /// grows beyond the number of “holes” in the sequence. + validated_set: DashSet, + + /// Background validator / maintenance task handle. + db_handler: Option>, + + /// ZainoDB status. + status: AtomicStatus, + + /// BlockCache config data. + config: BlockCacheConfig, +} + +/// Inherent implementation for [`DbV1`]. +/// +/// This block contains: +/// - environment / database setup (`spawn`, `open_or_create_db`, schema checks), +/// - background validation task management, +/// - write/delete operations for finalised blocks, +/// - validated read fetchers used by the capability trait implementations, and +/// - internal validation / indexing helpers. 
+impl DbV1 { + /// Spawns a new [`DbV1`] and opens (or creates) the LMDB environment for the configured network. + /// + /// This method: + /// - chooses a versioned path suffix (`...//v1`), + /// - configures LMDB map size and reader slots, + /// - opens or creates all V1 named databases, + /// - validates or initializes the `"metadata"` record (schema hash + version), and + /// - spawns the background validator / maintenance task. + pub(crate) async fn spawn(config: &BlockCacheConfig) -> Result { + info!("Launching ZainoDB"); + + // Prepare database details and path. + let db_size_bytes = config.storage.database.size.to_byte_count(); + let db_path_dir = match config.network.to_zebra_network().kind() { + NetworkKind::Mainnet => "mainnet", + NetworkKind::Testnet => "testnet", + NetworkKind::Regtest => "regtest", + }; + let db_path = config.storage.database.path.join(db_path_dir).join("v1"); + if !db_path.exists() { + fs::create_dir_all(&db_path)?; + } + + // Check system rescources to set max db reeaders, clamped between 512 and 4096. + let cpu_cnt = std::thread::available_parallelism() + .map(|n| n.get()) + .unwrap_or(4); + + // Sets LMDB max_readers based on CPU count (cpu * 32), clamped between 512 and 4096. + // Allows high async read concurrency while keeping memory use low (~192B per slot). + // The 512 min ensures reasonable capacity even on low-core systems. + let max_readers = u32::try_from((cpu_cnt * 32).clamp(512, 4096)) + .expect("max_readers was clamped to fit in u32"); + + // Open LMDB environment and set environmental details. + let env = Environment::new() + .set_max_dbs(12) + .set_map_size(db_size_bytes) + .set_max_readers(max_readers) + .set_flags(EnvironmentFlags::NO_TLS | EnvironmentFlags::NO_READAHEAD) + .open(&db_path)?; + + // Open individual LMDB DBs. 
+ let headers = + Self::open_or_create_db(&env, "headers_1_0_0", DatabaseFlags::empty()).await?; + let txids = Self::open_or_create_db(&env, "txids_1_0_0", DatabaseFlags::empty()).await?; + let transparent = + Self::open_or_create_db(&env, "transparent_1_0_0", DatabaseFlags::empty()).await?; + let sapling = + Self::open_or_create_db(&env, "sapling_1_0_0", DatabaseFlags::empty()).await?; + let orchard = + Self::open_or_create_db(&env, "orchard_1_0_0", DatabaseFlags::empty()).await?; + let commitment_tree_data = + Self::open_or_create_db(&env, "commitment_tree_data_1_0_0", DatabaseFlags::empty()) + .await?; + let hashes = Self::open_or_create_db(&env, "hashes_1_0_0", DatabaseFlags::empty()).await?; + + let metadata = Self::open_or_create_db(&env, "metadata", DatabaseFlags::empty()).await?; + + // Create the DbV1 instance. We declare the variable in the outer scope and + // initialise it in the two cfg arms so `zaino_db` is available afterwards. + let mut zaino_db: Self; + + #[cfg(feature = "transparent_address_history_experimental")] + { + let spent = + Self::open_or_create_db(&env, "spent_1_0_0", DatabaseFlags::empty()).await?; + + let address_history = Self::open_or_create_db( + &env, + "address_history_1_0_0", + DatabaseFlags::DUP_SORT | DatabaseFlags::DUP_FIXED, + ) + .await?; + + zaino_db = Self { + env: Arc::new(env), + headers, + txids, + transparent, + sapling, + orchard, + commitment_tree_data, + heights: hashes, + spent, + address_history, + metadata, + validated_tip: Arc::new(AtomicU32::new(0)), + validated_set: DashSet::new(), + db_handler: None, + status: AtomicStatus::new(StatusType::Spawning), + config: config.clone(), + }; + } + + #[cfg(not(feature = "transparent_address_history_experimental"))] + { + zaino_db = Self { + env: Arc::new(env), + headers, + txids, + transparent, + sapling, + orchard, + commitment_tree_data, + heights: hashes, + metadata, + validated_tip: Arc::new(AtomicU32::new(0)), + validated_set: DashSet::new(), + db_handler: None, + 
status: AtomicStatus::new(StatusType::Spawning), + config: config.clone(), + }; + } + + // Validate (or initialise) the metadata entry before we touch any tables. + zaino_db.check_schema_version().await?; + + // Spawn handler task to perform background validation and trailing tx cleanup. + zaino_db.spawn_handler().await?; + + Ok(zaino_db) + } + + /// Try graceful shutdown, fall back to abort after a timeout. + pub(crate) async fn close(&mut self) -> Result<(), FinalisedStateError> { + self.status.store(StatusType::Closing); + + if let Some(mut handle) = self.db_handler.take() { + let timeout = tokio::time::sleep(Duration::from_secs(5)); + tokio::pin!(timeout); + + tokio::select! { + res = &mut handle => { + match res { + Ok(_) => {} + Err(e) if e.is_cancelled() => {} + Err(e) => warn!("background task ended with error: {e:?}"), + } + } + _ = &mut timeout => { + warn!("background task didn’t exit in time – aborting"); + handle.abort(); + } + } + } + + let _ = self.clean_trailing().await; + if let Err(e) = self.env.sync(true) { + warn!("LMDB fsync before close failed: {e}"); + } + Ok(()) + } + + /// Returns the status of ZainoDB. + pub(crate) fn status(&self) -> StatusType { + self.status.load() + } + + /// Waits until the DB reaches [`StatusType::Ready`]. + /// + /// NOTE: This does not currently backpressure on LMDB reader availability. + /// + /// TODO: check db for free readers and wait if busy. + pub(crate) async fn wait_until_ready(&self) { + let mut ticker = interval(Duration::from_millis(100)); + ticker.set_missed_tick_behavior(MissedTickBehavior::Delay); + + loop { + ticker.tick().await; + if self.status.load() == StatusType::Ready { + break; + } + } + } + + // *** Internal Control Methods *** + + /// Spawns the background validator / maintenance task. + /// + /// The task runs: + /// - **Startup:** full validation passes (`initial_spent_scan`, `initial_address_history_scan`, + /// `initial_block_scan`). 
+ /// - **Steady state:** periodically attempts to validate the next height after `validated_tip`. + /// Separately, it performs periodic trailing-reader cleanup via `clean_trailing()`. + async fn spawn_handler(&mut self) -> Result<(), FinalisedStateError> { + // Clone everything the task needs so we can move it into the async block. + let zaino_db = Self { + env: Arc::clone(&self.env), + headers: self.headers, + txids: self.txids, + transparent: self.transparent, + sapling: self.sapling, + orchard: self.orchard, + commitment_tree_data: self.commitment_tree_data, + heights: self.heights, + #[cfg(feature = "transparent_address_history_experimental")] + spent: self.spent, + #[cfg(feature = "transparent_address_history_experimental")] + address_history: self.address_history, + metadata: self.metadata, + validated_tip: Arc::clone(&self.validated_tip), + validated_set: self.validated_set.clone(), + db_handler: None, + status: self.status.clone(), + config: self.config.clone(), + }; + + let handle = tokio::spawn({ + let zaino_db = zaino_db; + async move { + // *** initial validation *** + zaino_db.status.store(StatusType::Syncing); + + #[cfg(feature = "transparent_address_history_experimental")] + { + let (r1, r2, r3) = tokio::join!( + zaino_db.initial_spent_scan(), + zaino_db.initial_address_history_scan(), + zaino_db.initial_block_scan(), + ); + + for (desc, result) in [ + ("spent scan", r1), + ("addrhist scan", r2), + ("block scan", r3), + ] { + if let Err(e) = result { + error!("initial {desc} failed: {e}"); + zaino_db.status.store(StatusType::CriticalError); + // TODO: Handle error better? - Return invalid block error from validate? 
+ return; + } + } + } + #[cfg(not(feature = "transparent_address_history_experimental"))] + { + if let Err(e) = zaino_db.initial_block_scan().await { + error!("initial block scan failed: {e}"); + zaino_db.status.store(StatusType::CriticalError); + return; + } + } + + info!( + "initial validation complete – tip={}", + zaino_db.validated_tip.load(Ordering::Relaxed) + ); + zaino_db.status.store(StatusType::Ready); + + // *** steady-state loop *** + let mut maintenance = interval(Duration::from_secs(60)); + + loop { + // Check for closing status. + if zaino_db.status.load() == StatusType::Closing { + break; + } + // try to validate the next consecutive block. + let next_h = zaino_db.validated_tip.load(Ordering::Acquire) + 1; + let next_height = match Height::try_from(next_h) { + Ok(h) => h, + Err(_) => { + warn!("height overflow – validated_tip too large"); + zaino_db.zaino_db_handler_sleep(&mut maintenance).await; + continue; + } + }; + + // Fetch hash of `next_h` from Heights. + let hkey = match next_height.to_bytes() { + Ok(bytes) => bytes, + Err(e) => { + warn!("Failed to serialize height {}: {}", next_height, e); + zaino_db.zaino_db_handler_sleep(&mut maintenance).await; + continue; + } + }; + + let hash_opt = (|| -> Option { + let ro = zaino_db.env.begin_ro_txn().ok()?; + let bytes = ro.get(zaino_db.headers, &hkey).ok()?; + let entry = StoredEntryVar::::deserialize(bytes).ok()?; + Some(*entry.inner().index().hash()) + })(); + + if let Some(hash) = hash_opt { + if let Err(e) = zaino_db.validate_block_blocking(next_height, hash) { + warn!("{e}"); + } + // Immediately loop – maybe the chain has more blocks ready. + continue; + } + + zaino_db.zaino_db_handler_sleep(&mut maintenance).await; + } + } + }); + + self.db_handler = Some(handle); + Ok(()) + } + + /// Helper method to wait for the next loop iteration or perform maintenance. + async fn zaino_db_handler_sleep(&self, maintenance: &mut tokio::time::Interval) { + tokio::select! 
{ + _ = tokio::time::sleep(Duration::from_secs(5)) => {}, + _ = maintenance.tick() => { + if let Err(e) = self.clean_trailing().await { + warn!("clean_trailing failed: {}", e); + } + } + } + } + + /// Validates every stored spent-outpoint entry (`Outpoint` -> `TxLocation`) by checksum. + #[cfg(feature = "transparent_address_history_experimental")] + async fn initial_spent_scan(&self) -> Result<(), FinalisedStateError> { + let env = self.env.clone(); + let spent = self.spent; + + tokio::task::spawn_blocking(move || { + let ro = env.begin_ro_txn()?; + let mut cursor = ro.open_ro_cursor(spent)?; + + for (key_bytes, val_bytes) in cursor.iter() { + let entry = StoredEntryFixed::::from_bytes(val_bytes).map_err(|e| { + FinalisedStateError::Custom(format!("corrupt spent entry: {e}")) + })?; + + if !entry.verify(key_bytes) { + return Err(FinalisedStateError::Custom( + "spent record checksum mismatch".into(), + )); + } + } + + Ok(()) + }) + .await + .map_err(|e| FinalisedStateError::Custom(format!("Tokio task error: {e}")))? + } + + /// Validates every stored address-history record (`AddrScript` duplicates of `AddrEventBytes`) by checksum. + #[cfg(feature = "transparent_address_history_experimental")] + async fn initial_address_history_scan(&self) -> Result<(), FinalisedStateError> { + let env = self.env.clone(); + let address_history = self.address_history; + + tokio::task::spawn_blocking(move || { + let ro = env.begin_ro_txn()?; + let mut cursor = ro.open_ro_cursor(address_history)?; + + for (addr_bytes, record_bytes) in cursor.iter() { + let entry = + StoredEntryFixed::::from_bytes(record_bytes).map_err(|e| { + FinalisedStateError::Custom(format!("corrupt addrhist entry: {e}")) + })?; + + if !entry.verify(addr_bytes) { + return Err(FinalisedStateError::Custom( + "addrhist record checksum mismatch".into(), + )); + } + } + + Ok(()) + }) + .await + .map_err(|e| FinalisedStateError::Custom(format!("spawn_blocking failed: {e}")))? 
+ } + + /// Scans the whole finalised chain once at start-up and validates every block by checksum and continuity. + async fn initial_block_scan(&self) -> Result<(), FinalisedStateError> { + let zaino_db = Self { + env: Arc::clone(&self.env), + headers: self.headers, + txids: self.txids, + transparent: self.transparent, + sapling: self.sapling, + orchard: self.orchard, + commitment_tree_data: self.commitment_tree_data, + heights: self.heights, + #[cfg(feature = "transparent_address_history_experimental")] + spent: self.spent, + #[cfg(feature = "transparent_address_history_experimental")] + address_history: self.address_history, + metadata: self.metadata, + validated_tip: Arc::clone(&self.validated_tip), + validated_set: self.validated_set.clone(), + db_handler: None, + status: self.status.clone(), + config: self.config.clone(), + }; + + tokio::task::spawn_blocking(move || { + let ro = zaino_db.env.begin_ro_txn()?; + let mut cursor = ro.open_ro_cursor(zaino_db.heights)?; + + for (hash_bytes, height_entry_bytes) in cursor.iter() { + let hash = BlockHash::from_bytes(hash_bytes)?; + let height = *StoredEntryFixed::::from_bytes(height_entry_bytes) + .map_err(|e| FinalisedStateError::Custom(format!("corrupt height entry: {e}")))? + .inner(); + + zaino_db.validate_block_blocking(height, hash)? + } + + Ok(()) + }) + .await + .map_err(|e| FinalisedStateError::Custom(format!("spawn_blocking failed: {e}")))? + } + + /// Clears stale reader slots by opening and closing a read transaction. + async fn clean_trailing(&self) -> Result<(), FinalisedStateError> { + let txn = self.env.begin_ro_txn()?; + drop(txn); + Ok(()) + } + + /// Opens an lmdb database if present else creates a new one. 
+ async fn open_or_create_db( + env: &Environment, + name: &str, + flags: DatabaseFlags, + ) -> Result { + match env.open_db(Some(name)) { + Ok(db) => Ok(db), + Err(lmdb::Error::NotFound) => env + .create_db(Some(name), flags) + .map_err(FinalisedStateError::LmdbError), + Err(e) => Err(FinalisedStateError::LmdbError(e)), + } + } +} + +impl Drop for DbV1 { + fn drop(&mut self) { + if let Some(handle) = self.db_handler.take() { + handle.abort(); + } + } +} diff --git a/zaino-state/src/chain_index/finalised_state/db/v1/block_core.rs b/zaino-state/src/chain_index/finalised_state/db/v1/block_core.rs new file mode 100644 index 000000000..f3f206649 --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/db/v1/block_core.rs @@ -0,0 +1,377 @@ +//! ZainoDB::V1 core block indexing functionality. + +use super::*; + +/// [`BlockCoreExt`] capability implementation for [`DbV1`]. +/// +/// Provides access to block headers, txid lists, and transaction location mapping. +#[async_trait] +impl BlockCoreExt for DbV1 { + async fn get_block_header( + &self, + height: Height, + ) -> Result { + self.get_block_header_data(height).await + } + + async fn get_block_range_headers( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + self.get_block_range_headers(start, end).await + } + + async fn get_block_txids(&self, height: Height) -> Result { + self.get_block_txids(height).await + } + + async fn get_block_range_txids( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + self.get_block_range_txids(start, end).await + } + + async fn get_txid( + &self, + tx_location: TxLocation, + ) -> Result { + self.get_txid(tx_location).await + } + + async fn get_tx_location( + &self, + txid: &TransactionHash, + ) -> Result, FinalisedStateError> { + self.get_tx_location(txid).await + } +} + +impl DbV1 { + // *** Public fetcher methods - Used by DbReader *** + + /// Fetch block header data by height. 
+ pub(super) async fn get_block_header_data( + &self, + height: Height, + ) -> Result { + let validated_height = self + .resolve_validated_hash_or_height(HashOrHeight::Height(height.into())) + .await?; + let height_bytes = validated_height.to_bytes()?; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + let raw = match txn.get(self.headers, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "header data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let entry = StoredEntryVar::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("header decode error: {e}")))?; + + Ok(*entry.inner()) + }) + } + + /// Fetches block headers for the given height range. + /// + /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height, + /// This could be updated by following the cursor step example in + /// get_compact_block_streamer. + async fn get_block_range_headers( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + + self.validate_block_range(start, end).await?; + let start_bytes = start.to_bytes()?; + let end_bytes = end.to_bytes()?; + + let raw_entries = tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + let mut raw_entries = Vec::new(); + let mut cursor = match txn.open_ro_cursor(self.headers) { + Ok(cursor) => cursor, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "header data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + for (k, v) in cursor.iter_from(&start_bytes[..]) { + if k > &end_bytes[..] 
{ + break; + } + raw_entries.push(v.to_vec()); + } + Ok::>, FinalisedStateError>(raw_entries) + })?; + + raw_entries + .into_iter() + .map(|bytes| { + StoredEntryVar::::from_bytes(&bytes) + .map(|e| *e.inner()) + .map_err(|e| FinalisedStateError::Custom(format!("header decode error: {e}"))) + }) + .collect() + } + + /// Fetch the txid bytes for a given TxLocation. + /// + /// This uses an optimized lookup without decoding the full TxidList. + /// + /// NOTE: This method currently ignores the txid version byte for efficiency. + async fn get_txid( + &self, + tx_location: TxLocation, + ) -> Result { + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + use std::io::Cursor; + + let height = Height::try_from(tx_location.block_height()) + .map_err(|e| FinalisedStateError::Custom(e.to_string()))?; + let height_bytes = height.to_bytes()?; + + let raw = match txn.get(self.txids, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "txid data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let mut cursor = Cursor::new(raw); + + // Parse StoredEntryVar: + + // Skip [0] StoredEntry version + cursor.set_position(1); + + // Read CompactSize: length of serialized body + let _body_len = CompactSize::read(&mut cursor).map_err(|e| { + FinalisedStateError::Custom(format!("compact size read error: {e}")) + })?; + + // Read [1] TxidList Record version (skip 1 byte) + cursor.set_position(cursor.position() + 1); + + // Read CompactSize: number of txids + let list_len = CompactSize::read(&mut cursor) + .map_err(|e| FinalisedStateError::Custom(format!("txid list len error: {e}")))?; + + let idx = tx_location.tx_index() as usize; + if idx >= list_len as usize { + return Err(FinalisedStateError::Custom( + "tx_index out of range in txid list".to_string(), + )); + } + + // Each txid entry is: [0] version tag + [1..32] txid + + // So we skip idx * 33 
bytes to reach the start of the correct Hash + let offset = cursor.position() + (idx as u64) * TransactionHash::VERSIONED_LEN as u64; + cursor.set_position(offset); + + // Read [0] Txid Record version (skip 1 byte) + cursor.set_position(cursor.position() + 1); + + // Then read 32 bytes for the txid + let mut txid_bytes = [0u8; TransactionHash::ENCODED_LEN]; + cursor + .read_exact(&mut txid_bytes) + .map_err(|e| FinalisedStateError::Custom(format!("txid read error: {e}")))?; + + Ok(TransactionHash::from(txid_bytes)) + }) + } + + /// Fetch block txids by height. + async fn get_block_txids(&self, height: Height) -> Result { + let validated_height = self + .resolve_validated_hash_or_height(HashOrHeight::Height(height.into())) + .await?; + let height_bytes = validated_height.to_bytes()?; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + let raw = match txn.get(self.txids, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "txid data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + let entry: StoredEntryVar = StoredEntryVar::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("txids decode error: {e}")))?; + + Ok(entry.inner().clone()) + }) + } + + /// Fetches block txids for the given height range. + /// + /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height, + /// This could be updated by following the cursor step example in + /// get_compact_block_streamer. 
+ async fn get_block_range_txids( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + + self.validate_block_range(start, end).await?; + let start_bytes = start.to_bytes()?; + let end_bytes = end.to_bytes()?; + + let raw_entries = tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + let mut raw_entries = Vec::new(); + let mut cursor = match txn.open_ro_cursor(self.txids) { + Ok(cursor) => cursor, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "txid data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + for (k, v) in cursor.iter_from(&start_bytes[..]) { + if k > &end_bytes[..] { + break; + } + raw_entries.push(v.to_vec()); + } + Ok::>, FinalisedStateError>(raw_entries) + })?; + + raw_entries + .into_iter() + .map(|bytes| { + StoredEntryVar::::from_bytes(&bytes) + .map(|e| e.inner().clone()) + .map_err(|e| FinalisedStateError::Custom(format!("txids decode error: {e}"))) + }) + .collect() + } + + // Fetch the TxLocation for the given txid, transaction data is indexed by TxLocation internally. + async fn get_tx_location( + &self, + txid: &TransactionHash, + ) -> Result, FinalisedStateError> { + if let Some(index) = tokio::task::block_in_place(|| self.find_txid_index_blocking(txid))? { + Ok(Some(index)) + } else { + Ok(None) + } + } + + // *** Internal DB methods *** + + /// Finds a TxLocation [block_height, tx_index] from a given txid. + /// Used for Txid based lookup in transaction DBs. + /// + /// WARNING: This is a blocking function and **MUST** be called within a blocking thread / task. 
+ pub(super) fn find_txid_index_blocking( + &self, + txid: &TransactionHash, + ) -> Result, FinalisedStateError> { + let ro = self.env.begin_ro_txn()?; + let mut cursor = ro.open_ro_cursor(self.txids)?; + + let target: [u8; 32] = (*txid).into(); + + for (height_bytes, stored_bytes) in cursor.iter() { + if let Some(tx_index) = + Self::find_txid_position_in_stored_txid_list(&target, stored_bytes) + { + let height = Height::from_bytes(height_bytes)?; + return Ok(Some(TxLocation::new(height.0, tx_index as u16))); + } + } + Ok(None) + } + + /// Efficiently scans a raw `StoredEntryVar` buffer to locate the index + /// of a given transaction ID without full deserialization. + /// + /// The format is: + /// - 1 byte: StoredEntryVar version + /// - CompactSize: length of the item + /// - 1 byte: TxidList version + /// - CompactSize: number of the item + /// - N x (1 byte + 32 bytes): tagged Hash items + /// - 32 bytes: checksum + /// + /// # Arguments + /// - `target_txid`: A `[u8; 32]` representing the transaction ID to match. + /// - `stored`: Raw LMDB byte slice from a `StoredEntryVar`. + /// + /// # Returns + /// - `Some(index)` if a matching txid is found + /// - `None` if the format is invalid or no match + #[inline] + fn find_txid_position_in_stored_txid_list( + target_txid: &[u8; 32], + stored: &[u8], + ) -> Option { + const CHECKSUM_LEN: usize = 32; + + // Check is at least sotred version + compactsize + checksum + // else return none. + if stored.len() < TransactionHash::VERSION_TAG_LEN + 8 + CHECKSUM_LEN { + return None; + } + + let mut cursor = &stored[TransactionHash::VERSION_TAG_LEN..]; + let item_len = CompactSize::read(&mut cursor).ok()? as usize; + if cursor.len() < item_len + CHECKSUM_LEN { + return None; + } + + let (_record_version, mut remaining) = cursor.split_first()?; + let vec_len = CompactSize::read(&mut remaining).ok()? 
as usize; + + for idx in 0..vec_len { + // Each entry is 1-byte tag + 32-byte hash + let (_tag, rest) = remaining.split_first()?; + let hash_bytes: &[u8; 32] = rest.get(..32)?.try_into().ok()?; + if hash_bytes == target_txid { + return Some(idx); + } + remaining = &rest[32..]; + } + + None + } +} diff --git a/zaino-state/src/chain_index/finalised_state/db/v1/block_shielded.rs b/zaino-state/src/chain_index/finalised_state/db/v1/block_shielded.rs new file mode 100644 index 000000000..0229aeb30 --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/db/v1/block_shielded.rs @@ -0,0 +1,604 @@ +//! ZainoDB::V1 shielded block indexing functionality. + +use super::*; + +/// [`BlockShieldedExt`] capability implementation for [`DbV1`]. +/// +/// Provides access to Sapling / Orchard compact transaction data and per-block commitment tree +/// metadata. +#[async_trait] +impl BlockShieldedExt for DbV1 { + async fn get_sapling( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError> { + self.get_sapling(tx_location).await + } + + async fn get_block_sapling( + &self, + height: Height, + ) -> Result { + self.get_block_sapling(height).await + } + + async fn get_block_range_sapling( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + self.get_block_range_sapling(start, end).await + } + + async fn get_orchard( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError> { + self.get_orchard(tx_location).await + } + + async fn get_block_orchard( + &self, + height: Height, + ) -> Result { + self.get_block_orchard(height).await + } + + async fn get_block_range_orchard( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + self.get_block_range_orchard(start, end).await + } + + async fn get_block_commitment_tree_data( + &self, + height: Height, + ) -> Result { + self.get_block_commitment_tree_data(height).await + } + + async fn get_block_range_commitment_tree_data( + &self, + start: Height, + end: 
Height, + ) -> Result, FinalisedStateError> { + self.get_block_range_commitment_tree_data(start, end).await + } +} + +impl DbV1 { + // *** Public fetcher methods - Used by DbReader *** + + /// Fetch the serialized SaplingCompactTx for the given TxLocation, if present. + /// + /// This uses an optimized lookup without decoding the full TxidList. + async fn get_sapling( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError> { + use std::io::{Cursor, Read}; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + let height = Height::try_from(tx_location.block_height()) + .map_err(|e| FinalisedStateError::Custom(e.to_string()))?; + let height_bytes = height.to_bytes()?; + + let raw = match txn.get(self.sapling, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "sapling data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let mut cursor = Cursor::new(raw); + + // Skip [0] StoredEntry version + cursor.set_position(1); + + // Read CompactSize: length of serialized body + CompactSize::read(&mut cursor).map_err(|e| { + FinalisedStateError::Custom(format!("compact size read error: {e}")) + })?; + + // Skip SaplingTxList version byte + cursor.set_position(cursor.position() + 1); + + // Read CompactSize: number of entries + let list_len = CompactSize::read(&mut cursor).map_err(|e| { + FinalisedStateError::Custom(format!("sapling tx list len error: {e}")) + })?; + + let idx = tx_location.tx_index() as usize; + if idx >= list_len as usize { + return Err(FinalisedStateError::Custom( + "tx_index out of range in sapling tx list".to_string(), + )); + } + + // Skip preceding entries + for _ in 0..idx { + Self::skip_opt_sapling_entry(&mut cursor) + .map_err(|e| FinalisedStateError::Custom(format!("skip entry error: {e}")))?; + } + + let start = cursor.position(); + + // Peek presence flag + let mut presence = [0u8; 1]; + 
cursor.read_exact(&mut presence).map_err(|e| { + FinalisedStateError::Custom(format!("failed to read Option tag: {e}")) + })?; + + if presence[0] == 0 { + return Ok(None); + } else if presence[0] != 1 { + return Err(FinalisedStateError::Custom(format!( + "invalid Option tag: {}", + presence[0] + ))); + } + + // Rewind to include tag in returned bytes + cursor.set_position(start); + Self::skip_opt_sapling_entry(&mut cursor).map_err(|e| { + FinalisedStateError::Custom(format!("skip entry error (second pass): {e}")) + })?; + + let end = cursor.position(); + + Ok(Some(SaplingCompactTx::from_bytes( + &raw[start as usize..end as usize], + )?)) + }) + } + + /// Fetch block sapling transaction data by height. + async fn get_block_sapling( + &self, + height: Height, + ) -> Result { + let validated_height = self + .resolve_validated_hash_or_height(HashOrHeight::Height(height.into())) + .await?; + let height_bytes = validated_height.to_bytes()?; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + let raw = match txn.get(self.sapling, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "sapling data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + let entry: StoredEntryVar = StoredEntryVar::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("sapling decode error: {e}")))?; + + Ok(entry.inner().clone()) + }) + } + + /// Fetches block sapling tx data for the given height range. + /// + /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height, + /// This could be updated by following the cursor step example in + /// get_compact_block_streamer. 
+ async fn get_block_range_sapling( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + + self.validate_block_range(start, end).await?; + let start_bytes = start.to_bytes()?; + let end_bytes = end.to_bytes()?; + + let raw_entries = tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + let mut raw_entries = Vec::new(); + let mut cursor = match txn.open_ro_cursor(self.sapling) { + Ok(cursor) => cursor, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "sapling data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + for (k, v) in cursor.iter_from(&start_bytes[..]) { + if k > &end_bytes[..] { + break; + } + raw_entries.push(v.to_vec()); + } + Ok::>, FinalisedStateError>(raw_entries) + })?; + + raw_entries + .into_iter() + .map(|bytes| { + StoredEntryVar::::from_bytes(&bytes) + .map(|e| e.inner().clone()) + .map_err(|e| FinalisedStateError::Custom(format!("sapling decode error: {e}"))) + }) + .collect() + } + + /// Fetch the serialized OrchardCompactTx for the given TxLocation, if present. + /// + /// This uses an optimized lookup without decoding the full TxidList. 
+ async fn get_orchard( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError> { + use std::io::{Cursor, Read}; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + let height = Height::try_from(tx_location.block_height()) + .map_err(|e| FinalisedStateError::Custom(e.to_string()))?; + let height_bytes = height.to_bytes()?; + + let raw = match txn.get(self.orchard, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "orchard data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + let mut cursor = Cursor::new(raw); + + // Skip [0] StoredEntry version + cursor.set_position(1); + + // Read CompactSize: length of serialized body + CompactSize::read(&mut cursor).map_err(|e| { + FinalisedStateError::Custom(format!("compact size read error: {e}")) + })?; + + // Skip OrchardTxList version byte + cursor.set_position(cursor.position() + 1); + + // Read CompactSize: number of entries + let list_len = CompactSize::read(&mut cursor).map_err(|e| { + FinalisedStateError::Custom(format!("orchard tx list len error: {e}")) + })?; + + let idx = tx_location.tx_index() as usize; + if idx >= list_len as usize { + return Err(FinalisedStateError::Custom( + "tx_index out of range in orchard tx list".to_string(), + )); + } + + // Skip preceding entries + for _ in 0..idx { + Self::skip_opt_orchard_entry(&mut cursor) + .map_err(|e| FinalisedStateError::Custom(format!("skip entry error: {e}")))?; + } + + let start = cursor.position(); + + // Peek presence flag + let mut presence = [0u8; 1]; + cursor.read_exact(&mut presence).map_err(|e| { + FinalisedStateError::Custom(format!("failed to read Option tag: {e}")) + })?; + + if presence[0] == 0 { + return Ok(None); + } else if presence[0] != 1 { + return Err(FinalisedStateError::Custom(format!( + "invalid Option tag: {}", + presence[0] + ))); + } + + // Rewind to include presence flag 
in output + cursor.set_position(start); + Self::skip_opt_orchard_entry(&mut cursor).map_err(|e| { + FinalisedStateError::Custom(format!("skip entry error (second pass): {e}")) + })?; + + let end = cursor.position(); + + Ok(Some(OrchardCompactTx::from_bytes( + &raw[start as usize..end as usize], + )?)) + }) + } + + /// Fetch block orchard transaction data by height. + async fn get_block_orchard( + &self, + height: Height, + ) -> Result { + let validated_height = self + .resolve_validated_hash_or_height(HashOrHeight::Height(height.into())) + .await?; + let height_bytes = validated_height.to_bytes()?; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + let raw = match txn.get(self.orchard, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "orchard data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + let entry: StoredEntryVar = StoredEntryVar::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("orchard decode error: {e}")))?; + + Ok(entry.inner().clone()) + }) + } + + /// Fetches block orchard tx data for the given height range. + /// + /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height, + /// This could be updated by following the cursor step example in + /// get_compact_block_streamer. 
+ async fn get_block_range_orchard( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + + self.validate_block_range(start, end).await?; + let start_bytes = start.to_bytes()?; + let end_bytes = end.to_bytes()?; + + let raw_entries = tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + let mut raw_entries = Vec::new(); + let mut cursor = match txn.open_ro_cursor(self.orchard) { + Ok(cursor) => cursor, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "orchard data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + for (k, v) in cursor.iter_from(&start_bytes[..]) { + if k > &end_bytes[..] { + break; + } + raw_entries.push(v.to_vec()); + } + Ok::>, FinalisedStateError>(raw_entries) + })?; + + raw_entries + .into_iter() + .map(|bytes| { + StoredEntryVar::::from_bytes(&bytes) + .map(|e| e.inner().clone()) + .map_err(|e| FinalisedStateError::Custom(format!("orchard decode error: {e}"))) + }) + .collect() + } + + /// Fetch block commitment tree data by height. 
+ async fn get_block_commitment_tree_data( + &self, + height: Height, + ) -> Result { + let validated_height = self + .resolve_validated_hash_or_height(HashOrHeight::Height(height.into())) + .await?; + let height_bytes = validated_height.to_bytes()?; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + let raw = match txn.get(self.commitment_tree_data, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "commitment tree data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + let entry = StoredEntryFixed::from_bytes(raw).map_err(|e| { + FinalisedStateError::Custom(format!("commitment_tree decode error: {e}")) + })?; + + Ok(entry.item) + }) + } + + /// Fetches block commitment tree data for the given height range. + /// + /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height, + /// This could be updated by following the cursor step example in + /// get_compact_block_streamer. + async fn get_block_range_commitment_tree_data( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + + self.validate_block_range(start, end).await?; + let start_bytes = start.to_bytes()?; + let end_bytes = end.to_bytes()?; + + let raw_entries = tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + let mut raw_entries = Vec::new(); + let mut cursor = match txn.open_ro_cursor(self.commitment_tree_data) { + Ok(cursor) => cursor, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "commitment tree data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + for (k, v) in cursor.iter_from(&start_bytes[..]) { + if k > &end_bytes[..] 
{ + break; + } + raw_entries.push(v.to_vec()); + } + Ok::>, FinalisedStateError>(raw_entries) + })?; + + raw_entries + .into_iter() + .map(|bytes| { + StoredEntryFixed::::from_bytes(&bytes) + .map(|e| e.item) + .map_err(|e| { + FinalisedStateError::Custom(format!("commitment_tree decode error: {e}")) + }) + }) + .collect() + } + + // *** Internal DB methods *** + + /// Skips one `Option` from the current cursor position. + /// + /// The input should be a cursor over just the inner item "list" bytes of a: + /// - `StoredEntryVar` + /// + /// Advances past: + /// - 1 byte `0x00` if None, or + /// - 1 + 1 + value + spends + outputs if Some (presence + version + body) + /// + /// This is faster than deserialising the whole struct as we only read the compact sizes. + #[inline] + fn skip_opt_sapling_entry(cursor: &mut std::io::Cursor<&[u8]>) -> io::Result<()> { + // Read presence byte + let mut presence = [0u8; 1]; + cursor.read_exact(&mut presence)?; + + if presence[0] == 0 { + return Ok(()); + } else if presence[0] != 1 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("invalid Option tag: {}", presence[0]), + )); + } + + // Read version + cursor.read_exact(&mut [0u8; 1])?; + + // Read value: Option + let mut value_tag = [0u8; 1]; + cursor.read_exact(&mut value_tag)?; + if value_tag[0] == 1 { + // Some(i64): read 8 bytes + cursor.set_position(cursor.position() + 8); + } else if value_tag[0] != 0 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("invalid Option tag: {}", value_tag[0]), + )); + } + + // Read number of spends (CompactSize) + let spend_len = CompactSize::read(&mut *cursor)? as usize; + let spend_skip = spend_len * CompactSaplingSpend::VERSIONED_LEN; + cursor.set_position(cursor.position() + spend_skip as u64); + + // Read number of outputs (CompactSize) + let output_len = CompactSize::read(&mut *cursor)? 
as usize; + let output_skip = output_len * CompactSaplingOutput::VERSIONED_LEN; + cursor.set_position(cursor.position() + output_skip as u64); + + Ok(()) + } + + /// Skips one `Option` from the current cursor position. + /// + /// The input should be a cursor over just the inner item "list" bytes of a: + /// - `StoredEntryVar` + /// + /// Advances past: + /// - 1 byte `0x00` if None, or + /// - 1 + 1 + value + actions if Some (presence + version + body) + /// + /// This is faster than deserialising the whole struct as we only read the compact sizes. + #[inline] + fn skip_opt_orchard_entry(cursor: &mut std::io::Cursor<&[u8]>) -> io::Result<()> { + // Read presence byte + let mut presence = [0u8; 1]; + cursor.read_exact(&mut presence)?; + + if presence[0] == 0 { + return Ok(()); + } else if presence[0] != 1 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("invalid Option tag: {}", presence[0]), + )); + } + + // Read version + cursor.read_exact(&mut [0u8; 1])?; + + // Read value: Option + let mut value_tag = [0u8; 1]; + cursor.read_exact(&mut value_tag)?; + if value_tag[0] == 1 { + // Some(i64): read 8 bytes + cursor.set_position(cursor.position() + 8); + } else if value_tag[0] != 0 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("invalid Option tag: {}", value_tag[0]), + )); + } + + // Read number of actions (CompactSize) + let action_len = CompactSize::read(&mut *cursor)? as usize; + + // Skip actions: each is 1-byte version + 148-byte body + let action_skip = action_len * CompactOrchardAction::VERSIONED_LEN; + cursor.set_position(cursor.position() + action_skip as u64); + + Ok(()) + } +} diff --git a/zaino-state/src/chain_index/finalised_state/db/v1/block_transparent.rs b/zaino-state/src/chain_index/finalised_state/db/v1/block_transparent.rs new file mode 100644 index 000000000..6db2aa22f --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/db/v1/block_transparent.rs @@ -0,0 +1,257 @@ +//! 
ZainoDB::V1 transparent block indexing functionality. + +use super::*; + +/// [`BlockTransparentExt`] capability implementation for [`DbV1`]. +/// +/// Provides access to transparent compact transaction data at both per-transaction and per-block +/// granularity. +#[async_trait] +impl BlockTransparentExt for DbV1 { + async fn get_transparent( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError> { + self.get_transparent(tx_location).await + } + + async fn get_block_transparent( + &self, + height: Height, + ) -> Result { + self.get_block_transparent(height).await + } + + async fn get_block_range_transparent( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + self.get_block_range_transparent(start, end).await + } +} + +impl DbV1 { + // *** Public fetcher methods - Used by DbReader *** + + /// Fetch the serialized TransparentCompactTx for the given TxLocation, if present. + /// + /// This uses an optimized lookup without decoding the full TxidList. 
+ async fn get_transparent( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError> { + use std::io::{Cursor, Read}; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + let height = Height::try_from(tx_location.block_height()) + .map_err(|e| FinalisedStateError::Custom(e.to_string()))?; + let height_bytes = height.to_bytes()?; + + let raw = match txn.get(self.transparent, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "transparent data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let mut cursor = Cursor::new(raw); + + // Skip [0] StoredEntry version + cursor.set_position(1); + + // Read CompactSize: length of serialized body + let _body_len = CompactSize::read(&mut cursor).map_err(|e| { + FinalisedStateError::Custom(format!("compact size read error: {e}")) + })?; + + // Read [1] TransparentTxList Record version (skip 1 byte) + cursor.set_position(cursor.position() + 1); + + // Read CompactSize: number of records + let list_len = CompactSize::read(&mut cursor) + .map_err(|e| FinalisedStateError::Custom(format!("txid list len error: {e}")))?; + + let idx = tx_location.tx_index() as usize; + if idx >= list_len as usize { + return Err(FinalisedStateError::Custom( + "tx_index out of range in transparent tx data".to_string(), + )); + } + + // Skip preceding entries + for _ in 0..idx { + Self::skip_opt_transparent_entry(&mut cursor) + .map_err(|e| FinalisedStateError::Custom(format!("skip entry error: {e}")))?; + } + + let start = cursor.position(); + + // Peek at the 1-byte presence flag + let mut presence = [0u8; 1]; + cursor.read_exact(&mut presence).map_err(|e| { + FinalisedStateError::Custom(format!("failed to read Option tag: {e}")) + })?; + + if presence[0] == 0 { + return Ok(None); + } else if presence[0] != 1 { + return Err(FinalisedStateError::Custom(format!( + "invalid Option tag: {}", + 
presence[0] + ))); + } + + cursor.set_position(start); + // Skip this entry to compute length + Self::skip_opt_transparent_entry(&mut cursor).map_err(|e| { + FinalisedStateError::Custom(format!("skip entry error (second pass): {e}")) + })?; + + let end = cursor.position(); + let slice = &raw[start as usize..end as usize]; + + Ok(Some(TransparentCompactTx::from_bytes(slice)?)) + }) + } + + /// Fetch block transparent transaction data by height. + async fn get_block_transparent( + &self, + height: Height, + ) -> Result { + let validated_height = self + .resolve_validated_hash_or_height(HashOrHeight::Height(height.into())) + .await?; + let height_bytes = validated_height.to_bytes()?; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + let raw = match txn.get(self.transparent, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "transparent data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + let entry: StoredEntryVar = StoredEntryVar::from_bytes(raw) + .map_err(|e| { + FinalisedStateError::Custom(format!("transparent decode error: {e}")) + })?; + + Ok(entry.inner().clone()) + }) + } + + /// Fetches block transparent tx data for the given height range. + /// + /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height, + /// This could be updated by following the cursor step example in + /// get_compact_block_streamer. 
+ async fn get_block_range_transparent( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + + self.validate_block_range(start, end).await?; + let start_bytes = start.to_bytes()?; + let end_bytes = end.to_bytes()?; + + let raw_entries = tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + let mut raw_entries = Vec::new(); + let mut cursor = match txn.open_ro_cursor(self.transparent) { + Ok(cursor) => cursor, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "transparent data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + for (k, v) in cursor.iter_from(&start_bytes[..]) { + if k > &end_bytes[..] { + break; + } + raw_entries.push(v.to_vec()); + } + Ok::>, FinalisedStateError>(raw_entries) + })?; + + raw_entries + .into_iter() + .map(|bytes| { + StoredEntryVar::::from_bytes(&bytes) + .map(|e| e.inner().clone()) + .map_err(|e| { + FinalisedStateError::Custom(format!("transparent decode error: {e}")) + }) + }) + .collect() + } + + // *** Internal DB methods *** + + /// Skips one `Option` entry from the current cursor position. + /// + /// The input should be a cursor over just the inner item "list" bytes of a: + /// - `StoredEntryVar` + /// + /// Advances the cursor past either: + /// - 1 byte (`0x00`) if `None`, or + /// - 1 + 1 + vin_size + vout_size if `Some(TransparentCompactTx)` + /// (presence + version + variable vin/vout sections) + /// + /// This is faster than deserialising the whole struct as we only read the compact sizes. 
+ #[inline] + fn skip_opt_transparent_entry(cursor: &mut std::io::Cursor<&[u8]>) -> io::Result<()> { + let _start_pos = cursor.position(); + + // Read 1-byte presence flag + let mut presence = [0u8; 1]; + cursor.read_exact(&mut presence)?; + + if presence[0] == 0 { + return Ok(()); + } else if presence[0] != 1 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("invalid Option tag: {}", presence[0]), + )); + } + + // Read version (1 byte) + cursor.read_exact(&mut [0u8; 1])?; + + // Read vin_len (CompactSize) + let vin_len = CompactSize::read(&mut *cursor)? as usize; + + // Skip vin entries: each is 1-byte version + 36-byte body + let vin_skip = vin_len * TxInCompact::VERSIONED_LEN; + cursor.set_position(cursor.position() + vin_skip as u64); + + // Read vout_len (CompactSize) + let vout_len = CompactSize::read(&mut *cursor)? as usize; + + // Skip vout entries: each is 1-byte version + 29-byte body + let vout_skip = vout_len * TxOutCompact::VERSIONED_LEN; + cursor.set_position(cursor.position() + vout_skip as u64); + + Ok(()) + } +} diff --git a/zaino-state/src/chain_index/finalised_state/db/v1/compact_block.rs b/zaino-state/src/chain_index/finalised_state/db/v1/compact_block.rs new file mode 100644 index 000000000..40a310d7d --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/db/v1/compact_block.rs @@ -0,0 +1,1304 @@ +//! ZainoDB::V1 compact block indexing functionality. + +use super::*; + +/// [`CompactBlockExt`] capability implementation for [`DbV1`]. +/// +/// Exposes `zcash_client_backend`-compatible compact blocks derived from stored header + shielded +/// transaction data. 
+#[async_trait] +impl CompactBlockExt for DbV1 { + async fn get_compact_block( + &self, + height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + self.get_compact_block(height, pool_types).await + } + + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + self.get_compact_block_stream(start_height, end_height, pool_types) + .await + } +} + +impl DbV1 { + // *** Public fetcher methods - Used by DbReader *** + + /// Returns the CompactBlock for the given Height. + async fn get_compact_block( + &self, + height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + let validated_height = self + .resolve_validated_hash_or_height(HashOrHeight::Height(height.into())) + .await?; + let height_bytes = validated_height.to_bytes()?; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + // ----- Fetch Header ----- + let raw = match txn.get(self.headers, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let header: BlockHeaderData = *StoredEntryVar::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("header decode error: {e}")))? 
+ .inner(); + + // ----- Fetch Txids ----- + let raw = match txn.get(self.txids, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let txids_stored_entry_var = StoredEntryVar::::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("txids decode error: {e}")))?; + let txids = txids_stored_entry_var.inner().txids(); + + // ----- Fetch Transparent Tx Data ----- + let transparent_stored_entry_var = if pool_types.includes_transparent() { + let raw = match txn.get(self.transparent, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + Some( + StoredEntryVar::::from_bytes(raw).map_err(|e| { + FinalisedStateError::Custom(format!("transparent decode error: {e}")) + })?, + ) + } else { + None + }; + let transparent = match transparent_stored_entry_var.as_ref() { + Some(stored_entry_var) => stored_entry_var.inner().tx(), + None => &[], + }; + + // ----- Fetch Sapling Tx Data ----- + let sapling_stored_entry_var = if pool_types.includes_sapling() { + let raw = match txn.get(self.sapling, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + Some( + StoredEntryVar::::from_bytes(raw).map_err(|e| { + FinalisedStateError::Custom(format!("sapling decode error: {e}")) + })?, + ) + } else { + None + }; + let sapling = match sapling_stored_entry_var.as_ref() { + Some(stored_entry_var) => stored_entry_var.inner().tx(), + None => &[], + }; + + // ----- Fetch Orchard Tx Data ----- + let orchard_stored_entry_var = if 
pool_types.includes_orchard() { + let raw = match txn.get(self.orchard, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + Some( + StoredEntryVar::::from_bytes(raw).map_err(|e| { + FinalisedStateError::Custom(format!("orchard decode error: {e}")) + })?, + ) + } else { + None + }; + let orchard = match orchard_stored_entry_var.as_ref() { + Some(stored_entry_var) => stored_entry_var.inner().tx(), + None => &[], + }; + + // ----- Construct CompactTx ----- + let vtx: Vec = txids + .iter() + .enumerate() + .filter_map(|(i, txid)| { + let spends = sapling + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|s| { + s.spends() + .iter() + .map(|sp| sp.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let outputs = sapling + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|s| { + s.outputs() + .iter() + .map(|o| o.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let actions = orchard + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|o| { + o.actions() + .iter() + .map(|a| a.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let (vin, vout) = transparent + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|t| (t.compact_vin(), t.compact_vout())) + .unwrap_or_default(); + + // Omit transactions that have no elements in any requested pool type. + // + // This keeps `vtx` compact (it only contains transactions relevant to the caller’s pool filter), + // but it also means: + // - `vtx.len()` may be smaller than the block transaction count, and + // - transaction indices in `vtx` may be non-contiguous. + // Consumers must use `CompactTx.index` (the original transaction position in the block) rather + // than assuming `vtx` preserves block order densely. + // + // TODO: Re-evaluate whether omitting "empty-for-filter" transactions is the desired API behaviour. 
+ // Some clients may expect a position-preserving representation (one entry per txid), even if + // the per-pool fields are empty for a given filter. + if spends.is_empty() + && outputs.is_empty() + && actions.is_empty() + && vin.is_empty() + && vout.is_empty() + { + return None; + } + + Some(zaino_proto::proto::compact_formats::CompactTx { + index: i as u64, + txid: txid.0.to_vec(), + fee: 0, + spends, + outputs, + actions, + vin, + vout, + }) + }) + .collect(); + + // ----- Fetch Commitment Tree Data ----- + let raw = match txn.get(self.commitment_tree_data, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let commitment_tree_data: CommitmentTreeData = *StoredEntryFixed::from_bytes(raw) + .map_err(|e| { + FinalisedStateError::Custom(format!("commitment_tree decode error: {e}")) + })? + .inner(); + + let chain_metadata = zaino_proto::proto::compact_formats::ChainMetadata { + sapling_commitment_tree_size: commitment_tree_data.sizes().sapling(), + orchard_commitment_tree_size: commitment_tree_data.sizes().orchard(), + }; + + // ----- Construct CompactBlock ----- + Ok(zaino_proto::proto::compact_formats::CompactBlock { + proto_version: 4, + height: header.index().height().0 as u64, + hash: header.index().hash().0.to_vec(), + prev_hash: header.index().parent_hash().0.to_vec(), + // Is this safe? + time: header.data().time() as u32, + header: Vec::new(), + vtx, + chain_metadata: Some(chain_metadata), + }) + }) + } + + /// Streams `CompactBlock` messages for an inclusive height range. + /// + /// This implementation is designed for high-throughput lightclient serving: + /// - It performs a single cursor-walk over the headers database and keeps all other databases + /// (txids + optional pool-specific tx data + commitment tree data) strictly aligned to the + /// same LMDB key. 
+ /// - It uses *short-lived* read transactions and periodically re-seeks by key, which: + /// - reduces the lifetime of LMDB reader slots, + /// - bounds the amount of data held in the same read snapshot, + /// - and prevents a single long stream from monopolising the environment’s read resources. + /// + /// Ordering / range semantics: + /// - The stream covers the inclusive range `[start_height, end_height]`. + /// - If `start_height <= end_height` the stream is ascending; otherwise it is descending. + /// - This function enforces *contiguous heights* in the headers database. Missing heights, key + /// ordering problems, or cursor desynchronisation are treated as internal errors because they + /// indicate database corruption or a violated storage invariant. + /// + /// Pool filtering: + /// - `pool_types` controls which per-transaction components are populated. + /// - Transactions that contain no elements in any requested pool are omitted from `vtx`. + /// The original transaction index is preserved in `CompactTx.index`. + /// + /// Concurrency model: + /// - Spawns a dedicated blocking task (`spawn_blocking`) which performs LMDB reads and decoding. + /// - Results are pushed into a bounded `mpsc` channel; backpressure is applied if the consumer + /// is slow. + /// + /// Errors: + /// - Database-missing conditions are sent downstream as `tonic::Status::not_found`. + /// - Decode failures, cursor desynchronisation, and invariant violations are sent as + /// `tonic::Status::internal`. + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + // Do NOT validate the whole requested range up-front here. + // Validate heights on-demand inside the blocking task so we can return + // the stream handle immediately and start sending blocks as they become ready. + // + // Preserve caller ordering: direction is derived from the caller-supplied heights. 
+ let validated_start_height = start_height; + let validated_end_height = end_height; + + let start_key_bytes = validated_start_height.to_bytes()?; + + // Direction is derived from the validated heights. This relies on `validate_block_range` + // preserving input ordering (i.e. not normalising to (min, max)). + let is_ascending = validated_start_height <= validated_end_height; + + // Bounded channel provides backpressure so the blocking task cannot run unbounded ahead of + // the gRPC consumer. + // + // TODO: Investigate whether channel size should be changed, added to config, or set dynamically base on resources. + let (sender, receiver) = + tokio::sync::mpsc::channel::>(128); + + // Clone everything the blocking task needs so we can move it into the blocking closure. + // This mirrors patterns already used elsewhere in this module. + let zaino_db = Self { + env: Arc::clone(&self.env), + headers: self.headers, + txids: self.txids, + transparent: self.transparent, + sapling: self.sapling, + orchard: self.orchard, + commitment_tree_data: self.commitment_tree_data, + heights: self.heights, + #[cfg(feature = "transparent_address_history_experimental")] + spent: self.spent, + #[cfg(feature = "transparent_address_history_experimental")] + address_history: self.address_history, + metadata: self.metadata, + validated_tip: Arc::clone(&self.validated_tip), + validated_set: self.validated_set.clone(), + db_handler: None, + status: self.status.clone(), + config: self.config.clone(), + }; + + tokio::task::spawn_blocking(move || { + /// Maximum number of blocks to stream per LMDB read transaction. + /// + /// The cursor-walk is resumed by re-seeking to the next expected height key. This keeps + /// read transactions short-lived and reduces pressure on LMDB reader slots. 
+ const BLOCKS_PER_READ_TRANSACTION: usize = 1024; + + // ===================================================================================== + // Helper functions + // ===================================================================================== + // + // These helpers keep the main streaming loop readable and ensure that any failure: + // - emits exactly one `tonic::Status` into the stream (best-effort), and then + // - terminates the blocking task. + // + // They intentionally return `Option`/`Result` to allow early-exit with minimal boilerplate. + + /// Send a `tonic::Status` downstream and ignore send errors. + /// + /// A send error means the receiver side has been dropped (e.g. client cancelled the RPC), + /// so the producer should terminate promptly. + fn send_status( + sender: &tokio::sync::mpsc::Sender>, + status: tonic::Status, + ) { + let _ = sender.blocking_send(Err(status)); + } + + /// Open a read-only cursor for `database` inside `txn`. + /// + /// On failure, emits an internal status and returns `None`. + fn open_ro_cursor_or_send<'txn>( + sender: &tokio::sync::mpsc::Sender>, + txn: &'txn lmdb::RoTransaction<'txn>, + database: lmdb::Database, + database_name: &'static str, + ) -> Option> { + match txn.open_ro_cursor(database) { + Ok(cursor) => Some(cursor), + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb open_ro_cursor({database_name}) failed: {error}" + )), + ); + None + } + } + } + + /// Position `cursor` exactly at `requested_key` using `MDB_SET_KEY`. + /// + /// Returns the `(key, value)` pair at that key. The returned `key` is expected to equal + /// `requested_key` (the function enforces this). + /// + /// Some LMDB bindings occasionally return `Ok((None, value))` for cursor operations. When + /// that happens: + /// - If `verify_on_none_key` is true, we call `MDB_GET_CURRENT` once to recover and verify + /// the current key. 
+ /// - Otherwise we assume the cursor is correctly positioned and return `(requested_key, value)`. + /// + /// On `NotFound`, emits `not_found_status`. On other failures or verification failure, emits + /// `internal(...)`. In all error cases it returns `None`. + fn cursor_set_key_or_send<'txn>( + sender: &tokio::sync::mpsc::Sender>, + cursor: &lmdb::RoCursor<'txn>, + requested_key: &'txn [u8], + cursor_name: &'static str, + not_found_status: tonic::Status, + verify_on_none_key: bool, + ) -> Option<(&'txn [u8], &'txn [u8])> { + match cursor.get(Some(requested_key), None, lmdb_sys::MDB_SET_KEY) { + Ok((Some(found_key), found_val)) => { + if found_key != requested_key { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb SET_KEY({cursor_name}) returned non-matching key" + )), + ); + None + } else { + Some((found_key, found_val)) + } + } + Ok((None, found_val)) => { + // Some builds / bindings can return None for the key for certain ops. If requested, + // verify the cursor actually landed on the requested key via GET_CURRENT. + if verify_on_none_key { + let (recovered_key_opt, recovered_val) = + match cursor.get(None, None, lmdb_sys::MDB_GET_CURRENT) { + Ok(pair) => pair, + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor GET_CURRENT({cursor_name}) failed: {error}" + )), + ); + return None; + } + }; + + let recovered_key = match recovered_key_opt { + Some(key) => key, + None => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb GET_CURRENT({cursor_name}) returned no key" + )), + ); + return None; + } + }; + + if recovered_key != requested_key { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb SET_KEY({cursor_name}) landed on unexpected key: expected {:?}, got {:?}", + requested_key, + recovered_key, + )), + ); + return None; + } + + Some((recovered_key, recovered_val)) + } else { + // Assume SET_KEY success implies match; return the requested key + value. 
+ Some((requested_key, found_val)) + } + } + Err(lmdb::Error::NotFound) => { + send_status(sender, not_found_status); + None + } + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor SET_KEY({cursor_name}) failed: {error}" + )), + ); + None + } + } + } + + /// Step the headers cursor using `step_op` and return the next `(key, value)` pair. + /// + /// This is special-cased because the headers cursor is the *driving cursor*; all other + /// cursors must remain aligned to whatever key the headers cursor moves to. + /// + /// Returns: + /// - `Ok(Some((k, v)))` when the cursor moved successfully. + /// - `Ok(None)` when the cursor reached the end (`NotFound`). + /// - `Err(())` when an error status has been emitted and streaming must stop. + #[allow(clippy::complexity)] + fn headers_step_or_send<'txn>( + sender: &tokio::sync::mpsc::Sender>, + headers_cursor: &lmdb::RoCursor<'txn>, + step_op: lmdb_sys::MDB_cursor_op, + ) -> Result, ()> { + match headers_cursor.get(None, None, step_op) { + Ok((Some(found_key), found_val)) => Ok(Some((found_key, found_val))), + Ok((None, _found_val)) => { + // Some bindings can return None for the key; recover via GET_CURRENT. 
+ let (recovered_key_opt, recovered_val) = + match headers_cursor.get(None, None, lmdb_sys::MDB_GET_CURRENT) { + Ok(pair) => pair, + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor GET_CURRENT(headers) failed: {error}" + )), + ); + return Err(()); + } + }; + let recovered_key = match recovered_key_opt { + Some(key) => key, + None => { + send_status( + sender, + tonic::Status::internal( + "lmdb GET_CURRENT(headers) returned no key".to_string(), + ), + ); + return Err(()); + } + }; + Ok(Some((recovered_key, recovered_val))) + } + Err(lmdb::Error::NotFound) => Ok(None), + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor step(headers) failed: {error}" + )), + ); + Err(()) + } + } + } + + /// Step a non-header cursor and enforce that it remains aligned to `expected_key`. + /// + /// The design invariant for this streamer is: + /// - the headers cursor chooses the next key + /// - every other cursor must produce a value at that *same* key (otherwise the per-height + /// databases are inconsistent or a cursor has desynchronised). + /// + /// Returns the value slice for `expected_key` on success. + /// On `NotFound`, emits `not_found_status`. + /// On key mismatch or other errors, emits an internal error. + fn cursor_step_expect_key_or_send<'txn>( + sender: &tokio::sync::mpsc::Sender>, + cursor: &lmdb::RoCursor<'txn>, + step_op: lmdb_sys::MDB_cursor_op, + expected_key: &[u8], + cursor_name: &'static str, + not_found_status: tonic::Status, + ) -> Option<&'txn [u8]> { + match cursor.get(None, None, step_op) { + Ok((Some(found_key), found_val)) => { + if found_key != expected_key { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor desync({cursor_name}): expected key {:?}, got {:?}", + expected_key, found_key + )), + ); + None + } else { + Some(found_val) + } + } + Ok((None, _found_val)) => { + // Some bindings can return None for the key; recover via GET_CURRENT. 
+ let (recovered_key_opt, recovered_val) = + match cursor.get(None, None, lmdb_sys::MDB_GET_CURRENT) { + Ok(pair) => pair, + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor GET_CURRENT({cursor_name}) failed: {error}" + )), + ); + return None; + } + }; + + let recovered_key = match recovered_key_opt { + Some(key) => key, + None => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb GET_CURRENT({cursor_name}) returned no key" + )), + ); + return None; + } + }; + + if recovered_key != expected_key { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor desync({cursor_name}): expected key {:?}, got {:?}", + expected_key, recovered_key + )), + ); + None + } else { + Some(recovered_val) + } + } + Err(lmdb::Error::NotFound) => { + send_status(sender, not_found_status); + None + } + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor step({cursor_name}) failed: {error}" + )), + ); + None + } + } + } + + // ===================================================================================== + // Blocking streaming loop + // ===================================================================================== + + let step_op = if is_ascending { + lmdb_sys::MDB_NEXT + } else { + lmdb_sys::MDB_PREV + }; + + // Contiguous-height enforcement: we expect every emitted block to have exactly this height. + // This catches missing heights and cursor ordering/key-encoding problems early. + let mut expected_height = validated_start_height; + + // Key used to re-seek at the start of each transaction chunk. + // This begins at the start height and advances by exactly one height per emitted block. + let mut next_start_key_bytes: Vec = start_key_bytes; + + loop { + // Stop once we have emitted the inclusive end height. 
+ if is_ascending { + if expected_height > validated_end_height { + return; + } + } else if expected_height < validated_end_height { + return; + } + + // Open a short-lived read transaction for this chunk. + // + // We intentionally drop the transaction regularly to keep reader slots available and + // to avoid holding a single snapshot for very large streams. + let txn = match zaino_db.env.begin_ro_txn() { + Ok(txn) => txn, + Err(error) => { + send_status( + &sender, + tonic::Status::internal(format!("lmdb begin_ro_txn failed: {error}")), + ); + return; + } + }; + + // Open cursors. Headers is the driving cursor; all others must remain key-aligned. + let headers_cursor = + match open_ro_cursor_or_send(&sender, &txn, zaino_db.headers, "headers") { + Some(cursor) => cursor, + None => return, + }; + + let txids_cursor = + match open_ro_cursor_or_send(&sender, &txn, zaino_db.txids, "txids") { + Some(cursor) => cursor, + None => return, + }; + + let transparent_cursor = if pool_types.includes_transparent() { + match open_ro_cursor_or_send(&sender, &txn, zaino_db.transparent, "transparent") + { + Some(cursor) => Some(cursor), + None => return, + } + } else { + None + }; + + let sapling_cursor = if pool_types.includes_sapling() { + match open_ro_cursor_or_send(&sender, &txn, zaino_db.sapling, "sapling") { + Some(cursor) => Some(cursor), + None => return, + } + } else { + None + }; + + let orchard_cursor = if pool_types.includes_orchard() { + match open_ro_cursor_or_send(&sender, &txn, zaino_db.orchard, "orchard") { + Some(cursor) => Some(cursor), + None => return, + } + } else { + None + }; + + let commitment_tree_cursor = match open_ro_cursor_or_send( + &sender, + &txn, + zaino_db.commitment_tree_data, + "commitment_tree_data", + ) { + Some(cursor) => cursor, + None => return, + }; + + // Position headers cursor at the start key for this chunk. This is the authoritative key + // that all other cursors must align to. 
+ let (current_key, mut raw_header_bytes) = match cursor_set_key_or_send( + &sender, + &headers_cursor, + next_start_key_bytes.as_slice(), + "headers", + tonic::Status::not_found(format!( + "missing header at requested start height key {:?}", + next_start_key_bytes + )), + true, // verify-on-none-key + ) { + Some(pair) => pair, + None => return, + }; + + // Align all other cursors to the exact same key. + let (_txids_key, mut raw_txids_bytes) = match cursor_set_key_or_send( + &sender, + &txids_cursor, + current_key, + "txids", + tonic::Status::not_found("block data missing from db (txids)"), + true, + ) { + Some(pair) => pair, + None => return, + }; + + let mut raw_transparent_bytes: Option<&[u8]> = + if let Some(cursor) = transparent_cursor.as_ref() { + let (_key, val) = match cursor_set_key_or_send( + &sender, + cursor, + current_key, + "transparent", + tonic::Status::not_found("block data missing from db (transparent)"), + true, + ) { + Some(pair) => pair, + None => return, + }; + Some(val) + } else { + None + }; + + let mut raw_sapling_bytes: Option<&[u8]> = + if let Some(cursor) = sapling_cursor.as_ref() { + let (_key, val) = match cursor_set_key_or_send( + &sender, + cursor, + current_key, + "sapling", + tonic::Status::not_found("block data missing from db (sapling)"), + true, + ) { + Some(pair) => pair, + None => return, + }; + Some(val) + } else { + None + }; + + let mut raw_orchard_bytes: Option<&[u8]> = + if let Some(cursor) = orchard_cursor.as_ref() { + let (_key, val) = match cursor_set_key_or_send( + &sender, + cursor, + current_key, + "orchard", + tonic::Status::not_found("block data missing from db (orchard)"), + true, + ) { + Some(pair) => pair, + None => return, + }; + Some(val) + } else { + None + }; + + let (_commitment_key, mut raw_commitment_tree_bytes) = match cursor_set_key_or_send( + &sender, + &commitment_tree_cursor, + current_key, + "commitment_tree_data", + tonic::Status::not_found("block data missing from db (commitment_tree_data)"), + 
true, + ) { + Some(pair) => pair, + None => return, + }; + + let mut blocks_streamed_in_transaction: usize = 0; + + loop { + // ----- Decode and validate block header ----- + let header: BlockHeaderData = match StoredEntryVar::from_bytes(raw_header_bytes) + .map_err(|error| format!("header decode error: {error}")) + { + Ok(entry) => *entry.inner(), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + }; + + // Contiguous-height check: ensures cursor ordering and storage invariants are intact. + let current_height = header.index().height(); + if current_height != expected_height { + send_status( + &sender, + tonic::Status::internal(format!( + "missing height or out-of-order headers: expected {}, got {}", + expected_height.0, current_height.0 + )), + ); + return; + } + + // ----- Ensure the block is validated (on-demand) ----- + // We are in a blocking task; call validate_block_blocking directly but only when needed. + if !zaino_db.is_validated(current_height.into()) { + // header.index().hash() is the block hash we just read from DB; call validator. + let block_hash = *header.index().hash(); + + match zaino_db.validate_block_blocking(current_height, block_hash) { + Ok(()) => { + // validation succeeded and mark_validated has been called inside the validator. 
+ } + Err(FinalisedStateError::LmdbError(lmdb::Error::NotFound)) => { + // missing data that was expected: emit DataUnavailable -> translate to not_found + send_status( + &sender, + tonic::Status::internal(format!( + "block data unavailable during validation at height {}", + current_height.0 + )), + ); + return; + } + Err(e) => { + send_status( + &sender, + tonic::Status::internal(format!( + "validation failed for height {}: {e:?}", + current_height.0 + )), + ); + return; + } + } + } + + // ----- Decode txids and optional pool data ----- + let txids_stored_entry_var = + match StoredEntryVar::::from_bytes(raw_txids_bytes) + .map_err(|error| format!("txids decode error: {error}")) + { + Ok(entry) => entry, + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + }; + let txids = txids_stored_entry_var.inner().txids(); + + // Each pool database stores a per-height vector aligned to the txids list: + // one entry per transaction index (typically `Option` per tx). 
+ let transparent_entries: Option> = + if let Some(raw) = raw_transparent_bytes { + match StoredEntryVar::::from_bytes(raw) + .map_err(|error| format!("transparent decode error: {error}")) + { + Ok(entry) => Some(entry), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + } + } else { + None + }; + + let sapling_entries: Option> = + if let Some(raw) = raw_sapling_bytes { + match StoredEntryVar::::from_bytes(raw) + .map_err(|error| format!("sapling decode error: {error}")) + { + Ok(entry) => Some(entry), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + } + } else { + None + }; + + let orchard_entries: Option> = + if let Some(raw) = raw_orchard_bytes { + match StoredEntryVar::::from_bytes(raw) + .map_err(|error| format!("orchard decode error: {error}")) + { + Ok(entry) => Some(entry), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + } + } else { + None + }; + + let transparent = match transparent_entries.as_ref() { + Some(entry) => entry.inner().tx(), + None => &[], + }; + let sapling = match sapling_entries.as_ref() { + Some(entry) => entry.inner().tx(), + None => &[], + }; + let orchard = match orchard_entries.as_ref() { + Some(entry) => entry.inner().tx(), + None => &[], + }; + + // Invariant: if a pool is requested, its per-height vector length must match txids. 
+ if pool_types.includes_transparent() && transparent.len() != txids.len() { + send_status( + &sender, + tonic::Status::internal(format!( + "transparent list length mismatch at height {}: txids={}, transparent={}", + current_height.0, + txids.len(), + transparent.len(), + )), + ); + return; + } + if pool_types.includes_sapling() && sapling.len() != txids.len() { + send_status( + &sender, + tonic::Status::internal(format!( + "sapling list length mismatch at height {}: txids={}, sapling={}", + current_height.0, + txids.len(), + sapling.len(), + )), + ); + return; + } + if pool_types.includes_orchard() && orchard.len() != txids.len() { + send_status( + &sender, + tonic::Status::internal(format!( + "orchard list length mismatch at height {}: txids={}, orchard={}", + current_height.0, + txids.len(), + orchard.len(), + )), + ); + return; + } + + // ----- Build CompactTx list ----- + // + // `CompactTx.index` is the original transaction index within the block. + // This implementation omits transactions that contain no elements in any requested pool type, + // which means: + // - `vtx.len()` may be smaller than the number of txids in the block, and + // - indices in `vtx` may be non-contiguous. + // Consumers must interpret `CompactTx.index` as authoritative. + // + // TODO: Re-evaluate whether omitting "empty-for-filter" transactions is the desired API behaviour. + // Some clients may expect a position-preserving representation (one entry per txid), even if + // the per-pool fields are empty for a given filter. 
+ let mut vtx: Vec = + Vec::with_capacity(txids.len()); + + for (i, txid) in txids.iter().enumerate() { + let spends = sapling + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|s| { + s.spends() + .iter() + .map(|sp| sp.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let outputs = sapling + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|s| { + s.outputs() + .iter() + .map(|o| o.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let actions = orchard + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|o| { + o.actions() + .iter() + .map(|a| a.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let (vin, vout) = transparent + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|t| (t.compact_vin(), t.compact_vout())) + .unwrap_or_default(); + + // Omit transactions that have no elements in any requested pool type. + // + // Note that omission produces a sparse `vtx` (by original transaction index). Clients must use + // `CompactTx.index` rather than assuming contiguous ordering. + // + // TODO: Re-evaluate whether omission is the desired API behaviour for all consumers. 
+ if spends.is_empty() + && outputs.is_empty() + && actions.is_empty() + && vin.is_empty() + && vout.is_empty() + { + continue; + } + + vtx.push(zaino_proto::proto::compact_formats::CompactTx { + index: i as u64, + txid: txid.0.to_vec(), + fee: 0, + spends, + outputs, + actions, + vin, + vout, + }); + } + + // ----- Decode commitment tree data and construct block ----- + let commitment_tree_data: CommitmentTreeData = + match StoredEntryFixed::from_bytes(raw_commitment_tree_bytes) + .map_err(|error| format!("commitment_tree decode error: {error}")) + { + Ok(entry) => *entry.inner(), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + }; + + let chain_metadata = zaino_proto::proto::compact_formats::ChainMetadata { + sapling_commitment_tree_size: commitment_tree_data.sizes().sapling(), + orchard_commitment_tree_size: commitment_tree_data.sizes().orchard(), + }; + + let compact_block = zaino_proto::proto::compact_formats::CompactBlock { + proto_version: 4, + height: header.index().height().0 as u64, + hash: header.index().hash().0.to_vec(), + prev_hash: header.index().parent_hash().0.to_vec(), + // NOTE: `time()` is stored in the DB as a wider integer; this cast assumes it is + // always representable in `u32` for the protobuf. + time: header.data().time() as u32, + header: Vec::new(), + vtx, + chain_metadata: Some(chain_metadata), + }; + + // Send the block downstream; if the receiver is gone, stop immediately. + if sender.blocking_send(Ok(compact_block)).is_err() { + return; + } + + // If we just emitted the inclusive end height, stop without stepping cursors further. + if current_height == validated_end_height { + return; + } + + blocks_streamed_in_transaction += 1; + + // Compute the next expected height (used both for contiguity checking and chunk re-seek). 
+ let next_expected_height = if is_ascending { + match expected_height.0.checked_add(1) { + Some(value) => Height(value), + None => { + send_status( + &sender, + tonic::Status::internal( + "expected_height overflow while iterating ascending" + .to_string(), + ), + ); + return; + } + } + } else { + match expected_height.0.checked_sub(1) { + Some(value) => Height(value), + None => { + send_status( + &sender, + tonic::Status::internal( + "expected_height underflow while iterating descending" + .to_string(), + ), + ); + return; + } + } + }; + + // Chunk boundary: drop the current read transaction after N blocks and re-seek in a new + // transaction on the next loop iteration. This avoids a single long-lived snapshot. + if blocks_streamed_in_transaction >= BLOCKS_PER_READ_TRANSACTION { + match next_expected_height.to_bytes() { + Ok(bytes) => { + next_start_key_bytes = bytes; + expected_height = next_expected_height; + break; + } + Err(error) => { + send_status( + &sender, + tonic::Status::internal(format!( + "height to_bytes failed at chunk boundary: {error}" + )), + ); + return; + } + } + } + + // Advance all cursors in lockstep. Headers drives the next key; all others must match it. + let next_headers = match headers_step_or_send(&sender, &headers_cursor, step_op) + { + Ok(value) => value, + Err(()) => return, + }; + + let (next_key, next_header_val) = match next_headers { + Some(pair) => pair, + None => { + // Headers ended early; if we have not reached the requested end height, the + // database no longer satisfies the contiguous-height invariant for this range. 
+ if current_height != validated_end_height { + send_status( + &sender, + tonic::Status::internal(format!( + "headers cursor ended early at height {}; expected to reach {}", + current_height.0, validated_end_height.0 + )), + ); + } + return; + } + }; + + let next_txids_val = match cursor_step_expect_key_or_send( + &sender, + &txids_cursor, + step_op, + next_key, + "txids", + tonic::Status::not_found("block data missing from db (txids)"), + ) { + Some(val) => val, + None => return, + }; + + let next_transparent_val: Option<&[u8]> = if let Some(cursor) = + transparent_cursor.as_ref() + { + match cursor_step_expect_key_or_send( + &sender, + cursor, + step_op, + next_key, + "transparent", + tonic::Status::not_found("block data missing from db (transparent)"), + ) { + Some(val) => Some(val), + None => return, + } + } else { + None + }; + + let next_sapling_val: Option<&[u8]> = + if let Some(cursor) = sapling_cursor.as_ref() { + match cursor_step_expect_key_or_send( + &sender, + cursor, + step_op, + next_key, + "sapling", + tonic::Status::not_found("block data missing from db (sapling)"), + ) { + Some(val) => Some(val), + None => return, + } + } else { + None + }; + + let next_orchard_val: Option<&[u8]> = + if let Some(cursor) = orchard_cursor.as_ref() { + match cursor_step_expect_key_or_send( + &sender, + cursor, + step_op, + next_key, + "orchard", + tonic::Status::not_found("block data missing from db (orchard)"), + ) { + Some(val) => Some(val), + None => return, + } + } else { + None + }; + + let next_commitment_tree_val = match cursor_step_expect_key_or_send( + &sender, + &commitment_tree_cursor, + step_op, + next_key, + "commitment_tree_data", + tonic::Status::not_found( + "block data missing from db (commitment_tree_data)", + ), + ) { + Some(val) => val, + None => return, + }; + + raw_header_bytes = next_header_val; + raw_txids_bytes = next_txids_val; + raw_transparent_bytes = next_transparent_val; + raw_sapling_bytes = next_sapling_val; + raw_orchard_bytes = 
next_orchard_val; + raw_commitment_tree_bytes = next_commitment_tree_val; + + expected_height = next_expected_height; + } + } + }); + + Ok(CompactBlockStream::new(receiver)) + } + + // *** Internal DB methods *** +} diff --git a/zaino-state/src/chain_index/finalised_state/db/v1/indexed_block.rs b/zaino-state/src/chain_index/finalised_state/db/v1/indexed_block.rs new file mode 100644 index 000000000..a091dc578 --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/db/v1/indexed_block.rs @@ -0,0 +1,169 @@ +//! ZainoDB::V1 indexed block indexing functionality. + +use super::*; + +/// [`IndexedBlockExt`] capability implementation for [`DbV1`]. +/// +/// Exposes reconstructed [`IndexedBlock`] values from stored per-height entries. +#[async_trait] +impl IndexedBlockExt for DbV1 { + async fn get_chain_block( + &self, + height: Height, + ) -> Result, FinalisedStateError> { + self.get_chain_block(height).await + } +} + +impl DbV1 { + // *** Public fetcher methods - Used by DbReader *** + + /// Returns the IndexedBlock for the given Height. + /// + /// TODO: Add separate range fetch method! 
+ async fn get_chain_block( + &self, + height: Height, + ) -> Result, FinalisedStateError> { + let validated_height = match self + .resolve_validated_hash_or_height(HashOrHeight::Height(height.into())) + .await + { + Ok(height) => height, + Err(FinalisedStateError::DataUnavailable(_)) => return Ok(None), + Err(other) => return Err(other), + }; + let height_bytes = validated_height.to_bytes()?; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + // Fetch header data + let raw = match txn.get(self.headers, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let header: BlockHeaderData = *StoredEntryVar::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("header decode error: {e}")))? + .inner(); + + // fetch transaction data + let raw = match txn.get(self.txids, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let txids_list = StoredEntryVar::::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("txids decode error: {e}")))? + .inner() + .clone(); + let txids = txids_list.txids(); + + let raw = match txn.get(self.transparent, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let transparent_list = StoredEntryVar::::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("transparent decode error: {e}")))? 
+ .inner() + .clone(); + let transparent = transparent_list.tx(); + + let raw = match txn.get(self.sapling, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let sapling_list = StoredEntryVar::::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("sapling decode error: {e}")))? + .inner() + .clone(); + let sapling = sapling_list.tx(); + + let raw = match txn.get(self.orchard, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let orchard_list = StoredEntryVar::::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("orchard decode error: {e}")))? + .inner() + .clone(); + let orchard = orchard_list.tx(); + + // Build CompactTxData + let len = txids.len(); + if transparent.len() != len || sapling.len() != len || orchard.len() != len { + return Err(FinalisedStateError::Custom( + "mismatched tx list lengths in block data".to_string(), + )); + } + + let txs: Vec = (0..len) + .map(|i| { + let txid = txids[i]; + let transparent_tx = transparent[i] + .clone() + .unwrap_or_else(|| TransparentCompactTx::new(vec![], vec![])); + let sapling_tx = sapling[i] + .clone() + .unwrap_or_else(|| SaplingCompactTx::new(None, vec![], vec![])); + let orchard_tx = orchard[i] + .clone() + .unwrap_or_else(|| OrchardCompactTx::new(None, vec![])); + + CompactTxData::new(i as u64, txid, transparent_tx, sapling_tx, orchard_tx) + }) + .collect(); + + // fetch commitment tree data + let raw = match txn.get(self.commitment_tree_data, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) 
=> return Err(FinalisedStateError::LmdbError(e)), + }; + + let commitment_tree_data: CommitmentTreeData = *StoredEntryFixed::from_bytes(raw) + .map_err(|e| { + FinalisedStateError::Custom(format!("commitment_tree decode error: {e}")) + })? + .inner(); + + // Construct IndexedBlock + Ok(Some(IndexedBlock::new( + *header.index(), + *header.data(), + txs, + commitment_tree_data, + ))) + }) + } + + // *** Internal DB methods *** +} diff --git a/zaino-state/src/chain_index/finalised_state/db/v1/read_core.rs b/zaino-state/src/chain_index/finalised_state/db/v1/read_core.rs new file mode 100644 index 000000000..2e60a2733 --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/db/v1/read_core.rs @@ -0,0 +1,125 @@ +//! ZainoDB::V1 core read functionality. + +use super::*; + +/// [`DbRead`] capability implementation for [`DbV1`]. +/// +/// This trait is the read-only surface used by higher layers. Methods typically delegate to +/// inherent async helpers that enforce validated reads where required. 
+#[async_trait] +impl DbRead for DbV1 { + async fn db_height(&self) -> Result, FinalisedStateError> { + self.tip_height().await + } + + async fn get_block_height( + &self, + hash: BlockHash, + ) -> Result, FinalisedStateError> { + match self.get_block_height_by_hash(hash).await { + Ok(height) => Ok(Some(height)), + Err( + FinalisedStateError::DataUnavailable(_) + | FinalisedStateError::FeatureUnavailable(_), + ) => Ok(None), + Err(other) => Err(other), + } + } + + async fn get_block_hash( + &self, + height: Height, + ) -> Result, FinalisedStateError> { + match self.get_block_header_data(height).await { + Ok(header) => Ok(Some(*header.index().hash())), + Err( + FinalisedStateError::DataUnavailable(_) + | FinalisedStateError::FeatureUnavailable(_), + ) => Ok(None), + Err(other) => Err(other), + } + } + + async fn get_metadata(&self) -> Result { + self.get_metadata().await + } +} + +impl DbV1 { + // *** Public fetcher methods - Used by DbReader *** + + /// Returns the greatest `Height` stored in `headers` + /// (`None` if the DB is still empty). + pub(crate) async fn tip_height(&self) -> Result, FinalisedStateError> { + tokio::task::block_in_place(|| { + let ro = self.env.begin_ro_txn()?; + let cur = ro.open_ro_cursor(self.headers)?; + + match cur.get(None, None, lmdb_sys::MDB_LAST) { + Ok((key_bytes, _val_bytes)) => { + // `key_bytes` is exactly what `Height::to_bytes()` produced + let h = Height::from_bytes( + key_bytes.expect("height is always some in the finalised state"), + ) + .map_err(|e| FinalisedStateError::Custom(format!("height decode: {e}")))?; + Ok(Some(h)) + } + Err(lmdb::Error::NotFound) => Ok(None), + Err(e) => Err(FinalisedStateError::LmdbError(e)), + } + }) + } + + /// Fetch the block height in the main chain for a given block hash. 
+ async fn get_block_height_by_hash( + &self, + hash: BlockHash, + ) -> Result { + let height = self + .resolve_validated_hash_or_height(HashOrHeight::Hash(hash.into())) + .await?; + Ok(height) + } + + /// Fetch the height range for the given block hashes. + async fn get_block_range_by_hash( + &self, + start_hash: BlockHash, + end_hash: BlockHash, + ) -> Result<(Height, Height), FinalisedStateError> { + let start_height = self + .resolve_validated_hash_or_height(HashOrHeight::Hash(start_hash.into())) + .await?; + let end_height = self + .resolve_validated_hash_or_height(HashOrHeight::Hash(end_hash.into())) + .await?; + + let (validated_start, validated_end) = + self.validate_block_range(start_height, end_height).await?; + + Ok((validated_start, validated_end)) + } + + /// Fetch database metadata. + async fn get_metadata(&self) -> Result { + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + let raw = match txn.get(self.metadata, b"metadata") { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + let entry = StoredEntryFixed::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("metadata decode error: {e}")))?; + + Ok(entry.item) + }) + } + + // *** Internal DB methods *** +} diff --git a/zaino-state/src/chain_index/finalised_state/db/v1/transparent_address_history.rs b/zaino-state/src/chain_index/finalised_state/db/v1/transparent_address_history.rs new file mode 100644 index 000000000..e0ce8d519 --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/db/v1/transparent_address_history.rs @@ -0,0 +1,1010 @@ +//! ZainoDB::V1 transparent address history indexing functionality. + +use super::*; + +/// [`TransparentHistExt`] capability implementation for [`DbV1`]. 
+/// +/// Provides address history queries built over the LMDB `DUP_SORT`/`DUP_FIXED` address-history +/// database. +#[cfg(feature = "transparent_address_history_experimental")] +#[async_trait] +impl TransparentHistExt for DbV1 { + async fn addr_records( + &self, + addr_script: AddrScript, + ) -> Result>, FinalisedStateError> { + self.addr_records(addr_script).await + } + + async fn addr_and_index_records( + &self, + addr_script: AddrScript, + tx_location: TxLocation, + ) -> Result>, FinalisedStateError> { + self.addr_and_index_records(addr_script, tx_location).await + } + + async fn addr_tx_locations_by_range( + &self, + addr_script: AddrScript, + start_height: Height, + end_height: Height, + ) -> Result>, FinalisedStateError> { + self.addr_tx_locations_by_range(addr_script, start_height, end_height) + .await + } + + async fn addr_utxos_by_range( + &self, + addr_script: AddrScript, + start_height: Height, + end_height: Height, + ) -> Result>, FinalisedStateError> { + self.addr_utxos_by_range(addr_script, start_height, end_height) + .await + } + + async fn addr_balance_by_range( + &self, + addr_script: AddrScript, + start_height: Height, + end_height: Height, + ) -> Result { + self.addr_balance_by_range(addr_script, start_height, end_height) + .await + } + + async fn get_outpoint_spender( + &self, + outpoint: Outpoint, + ) -> Result, FinalisedStateError> { + self.get_outpoint_spender(outpoint).await + } + + async fn get_outpoint_spenders( + &self, + outpoints: Vec, + ) -> Result>, FinalisedStateError> { + self.get_outpoint_spenders(outpoints).await + } +} + +impl DbV1 { + // *** Public fetcher methods - Used by DbReader *** + + /// Fetch all address history records for a given transparent address. + /// + /// Returns: + /// - `Ok(Some(records))` if one or more valid records exist, + /// - `Ok(None)` if no records exist (not an error), + /// - `Err(...)` if any decoding or DB error occurs. 
+ #[cfg(feature = "transparent_address_history_experimental")] + async fn addr_records( + &self, + addr_script: AddrScript, + ) -> Result>, FinalisedStateError> { + let addr_bytes = addr_script.to_bytes()?; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + let mut cursor = match txn.open_ro_cursor(self.address_history) { + Ok(cursor) => cursor, + Err(lmdb::Error::NotFound) => return Ok(None), + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + let mut raw_records = Vec::new(); + + let iter = match cursor.iter_dup_of(&addr_bytes) { + Ok(iter) => iter, + Err(lmdb::Error::NotFound) => return Ok(None), + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + for (key, val) in iter { + if key.len() != AddrScript::VERSIONED_LEN { + continue; + } + if val.len() != StoredEntryFixed::::VERSIONED_LEN { + continue; + } + raw_records.push(val.to_vec()); + } + + if raw_records.is_empty() { + return Ok(None); + } + + let mut records = Vec::with_capacity(raw_records.len()); + for val in raw_records { + let entry = StoredEntryFixed::::from_bytes(&val).map_err(|e| { + FinalisedStateError::Custom(format!("addrhist decode error: {e}")) + })?; + records.push(entry.item); + } + + Ok(Some(records)) + }) + } + + /// Fetch all address history records for a given address and TxLocation. + /// + /// Returns: + /// - `Ok(Some(records))` if one or more matching records are found at that index, + /// - `Ok(None)` if no matching records exist (not an error), + /// - `Err(...)` on decode or DB failure. 
+ #[cfg(feature = "transparent_address_history_experimental")] + async fn addr_and_index_records( + &self, + addr_script: AddrScript, + tx_location: TxLocation, + ) -> Result>, FinalisedStateError> { + let addr_bytes = addr_script.to_bytes()?; + + let rec_results = tokio::task::block_in_place(|| { + let ro = self.env.begin_ro_txn()?; + let fetch_records_result = + self.addr_hist_records_by_addr_and_index_in_txn(&ro, &addr_bytes, tx_location); + ro.commit()?; + fetch_records_result + }); + + let raw_records = match rec_results { + Ok(records) => records, + Err(FinalisedStateError::LmdbError(lmdb::Error::NotFound)) => return Ok(None), + Err(e) => return Err(e), + }; + + if raw_records.is_empty() { + return Ok(None); + } + + let mut records = Vec::with_capacity(raw_records.len()); + + for val in raw_records { + let entry = StoredEntryFixed::::from_bytes(&val) + .map_err(|e| FinalisedStateError::Custom(format!("addrhist decode error: {e}")))?; + records.push(entry.item); + } + + Ok(Some(records)) + } + + /// Fetch all distinct `TxLocation` values for `addr_script` within the + /// height range `[start_height, end_height]` (inclusive). + /// + /// Returns: + /// - `Ok(Some(vec))` if one or more matching records are found, + /// - `Ok(None)` if no matches found (not an error), + /// - `Err(...)` on decode or DB failure. 
+ #[cfg(feature = "transparent_address_history_experimental")] + async fn addr_tx_locations_by_range( + &self, + addr_script: AddrScript, + start_height: Height, + end_height: Height, + ) -> Result>, FinalisedStateError> { + let addr_bytes = addr_script.to_bytes()?; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + let mut cursor = match txn.open_ro_cursor(self.address_history) { + Ok(cursor) => cursor, + Err(lmdb::Error::NotFound) => return Ok(None), + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let mut set: HashSet = HashSet::new(); + + let iter = match cursor.iter_dup_of(&addr_bytes) { + Ok(iter) => iter, + Err(lmdb::Error::NotFound) => return Ok(None), + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + for (key, val) in iter { + if key.len() != AddrScript::VERSIONED_LEN + || val.len() != StoredEntryFixed::::VERSIONED_LEN + { + continue; + } + + // Parse the tx_location out of val: + // - [0] StoredEntry tag + // - [1] record tag + // - [2..=5] height + // - [6..=7] tx_index + // - [8..=9] vout + // - [10] flags + // - [11..=18] value + // - [19..=50] checksum + + let block_height = u32::from_be_bytes([val[2], val[3], val[4], val[5]]); + if block_height < start_height.0 || block_height > end_height.0 { + continue; + } + + let tx_index = u16::from_be_bytes([val[6], val[7]]); + set.insert(TxLocation::new(block_height, tx_index)); + } + let mut indices: Vec<_> = set.into_iter().collect(); + indices.sort_by_key(|txi| (txi.block_height(), txi.tx_index())); + + if indices.is_empty() { + Ok(None) + } else { + Ok(Some(indices)) + } + }) + } + + /// Fetch all UTXOs (unspent mined outputs) for `addr_script` within the + /// height range `[start_height, end_height]` (inclusive). + /// + /// Each entry is `(TxLocation, vout, value)`. + /// + /// Returns: + /// - `Ok(Some(vec))` if one or more UTXOs are found, + /// - `Ok(None)` if none found (not an error), + /// - `Err(...)` on decode or DB failure. 
+ #[cfg(feature = "transparent_address_history_experimental")] + async fn addr_utxos_by_range( + &self, + addr_script: AddrScript, + start_height: Height, + end_height: Height, + ) -> Result>, FinalisedStateError> { + let addr_bytes = addr_script.to_bytes()?; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + let mut cursor = match txn.open_ro_cursor(self.address_history) { + Ok(cursor) => cursor, + Err(lmdb::Error::NotFound) => return Ok(None), + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let mut utxos = Vec::new(); + + let iter = match cursor.iter_dup_of(&addr_bytes) { + Ok(iter) => iter, + Err(lmdb::Error::NotFound) => return Ok(None), + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + for (key, val) in iter { + if key.len() != AddrScript::VERSIONED_LEN + || val.len() != StoredEntryFixed::::VERSIONED_LEN + { + continue; + } + + // Parse the tx_location out of val: + // - [0] StoredEntry tag + // - [1] record tag + // - [2..=5] height + // - [6..=7] tx_index + // - [8..=9] vout + // - [10] flags + // - [11..=18] value + // - [19..=50] checksum + + let block_height = u32::from_be_bytes([val[2], val[3], val[4], val[5]]); + if block_height < start_height.0 || block_height > end_height.0 { + continue; + } + + let flags = val[10]; + if (flags & AddrEventBytes::FLAG_MINED == 0) + || (flags & AddrEventBytes::FLAG_SPENT != 0) + { + continue; + } + + let tx_index = u16::from_be_bytes([val[6], val[7]]); + let vout = u16::from_be_bytes([val[8], val[9]]); + let value = u64::from_le_bytes([ + val[11], val[12], val[13], val[14], val[15], val[16], val[17], val[18], + ]); + + utxos.push((TxLocation::new(block_height, tx_index), vout, value)); + } + + if utxos.is_empty() { + Ok(None) + } else { + Ok(Some(utxos)) + } + }) + } + + /// Computes the transparent balance change for `addr_script` over the + /// height range `[start_height, end_height]` (inclusive). 
+ /// + /// Includes: + /// - `+value` for mined outputs + /// - `−value` for spent inputs + /// + /// Returns the signed net value as `i64`, or error on failure. + #[cfg(feature = "transparent_address_history_experimental")] + async fn addr_balance_by_range( + &self, + addr_script: AddrScript, + start_height: Height, + end_height: Height, + ) -> Result { + let addr_bytes = addr_script.to_bytes()?; + + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + let mut cursor = match txn.open_ro_cursor(self.address_history) { + Ok(cursor) => cursor, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "no data for address".to_string(), + )) + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + let mut balance: i64 = 0; + + let iter = match cursor.iter_dup_of(&addr_bytes) { + Ok(iter) => iter, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "no data for address".to_string(), + )) + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + for (key, val) in iter { + if key.len() != AddrScript::VERSIONED_LEN + || val.len() != StoredEntryFixed::::VERSIONED_LEN + { + continue; + } + + // Parse the tx_location out of val: + // - [0] StoredEntry tag + // - [1] record tag + // - [2..=5] height + // - [6..=7] tx_index + // - [8..=9] vout + // - [10] flags + // - [11..=18] value + // - [19..=50] checksum + + let height = u32::from_be_bytes([val[2], val[3], val[4], val[5]]); + if height < start_height.0 || height > end_height.0 { + continue; + } + + let flags = val[10]; + let value = u64::from_le_bytes([ + val[11], val[12], val[13], val[14], val[15], val[16], val[17], val[18], + ]) as i64; + + if flags & AddrEventBytes::FLAG_IS_INPUT != 0 { + balance -= value; + } else if flags & AddrEventBytes::FLAG_MINED != 0 { + balance += value; + } + } + + Ok(balance) + }) + } + + /// Fetch the `TxLocation` that spent a given outpoint, if any. 
+ /// + /// Returns: + /// - `Ok(Some(TxLocation))` if the outpoint is spent. + /// - `Ok(None)` if no entry exists (not spent or not known). + /// - `Err(...)` on deserialization or DB error. + #[cfg(feature = "transparent_address_history_experimental")] + async fn get_outpoint_spender( + &self, + outpoint: Outpoint, + ) -> Result, FinalisedStateError> { + let key = outpoint.to_bytes()?; + let txn = self.env.begin_ro_txn()?; + + tokio::task::block_in_place(|| match txn.get(self.spent, &key) { + Ok(bytes) => { + let entry = StoredEntryFixed::::from_bytes(bytes).map_err(|e| { + FinalisedStateError::Custom(format!("spent entry decode error: {e}")) + })?; + Ok(Some(entry.item)) + } + Err(lmdb::Error::NotFound) => Ok(None), + Err(e) => Err(FinalisedStateError::LmdbError(e)), + }) + } + + /// Fetch the `TxLocation` entries for a batch of outpoints. + /// + /// For each input: + /// - Returns `Some(TxLocation)` if spent, + /// - `None` if not found, + /// - or returns `Err` immediately if any DB or decode error occurs. + #[cfg(feature = "transparent_address_history_experimental")] + async fn get_outpoint_spenders( + &self, + outpoints: Vec, + ) -> Result>, FinalisedStateError> { + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + outpoints + .into_iter() + .map(|outpoint| { + let key = outpoint.to_bytes()?; + match txn.get(self.spent, &key) { + Ok(bytes) => { + let entry = + StoredEntryFixed::::from_bytes(bytes).map_err(|e| { + FinalisedStateError::Custom(format!( + "spent entry decode error for {outpoint:?}: {e}" + )) + })?; + Ok(Some(entry.item)) + } + Err(lmdb::Error::NotFound) => Ok(None), + Err(e) => Err(FinalisedStateError::LmdbError(e)), + } + }) + .collect() + }) + } + + // *** Internal DB methods *** + + /// Returns all raw AddrHist records for a given AddrScript and TxLocation. + /// + /// Returns a Vec of serialized entries, for given addr_script and ix_index. + /// + /// Efficiently filters by matching block + tx index bytes in-place. 
+ /// + /// WARNING: This operates *inside* an existing RO txn. + #[cfg(feature = "transparent_address_history_experimental")] + pub(super) fn addr_hist_records_by_addr_and_index_in_txn( + &self, + txn: &lmdb::RoTransaction<'_>, + addr_script_bytes: &[u8], + tx_location: TxLocation, + ) -> Result>, FinalisedStateError> { + // Open a single cursor. + let cursor = txn.open_ro_cursor(self.address_history)?; + let mut results: Vec> = Vec::new(); + + // Build the seek data prefix that matches the stored bytes: + // [StoredEntry version, record version, height_be(4), tx_index_be(2)] + let stored_entry_tag = StoredEntryFixed::::VERSION; + let record_tag = AddrEventBytes::VERSION; + + // Reserve the exact number of bytes we need for the SET_RANGE value prefix: + // + // - 1 byte: outer StoredEntry version (StoredEntryFixed::::VERSION) + // - 1 byte: inner record version (AddrEventBytes::VERSION) + // - 4 bytes: block_height (big-endian) + // - 2 bytes: tx_index (big-endian) + // + // This minimal prefix (2 + 4 + 2 = 8 bytes) is all we need for MDB_SET_RANGE to + // position at the first duplicate whose value >= (height, tx_index). Using + // `with_capacity` avoids reallocations while we build the prefix. We do *not* + // append vout/flags/value/checksum here because we only need the leading bytes + // to seek into the dup-sorted data. + let mut seek_data = Vec::with_capacity(2 + 4 + 2); + seek_data.push(stored_entry_tag); + seek_data.push(record_tag); + seek_data.extend_from_slice(&tx_location.block_height().to_be_bytes()); + seek_data.extend_from_slice(&tx_location.tx_index().to_be_bytes()); + + // Use MDB_SET_RANGE to position the cursor at the first duplicate for this key whose + // duplicate value is >= seek_data (this is the efficient B-tree seek). 
+ let op_set_range = lmdb_sys::MDB_SET_RANGE; + match cursor.get(Some(addr_script_bytes), Some(&seek_data[..]), op_set_range) { + Ok((maybe_key, mut cur_val)) => { + // If there's no key, nothing to do + let mut cur_key = match maybe_key { + Some(k) => k, + None => return Ok(results), + }; + + // If the seek landed on a different key, there are no candidates for this addr. + if cur_key.len() != AddrScript::VERSIONED_LEN + || &cur_key[..AddrScript::VERSIONED_LEN] != addr_script_bytes + { + return Ok(results); + } + + // Iterate from the positioned duplicate forward using MDB_NEXT_DUP. + let op_next_dup = lmdb_sys::MDB_NEXT_DUP; + + loop { + // Validate lengths, same as original function. + if cur_key.len() != AddrScript::VERSIONED_LEN { + return Err(FinalisedStateError::Custom( + "address history key length mismatch".into(), + )); + } + if cur_val.len() != StoredEntryFixed::::VERSIONED_LEN { + return Err(FinalisedStateError::Custom( + "address history value length mismatch".into(), + )); + } + if cur_val[0] != StoredEntryFixed::::VERSION + || cur_val[1] != AddrEventBytes::VERSION + { + return Err(FinalisedStateError::Custom( + "address history value version tag mismatch".into(), + )); + } + + // Read height and tx_index *in-place* from the value bytes: + // - [0] stored entry tag + // - [1] record tag + // - [2..=5] height (BE) + // - [6..=7] tx_index (BE) + let block_index = + u32::from_be_bytes([cur_val[2], cur_val[3], cur_val[4], cur_val[5]]); + let tx_idx = u16::from_be_bytes([cur_val[6], cur_val[7]]); + + if block_index == tx_location.block_height() && tx_idx == tx_location.tx_index() + { + // Matching entry — collect the full stored entry bytes (same behaviour). 
+ results.push(cur_val.to_vec()); + } else if block_index > tx_location.block_height() + || (block_index == tx_location.block_height() + && tx_idx > tx_location.tx_index()) + { + // We've passed the requested tx_location in duplicate ordering -> stop + // (duplicates are ordered by value, so once we pass, no matches remain). + break; + } + + // Advance to the next duplicate for the same key. + match cursor.get(None, None, op_next_dup) { + Ok((maybe_k, next_val)) => { + // If key changed or no key returned, stop. + let k = match maybe_k { + Some(k) => k, + None => break, + }; + if k.len() != AddrScript::VERSIONED_LEN + || &k[..AddrScript::VERSIONED_LEN] != addr_script_bytes + { + break; + } + // Update cur_key and cur_val and continue. + cur_key = k; + cur_val = next_val; + continue; + } + Err(lmdb::Error::NotFound) => break, + Err(e) => return Err(e.into()), + } + } // loop + } + Err(lmdb::Error::NotFound) => { + // Nothing at or after seek -> empty result + } + Err(e) => return Err(e.into()), + } + + Ok(results) + } + + /// Inserts a mined-output record into the address‐history map. + #[cfg(feature = "transparent_address_history_experimental")] + #[inline] + pub(super) fn build_transaction_output_histories<'a>( + map: &mut HashMap>, + tx_location: TxLocation, + outputs: impl Iterator, + ) { + for (output_idx, output) in outputs { + let addr_script = AddrScript::new(*output.script_hash(), output.script_type()); + let output_record = AddrHistRecord::new( + tx_location, + output_idx as u16, + output.value(), + AddrHistRecord::FLAG_MINED, + ); + map.entry(addr_script) + .and_modify(|v| v.push(output_record)) + .or_insert_with(|| vec![output_record]); + } + } + + /// Inserts both the “spend” record and the “mined” previous‐output record + /// (used to update the output record spent in this transaction). 
+ #[cfg(feature = "transparent_address_history_experimental")] + #[inline] + #[allow(clippy::type_complexity)] + pub(super) fn build_input_history( + map: &mut HashMap>, + input_tx_location: TxLocation, + input_index: u16, + input: &TxInCompact, + prev_output: &TxOutCompact, + prev_output_tx_location: TxLocation, + ) { + let addr_script = AddrScript::new(*prev_output.script_hash(), prev_output.script_type()); + let input_record = AddrHistRecord::new( + input_tx_location, + input_index, + prev_output.value(), + AddrHistRecord::FLAG_IS_INPUT, + ); + let prev_output_record = ( + AddrScript::new(*prev_output.script_hash(), prev_output.script_type()), + AddrHistRecord::new( + prev_output_tx_location, + input.prevout_index() as u16, + prev_output.value(), + AddrHistRecord::FLAG_MINED, + ), + ); + map.entry(addr_script) + .and_modify(|v| v.push((input_record, prev_output_record))) + .or_insert_with(|| vec![(input_record, prev_output_record)]); + } + + /// Delete all `addrhist` duplicates for `addr_bytes` that + /// * belong to `block_height`, **and** + /// * match the requested record type(s). + /// + /// * `delete_inputs` – remove records whose flag-byte contains FLAG_IS_INPUT + /// * `delete_outputs` – remove records whose flag-byte contains FLAG_MINED + /// + /// `expected` is the number of records to delete; + /// + /// WARNING: This operates *inside* an existing RW txn and must **not** commit it. 
+ #[cfg(feature = "transparent_address_history_experimental")] + pub(super) fn delete_addrhist_dups_in_txn( + &self, + txn: &mut lmdb::RwTransaction<'_>, + addr_bytes: &[u8], + block_height: Height, + delete_inputs: bool, + delete_outputs: bool, + expected: usize, + ) -> Result<(), FinalisedStateError> { + if !delete_inputs && !delete_outputs { + return Err(FinalisedStateError::Custom( + "called delete_addrhist_dups with neither inputs nor outputs to delete".into(), + )); + } + if expected == 0 { + return Err(FinalisedStateError::Custom( + "called delete_addrhist_dups with 0 expected deletes".into(), + )); + } + + let mut remaining = expected; + let height_be = block_height.0.to_be_bytes(); + + let mut cur = txn.open_rw_cursor(self.address_history)?; + + match cur + .get(Some(addr_bytes), None, lmdb_sys::MDB_SET_KEY) + .and_then(|_| cur.get(None, None, lmdb_sys::MDB_LAST_DUP)) + { + Ok((_k, mut val)) => loop { + // Parse AddrEventBytes: + // - [0] StoredEntry tag + // - [1] record tag + // - [2..=5] height + // - [6..=7] tx_index + // - [8..=9] vout + // - [10] flags + // - [11..=18] value + // - [19..=50] checksum + if val.len() == StoredEntryFixed::::VERSIONED_LEN + && val[2..6] == height_be + { + let flags = val[10]; + let is_input = flags & AddrEventBytes::FLAG_IS_INPUT != 0; + let is_output = flags & AddrEventBytes::FLAG_MINED != 0; + + if (delete_inputs && is_input) || (delete_outputs && is_output) { + cur.del(WriteFlags::empty())?; + remaining -= 1; + if remaining == 0 { + break; + } + } + } else if val.len() != StoredEntryFixed::::VERSIONED_LEN { + tracing::warn!("bad addrhist dup (len={})", val.len()); + } + + // step backwards through duplicates + match cur.get(None, None, lmdb_sys::MDB_PREV_DUP) { + Ok((_k, v)) => val = v, + Err(lmdb::Error::NotFound) => { + if remaining == 0 { + break; + } + return Err(FinalisedStateError::Custom(format!( + "expected {expected} records, deleted {}", + expected - remaining + ))); + } + Err(e) => return 
Err(FinalisedStateError::LmdbError(e)), + } + }, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::Custom( + "no addrhist record for key".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + } + + drop(cur); + Ok(()) + } + + /// Mark a specific AddrHistRecord as spent in the addrhist DB. + /// Looks up a record by script and tx_location, sets FLAG_SPENT, and updates it in place. + /// + /// Returns Ok(true) if a record was updated, Ok(false) if not found, or Err on DB error. + /// + /// WARNING: This operates *inside* an existing RW txn and must **not** commit it. + #[cfg(feature = "transparent_address_history_experimental")] + pub(super) fn mark_addr_hist_record_spent_in_txn( + &self, + txn: &mut lmdb::RwTransaction<'_>, + addr_script: &AddrScript, + + expected_prev_entry_bytes: &[u8], + ) -> Result { + let addr_bytes = addr_script.to_bytes()?; + + let mut cur = txn.open_rw_cursor(self.address_history)?; + + for (key, val) in cur.iter_dup_of(&addr_bytes)? 
{ + if key.len() != AddrScript::VERSIONED_LEN { + return Err(FinalisedStateError::Custom( + "address history key length mismatch".into(), + )); + } + if val.len() != StoredEntryFixed::::VERSIONED_LEN { + return Err(FinalisedStateError::Custom( + "address history value length mismatch".into(), + )); + } + + if val != expected_prev_entry_bytes { + continue; + } + + let mut hist_record = [0u8; StoredEntryFixed::::VERSIONED_LEN]; + hist_record.copy_from_slice(val); + + let flags = hist_record[10]; + if (flags & AddrHistRecord::FLAG_IS_INPUT) != 0 { + return Err(FinalisedStateError::Custom( + "attempt to mark an input-row as spent".into(), + )); + } + // idempotent + if (flags & AddrHistRecord::FLAG_SPENT) != 0 { + return Ok(true); + } + + if (flags & AddrHistRecord::FLAG_MINED) == 0 { + return Err(FinalisedStateError::Custom( + "attempt to mark non-mined addrhist record as spent".into(), + )); + } + + hist_record[10] |= AddrHistRecord::FLAG_SPENT; + + let checksum = StoredEntryFixed::::blake2b256( + &[&addr_bytes, &hist_record[1..19]].concat(), + ); + hist_record[19..51].copy_from_slice(&checksum); + + cur.put(&addr_bytes, &hist_record, WriteFlags::CURRENT)?; + return Ok(true); + } + + Ok(false) + } + + /// Mark a specific AddrHistRecord as unspent in the addrhist DB. + /// Looks up a record by script and tx_location, sets FLAG_SPENT, and updates it in place. + /// + /// Returns Ok(true) if a record was updated, Ok(false) if not found, or Err on DB error. + /// + /// WARNING: This operates *inside* an existing RW txn and must **not** commit it. + #[cfg(feature = "transparent_address_history_experimental")] + pub(super) fn mark_addr_hist_record_unspent_in_txn( + &self, + txn: &mut lmdb::RwTransaction<'_>, + addr_script: &AddrScript, + + expected_prev_entry_bytes: &[u8], + ) -> Result { + let addr_bytes = addr_script.to_bytes()?; + + let mut cur = txn.open_rw_cursor(self.address_history)?; + + for (key, val) in cur.iter_dup_of(&addr_bytes)? 
{ + if key.len() != AddrScript::VERSIONED_LEN { + return Err(FinalisedStateError::Custom( + "address history key length mismatch".into(), + )); + } + if val.len() != StoredEntryFixed::::VERSIONED_LEN { + return Err(FinalisedStateError::Custom( + "address history value length mismatch".into(), + )); + } + + if val != expected_prev_entry_bytes { + continue; + } + + // we've located the exact duplicate bytes we built earlier. + let mut hist_record = [0u8; StoredEntryFixed::::VERSIONED_LEN]; + hist_record.copy_from_slice(val); + + // parse flags (located at byte index 10 in the StoredEntry layout) + let flags = hist_record[10]; + + // Sanity: the record we intend to mark should be a mined output (not an input). + if (flags & AddrHistRecord::FLAG_IS_INPUT) != 0 { + return Err(FinalisedStateError::Custom( + "attempt to mark an input-row as unspent".into(), + )); + } + + // If it's already unspent, treat as successful (idempotent). + if (flags & AddrHistRecord::FLAG_SPENT) == 0 { + drop(cur); + return Ok(true); + } + + // If the record is not marked MINED, that's an invariant failure. + // We surface it rather than producing a non-mined record. + if (flags & AddrHistRecord::FLAG_MINED) == 0 { + return Err(FinalisedStateError::Custom( + "attempt to mark non-mined addrhist record as unspent".into(), + )); + } + + // Preserve all existing flags (including MINED), and remove SPENT. + hist_record[10] &= !AddrHistRecord::FLAG_SPENT; + + // Recompute checksum over entry header + payload (bytes 1..19). + let checksum = StoredEntryFixed::::blake2b256( + &[&addr_bytes, &hist_record[1..19]].concat(), + ); + hist_record[19..51].copy_from_slice(&checksum); + + // Write back in place for the exact duplicate we matched. + cur.put(&addr_bytes, &hist_record, WriteFlags::CURRENT)?; + drop(cur); + + return Ok(true); + } + + Ok(false) + } + + /// Fetches the previous transparent output for the given outpoint. + /// Returns `TxOutCompact` or an explicit error if not found or invalid. 
+ /// + /// Used to build addrhist records. + /// + /// WARNING: This is a blocking function and **MUST** be called within a blocking thread / task. + #[cfg(feature = "transparent_address_history_experimental")] + pub(super) fn get_previous_output_blocking( + &self, + outpoint: Outpoint, + ) -> Result { + // Find the tx’s location in the chain + let prev_txid = TransactionHash::from(*outpoint.prev_txid()); + let tx_location = self + .find_txid_index_blocking(&prev_txid)? + .ok_or_else(|| FinalisedStateError::Custom("Previous txid not found".into()))?; + + // Fetch the output from the transparent db. + let block_height = tx_location.block_height(); + let tx_index = tx_location.tx_index() as usize; + let out_index = outpoint.prev_index() as usize; + + let ro = self.env.begin_ro_txn()?; + let height_key = Height(block_height).to_bytes()?; + let stored_bytes = ro.get(self.transparent, &height_key)?; + + Self::find_txout_in_stored_transparent_tx_list(stored_bytes, tx_index, out_index) + .ok_or_else(|| { + FinalisedStateError::Custom("Previous output not found at given index".into()) + }) + } + + /// Efficiently scans a raw `StoredEntryVar` buffer to locate the + /// specific output at [tx_idx, output_idx] without full deserialization. + /// + /// # Arguments + /// - `stored`: the raw LMDB byte buffer + /// - `target_tx_idx`: index in the tx list + /// - `target_output_idx`: index in the outputs of that tx + /// + /// # Returns + /// - `Some(TxOutCompact)` if found and present, otherwise `None` + #[cfg(feature = "transparent_address_history_experimental")] + #[inline] + fn find_txout_in_stored_transparent_tx_list( + stored: &[u8], + target_tx_idx: usize, + target_output_idx: usize, + ) -> Option { + const CHECKSUM_LEN: usize = 32; + + if stored.len() < TransactionHash::VERSION_TAG_LEN + 8 + CHECKSUM_LEN { + return None; + } + + let mut cursor = &stored[TransactionHash::VERSION_TAG_LEN..]; + let item_len = CompactSize::read(&mut cursor).ok()? 
as usize; + if cursor.len() < item_len + CHECKSUM_LEN { + return None; + } + + let (_record_version, mut remaining) = cursor.split_first()?; + let vec_len = CompactSize::read(&mut remaining).ok()? as usize; + + for i in 0..vec_len { + let (option_tag, rest) = remaining.split_first()?; + remaining = rest; + + if *option_tag == 0 { + // None: nothing to skip, go to next + if i == target_tx_idx { + return None; + } + } else if *option_tag == 1 { + let (_tx_version, rest) = remaining.split_first()?; + remaining = rest; + + let vin_len = CompactSize::read(&mut remaining).ok()? as usize; + + for _ in 0..vin_len { + if remaining.len() < TxInCompact::VERSIONED_LEN { + return None; + } + remaining = &remaining[TxInCompact::VERSIONED_LEN..]; + } + + let vout_len = CompactSize::read(&mut remaining).ok()? as usize; + + for out_idx in 0..vout_len { + if remaining.len() < TxOutCompact::VERSIONED_LEN { + return None; + } + + let out_bytes = &remaining[..TxOutCompact::VERSIONED_LEN]; + + if i == target_tx_idx && out_idx == target_output_idx { + return TxOutCompact::from_bytes(out_bytes).ok(); + } + + remaining = &remaining[TxOutCompact::VERSIONED_LEN..]; + } + } else { + // Non-canonical Option tag + return None; + } + } + None + } +} diff --git a/zaino-state/src/chain_index/finalised_state/db/v1/validation.rs b/zaino-state/src/chain_index/finalised_state/db/v1/validation.rs new file mode 100644 index 000000000..4d605c382 --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/db/v1/validation.rs @@ -0,0 +1,739 @@ +//! ZainoDB::V1 DB validation and varification functionality. +//! +//! The finalised-state database supports **incremental, concurrency-safe validation** of blocks that +//! have already been written to LMDB. +//! +//! Validation is tracked using two structures: +//! +//! - `validated_tip` (atomic u32): every height `<= validated_tip` is known-good (contiguous prefix). +//! 
- `validated_set` (DashSet): a sparse set of individually validated heights `> validated_tip` +//! (i.e., “holes” validated out-of-order). +//! +//! This scheme provides: +//! - O(1) fast-path for the common case (`height <= validated_tip`), +//! - O(1) expected membership tests above the tip, +//! - and an efficient “coalescing” step that advances `validated_tip` when gaps are filled. +//! +//! IMPORTANT: +//! - Validation here is *structural / integrity* validation of stored records plus basic chain +//! continuity checks (parent hash, header merkle root vs txids). +//! - It is intentionally “lightweight” and does **not** attempt full consensus verification. +//! - NOTE / TODO: It is planned to add basic shielded tx data validation using the "block_commitments" +//! field in [`BlockData`] however this is currently unimplemented. + +use super::*; + +impl DbV1 { + /// Return `true` if `height` is already known-good. + /// + /// Semantics: + /// - `height <= validated_tip` is always validated (contiguous prefix). + /// - For `height > validated_tip`, membership is tracked in `validated_set`. + /// + /// Performance: + /// - O(1) in the fast-path (`height <= validated_tip`). + /// - O(1) expected for DashSet membership checks when `height > validated_tip`. + /// + /// Concurrency: + /// - `validated_tip` is read with `Acquire` so subsequent reads of dependent state in the same + /// thread are not reordered before the tip read. + pub(super) fn is_validated(&self, h: u32) -> bool { + let tip = self.validated_tip.load(Ordering::Acquire); + h <= tip || self.validated_set.contains(&h) + } + + /// Mark `height` as validated and coalesce contiguous ranges into `validated_tip`. + /// + /// This method maintains the invariant: + /// - After completion, all heights `<= validated_tip` are validated. + /// - All validated heights `> validated_tip` remain represented in `validated_set`. + /// + /// Algorithm: + /// 1. 
If `height == validated_tip + 1`, attempt to atomically advance `validated_tip`. + /// 2. If that succeeds, repeatedly consume `validated_tip + 1` from `validated_set` and advance + /// `validated_tip` until the next height is not present. + /// 3. If `height > validated_tip + 1`, record it as an out-of-order validated “hole” in + /// `validated_set`. + /// 4. If `height <= validated_tip`, it is already covered by the contiguous prefix; no action. + /// + /// Concurrency: + /// - Uses CAS to ensure only one thread advances `validated_tip` at a time. + /// - Stores after successful coalescing use `Release` so other threads observing the new tip do not + /// see older state re-ordered after the tip update. + /// + /// NOTE: + /// - This function is intentionally tolerant of races: redundant inserts / removals are benign. + fn mark_validated(&self, h: u32) { + let mut next = h; + loop { + let tip = self.validated_tip.load(Ordering::Acquire); + + // Fast-path: extend the tip directly? + if next == tip + 1 { + // Try to claim the new tip. + if self + .validated_tip + .compare_exchange(tip, next, Ordering::AcqRel, Ordering::Acquire) + .is_ok() + { + // Successfully advanced; now look for further consecutive heights + // already in the DashSet. + next += 1; + while self.validated_set.remove(&next).is_some() { + self.validated_tip.store(next, Ordering::Release); + next += 1; + } + break; + } + // CAS failed: someone else updated the tip – retry loop. + } else if next > tip { + // Out-of-order hole: just remember it and exit. + self.validated_set.insert(next); + break; + } else { + // Already below tip – nothing to do. + break; + } + } + } + + /// Lightweight per-block validation. + /// + /// This validates the internal consistency of the LMDB-backed records for the specified + /// `(height, hash)` pair and marks the height as validated on success. 
+ /// + /// Validations performed: + /// - Per-height tables: checksum + deserialization integrity for: + /// - `headers` (BlockHeaderData) + /// - `txids` (TxidList) + /// - `transparent` (TransparentTxList) + /// - `sapling` (SaplingTxList) + /// - `orchard` (OrchardTxList) + /// - `commitment_tree_data` (CommitmentTreeData; fixed entry) + /// - Hash→height mapping: + /// - checksum integrity under `hash_key` + /// - mapped height equals the requested `height` + /// - Chain continuity: + /// - for `height > 1`, the block header `parent_hash` equals the stored hash at `height - 1` + /// - Header merkle root: + /// - merkle root computed from `txids` matches the header’s merkle root + /// - Transparent indices / histories: + /// - each non-coinbase transparent input must have a `spent` record pointing at this tx + /// - each transparent output must have an addrhist mined record + /// - each non-coinbase transparent input must have an addrhist input record + /// + /// Fast-path: + /// - If `height` is already known validated (`is_validated`), this is a no-op. + /// + /// Error semantics: + /// - Returns `FinalisedStateError::InvalidBlock { .. }` when any integrity/continuity check fails. + /// - Returns LMDB errors for underlying storage failures (e.g., missing keys), which are then + /// typically mapped by callers into `DataUnavailable` where appropriate. + /// + /// WARNING: + /// - This is a blocking function and **MUST** be called from a blocking context + /// (`tokio::task::block_in_place` or `spawn_blocking`). 
+ pub(super) fn validate_block_blocking( + &self, + height: Height, + hash: BlockHash, + ) -> Result<(), FinalisedStateError> { + if self.is_validated(height.into()) { + return Ok(()); + } + + let height_key = height + .to_bytes() + .map_err(|e| FinalisedStateError::Custom(format!("height serialize: {e}")))?; + let hash_key = hash + .to_bytes() + .map_err(|e| FinalisedStateError::Custom(format!("hash serialize: {e}")))?; + + // Helper to fabricate the error. + let fail = |reason: &str| FinalisedStateError::InvalidBlock { + height: height.into(), + hash, + reason: reason.to_owned(), + }; + + let ro = self.env.begin_ro_txn()?; + + // *** header *** + let header_entry = { + let raw = ro + .get(self.headers, &height_key) + .map_err(FinalisedStateError::LmdbError)?; + let entry = StoredEntryVar::::from_bytes(raw) + .map_err(|e| fail(&format!("header corrupt data: {e}")))?; + if !entry.verify(&height_key) { + return Err(fail("header checksum mismatch")); + } + entry + }; + + // *** txids *** + let txid_list_entry = { + let raw = ro + .get(self.txids, &height_key) + .map_err(FinalisedStateError::LmdbError)?; + let entry = StoredEntryVar::::from_bytes(raw) + .map_err(|e| fail(&format!("txids corrupt data: {e}")))?; + if !entry.verify(&height_key) { + return Err(fail("txids checksum mismatch")); + } + entry + }; + + // *** transparent *** + #[cfg(feature = "transparent_address_history_experimental")] + let transparent_tx_list = { + let raw = ro.get(self.transparent, &height_key)?; + let entry = StoredEntryVar::::from_bytes(raw) + .map_err(|e| fail(&format!("transparent corrupt data: {e}")))?; + if !entry.verify(&height_key) { + return Err(fail("transparent checksum mismatch")); + } + entry + }; + + // *** sapling *** + { + let raw = ro + .get(self.sapling, &height_key) + .map_err(FinalisedStateError::LmdbError)?; + let entry = StoredEntryVar::::from_bytes(raw) + .map_err(|e| fail(&format!("sapling corrupt data: {e}")))?; + if !entry.verify(&height_key) { + return 
Err(fail("sapling checksum mismatch")); + } + } + + // *** orchard *** + { + let raw = ro + .get(self.orchard, &height_key) + .map_err(FinalisedStateError::LmdbError)?; + let entry = StoredEntryVar::::from_bytes(raw) + .map_err(|e| fail(&format!("orchard corrupt data: {e}")))?; + if !entry.verify(&height_key) { + return Err(fail("orchard checksum mismatch")); + } + } + + // *** commitment_tree_data (fixed) *** + { + let raw = ro + .get(self.commitment_tree_data, &height_key) + .map_err(FinalisedStateError::LmdbError)?; + let entry = StoredEntryFixed::::from_bytes(raw) + .map_err(|e| fail(&format!("commitment_tree corrupt bytes: {e}")))?; + if !entry.verify(&height_key) { + return Err(fail("commitment_tree checksum mismatch")); + } + } + + // *** hash→height mapping *** + { + let raw = ro + .get(self.heights, &hash_key) + .map_err(FinalisedStateError::LmdbError)?; + let entry = StoredEntryFixed::::from_bytes(raw) + .map_err(|e| fail(&format!("hash -> height corrupt bytes: {e}")))?; + if !entry.verify(&hash_key) { + return Err(fail("hash -> height checksum mismatch")); + } + if entry.item != height { + return Err(fail("hash -> height mapping mismatch")); + } + } + + // *** Parent block hash validation (chain continuity) *** + if height.0 > 1 { + let parent_block_hash = { + let parent_block_height = Height::try_from(height.0.saturating_sub(1)) + .map_err(|e| fail(&format!("invalid parent height: {e}")))?; + let parent_block_height_key = parent_block_height + .to_bytes() + .map_err(|e| fail(&format!("parent height serialize: {e}")))?; + let raw = ro + .get(self.headers, &parent_block_height_key) + .map_err(FinalisedStateError::LmdbError)?; + let entry = StoredEntryVar::::from_bytes(raw) + .map_err(|e| fail(&format!("parent header corrupt data: {e}")))?; + + *entry.inner().index().hash() + }; + + let check_hash = header_entry.inner().index().parent_hash(); + + if &parent_block_hash != check_hash { + return Err(fail("parent hash mismatch")); + } + } + + // *** Merkle 
root / Txid validation *** + let txids: Vec<[u8; 32]> = txid_list_entry + .inner() + .txids() + .iter() + .map(|h| h.0) + .collect(); + + let header_merkle_root = header_entry.inner().data().merkle_root(); + + let check_root = Self::calculate_block_merkle_root(&txids); + + if &check_root != header_merkle_root { + return Err(fail("merkle root mismatch")); + } + + // *** spent + addrhist validation *** + #[cfg(feature = "transparent_address_history_experimental")] + { + let tx_list = transparent_tx_list.inner().tx(); + + for (tx_index, tx_opt) in tx_list.iter().enumerate() { + let tx_index = tx_index as u16; + let txid_index = TxLocation::new(height.0, tx_index); + + let Some(tx) = tx_opt else { continue }; + + // Outputs: check addrhist mined record + for (vout, output) in tx.outputs().iter().enumerate() { + let addr_bytes = + AddrScript::new(*output.script_hash(), output.script_type()).to_bytes()?; + let rec_bytes = self.addr_hist_records_by_addr_and_index_in_txn( + &ro, + &addr_bytes, + txid_index, + )?; + + let matched = rec_bytes.iter().any(|val| { + // avoid deserialization: check IS_MINED + correct vout + // - [0] StoredEntry tag + // - [1] record tag + // - [2..=5] height + // - [6..=7] tx_index + // - [8..=9] vout + // - [10] flags + // - [11..=18] value + // - [19..=50] checksum + + let flags = val[10]; + let vout_rec = u16::from_be_bytes([val[8], val[9]]); + (flags & AddrEventBytes::FLAG_MINED) != 0 && vout_rec as usize == vout + }); + + if !matched { + return Err(fail("missing addrhist mined output record")); + } + } + + // Inputs: check spent + addrhist input record + for (input_index, input) in tx.inputs().iter().enumerate() { + // Continue if coinbase. 
+ if input.is_null_prevout() { + continue; + } + + // Check spent record + let outpoint = Outpoint::new(*input.prevout_txid(), input.prevout_index()); + let outpoint_bytes = outpoint.to_bytes()?; + let val = ro.get(self.spent, &outpoint_bytes).map_err(|_| { + fail(&format!("missing spent index for outpoint {outpoint:?}")) + })?; + let entry = StoredEntryFixed::::from_bytes(val) + .map_err(|e| fail(&format!("corrupt spent entry: {e}")))?; + if !entry.verify(&outpoint_bytes) { + return Err(fail("spent entry checksum mismatch")); + } + if entry.inner() != &txid_index { + return Err(fail("spent entry has wrong TxLocation")); + } + + // Check addrhist input record + let prev_output = self.get_previous_output_blocking(outpoint)?; + let addr_bytes = + AddrScript::new(*prev_output.script_hash(), prev_output.script_type()) + .to_bytes()?; + let rec_bytes = self.addr_hist_records_by_addr_and_index_in_txn( + &ro, + &addr_bytes, + txid_index, + )?; + + let matched = rec_bytes.iter().any(|val| { + // avoid deserialization: check IS_INPUT + correct vout + // - [0] StoredEntry tag + // - [1] record tag + // - [2..=5] height + // - [6..=7] tx_index + // - [8..=9] vout + // - [10] flags + // - [11..=18] value + // - [19..=50] checksum + + let flags = val[10]; + let stored_vout = u16::from_be_bytes([val[8], val[9]]); + + (flags & AddrEventBytes::FLAG_IS_INPUT) != 0 + && stored_vout as usize == input_index + }); + + if !matched { + return Err(fail("missing addrhist input record")); + } + } + } + } + + self.mark_validated(height.into()); + Ok(()) + } + + /// Double-SHA-256 (SHA256d), as used by Bitcoin/Zcash headers and merkle nodes. + /// + /// Input and output are raw bytes (no endianness conversions are performed here). 
+ fn sha256d(data: &[u8]) -> [u8; 32] { + let mut hasher = Sha256::new(); + Digest::update(&mut hasher, data); // first pass + let first = hasher.finalize_reset(); + Digest::update(&mut hasher, first); // second pass + let second = hasher.finalize(); + + let mut out = [0u8; 32]; + out.copy_from_slice(&second); + out + } + + /// Compute the merkle root of a non-empty slice of 32-byte transaction IDs. + /// + /// Requirements: + /// - `txids` must be in block order. + /// - `txids` must already be in the internal byte order (little endian) expected by the header merkle root + /// comparison performed by this module (no byte order transforms are applied here). + /// + /// Behavior: + /// - Duplicates the final element when the layer width is odd, matching Bitcoin/Zcash merkle rules. + /// - Uses SHA256d over 64-byte concatenated pairs at each layer. + fn calculate_block_merkle_root(txids: &[[u8; 32]]) -> [u8; 32] { + assert!( + !txids.is_empty(), + "block must contain at least the coinbase" + ); + let mut layer: Vec<[u8; 32]> = txids.to_vec(); + + // Iterate until we have reduced to one hash. + while layer.len() > 1 { + let mut next = Vec::with_capacity(layer.len().div_ceil(2)); + + // Combine pairs (duplicate the last when the count is odd). + for chunk in layer.chunks(2) { + let left = &chunk[0]; + let right = if chunk.len() == 2 { + &chunk[1] + } else { + &chunk[0] + }; + + // Concatenate left‖right and hash twice. + let mut buf = [0u8; 64]; + buf[..32].copy_from_slice(left); + buf[32..].copy_from_slice(right); + next.push(Self::sha256d(&buf)); + } + + layer = next; + } + + layer[0] + } + + /// Ensure `height` is validated. If it's already validated this is a cheap O(1) check. + /// Otherwise this will perform blocking validation (`validate_block_blocking`) and mark + /// the height validated on success. + /// + /// This is the canonical, async-friendly entrypoint you should call from async code. 
+ pub(crate) async fn validate_height( + &self, + height: Height, + hash: BlockHash, + ) -> Result<(), FinalisedStateError> { + // Cheap fast-path first, no blocking. + if self.is_validated(height.into()) { + return Ok(()); + } + + // Run blocking validation in a blocking context. + // Using block_in_place keeps the per-call semantics similar to other callers. + tokio::task::block_in_place(|| self.validate_block_blocking(height, hash)) + } + + /// Validate a contiguous inclusive range of block heights `[start, end]`. + /// + /// This method is optimized to skip heights already known validated via `validated_tip` / + /// `validated_set`. + /// + /// Semantics: + /// - Accepts either ordering of `start` and `end`. + /// - Validates the inclusive set `{min(start,end) ..= max(start,end)}` in ascending order. + /// - If the entire normalized range is already validated, returns `(start, end)` without + /// touching LMDB (preserves the caller's original ordering). + /// - Otherwise, validates each missing height in ascending order using `validate_block_blocking`. + /// + /// WARNING: + /// - This uses `tokio::task::block_in_place` internally and performs LMDB reads; callers should + /// avoid invoking it from latency-sensitive async paths unless they explicitly intend to + /// validate on-demand. + pub(super) async fn validate_block_range( + &self, + start: Height, + end: Height, + ) -> Result<(Height, Height), FinalisedStateError> { + // Normalize the range for validation, but preserve `(start, end)` ordering in the return. 
+ let (range_start, range_end) = if start.0 <= end.0 { + (start, end) + } else { + (end, start) + }; + + let tip = self.validated_tip.load(Ordering::Acquire); + let mut h = std::cmp::max(range_start.0, tip); + + if h > range_end.0 { + return Ok((start, end)); + } + + tokio::task::block_in_place(|| { + while h <= range_end.0 { + if self.is_validated(h) { + h += 1; + continue; + } + + let height = Height(h); + let height_bytes = height.to_bytes()?; + let ro = self.env.begin_ro_txn()?; + let bytes = ro.get(self.headers, &height_bytes).map_err(|e| { + if e == lmdb::Error::NotFound { + FinalisedStateError::Custom("height not found in best chain".into()) + } else { + FinalisedStateError::LmdbError(e) + } + })?; + + let hash = *StoredEntryVar::::deserialize(bytes)? + .inner() + .index() + .hash(); + + match self.validate_block_blocking(height, hash) { + Ok(()) => {} + Err(FinalisedStateError::LmdbError(lmdb::Error::NotFound)) => { + return Err(FinalisedStateError::DataUnavailable( + "block data unavailable".into(), + )); + } + Err(e) => return Err(e), + } + + h += 1; + } + Ok::<_, FinalisedStateError>((start, end)) + }) + } + + /// Same as `resolve_hash_or_height`, **but guarantees the block is validated**. + /// + /// * If the block hasn’t been validated yet we do it on-demand + /// * On success the block hright is returned; on any failure you get a + /// `FinalisedStateError` + /// + /// TODO: Remove HashOrHeight? + pub(super) async fn resolve_validated_hash_or_height( + &self, + hash_or_height: HashOrHeight, + ) -> Result { + let height = match hash_or_height { + // Height lookup path. + HashOrHeight::Height(z_height) => { + let height = Height::try_from(z_height.0) + .map_err(|_| FinalisedStateError::Custom("height out of range".into()))?; + + // Check if height is below validated tip, + // this avoids hash lookups for height based fetch under the valdated tip. 
+ if height.0 <= self.validated_tip.load(Ordering::Acquire) { + return Ok(height); + } + + let hkey = height.to_bytes()?; + + tokio::task::block_in_place(|| { + let ro = self.env.begin_ro_txn()?; + let bytes = ro.get(self.headers, &hkey).map_err(|e| { + if e == lmdb::Error::NotFound { + FinalisedStateError::DataUnavailable( + "height not found in best chain".into(), + ) + } else { + FinalisedStateError::LmdbError(e) + } + })?; + + let hash = *StoredEntryVar::::deserialize(bytes)? + .inner() + .index() + .hash(); + + match self.validate_block_blocking(height, hash) { + Ok(()) => {} + Err(FinalisedStateError::LmdbError(lmdb::Error::NotFound)) => { + return Err(FinalisedStateError::DataUnavailable( + "block data unavailable".into(), + )); + } + Err(e) => return Err(e), + } + + Ok::(hash) + })?; + height + } + + // Hash lookup path. + HashOrHeight::Hash(z_hash) => { + let height = self.resolve_hash_or_height(hash_or_height).await?; + let hash = BlockHash::from(z_hash); + tokio::task::block_in_place(|| { + match self.validate_block_blocking(height, hash) { + Ok(()) => {} + Err(FinalisedStateError::LmdbError(lmdb::Error::NotFound)) => { + return Err(FinalisedStateError::DataUnavailable( + "block data unavailable".into(), + )); + } + Err(e) => return Err(e), + } + + Ok::(height) + })?; + height + } + }; + + Ok(height) + } + + /// Resolve a `HashOrHeight` to the block height stored on disk. + /// + /// * Height -> returned unchanged (zero cost). + /// * Hash -> lookup in `hashes` db. + /// + /// TODO: Remove HashOrHeight? + async fn resolve_hash_or_height( + &self, + hash_or_height: HashOrHeight, + ) -> Result { + match hash_or_height { + // Fast path: we already have the hash. + HashOrHeight::Height(z_height) => Ok(Height::try_from(z_height.0) + .map_err(|_| FinalisedStateError::DataUnavailable("height out of range".into()))?), + + // Height lookup path. 
+ HashOrHeight::Hash(z_hash) => { + let hash = BlockHash::from(z_hash.0); + let hkey = hash.to_bytes()?; + + let height: Height = tokio::task::block_in_place(|| { + let ro = self.env.begin_ro_txn()?; + let bytes = ro.get(self.heights, &hkey).map_err(|e| { + if e == lmdb::Error::NotFound { + FinalisedStateError::DataUnavailable( + "height not found in best chain".into(), + ) + } else { + FinalisedStateError::LmdbError(e) + } + })?; + + let entry = *StoredEntryFixed::::deserialize(bytes)?.inner(); + Ok::(entry) + })?; + + Ok(height) + } + } + } + + /// Ensure the `metadata` table contains **exactly** our `DB_SCHEMA_V1`. + /// + /// * Brand-new DB → insert the entry. + /// * Existing DB → verify checksum, version, and schema hash. + pub(super) async fn check_schema_version(&self) -> Result<(), FinalisedStateError> { + tokio::task::block_in_place(|| { + let mut txn = self.env.begin_rw_txn()?; + + match txn.get(self.metadata, b"metadata") { + // ***** Existing DB ***** + Ok(raw_bytes) => { + let stored: StoredEntryFixed = + StoredEntryFixed::from_bytes(raw_bytes).map_err(|e| { + FinalisedStateError::Custom(format!("corrupt metadata CBOR: {e}")) + })?; + if !stored.verify(b"metadata") { + return Err(FinalisedStateError::Custom( + "metadata checksum mismatch – DB corruption suspected".into(), + )); + } + + let meta = stored.item; + + // Error if major version differs + if meta.version.major != DB_VERSION_V1.major { + return Err(FinalisedStateError::Custom(format!( + "unsupported schema major version {} (expected {})", + meta.version.major, DB_VERSION_V1.major + ))); + } + + // Warn if schema hash mismatches + // NOTE: There could be a schema mismatch at launch during minor migrations, + // so we do not return an error here. Maybe we can improve this? 
+ if meta.schema_hash != DB_SCHEMA_V1_HASH { + warn!( + "schema hash mismatch: db_schema_v1.txt has likely changed \ + without bumping version; expected 0x{:02x?}, found 0x{:02x?}", + &DB_SCHEMA_V1_HASH[..4], + &meta.schema_hash[..4], + ); + } + } + + // ***** Fresh DB (key not found) ***** + Err(lmdb::Error::NotFound) => { + let entry = StoredEntryFixed::new( + b"metadata", + DbMetadata { + version: DB_VERSION_V1, + schema_hash: DB_SCHEMA_V1_HASH, + // Fresh database, no migration required. + migration_status: MigrationStatus::Empty, + }, + ); + txn.put( + self.metadata, + b"metadata", + &entry.to_bytes()?, + WriteFlags::NO_OVERWRITE, + )?; + } + + // ***** Any other LMDB error ***** + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + } + + txn.commit()?; + Ok(()) + }) + } +} diff --git a/zaino-state/src/chain_index/finalised_state/db/v1/write_core.rs b/zaino-state/src/chain_index/finalised_state/db/v1/write_core.rs new file mode 100644 index 000000000..0e4a8e33c --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/db/v1/write_core.rs @@ -0,0 +1,1033 @@ +//! ZainoDB::V1 core write functionality. + +use super::*; + +/// [`DbWrite`] capability implementation for [`DbV1`]. +/// +/// This trait represents the mutating surface (append / delete tip / update metadata). Writes are +/// performed via LMDB write transactions and validated before becoming visible as “known-good”. 
+#[async_trait] +impl DbWrite for DbV1 { + async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError> { + self.write_block(block).await + } + + async fn delete_block_at_height(&self, height: Height) -> Result<(), FinalisedStateError> { + self.delete_block_at_height(height).await + } + + async fn delete_block(&self, block: &IndexedBlock) -> Result<(), FinalisedStateError> { + self.delete_block(block).await + } + + async fn update_metadata(&self, metadata: DbMetadata) -> Result<(), FinalisedStateError> { + self.update_metadata(metadata).await + } +} + +impl DbV1 { + //! *** DB write / delete methods *** + //! **These should only ever be used in a single DB control task.** + + /// Writes a given (finalised) [`IndexedBlock`] to ZainoDB. + /// + /// NOTE: This method should never leave a block partially written to the database. + pub(crate) async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError> { + self.status.store(StatusType::Syncing); + let block_hash = *block.index().hash(); + let block_hash_bytes = block_hash.to_bytes()?; + let block_height = block.index().height(); + let block_height_bytes = block_height.to_bytes()?; + + // Check if this specific block already exists (idempotent write support for shared DB). + // This handles the case where multiple processes share the same ZainoDB. 
+ let block_already_exists = tokio::task::block_in_place(|| { + let ro = self.env.begin_ro_txn()?; + + // First, check if a block at this specific height already exists + match ro.get(self.headers, &block_height_bytes) { + Ok(stored_header_bytes) => { + // Block exists at this height - verify it's the same block + // Data is stored as StoredEntryVar, so deserialize properly + let stored_entry = + StoredEntryVar::::from_bytes(stored_header_bytes) + .map_err(|e| { + FinalisedStateError::Custom(format!( + "header decode error during idempotency check: {e}" + )) + })?; + let stored_header = stored_entry.inner(); + if *stored_header.index().hash() == block_hash { + // Same block already written, this is a no-op success + return Ok(true); + } else { + return Err(FinalisedStateError::Custom(format!( + "block at height {block_height:?} already exists with different hash \ + (stored: {:?}, incoming: {:?})", + stored_header.index().hash(), + block_hash + ))); + } + } + Err(lmdb::Error::NotFound) => { + // Block doesn't exist at this height, check if it's the next in sequence + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + } + + // Now verify this is the next block in the chain + let cur = ro.open_ro_cursor(self.headers)?; + match cur.get(None, None, lmdb_sys::MDB_LAST) { + // Database already has blocks + Ok((last_height_bytes, _last_header_bytes)) => { + let last_height = Height::from_bytes( + last_height_bytes.expect("Height is always some in the finalised state"), + )?; + + // Height must be exactly +1 over the current tip + if block_height.0 != last_height.0 + 1 { + return Err(FinalisedStateError::Custom(format!( + "cannot write block at height {block_height:?}; \ + current tip is {last_height:?}" + ))); + } + } + // no block in db, this must be genesis block. 
+ Err(lmdb::Error::NotFound) => { + if block_height.0 != GENESIS_HEIGHT.0 { + return Err(FinalisedStateError::Custom(format!( + "first block must be height 0, got {block_height:?}" + ))); + } + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + } + Ok::<_, FinalisedStateError>(false) + })?; + + // If block already exists with same hash, return success without re-writing + if block_already_exists { + self.status.store(StatusType::Ready); + info!( + "Block {} at height {} already exists in ZainoDB, skipping write.", + &block_hash, &block_height.0 + ); + return Ok(()); + } + + // Build DBHeight + let height_entry = StoredEntryFixed::new(&block_hash_bytes, block.index().height()); + + // Build header + let header_entry = StoredEntryVar::new( + &block_height_bytes, + BlockHeaderData::new(*block.index(), *block.data()), + ); + + // Build commitment tree data + let commitment_tree_entry = + StoredEntryFixed::new(&block_height_bytes, *block.commitment_tree_data()); + + // Build transaction indexes + let tx_len = block.transactions().len(); + let mut txids = Vec::with_capacity(tx_len); + let mut txid_set: HashSet = HashSet::with_capacity(tx_len); + let mut transparent = Vec::with_capacity(tx_len); + let mut sapling = Vec::with_capacity(tx_len); + let mut orchard = Vec::with_capacity(tx_len); + + #[cfg(feature = "transparent_address_history_experimental")] + let mut spent_map: HashMap = HashMap::new(); + + #[cfg(feature = "transparent_address_history_experimental")] + #[allow(clippy::type_complexity)] + let mut addrhist_inputs_map: HashMap< + AddrScript, + Vec<(AddrHistRecord, (AddrScript, AddrHistRecord))>, + > = HashMap::new(); + + #[cfg(feature = "transparent_address_history_experimental")] + let mut addrhist_outputs_map: HashMap> = HashMap::new(); + + for (_tx_index, tx) in block.transactions().iter().enumerate() { + let hash = tx.txid(); + + if txid_set.insert(*hash) { + txids.push(*hash); + } + + // Transparent transactions + let transparent_data = + if 
tx.transparent().inputs().is_empty() && tx.transparent().outputs().is_empty() { + None + } else { + Some(tx.transparent().clone()) + }; + transparent.push(transparent_data); + + // Sapling transactions + let sapling_data = + if tx.sapling().spends().is_empty() && tx.sapling().outputs().is_empty() { + None + } else { + Some(tx.sapling().clone()) + }; + sapling.push(sapling_data); + + // Orchard transactions + let orchard_data = if tx.orchard().actions().is_empty() { + None + } else { + Some(tx.orchard().clone()) + }; + orchard.push(orchard_data); + + #[cfg(feature = "transparent_address_history_experimental")] + { + // Transaction location + let tx_location = TxLocation::new(block_height.into(), _tx_index as u16); + + // Transparent Outputs: Build Address History + DbV1::build_transaction_output_histories( + &mut addrhist_outputs_map, + tx_location, + tx.transparent().outputs().iter().enumerate(), + ); + + // Transparent Inputs: Build Spent Outpoints Index and Address History + for (input_index, input) in tx.transparent().inputs().iter().enumerate() { + if input.is_null_prevout() { + continue; + } + let prev_outpoint = Outpoint::new(*input.prevout_txid(), input.prevout_index()); + spent_map.insert(prev_outpoint, tx_location); + + //Check if output is in *this* block, else fetch from DB. 
+ let prev_tx_hash = TransactionHash(*prev_outpoint.prev_txid()); + if txid_set.contains(&prev_tx_hash) { + // Fetch transaction index within block + if let Some(tx_index) = txids.iter().position(|h| h == &prev_tx_hash) { + // Fetch Transparent data for transaction + if let Some(Some(prev_transparent)) = transparent.get(tx_index) { + // Fetch output from transaction + if let Some(prev_output) = prev_transparent + .outputs() + .get(prev_outpoint.prev_index() as usize) + { + let prev_output_tx_location = + TxLocation::new(block_height.0, tx_index as u16); + DbV1::build_input_history( + &mut addrhist_inputs_map, + tx_location, + input_index as u16, + input, + prev_output, + prev_output_tx_location, + ); + } + } + } + } else if let Ok((prev_output, prev_output_tx_location)) = + tokio::task::block_in_place(|| { + let prev_output = self.get_previous_output_blocking(prev_outpoint)?; + let prev_output_tx_location = self + .find_txid_index_blocking(&TransactionHash::from( + *prev_outpoint.prev_txid(), + ))? 
+ .ok_or_else(|| { + FinalisedStateError::Custom("Previous txid not found".into()) + })?; + Ok::<(_, _), FinalisedStateError>(( + prev_output, + prev_output_tx_location, + )) + }) + { + DbV1::build_input_history( + &mut addrhist_inputs_map, + tx_location, + input_index as u16, + input, + &prev_output, + prev_output_tx_location, + ); + } else { + return Err(FinalisedStateError::InvalidBlock { + height: block.height().0, + hash: *block.hash(), + reason: "Invalid block data: invalid transparent input.".to_string(), + }); + } + } + } + } + + let txid_entry = StoredEntryVar::new(&block_height_bytes, TxidList::new(txids)); + let transparent_entry = + StoredEntryVar::new(&block_height_bytes, TransparentTxList::new(transparent)); + let sapling_entry = StoredEntryVar::new(&block_height_bytes, SaplingTxList::new(sapling)); + let orchard_entry = StoredEntryVar::new(&block_height_bytes, OrchardTxList::new(orchard)); + + // if any database writes fail, or block validation fails, remove block from database and return err. 
+ let zaino_db = Self { + env: Arc::clone(&self.env), + headers: self.headers, + txids: self.txids, + transparent: self.transparent, + sapling: self.sapling, + orchard: self.orchard, + commitment_tree_data: self.commitment_tree_data, + heights: self.heights, + #[cfg(feature = "transparent_address_history_experimental")] + spent: self.spent, + #[cfg(feature = "transparent_address_history_experimental")] + address_history: self.address_history, + metadata: self.metadata, + validated_tip: Arc::clone(&self.validated_tip), + validated_set: self.validated_set.clone(), + db_handler: None, + status: self.status.clone(), + config: self.config.clone(), + }; + let join_handle = tokio::task::spawn_blocking(move || { + // Write block to ZainoDB + let mut txn = zaino_db.env.begin_rw_txn()?; + + txn.put( + zaino_db.headers, + &block_height_bytes, + &header_entry.to_bytes()?, + WriteFlags::NO_OVERWRITE, + )?; + + txn.put( + zaino_db.heights, + &block_hash_bytes, + &height_entry.to_bytes()?, + WriteFlags::NO_OVERWRITE, + )?; + + txn.put( + zaino_db.txids, + &block_height_bytes, + &txid_entry.to_bytes()?, + WriteFlags::NO_OVERWRITE, + )?; + + txn.put( + zaino_db.transparent, + &block_height_bytes, + &transparent_entry.to_bytes()?, + WriteFlags::NO_OVERWRITE, + )?; + + txn.put( + zaino_db.sapling, + &block_height_bytes, + &sapling_entry.to_bytes()?, + WriteFlags::NO_OVERWRITE, + )?; + + txn.put( + zaino_db.orchard, + &block_height_bytes, + &orchard_entry.to_bytes()?, + WriteFlags::NO_OVERWRITE, + )?; + + txn.put( + zaino_db.commitment_tree_data, + &block_height_bytes, + &commitment_tree_entry.to_bytes()?, + WriteFlags::NO_OVERWRITE, + )?; + + #[cfg(feature = "transparent_address_history_experimental")] + { + // Write spent to ZainoDB + for (outpoint, tx_location) in spent_map { + let outpoint_bytes = &outpoint.to_bytes()?; + let tx_location_entry_bytes = + StoredEntryFixed::new(outpoint_bytes, tx_location).to_bytes()?; + txn.put( + zaino_db.spent, + &outpoint_bytes, + 
&tx_location_entry_bytes, + WriteFlags::NO_OVERWRITE, + )?; + } + + // Write outputs to ZainoDB addrhist + for (addr_script, records) in addrhist_outputs_map { + let addr_bytes = addr_script.to_bytes()?; + + // Convert all records to their StoredEntryFixed for ordering. + let mut stored_entries = Vec::with_capacity(records.len()); + for record in records { + let packed_record = AddrEventBytes::from_record(&record).map_err(|e| { + FinalisedStateError::Custom(format!("AddrEventBytes pack error: {e:?}")) + })?; + let entry = StoredEntryFixed::new(&addr_bytes, packed_record); + let entry_bytes = entry.to_bytes()?; + stored_entries.push((record, entry_bytes)); + } + + // Order by byte encoding for LMDB DUP_SORT insertion order + stored_entries.sort_by(|a, b| a.1.cmp(&b.1)); + + for (_record, record_entry_bytes) in stored_entries { + txn.put( + zaino_db.address_history, + &addr_bytes, + &record_entry_bytes, + WriteFlags::empty(), + )?; + } + } + + // Write inputs to ZainoDB addrhist + for (addr_script, records) in addrhist_inputs_map { + let addr_bytes = addr_script.to_bytes()?; + + // Convert all records to their StoredEntryFixed for ordering. 
+ let mut stored_entries = Vec::with_capacity(records.len()); + for (record, prev_output) in records { + let packed_record = AddrEventBytes::from_record(&record).map_err(|e| { + FinalisedStateError::Custom(format!("AddrEventBytes pack error: {e:?}")) + })?; + let entry = StoredEntryFixed::new(&addr_bytes, packed_record); + let entry_bytes = entry.to_bytes()?; + stored_entries.push((record, entry_bytes, prev_output)); + } + + // Order by byte encoding for LMDB DUP_SORT insertion order + stored_entries.sort_by(|a, b| a.1.cmp(&b.1)); + + for (_record, record_entry_bytes, (prev_output_script, prev_output_record)) in + stored_entries + { + txn.put( + zaino_db.address_history, + &addr_bytes, + &record_entry_bytes, + WriteFlags::empty(), + )?; + + // mark corresponding output as spent + let prev_addr_bytes = prev_output_script.to_bytes()?; + let packed_prev = AddrEventBytes::from_record(&prev_output_record) + .map_err(|e| { + FinalisedStateError::Custom(format!( + "AddrEventBytes pack error: {e:?}" + )) + })?; + let prev_entry_bytes = + StoredEntryFixed::new(&prev_addr_bytes, packed_prev).to_bytes()?; + let updated = zaino_db.mark_addr_hist_record_spent_in_txn( + &mut txn, + &prev_output_script, + &prev_entry_bytes, + )?; + if !updated { + // Log and treat as invalid block — marking the prev-output must succeed. 
+ return Err(FinalisedStateError::InvalidBlock { + height: block_height.0, + hash: block_hash, + reason: format!( + "failed to mark prev-output spent: addr={} tloc={:?} vout={}", + hex::encode(addr_bytes), + prev_output_record.tx_location(), + prev_output_record.out_index() + ), + }); + } + } + } + } + + txn.commit()?; + + zaino_db.env.sync(true).map_err(|e| { + FinalisedStateError::Custom(format!("LMDB sync failed before validation: {e}")) + })?; + + zaino_db.validate_block_blocking(block_height, block_hash)?; + + Ok::<_, FinalisedStateError>(()) + }); + + // Wait for the join and handle panic / cancellation explicitly so we can + // attempt to remove any partially written block. + let post_result = match join_handle.await { + Ok(inner_res) => inner_res, + Err(join_err) => { + warn!("Tokio task error (spawn_blocking join error): {}", join_err); + + // Best-effort delete of partially written block; ignore delete result. + let _ = self.delete_block(&block).await; + + return Err(FinalisedStateError::Custom(format!( + "Tokio task error: {}", + join_err + ))); + } + }; + + match post_result { + Ok(_) => { + tokio::task::block_in_place(|| self.env.sync(true)) + .map_err(|e| FinalisedStateError::Custom(format!("LMDB sync failed: {e}")))?; + self.status.store(StatusType::Ready); + if block.index().height().0 % 100 == 0 { + info!( + "Successfully committed block {} at height {} to ZainoDB.", + &block.index().hash(), + &block.index().height() + ); + } else { + tracing::debug!( + "Successfully committed block {} at height {} to ZainoDB.", + &block.index().hash(), + &block.index().height() + ); + } + + Ok(()) + } + Err(FinalisedStateError::LmdbError(lmdb::Error::KeyExist)) => { + // Block write failed because key already exists - another process wrote it + // between our check and our write. + // + // Wait briefly and verify it's the same block and was fully written to the finalised state. 
+ // Partially written block should be deleted from the database and the write error reported + // so the on disk tables are never corrupted by a partial block writes. + tokio::time::sleep(std::time::Duration::from_millis(50)).await; + + let height_bytes = block_height.to_bytes()?; + let verification_result = tokio::task::block_in_place(|| { + // Sync to see latest commits from other processes + self.env.sync(true).ok(); + let ro = self.env.begin_ro_txn()?; + match ro.get(self.headers, &height_bytes) { + Ok(stored_header_bytes) => { + // Data is stored as StoredEntryVar + let stored_entry = + StoredEntryVar::::from_bytes(stored_header_bytes) + .map_err(|e| { + FinalisedStateError::Custom(format!( + "header decode error in KeyExist handler: {e}" + )) + })?; + let stored_header = stored_entry.inner(); + if *stored_header.index().hash() == block_hash { + // Block hash exists, verify block was fully written. + self.validate_block_blocking(block_height, block_hash) + .map(|()| true) + .map_err(|e| { + FinalisedStateError::Custom(format!( + "Block write fail at height {}, with hash {:?}, \ + validation error: {}", + block_height.0, block_hash, e + )) + }) + } else { + Err(FinalisedStateError::Custom(format!( + "KeyExist race: different block at height {} \ + (stored: {:?}, incoming: {:?})", + block_height.0, + stored_header.index().hash(), + block_hash + ))) + } + } + Err(lmdb::Error::NotFound) => Err(FinalisedStateError::Custom(format!( + "KeyExist but block not found at height {} after sync", + block_height.0 + ))), + Err(e) => Err(FinalisedStateError::LmdbError(e)), + } + }); + + match verification_result { + Ok(_) => { + // Block was already written correctly by another process + self.status.store(StatusType::Ready); + info!( + "Block {} at height {} was already written by another process, skipping.", + &block_hash, &block_height.0 + ); + Ok(()) + } + Err(e) => { + warn!("Error writing block to DB: {e}"); + warn!( + "Deleting corrupt block from DB at height: {} with 
hash: {:?}", + block_height.0, block_hash.0 + ); + + let _ = self.delete_block(&block).await; + tokio::task::block_in_place(|| self.env.sync(true)).map_err(|e| { + FinalisedStateError::Custom(format!("LMDB sync failed: {e}")) + })?; + self.status.store(StatusType::CriticalError); + self.status.store(StatusType::RecoverableError); + Err(FinalisedStateError::InvalidBlock { + height: block_height.0, + hash: block_hash, + reason: e.to_string(), + }) + } + } + } + Err(e) => { + warn!("Error writing block to DB: {e}"); + warn!( + "Deleting corrupt block from DB at height: {} with hash: {:?}", + block_height.0, block_hash.0 + ); + + let _ = self.delete_block(&block).await; + tokio::task::block_in_place(|| self.env.sync(true)) + .map_err(|e| FinalisedStateError::Custom(format!("LMDB sync failed: {e}")))?; + + // NOTE: this does not need to be critical if we implement self healing, + // which we have the tools to do. + self.status.store(StatusType::CriticalError); + + if e.to_string().contains("MDB_MAP_FULL") { + warn!("Configured max database size exceeded, update `storage.database.size` in zaino's config."); + return Err(FinalisedStateError::Custom(format!( + "Database configuration error: {e}" + ))); + } + + Err(FinalisedStateError::InvalidBlock { + height: block_height.0, + hash: block_hash, + reason: e.to_string(), + }) + } + } + } + + /// Deletes a block identified height from every finalised table. 
    pub(crate) async fn delete_block_at_height(
        &self,
        height: Height,
    ) -> Result<(), FinalisedStateError> {
        // Check block is at the top of the finalised state: position a cursor at `height`
        // and require (a) an exact key match and (b) no entry after it.
        tokio::task::block_in_place(|| {
            let height_bytes = height.to_bytes()?;
            let ro = self.env.begin_ro_txn()?;
            let mut cursor = ro.open_ro_cursor(self.headers)?;

            let mut iter = cursor.iter_from(&height_bytes);

            let Some((current_height_bytes, _)) = iter.next() else {
                return Err(FinalisedStateError::Custom("block not found".into()));
            };
            // iter_from seeks to the first key >= height_bytes; a mismatch means the
            // requested height is absent from `headers`.
            if current_height_bytes != height_bytes.as_slice() {
                return Err(FinalisedStateError::Custom(format!(
                    "block with height {:?} not found in headers",
                    Height::from_bytes(&height_bytes)?
                )));
            }

            // Any further entry means a higher block exists, so this is not the tip.
            if iter.next().is_some() {
                return Err(FinalisedStateError::Custom(format!(
                    "can only delete tip block at height {:?}, but higher blocks exist",
                    Height::from_bytes(&height_bytes)?
                )));
            }
            Ok::<_, FinalisedStateError>(())
        })?;

        // fetch chain_block from db and delete (delete_block needs the full block to
        // reverse the address-history / spent-outpoint side effects)
        let Some(chain_block) = self.get_chain_block(height).await? else {
            return Err(FinalisedStateError::DataUnavailable(format!(
                "attempted to delete missing block: {}",
                height.0
            )));
        };
        self.delete_block(&chain_block).await?;

        // update validated_tip / validated_set
        // NOTE(review): heights above the tip appear to be tracked individually in
        // `validated_set`, while the tip itself is rolled back by one — confirm against
        // the validated-set invariants.
        let validated_tip = self.validated_tip.load(Ordering::Acquire);
        if height.0 > validated_tip {
            self.validated_set.remove(&height.0);
        } else if height.0 == validated_tip {
            self.validated_tip
                .store(validated_tip.saturating_sub(1), Ordering::Release);
        }

        // Flush the deletion to disk before reporting success.
        tokio::task::block_in_place(|| {
            self.env
                .sync(true)
                .map_err(|e| FinalisedStateError::Custom(format!("LMDB sync failed: {e}")))?;
            Ok::<_, FinalisedStateError>(())
        })?;

        Ok(())
    }

    /// This is used as a backup when delete_block_at_height fails.
    ///
    /// Takes an IndexedBlock as input and ensures all data from this block is wiped from the database.
+ /// + /// The IndexedBlock ir required to ensure that Outputs spent at this block height are re-marked as unspent. + /// + /// WARNING: No checks are made that this block is at the top of the finalised state, and validated tip is not updated. + /// This enables use for correcting corrupt data within the database but it is left to the user to ensure safe use. + /// Where possible delete_block_at_height should be used instead. + /// + /// NOTE: LMDB database errors are propageted as these show serious database errors, + /// all other errors are returned as `IncorrectBlock`, if this error is returned the block requested + /// should be fetched from the validator and this method called with the correct data. + pub(crate) async fn delete_block( + &self, + block: &IndexedBlock, + ) -> Result<(), FinalisedStateError> { + // Check block height and hash + let block_height = block.index().height(); + let block_height_bytes = + block_height + .to_bytes() + .map_err(|_| FinalisedStateError::InvalidBlock { + height: block.height().0, + hash: *block.hash(), + reason: "Corrupt block data: failed to serialise hash".to_string(), + })?; + + let block_hash = *block.index().hash(); + let block_hash_bytes = + block_hash + .to_bytes() + .map_err(|_| FinalisedStateError::InvalidBlock { + height: block.height().0, + hash: *block.hash(), + reason: "Corrupt block data: failed to serialise hash".to_string(), + })?; + + // Build transaction indexes + let tx_len = block.transactions().len(); + let mut txids = Vec::with_capacity(tx_len); + let mut txid_set: HashSet = HashSet::with_capacity(tx_len); + let mut transparent = Vec::with_capacity(tx_len); + + #[cfg(feature = "transparent_address_history_experimental")] + let mut spent_map: Vec = Vec::new(); + + #[cfg(feature = "transparent_address_history_experimental")] + #[allow(clippy::type_complexity)] + let mut addrhist_inputs_map: HashMap< + AddrScript, + Vec<(AddrHistRecord, (AddrScript, AddrHistRecord))>, + > = HashMap::new(); + + 
#[cfg(feature = "transparent_address_history_experimental")] + let mut addrhist_outputs_map: HashMap> = HashMap::new(); + + for (_tx_index, tx) in block.transactions().iter().enumerate() { + let hash = tx.txid(); + + if txid_set.insert(*hash) { + txids.push(*hash); + } + + // Transparent transactions + let transparent_data = + if tx.transparent().inputs().is_empty() && tx.transparent().outputs().is_empty() { + None + } else { + Some(tx.transparent().clone()) + }; + transparent.push(transparent_data); + + #[cfg(feature = "transparent_address_history_experimental")] + { + // Transaction location + let tx_location = TxLocation::new(block_height.into(), _tx_index as u16); + + // Transparent Outputs: Build Address History + DbV1::build_transaction_output_histories( + &mut addrhist_outputs_map, + tx_location, + tx.transparent().outputs().iter().enumerate(), + ); + + // Transparent Inputs: Build Spent Outpoints Index and Address History + for (input_index, input) in tx.transparent().inputs().iter().enumerate() { + if input.is_null_prevout() { + continue; + } + let prev_outpoint = Outpoint::new(*input.prevout_txid(), input.prevout_index()); + spent_map.push(prev_outpoint); + + //Check if output is in *this* block, else fetch from DB. 
+ let prev_tx_hash = TransactionHash(*prev_outpoint.prev_txid()); + if txid_set.contains(&prev_tx_hash) { + // Fetch transaction index within block + if let Some(tx_index) = txids.iter().position(|h| h == &prev_tx_hash) { + // Fetch Transparent data for transaction + if let Some(Some(prev_transparent)) = transparent.get(tx_index) { + // Fetch output from transaction + if let Some(prev_output) = prev_transparent + .outputs() + .get(prev_outpoint.prev_index() as usize) + { + let prev_output_tx_location = + TxLocation::new(block_height.0, tx_index as u16); + DbV1::build_input_history( + &mut addrhist_inputs_map, + tx_location, + input_index as u16, + input, + prev_output, + prev_output_tx_location, + ); + } + } + } + } else if let Ok((prev_output, prev_output_tx_location)) = + tokio::task::block_in_place(|| { + let prev_output = self.get_previous_output_blocking(prev_outpoint)?; + + let prev_output_tx_location = self + .find_txid_index_blocking(&TransactionHash::from( + *prev_outpoint.prev_txid(), + )) + .map_err(|e| FinalisedStateError::InvalidBlock { + height: block.height().0, + hash: *block.hash(), + reason: e.to_string(), + })? + .ok_or_else(|| FinalisedStateError::InvalidBlock { + height: block.height().0, + hash: *block.hash(), + reason: "Invalid block data: invalid txid data.".to_string(), + })?; + + Ok::<(_, _), FinalisedStateError>(( + prev_output, + prev_output_tx_location, + )) + }) + { + DbV1::build_input_history( + &mut addrhist_inputs_map, + tx_location, + input_index as u16, + input, + &prev_output, + prev_output_tx_location, + ); + } else { + return Err(FinalisedStateError::InvalidBlock { + height: block.height().0, + hash: *block.hash(), + reason: "Invalid block data: invalid transparent input.".to_string(), + }); + } + } + } + } + + // Delete all block data from db. 
+ let zaino_db = Self { + env: Arc::clone(&self.env), + headers: self.headers, + txids: self.txids, + transparent: self.transparent, + sapling: self.sapling, + orchard: self.orchard, + commitment_tree_data: self.commitment_tree_data, + heights: self.heights, + #[cfg(feature = "transparent_address_history_experimental")] + spent: self.spent, + #[cfg(feature = "transparent_address_history_experimental")] + address_history: self.address_history, + metadata: self.metadata, + validated_tip: Arc::clone(&self.validated_tip), + validated_set: self.validated_set.clone(), + db_handler: None, + status: self.status.clone(), + config: self.config.clone(), + }; + tokio::task::spawn_blocking(move || { + // Delete spent data + let mut txn = zaino_db.env.begin_rw_txn()?; + + #[cfg(feature = "transparent_address_history_experimental")] + { + for outpoint in &spent_map { + let outpoint_bytes = + &outpoint + .to_bytes() + .map_err(|_| FinalisedStateError::InvalidBlock { + height: block_height.0, + hash: block_hash, + reason: "Corrupt block data: failed to serialise outpoint" + .to_string(), + })?; + match txn.del(zaino_db.spent, outpoint_bytes, None) { + Ok(()) | Err(lmdb::Error::NotFound) => {} + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + } + } + + // Delete addrhist input data and mark old outputs spent in this block as unspent + for (addr_script, records) in &addrhist_inputs_map { + let addr_bytes = addr_script.to_bytes()?; + + // Mark outputs spent in this block as unspent + for (_record, (prev_output_script, prev_output_record)) in records { + { + let prev_addr_bytes = prev_output_script.to_bytes()?; + let packed_prev = AddrEventBytes::from_record(prev_output_record) + .map_err(|e| { + FinalisedStateError::Custom(format!( + "AddrEventBytes pack error: {e:?}" + )) + })?; + + // Build the *spent* form of the stored entry so it matches the DB + // (mark_addr_hist_record_spent_blocking sets FLAG_SPENT and + // recomputes the checksum). 
We must pass the spent bytes here + // because the DB currently contains the spent version. + let prev_entry_bytes = + StoredEntryFixed::new(&prev_addr_bytes, packed_prev).to_bytes()?; + + // Turn the mined-entry into the spent-entry (mutate flags + checksum) + let mut spent_prev_entry = prev_entry_bytes.clone(); + // Set SPENT flag (flags byte is at index 10 in StoredEntry layout) + spent_prev_entry[10] |= AddrHistRecord::FLAG_SPENT; + // Recompute checksum over bytes 1..19 as StoredEntryFixed expects. + let checksum = StoredEntryFixed::::blake2b256( + &[&prev_addr_bytes, &spent_prev_entry[1..19]].concat(), + ); + spent_prev_entry[19..51].copy_from_slice(&checksum); + + let updated = zaino_db.mark_addr_hist_record_unspent_in_txn( + &mut txn, + prev_output_script, + &spent_prev_entry, + )?; + + if !updated { + // Log and treat as invalid block — marking the prev-output must succeed. + return Err(FinalisedStateError::InvalidBlock { + height: block_height.0, + hash: block_hash, + reason: format!( + "failed to mark prev-output spent: addr={} tloc={:?} vout={}", + hex::encode(addr_bytes), + prev_output_record.tx_location(), + prev_output_record.out_index() + ), + }); + } + } + } + + // Delete all input records created in this block. + zaino_db + .delete_addrhist_dups_in_txn( + &mut txn, + &addr_script.to_bytes().map_err(|_| { + FinalisedStateError::InvalidBlock { + height: block_height.0, + hash: block_hash, + reason: "Corrupt block data: failed to serialise addr_script" + .to_string(), + } + })?, + block_height, + true, + false, + records.len(), + ) + // TODO: check internals to propagate important errors. 
+ .map_err(|_| FinalisedStateError::InvalidBlock { + height: block_height.0, + hash: block_hash, + reason: "Corrupt block data: failed to delete inputs".to_string(), + })?; + } + + // Delete addrhist output data + for (addr_script, records) in &addrhist_outputs_map { + zaino_db.delete_addrhist_dups_in_txn( + &mut txn, + &addr_script + .to_bytes() + .map_err(|_| FinalisedStateError::InvalidBlock { + height: block_height.0, + hash: block_hash, + reason: "Corrupt block data: failed to serialise addr_script" + .to_string(), + })?, + block_height, + false, + true, + records.len(), + )?; + } + } + + // Delete block data + for &db in &[ + zaino_db.headers, + zaino_db.txids, + zaino_db.transparent, + zaino_db.sapling, + zaino_db.orchard, + zaino_db.commitment_tree_data, + ] { + match txn.del(db, &block_height_bytes, None) { + Ok(()) | Err(lmdb::Error::NotFound) => {} + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + } + } + + match txn.del(zaino_db.heights, &block_hash_bytes, None) { + Ok(()) | Err(lmdb::Error::NotFound) => {} + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + } + + let _ = txn.commit(); + + zaino_db + .env + .sync(true) + .map_err(|e| FinalisedStateError::Custom(format!("LMDB sync failed: {e}")))?; + + Ok::<_, FinalisedStateError>(()) + }) + .await + .map_err(|e| FinalisedStateError::Custom(format!("Tokio task error: {e}")))??; + Ok(()) + } + + /// Updates the metadata hed by the database. 
+ pub(crate) async fn update_metadata( + &self, + metadata: DbMetadata, + ) -> Result<(), FinalisedStateError> { + tokio::task::block_in_place(|| { + let mut txn = self.env.begin_rw_txn()?; + + let entry = StoredEntryFixed::new(b"metadata", metadata); + txn.put( + self.metadata, + b"metadata", + &entry.to_bytes()?, + WriteFlags::empty(), + )?; + + txn.commit()?; + Ok(()) + }) + } + + // *** Internal DB methods *** +} diff --git a/zaino-state/src/chain_index/finalised_state/entry.rs b/zaino-state/src/chain_index/finalised_state/entry.rs new file mode 100644 index 000000000..73183f376 --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/entry.rs @@ -0,0 +1,314 @@ +//! Checksummed database entry wrappers (fixed and variable length) +//! +//! This file defines small wrapper types used by concrete DB versions for storing values in +//! LMDB with an **integrity checksum**. +//! +//! Each wrapper stores: +//! - the inner *versioned* record `T: ZainoVersionedSerde`, and +//! - a BLAKE2b-256 checksum computed over `key || encoded_item`. +//! +//! The checksum is intended to: +//! - detect corruption or partial writes, +//! - detect accidental key/value mismatches (e.g., writing under the wrong key encoding), +//! - and provide a cheap integrity check during migrations or debugging. +//! +//! ## Integrity model (scope) +//! +//! The checksum is a **corruption and correctness** signal, not a cryptographic authentication +//! mechanism. It helps detect accidental corruption, partial writes, or key/value mismatches, but +//! it does not provide authenticity against a malicious database writer, this must be ensured in +//! actual database implementations by validating block data on startup and on block writes. +//! +//! # Two wrapper forms +//! +//! - [`StoredEntryFixed`] for fixed-length values: +//! - requires `T: FixedEncodedLen` so that the total encoded value length is constant. +//! 
- important when LMDB uses `DUP_SORT` and/or `DUP_FIXED` flags where record sizing matters. +//! +//! - [`StoredEntryVar`] for variable-length values: +//! - prefixes the serialized record with a CompactSize length so decoding is bounded and safe. +//! +//! Both wrappers are themselves versioned (`ZainoVersionedSerde`), which means their outer layout can +//! evolve in a controlled way if required. +//! +//! # Encoding contract (conceptual) +//! +//! `StoredEntryFixed` encodes as: +//! - StoredEntry version tag +//! - `T::serialize()` bytes (which include `T`'s own record version tag) +//! - 32-byte checksum +//! +//! `StoredEntryVar` encodes as: +//! - StoredEntry version tag +//! - CompactSize(length of `T::serialize()` bytes) +//! - `T::serialize()` bytes +//! - 32-byte checksum +//! +//! # Usage guidelines +//! +//! - Always compute the checksum using the **exact bytes** used as the DB key (i.e. the encoded key). +//! - On read, verify the checksum before trusting decoded contents. +//! - Treat checksum mismatch as a corruption/incompatibility signal: +//! - return a hard error, +//! - or trigger a rebuild path, depending on the calling context. +//! +//! # Development: when to pick fixed vs var +//! +//! - Use `StoredEntryFixed` when: +//! - `T` has a stable, fixed-size encoding and you want predictable sizing, or +//! - the LMDB table relies on fixed-size duplicates. +//! +//! - Use `StoredEntryVar` when: +//! - `T` naturally contains variable-length payloads (vectors, scripts, etc.), or +//! - the value size may grow over time and you want to avoid schema churn. +//! +//! If you change the wrapper layout itself, bump the wrapper’s `ZainoVersionedSerde::VERSION` and +//! maintain a decode path (or bump the DB major version and migrate). 
+ +use crate::{ + read_fixed_le, version, write_fixed_le, CompactSize, FixedEncodedLen, ZainoVersionedSerde, +}; + +use blake2::{ + digest::{Update, VariableOutput}, + Blake2bVar, +}; +use core2::io::{self, Read, Write}; + +/// Fixed-length checksummed database value wrapper. +/// +/// This wrapper is designed for LMDB tables that rely on fixed-size value records, including those +/// configured with `DUP_SORT` and/or `DUP_FIXED`. +/// +/// The wrapper stores: +/// - a versioned record `T` (encoded via [`ZainoVersionedSerde`]), and +/// - a 32-byte BLAKE2b-256 checksum computed over `encoded_key || encoded_item`. +/// +/// ## Invariants +/// - `T` must have a fixed encoded length (including its own version tag), enforced by +/// [`FixedEncodedLen`]. +/// - The checksum must be computed using the **exact key bytes** used in LMDB for this entry. +/// - On read, callers should verify the checksum before trusting decoded contents. +/// +/// ## Encoded format (conceptual) +/// +/// ┌─────── byte 0 ───────┬────────────── T::serialize() bytes ──────────────┬─── 32 bytes ────┐ +/// │ StoredEntry version │ (includes T's own record version tag + body) │ B2B256 checksum │ +/// └──────────────────────┴──────────────────────────────────────────────────┴─────────────────┘ +/// +/// Where the checksum is: +/// `blake2b256(encoded_key || encoded_item_bytes)`. +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) struct StoredEntryFixed { + /// The inner record stored in this entry. + pub(crate) item: T, + + /// BLAKE2b-256 checksum of `encoded_key || encoded_item_bytes`. + pub(crate) checksum: [u8; 32], +} + +impl StoredEntryFixed { + /// Constructs a new checksummed entry for `item` under `key`. + /// + /// The checksum is computed as: + /// `blake2b256(encoded_key || item.serialize())`. + /// + /// # Key requirements + /// `key` must be the exact byte encoding used as the LMDB key for this record. 
If the caller + /// hashes a different key encoding than what is used for storage, verification will fail. + pub(crate) fn new>(key: K, item: T) -> Self { + let body = { + let mut v = Vec::with_capacity(T::VERSIONED_LEN); + item.serialize(&mut v).unwrap(); + v + }; + let checksum = Self::blake2b256(&[key.as_ref(), &body].concat()); + Self { item, checksum } + } + + /// Verifies the checksum for this entry under `key`. + /// + /// Returns `true` if and only if: + /// `self.checksum == blake2b256(encoded_key || item.serialize())`. + /// + /// # Key requirements + /// `key` must be the exact byte encoding used as the LMDB key for this record. + /// + /// # Usage + /// Callers should treat a checksum mismatch as a corruption or incompatibility signal and + /// return a hard error (or trigger a rebuild path), depending on context. + pub(crate) fn verify>(&self, key: K) -> bool { + let body = { + let mut v = Vec::with_capacity(T::VERSIONED_LEN); + self.item.serialize(&mut v).unwrap(); + v + }; + let candidate = Self::blake2b256(&[key.as_ref(), &body].concat()); + candidate == self.checksum + } + + /// Returns a reference to the inner record. + pub(crate) fn inner(&self) -> &T { + &self.item + } + + /// Computes a BLAKE2b-256 checksum over `data`. + /// + /// This is the hashing primitive used by both wrappers. The checksum is not keyed. + pub(crate) fn blake2b256(data: &[u8]) -> [u8; 32] { + let mut hasher = Blake2bVar::new(32).expect("Failed to create hasher"); + hasher.update(data); + let mut output = [0u8; 32]; + hasher + .finalize_variable(&mut output) + .expect("Failed to finalize hash"); + output + } +} + +/// Versioned on-disk encoding for fixed-length checksummed entries. +/// +/// Body layout (after the `StoredEntryFixed` version tag): +/// 1. `T::serialize()` bytes (fixed length: `T::VERSIONED_LEN`) +/// 2. 32-byte checksum +/// +/// Note: `T::serialize()` includes `T`’s own version tag and body. 
+impl ZainoVersionedSerde for StoredEntryFixed { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + self.item.serialize(&mut *w)?; + write_fixed_le::<32, _>(&mut *w, &self.checksum) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut body = vec![0u8; T::VERSIONED_LEN]; + r.read_exact(&mut body)?; + let item = T::deserialize(&body[..])?; + + let checksum = read_fixed_le::<32, _>(r)?; + Ok(Self { item, checksum }) + } +} + +/// `StoredEntryFixed` has a fixed encoded body length. +/// +/// Body length = `T::VERSIONED_LEN` + 32 bytes checksum. +impl FixedEncodedLen for StoredEntryFixed { + const ENCODED_LEN: usize = T::VERSIONED_LEN + 32; +} + +/// Variable-length checksummed database value wrapper. +/// +/// This wrapper is used for values whose serialized representation is not fixed-size. It stores: +/// - a versioned record `T` (encoded via [`ZainoVersionedSerde`]), +/// - a CompactSize length prefix for the serialized record, +/// - and a 32-byte BLAKE2b-256 checksum computed over `encoded_key || encoded_item`. +/// +/// The length prefix allows decoding to be bounded and avoids reading untrusted trailing bytes. +/// +/// ## Encoded format (conceptual) +/// +/// ┌────── byte 0 ───────┬────── CompactSize(len) ──────┬────── len bytes ──────┬─ 32 bytes ─┐ +/// │ StoredEntry version │ len = item.serialize().len() │ T::serialize() bytes │ checksum │ +/// └─────────────────────┴──────────────────────────────┴───────────────────────┴────────────┘ +/// +/// Where the checksum is: +/// `blake2b256(encoded_key || encoded_item_bytes)`. +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) struct StoredEntryVar { + /// The inner record stored in this entry. + pub(crate) item: T, + /// BLAKE2b-256 checksum of `encoded_key || encoded_item_bytes`. 
+ pub(crate) checksum: [u8; 32], +} + +impl StoredEntryVar { + /// Constructs a new checksummed entry for `item` under `key`. + /// + /// The checksum is computed as: + /// `blake2b256(encoded_key || item.serialize())`. + /// + /// # Key requirements + /// `key` must be the exact byte encoding used as the LMDB key for this record. + pub(crate) fn new>(key: K, item: T) -> Self { + let body = { + let mut v = Vec::new(); + item.serialize(&mut v).unwrap(); + v + }; + let checksum = Self::blake2b256(&[key.as_ref(), &body].concat()); + Self { item, checksum } + } + + /// Verifies the checksum for this entry under `key`. + /// + /// Returns `true` if and only if: + /// `self.checksum == blake2b256(encoded_key || item.serialize())`. + /// + /// # Key requirements + /// `key` must be the exact byte encoding used as the LMDB key for this record. + pub(crate) fn verify>(&self, key: K) -> bool { + let mut body = Vec::new(); + self.item.serialize(&mut body).unwrap(); + let candidate = Self::blake2b256(&[key.as_ref(), &body].concat()); + candidate == self.checksum + } + + /// Returns a reference to the inner record. + pub(crate) fn inner(&self) -> &T { + &self.item + } + + /// Computes a BLAKE2b-256 checksum over `data`. + pub(crate) fn blake2b256(data: &[u8]) -> [u8; 32] { + let mut hasher = Blake2bVar::new(32).expect("Failed to create hasher"); + hasher.update(data); + let mut output = [0u8; 32]; + hasher + .finalize_variable(&mut output) + .expect("Failed to finalize hash"); + output + } +} + +/// Versioned on-disk encoding for variable-length checksummed entries. +/// +/// Body layout (after the `StoredEntryVar` version tag): +/// 1. CompactSize `len` (the length of `T::serialize()` bytes) +/// 2. `len` bytes of `T::serialize()` (includes `T`’s own version tag and body) +/// 3. 32-byte checksum +/// +/// Implementations must ensure the length prefix matches the exact serialized record bytes written, +/// otherwise decoding will fail or misalign. 
+impl ZainoVersionedSerde for StoredEntryVar { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut body = Vec::new(); + self.item.serialize(&mut body)?; + + CompactSize::write(&mut *w, body.len())?; + w.write_all(&body)?; + write_fixed_le::<32, _>(&mut *w, &self.checksum) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let len = CompactSize::read(&mut *r)? as usize; + + let mut body = vec![0u8; len]; + r.read_exact(&mut body)?; + let item = T::deserialize(&body[..])?; + + let checksum = read_fixed_le::<32, _>(r)?; + Ok(Self { item, checksum }) + } +} diff --git a/zaino-state/src/chain_index/finalised_state/migrations.rs b/zaino-state/src/chain_index/finalised_state/migrations.rs new file mode 100644 index 000000000..f77c20d19 --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/migrations.rs @@ -0,0 +1,507 @@ +//! Database version migration framework and implementations +//! +//! This file defines how `ZainoDB` migrates on-disk databases between database versions. +//! +//! Migrations are orchestrated by [`MigrationManager`], which is invoked from `ZainoDB::spawn` when +//! `current_version < target_version`. +//! +//! The migration model is **stepwise**: +//! - each migration maps one concrete `DbVersion` to the next supported `DbVersion`, +//! - the manager iteratively applies steps until the target is reached. +//! +//! # Key concepts +//! +//! - [`Migration`] trait: +//! - declares `CURRENT_VERSION` and `TO_VERSION` constants, +//! - provides an async `migrate(...)` entry point. +//! +//! - [`MigrationManager`]: +//! - holds the router, config, current and target versions, and a `BlockchainSource`, +//! - repeatedly selects and runs the next migration via `get_migration()`. +//! +//! - [`MigrationStep`]: +//! - enum-based dispatch wrapper used by `MigrationManager` to select between multiple concrete +//! 
`Migration` implementations (Rust cannot return different `impl Trait` types from a `match`). +//! +//! - [`capability::MigrationStatus`]: +//! - stored in `DbMetadata` and used to resume work safely after shutdown. +//! +//! # How major migrations work in this codebase +//! +//! This module is designed around the router’s **primary + shadow** model: +//! +//! - The *primary* DB continues serving read/write traffic. +//! - A *shadow* DB (new schema version) is created and built in parallel. +//! - Once the shadow DB is fully built and marked complete, it is promoted to primary. +//! - The old primary DB is shut down and deleted from disk once all handles are dropped. +//! +//! This minimises downtime and allows migrations that require a full rebuild (rather than an +//! in-place rewrite) without duplicating the entire DB indefinitely. +//! +//! It ia also possible (if migration allows) to partially build the new database version, switch +//! specific functionality to the shadow, and partialy delete old the database version, rather than +//! building the new database in full. This enables developers to minimise transient disk usage +//! during migrations. +//! +//! # Notes on MigrationType +//! +//! Database versioning (and migration) is split into three distinct types, dependant of the severity +//! of changes being made to the database: +//! - Major versions / migrations: +//! - Major schema / capability changes, notably changes that require refetching the complete +//! blockchain from the backing validator / finaliser to build / update database indices. +//! - Migrations should follow the "primary" database / "shadow" database model. The legacy database +//! should be spawned as the "primary" and set to carry on serving data during migration. The new +//! database version is then spawned as the "shadow" and built in a background process. Once the +//! "shadow" is built to "primary" db tip height it is promoted to primary, taking over serving +//! 
data from the legacy database, the demoted database can then be safely removed from disk. It is +//! also possible to partially build the new database version , promote specific database capability, +//! and delete specific tables from the legacy database, reducing transient disk usage. +//! - Minor versions / migrations: +//! - Updates involving minor schema / capability changes, notably changes that can be rebuilt in place +//! (changes that do not require fetching new data from the backing validator / finaliser) or that can +//! rely on updates to the versioned serialisation / deserialisation of database structures. +//! - Migrations for minor patch bumps can follow several paths. If the database table being updated +//! holds variable length items, and the actual data being held is not changed (only format changes +//! being applied) then it may be possible to rely on serialisation / deserialisation updates to the +//! items being chenged, with the database table holding a mix of serialisation versions. However, +//! if the table being updated is of fixed length items, or the actual data held is being updated, +//! then it will be necessary to rebuild that table in full, possibly requiring database downtime for +//! the migration. Since this only involves moving data already held in the database (rather than +//! fetching new data from the backing validator) migration should be quick and short downtimes are +//! accepted. +//! - Patch versions / migrations: +//! - Changes to database code that do not touch the database schema, these include bug fixes, +//! performance improvements etc. +//! - Migrations for patch updates only need to handle updating the stored DbMetadata singleton. +//! +//! # Development: adding a new migration step +//! +//! 1. Introduce a new `struct MigrationX_Y_ZToA_B_C;` and implement `Migration`. +//! 2. Add a new `MigrationStep` variant and register it in `MigrationManager::get_migration()` by +//! matching on the *current* version. 
+//! 3. Ensure the migration is: +//! - deterministic, +//! - resumable (use `DbMetadata::migration_status` and/or shadow tip), +//! - crash-safe (never leaves a partially promoted DB). +//! 4. Add tests/fixtures for: +//! - starting from the old version, +//! - resuming mid-build if applicable, +//! - validating the promoted DB serves required capabilities. +//! +//! # Implemented migrations +//! +//! ## v0.0.0 → v1.0.0 +//! +//! `Migration0_0_0To1_0_0` performs a **full shadow rebuild from genesis**. +//! +//! Rationale (as enforced by code/comments): +//! - The legacy v0 DB is a lightwallet-specific store that only builds compact blocks from Sapling +//! activation onwards. +//! - v1 requires data from genesis (notably for transparent address history indices), therefore a +//! partial “continue from Sapling” build is insufficient. +//! +//! Mechanics: +//! - Spawn v1 as a shadow backend. +//! - Determine the current shadow tip (to resume if interrupted). +//! - Fetch blocks and commitment tree roots from the `BlockchainSource` starting at either genesis +//! or `shadow_tip + 1`, building `BlockMetadata` and `IndexedBlock`. +//! - Keep building until the shadow catches up to the primary tip (looping because the primary can +//! advance during the build). +//! - Mark `migration_status = Complete` in shadow metadata. +//! - Promote shadow to primary via `router.promote_shadow()`. +//! - Delete the old v0 directory asynchronously once all strong references are dropped. 
+ +use super::{ + capability::{ + BlockCoreExt, Capability, DbCore as _, DbRead, DbVersion, DbWrite, MigrationStatus, + }, + db::DbBackend, + router::Router, +}; + +use crate::{ + chain_index::{source::BlockchainSource, types::GENESIS_HEIGHT}, + config::BlockCacheConfig, + error::FinalisedStateError, + BlockHash, BlockMetadata, BlockWithMetadata, ChainWork, Height, IndexedBlock, +}; + +use zebra_chain::parameters::NetworkKind; + +use async_trait::async_trait; +use std::sync::Arc; +use tracing::info; + +/// Broad categorisation of migration severity. +/// +/// This enum exists as a design aid to communicate intent and constraints: +/// - **Patch**: code-only changes; schema is unchanged; typically only `DbMetadata` needs updating. +/// - **Minor**: compatible schema / encoding evolution; may require in-place rebuilds of selected tables. +/// - **Major**: capability or schema changes that require rebuilding indices from the backing validator, +/// typically using the router’s primary/shadow model. +/// +/// Note: this enum is not currently used to dispatch behaviour in this file; concrete steps are +/// selected by [`MigrationManager::get_migration`]. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum MigrationType { + /// Patch-level changes: no schema change; metadata updates only. + Patch, + + /// Minor-level changes: compatible schema/encoding changes; may require in-place table rebuild. + Minor, + + /// Major-level changes: new schema/capabilities; usually requires shadow rebuild and promotion. + Major, +} + +/// A single migration step from one concrete on-disk version to the next. +/// +/// Migrations are designed to be **composable** and **stepwise**: each implementation should map a +/// specific `CURRENT_VERSION` to a specific `TO_VERSION`. The manager then iterates until the target +/// version is reached. +/// +/// ## Resumability and crash-safety +/// Migration implementations are expected to be resumable where practical. 
In this codebase, major +/// migrations typically use: +/// - a shadow database that can be incrementally built, +/// - the shadow tip height as an implicit progress marker, +/// - and [`MigrationStatus`] in `DbMetadata` as an explicit progress marker. +/// +/// Implementations must never promote a partially-correct database to primary. +#[async_trait] +pub trait Migration { + /// The exact on-disk version this step migrates *from*. + const CURRENT_VERSION: DbVersion; + + /// The exact on-disk version this step migrates *to*. + const TO_VERSION: DbVersion; + + /// Returns the version this step migrates *from*. + fn current_version(&self) -> DbVersion { + Self::CURRENT_VERSION + } + + /// Returns the version this step migrates *to*. + fn to_version(&self) -> DbVersion { + Self::TO_VERSION + } + + /// Performs the migration step. + /// + /// Implementations may: + /// - spawn a shadow backend, + /// - build or rebuild indices, + /// - update metadata and migration status, + /// - and promote the shadow backend to primary via the router. + /// + /// # Errors + /// Returns `FinalisedStateError` if the migration cannot proceed safely or deterministically. + async fn migrate( + &self, + router: Arc, + cfg: BlockCacheConfig, + source: T, + ) -> Result<(), FinalisedStateError>; +} + +/// Orchestrates a sequence of migration steps until `target_version` is reached. +/// +/// `MigrationManager` is constructed by `ZainoDB::spawn` when it detects that the on-disk database +/// is older than the configured target version. +/// +/// The manager: +/// - selects the next step based on the current version, +/// - runs it, +/// - then advances `current_version` to the step’s `TO_VERSION` and repeats. +/// +/// The router is shared so that migration steps can use the primary/shadow routing model. +pub(super) struct MigrationManager { + /// Router controlling primary/shadow backends and capability routing. 
+ pub(super) router: Arc, + + /// Block-cache configuration (paths, network, configured target DB version, etc.). + pub(super) cfg: BlockCacheConfig, + + /// The on-disk version currently detected/opened. + pub(super) current_version: DbVersion, + + /// The configured target version to migrate to. + pub(super) target_version: DbVersion, + + /// Backing data source used to fetch blocks / tree roots for rebuild-style migrations. + pub(super) source: T, +} + +impl MigrationManager { + /// Iteratively performs each migration step from current version to target version. + /// + /// The manager applies steps in order, where each step maps one specific `DbVersion` to the next. + /// The loop terminates once `current_version >= target_version`. + /// + /// # Errors + /// Returns an error if a migration step is missing for the current version, or if any migration + /// step fails. + pub(super) async fn migrate(&mut self) -> Result<(), FinalisedStateError> { + while self.current_version < self.target_version { + let migration = self.get_migration()?; + migration + .migrate( + Arc::clone(&self.router), + self.cfg.clone(), + self.source.clone(), + ) + .await?; + self.current_version = migration.to_version::(); + } + + Ok(()) + } + + /// Returns the next migration step for the current on-disk version. + /// + /// This must be updated whenever a new supported DB version is introduced. The match is strict: + /// if a step is missing, migration is aborted rather than attempting an unsafe fallback. + fn get_migration(&self) -> Result { + match ( + self.current_version.major, + self.current_version.minor, + self.current_version.patch, + ) { + (0, 0, 0) => Ok(MigrationStep::Migration0_0_0To1_0_0(Migration0_0_0To1_0_0)), + (_, _, _) => Err(FinalisedStateError::Custom(format!( + "Missing migration from version {}", + self.current_version + ))), + } + } +} + +/// Concrete migration step selector. 
+/// +/// Rust cannot return `impl Migration` from a `match` that selects between multiple concrete +/// migration types. `MigrationStep` is the enum-based dispatch wrapper used by [`MigrationManager`] +/// to select a step and call `migrate(...)`, and to read the step’s `TO_VERSION`. +enum MigrationStep { + Migration0_0_0To1_0_0(Migration0_0_0To1_0_0), +} + +impl MigrationStep { + fn to_version(&self) -> DbVersion { + match self { + MigrationStep::Migration0_0_0To1_0_0(_step) => { + >::TO_VERSION + } + } + } + + async fn migrate( + &self, + router: Arc, + cfg: BlockCacheConfig, + source: T, + ) -> Result<(), FinalisedStateError> { + match self { + MigrationStep::Migration0_0_0To1_0_0(step) => step.migrate(router, cfg, source).await, + } + } +} + +// ***** Migrations ***** + +/// Major migration: v0.0.0 → v1.0.0. +/// +/// This migration performs a shadow rebuild of the v1 database from genesis, then promotes the +/// completed shadow to primary and schedules deletion of the old v0 database directory once all +/// handles are dropped. +/// +/// See the module-level documentation for the detailed rationale and mechanics. +struct Migration0_0_0To1_0_0; + +#[async_trait] +impl Migration for Migration0_0_0To1_0_0 { + const CURRENT_VERSION: DbVersion = DbVersion { + major: 0, + minor: 0, + patch: 0, + }; + const TO_VERSION: DbVersion = DbVersion { + major: 1, + minor: 0, + patch: 0, + }; + + /// Performs the v0 → v1 major migration using the router’s primary/shadow model. + /// + /// The legacy v0 database only supports compact block data from Sapling activation onwards. + /// DbV1 requires a complete rebuild from genesis to correctly build indices (notably transparent + /// address history). For this reason, this migration does not attempt partial incremental builds + /// from Sapling; it rebuilds v1 in full in a shadow backend, then promotes it. 
+ /// + /// ## Resumption behaviour + /// If the process is shut down mid-migration: + /// - the v1 shadow DB directory may already exist, + /// - shadow tip height is used to resume from `shadow_tip + 1`, + /// - and `MigrationStatus` is used as a coarse progress marker. + /// + /// Promotion occurs only after the v1 build loop has caught up to the primary tip and the shadow + /// metadata is marked `Complete`. + async fn migrate( + &self, + router: Arc, + cfg: BlockCacheConfig, + source: T, + ) -> Result<(), FinalisedStateError> { + info!("Starting v0.0.0 to v1.0.0 migration."); + // Open V1 as shadow + let shadow = Arc::new(DbBackend::spawn_v1(&cfg).await?); + router.set_shadow(Arc::clone(&shadow), Capability::empty()); + + let migration_status = shadow.get_metadata().await?.migration_status(); + + match migration_status { + MigrationStatus::Empty + | MigrationStatus::PartialBuidInProgress + | MigrationStatus::PartialBuildComplete + | MigrationStatus::FinalBuildInProgress => { + // build shadow to primary_db_height, + // start from shadow_db_height in case database was shutdown mid-migration. + let mut parent_chain_work = ChainWork::from_u256(0.into()); + + let shadow_db_height_opt = shadow.db_height().await?; + let mut shadow_db_height = shadow_db_height_opt.unwrap_or(GENESIS_HEIGHT); + let mut build_start_height = if shadow_db_height_opt.is_some() { + parent_chain_work = *shadow + .get_block_header(shadow_db_height) + .await? 
+ .index() + .chainwork(); + + shadow_db_height + 1 + } else { + shadow_db_height + }; + let mut primary_db_height = router.db_height().await?.unwrap_or(GENESIS_HEIGHT); + + info!( + "Starting shadow database build, current database tips: v0:{} v1:{}", + primary_db_height, shadow_db_height + ); + + loop { + if shadow_db_height >= primary_db_height { + break; + } + + for height in (build_start_height.0)..=primary_db_height.0 { + let block = source + .get_block(zebra_state::HashOrHeight::Height( + zebra_chain::block::Height(height), + )) + .await? + .ok_or_else(|| { + FinalisedStateError::Custom(format!( + "block not found at height {height}" + )) + })?; + let hash = BlockHash::from(block.hash().0); + + let (sapling_root_data, orchard_root_data) = + source.get_commitment_tree_roots(hash).await?; + let (sapling_root, sapling_root_size) = + sapling_root_data.ok_or_else(|| { + FinalisedStateError::Custom(format!( + "sapling commitment tree data missing for block {hash:?} at height {height}" + )) + })?; + let (orchard_root, orchard_root_size) = + orchard_root_data.ok_or_else(|| { + FinalisedStateError::Custom(format!( + "orchard commitment tree data missing for block {hash:?} at height {height}" + )) + })?; + + let metadata = BlockMetadata::new( + sapling_root, + sapling_root_size as u32, + orchard_root, + orchard_root_size as u32, + parent_chain_work, + cfg.network.to_zebra_network(), + ); + + let block_with_metadata = BlockWithMetadata::new(block.as_ref(), metadata); + let chain_block = + IndexedBlock::try_from(block_with_metadata).map_err(|_| { + FinalisedStateError::Custom( + "Failed to build chain block".to_string(), + ) + })?; + + parent_chain_work = *chain_block.chainwork(); + + shadow.write_block(chain_block).await?; + } + + std::thread::sleep(std::time::Duration::from_millis(100)); + + shadow_db_height = shadow.db_height().await?.unwrap_or(Height(0)); + build_start_height = shadow_db_height + 1; + primary_db_height = 
router.db_height().await?.unwrap_or(Height(0)); + } + + // update db metadata migration status + let mut metadata = shadow.get_metadata().await?; + metadata.migration_status = MigrationStatus::Complete; + shadow.update_metadata(metadata).await?; + + info!("v1 database build complete."); + } + + MigrationStatus::Complete => { + // Migration complete, continue with DbV0 deletion. + } + } + + info!("promoting v1 database to primary."); + + // Promote V1 to primary + let db_v0 = router.promote_shadow()?; + + // Delete V0 + tokio::spawn(async move { + // Wait until all Arc clones are dropped + while Arc::strong_count(&db_v0) > 1 { + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + + // shutdown database + if let Err(e) = db_v0.shutdown().await { + tracing::warn!("Old primary shutdown failed: {e}"); + } + + // Now safe to delete old database files + let db_path_dir = match cfg.network.to_zebra_network().kind() { + NetworkKind::Mainnet => "live", + NetworkKind::Testnet => "test", + NetworkKind::Regtest => "local", + }; + let db_path = cfg.storage.database.path.join(db_path_dir); + + info!("Wiping v0 database from disk."); + + match tokio::fs::remove_dir_all(&db_path).await { + Ok(_) => tracing::info!("Deleted old database at {}", db_path.display()), + Err(e) => tracing::error!( + "Failed to delete old database at {}: {}", + db_path.display(), + e + ), + } + }); + + info!("v0.0.0 to v1.0.0 migration complete."); + + Ok(()) + } +} diff --git a/zaino-state/src/chain_index/finalised_state/reader.rs b/zaino-state/src/chain_index/finalised_state/reader.rs new file mode 100644 index 000000000..ae7851c39 --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/reader.rs @@ -0,0 +1,493 @@ +//! Read-only view onto a running `ZainoDB` (DbReader) +//! +//! This file defines [`DbReader`], the **read-only** interface that should be used for *all* chain +//! data fetches from the finalised database. +//! +//! `DbReader` exists for two reasons: +//! +//! 1. 
**API hygiene:** it narrows the surface to reads and discourages accidental use of write APIs +//! from query paths. +//! 2. **Migration safety:** it routes each call through [`Router`](super::router::Router) using a +//! [`CapabilityRequest`](crate::chain_index::finalised_state::capability::CapabilityRequest), +//! ensuring the underlying backend supports the requested feature (especially important during +//! major migrations where different DB versions may coexist). +//! +//! # How routing works +//! +//! Each method in `DbReader` requests a specific capability (e.g. `BlockCoreExt`, `TransparentHistExt`). +//! Internally, `DbReader::db(cap)` calls `ZainoDB::backend_for_cap(cap)`, which consults the router. +//! +//! - If the capability is currently served by the shadow DB (shadow mask contains the bit), the +//! query runs against shadow. +//! - Otherwise, it runs against primary if primary supports it. +//! - If neither backend supports it, the call returns `FinalisedStateError::FeatureUnavailable(...)`. +//! +//! # Version constraints and error handling +//! +//! Some queries are only available in newer DB versions (notably most v1 extension traits). +//! Callers should either: +//! - require a minimum DB version (via configuration and/or metadata checks), or +//! - handle `FeatureUnavailable` errors gracefully when operating against legacy databases. +//! +//! # Development: adding a new read method +//! +//! 1. Decide whether the new query belongs under an existing extension trait or needs a new one. +//! 2. If a new capability is required: +//! - add a new `Capability` bit and `CapabilityRequest` variant in `capability.rs`, +//! - implement the corresponding extension trait for supported DB versions, +//! - delegate through `DbBackend` and route via the router. +//! 3. Add the new method on `DbReader` that requests the corresponding `CapabilityRequest` and calls +//! into the backend. +//! +//! # Usage pattern +//! +//! 
`DbReader` is created from an `Arc` using [`ZainoDB::to_reader`](super::ZainoDB::to_reader). +//! Prefer passing `DbReader` through query layers rather than passing `ZainoDB` directly. + +use zaino_proto::proto::utils::PoolTypeFilter; + +use crate::{ + chain_index::{finalised_state::capability::CapabilityRequest, types::TransactionHash}, + error::FinalisedStateError, + BlockHash, BlockHeaderData, CommitmentTreeData, CompactBlockStream, Height, IndexedBlock, + OrchardCompactTx, OrchardTxList, SaplingCompactTx, SaplingTxList, StatusType, + TransparentCompactTx, TransparentTxList, TxLocation, TxidList, +}; + +#[cfg(feature = "transparent_address_history_experimental")] +use crate::{ + chain_index::{finalised_state::capability::TransparentHistExt, types::AddrEventBytes}, + AddrScript, Outpoint, +}; + +use super::{ + capability::{ + BlockCoreExt, BlockShieldedExt, BlockTransparentExt, CompactBlockExt, DbMetadata, + IndexedBlockExt, + }, + db::DbBackend, + ZainoDB, +}; + +use std::sync::Arc; + +#[derive(Clone, Debug)] +/// `DbReader` is the preferred entry point for serving chain queries: +/// - it exposes only read APIs, +/// - it routes each operation via [`CapabilityRequest`] to ensure the selected backend supports the +/// requested feature, +/// - and it remains stable across major migrations because routing is handled internally by the +/// [`Router`](super::router::Router). +/// +/// ## Cloning and sharing +/// `DbReader` is cheap to clone; clones share the underlying `Arc`. +pub(crate) struct DbReader { + /// Shared handle to the running `ZainoDB` instance. + pub(crate) inner: Arc, +} + +impl DbReader { + /// Resolves the backend that should serve `cap` right now. + /// + /// This is the single routing choke-point for all `DbReader` methods. It delegates to + /// `ZainoDB::backend_for_cap`, which consults the router’s primary/shadow masks. 
+ /// + /// # Errors + /// Returns `FinalisedStateError::FeatureUnavailable(...)` if no currently-open backend + /// advertises the requested capability. + #[inline(always)] + fn db(&self, cap: CapabilityRequest) -> Result, FinalisedStateError> { + self.inner.backend_for_cap(cap) + } + + // ***** DB Core Read ***** + + /// Returns the current runtime status of the serving database. + /// + /// This reflects the status of the backend currently serving `READ_CORE`, which is the minimum + /// capability required for basic chain queries. + pub(crate) fn status(&self) -> StatusType { + self.inner.status() + } + + /// Returns the greatest block `Height` stored in the database, or `None` if the DB is empty. + pub(crate) async fn db_height(&self) -> Result, FinalisedStateError> { + self.inner.db_height().await + } + + /// Fetches the persisted database metadata singleton (`DbMetadata`). + pub(crate) async fn get_metadata(&self) -> Result { + self.inner.get_metadata().await + } + + /// Waits until the database reports [`StatusType::Ready`]. + /// + /// This is a convenience wrapper around `ZainoDB::wait_until_ready` and should typically be + /// awaited once during startup before serving queries. + pub(crate) async fn wait_until_ready(&self) { + self.inner.wait_until_ready().await + } + + /// Fetches the main-chain height for a given block hash, if present in finalised state. + pub(crate) async fn get_block_height( + &self, + hash: BlockHash, + ) -> Result, FinalisedStateError> { + self.inner.get_block_height(hash).await + } + + /// Fetches the main-chain block hash for a given block height, if present in finalised state. + pub(crate) async fn get_block_hash( + &self, + height: Height, + ) -> Result, FinalisedStateError> { + self.inner.get_block_hash(height).await + } + + // ***** Block Core Ext ***** + + /// Fetch the TxLocation for the given txid, transaction data is indexed by TxLocation internally. 
+ pub(crate) async fn get_tx_location( + &self, + txid: &TransactionHash, + ) -> Result, FinalisedStateError> { + self.db(CapabilityRequest::BlockCoreExt)? + .get_tx_location(txid) + .await + } + + /// Fetch block header data by height. + pub(crate) async fn get_block_header( + &self, + height: Height, + ) -> Result { + self.db(CapabilityRequest::BlockCoreExt)? + .get_block_header(height) + .await + } + + /// Fetches block headers for the given height range. + pub(crate) async fn get_block_range_headers( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + self.db(CapabilityRequest::BlockCoreExt)? + .get_block_range_headers(start, end) + .await + } + + /// Fetch the txid bytes for a given TxLocation. + pub(crate) async fn get_txid( + &self, + tx_location: TxLocation, + ) -> Result { + self.db(CapabilityRequest::BlockCoreExt)? + .get_txid(tx_location) + .await + } + + /// Fetch block txids by height. + pub(crate) async fn get_block_txids( + &self, + height: Height, + ) -> Result { + self.db(CapabilityRequest::BlockCoreExt)? + .get_block_txids(height) + .await + } + + /// Fetches block txids for the given height range. + pub(crate) async fn get_block_range_txids( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + self.db(CapabilityRequest::BlockCoreExt)? + .get_block_range_txids(start, end) + .await + } + + // ***** Block Transparent Ext ***** + + /// Fetch the serialized TransparentCompactTx for the given TxLocation, if present. + pub(crate) async fn get_transparent( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError> { + self.db(CapabilityRequest::BlockTransparentExt)? + .get_transparent(tx_location) + .await + } + + /// Fetch block transparent transaction data by height. + pub(crate) async fn get_block_transparent( + &self, + height: Height, + ) -> Result { + self.db(CapabilityRequest::BlockTransparentExt)? 
+ .get_block_transparent(height) + .await + } + + /// Fetches block transparent tx data for the given height range. + pub(crate) async fn get_block_range_transparent( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + self.db(CapabilityRequest::BlockTransparentExt)? + .get_block_range_transparent(start, end) + .await + } + + // ***** Block shielded Ext ***** + + /// Fetch the serialized SaplingCompactTx for the given TxLocation, if present. + pub(crate) async fn get_sapling( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError> { + self.db(CapabilityRequest::BlockShieldedExt)? + .get_sapling(tx_location) + .await + } + + /// Fetch block sapling transaction data by height. + pub(crate) async fn get_block_sapling( + &self, + height: Height, + ) -> Result { + self.db(CapabilityRequest::BlockShieldedExt)? + .get_block_sapling(height) + .await + } + + /// Fetches block sapling tx data for the given height range. + pub(crate) async fn get_block_range_sapling( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + self.db(CapabilityRequest::BlockShieldedExt)? + .get_block_range_sapling(start, end) + .await + } + + /// Fetch the serialized OrchardCompactTx for the given TxLocation, if present. + pub(crate) async fn get_orchard( + &self, + tx_location: TxLocation, + ) -> Result, FinalisedStateError> { + self.db(CapabilityRequest::BlockShieldedExt)? + .get_orchard(tx_location) + .await + } + + /// Fetch block orchard transaction data by height. + pub(crate) async fn get_block_orchard( + &self, + height: Height, + ) -> Result { + self.db(CapabilityRequest::BlockShieldedExt)? + .get_block_orchard(height) + .await + } + + /// Fetches block orchard tx data for the given height range. + pub(crate) async fn get_block_range_orchard( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + self.db(CapabilityRequest::BlockShieldedExt)? 
+ .get_block_range_orchard(start, end) + .await + } + + /// Fetch block commitment tree data by height. + pub(crate) async fn get_block_commitment_tree_data( + &self, + height: Height, + ) -> Result { + self.db(CapabilityRequest::BlockShieldedExt)? + .get_block_commitment_tree_data(height) + .await + } + + /// Fetches block commitment tree data for the given height range. + pub(crate) async fn get_block_range_commitment_tree_data( + &self, + start: Height, + end: Height, + ) -> Result, FinalisedStateError> { + self.db(CapabilityRequest::BlockShieldedExt)? + .get_block_range_commitment_tree_data(start, end) + .await + } + + // ***** Transparent Hist Ext ***** + + /// Fetch all address history records for a given transparent address. + /// + /// Returns: + /// - `Ok(Some(records))` if one or more valid records exist, + /// - `Ok(None)` if no records exist (not an error), + /// - `Err(...)` if any decoding or DB error occurs. + #[cfg(feature = "transparent_address_history_experimental")] + pub(crate) async fn addr_records( + &self, + addr_script: AddrScript, + ) -> Result>, FinalisedStateError> { + self.db(CapabilityRequest::TransparentHistExt)? + .addr_records(addr_script) + .await + } + + /// Fetch all address history records for a given address and TxLocation. + /// + /// Returns: + /// - `Ok(Some(records))` if one or more matching records are found at that index, + /// - `Ok(None)` if no matching records exist (not an error), + /// - `Err(...)` on decode or DB failure. + #[cfg(feature = "transparent_address_history_experimental")] + pub(crate) async fn addr_and_index_records( + &self, + addr_script: AddrScript, + tx_location: TxLocation, + ) -> Result>, FinalisedStateError> { + self.db(CapabilityRequest::TransparentHistExt)? + .addr_and_index_records(addr_script, tx_location) + .await + } + + /// Fetch all distinct `TxLocation` values for `addr_script` within the + /// height range `[start_height, end_height]` (inclusive). 
+ /// + /// Returns: + /// - `Ok(Some(vec))` if one or more matching records are found, + /// - `Ok(None)` if no matches found (not an error), + /// - `Err(...)` on decode or DB failure. + #[cfg(feature = "transparent_address_history_experimental")] + pub(crate) async fn addr_tx_locations_by_range( + &self, + addr_script: AddrScript, + start_height: Height, + end_height: Height, + ) -> Result>, FinalisedStateError> { + self.db(CapabilityRequest::TransparentHistExt)? + .addr_tx_locations_by_range(addr_script, start_height, end_height) + .await + } + + /// Fetch all UTXOs (unspent mined outputs) for `addr_script` within the + /// height range `[start_height, end_height]` (inclusive). + /// + /// Each entry is `(TxLocation, vout, value)`. + /// + /// Returns: + /// - `Ok(Some(vec))` if one or more UTXOs are found, + /// - `Ok(None)` if none found (not an error), + /// - `Err(...)` on decode or DB failure. + #[cfg(feature = "transparent_address_history_experimental")] + pub(crate) async fn addr_utxos_by_range( + &self, + addr_script: AddrScript, + start_height: Height, + end_height: Height, + ) -> Result>, FinalisedStateError> { + self.db(CapabilityRequest::TransparentHistExt)? + .addr_utxos_by_range(addr_script, start_height, end_height) + .await + } + + /// Computes the transparent balance change for `addr_script` over the + /// height range `[start_height, end_height]` (inclusive). + /// + /// Includes: + /// - `+value` for mined outputs + /// - `−value` for spent inputs + /// + /// Returns the signed net value as `i64`, or error on failure. + #[cfg(feature = "transparent_address_history_experimental")] + pub(crate) async fn addr_balance_by_range( + &self, + addr_script: AddrScript, + start_height: Height, + end_height: Height, + ) -> Result { + self.db(CapabilityRequest::TransparentHistExt)? + .addr_balance_by_range(addr_script, start_height, end_height) + .await + } + + /// Fetch the `TxLocation` that spent a given outpoint, if any. 
+ /// + /// Returns: + /// - `Ok(Some(TxLocation))` if the outpoint is spent. + /// - `Ok(None)` if no entry exists (not spent or not known). + /// - `Err(...)` on deserialization or DB error. + #[cfg(feature = "transparent_address_history_experimental")] + pub(crate) async fn get_outpoint_spender( + &self, + outpoint: Outpoint, + ) -> Result, FinalisedStateError> { + self.db(CapabilityRequest::TransparentHistExt)? + .get_outpoint_spender(outpoint) + .await + } + + /// Fetch the `TxLocation` entries for a batch of outpoints. + /// + /// For each input: + /// - Returns `Some(TxLocation)` if spent, + /// - `None` if not found, + /// - or returns `Err` immediately if any DB or decode error occurs. + #[cfg(feature = "transparent_address_history_experimental")] + pub(crate) async fn get_outpoint_spenders( + &self, + outpoints: Vec, + ) -> Result>, FinalisedStateError> { + self.db(CapabilityRequest::TransparentHistExt)? + .get_outpoint_spenders(outpoints) + .await + } + + // ***** IndexedBlock Ext ***** + + /// Returns the IndexedBlock for the given Height. + /// + /// TODO: Add separate range fetch method! + pub(crate) async fn get_chain_block( + &self, + height: Height, + ) -> Result, FinalisedStateError> { + self.db(CapabilityRequest::IndexedBlockExt)? + .get_chain_block(height) + .await + } + + // ***** CompactBlock Ext ***** + + /// Returns the CompactBlock for the given Height. + pub(crate) async fn get_compact_block( + &self, + height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + self.db(CapabilityRequest::CompactBlockExt)? + .get_compact_block(height, pool_types) + .await + } + + pub(crate) async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + self.db(CapabilityRequest::CompactBlockExt)? 
+ .get_compact_block_stream(start_height, end_height, pool_types) + .await + } +} diff --git a/zaino-state/src/chain_index/finalised_state/router.rs b/zaino-state/src/chain_index/finalised_state/router.rs new file mode 100644 index 000000000..a9f9c8472 --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/router.rs @@ -0,0 +1,418 @@ +//! Capability-based database router (primary + shadow) +//! +//! This file implements [`Router`], which allows `ZainoDB` to selectively route operations to one of +//! two database backends: +//! - a **primary** (active) DB, and +//! - an optional **shadow** DB used during major migrations. +//! +//! The router is designed to support incremental and low-downtime migrations by splitting the DB +//! feature set into capability groups. Each capability group can be served by either backend, +//! controlled by atomic bitmasks. +//! +//! # Why a router exists +//! +//! Major schema upgrades are often most safely implemented as a rebuild into a new DB rather than an +//! in-place rewrite. The router enables that by allowing the system to: +//! - keep serving requests from the old DB while building the new one, +//! - optionally move specific read capabilities to the shadow DB once they are correct there, +//! - then atomically promote the shadow DB to primary at the end. +//! +//! # Concurrency and atomicity model +//! +//! The router uses `ArcSwap` / `ArcSwapOption` for lock-free backend swapping and `AtomicU32` masks +//! for capability routing. +//! +//! - Backend selection (`backend(...)`) is wait-free and based on the current masks. +//! - Promotion (`promote_shadow`) swaps the primary Arc atomically; existing in-flight operations +//! remain valid because they hold an `Arc`. +//! +//! Memory ordering is explicit (`Acquire`/`Release`/`AcqRel`) to ensure mask updates are observed +//! consistently relative to backend pointer updates. +//! +//! # Capability routing semantics +//! +//! `Router::backend(req)` resolves as: +//! 1. 
If `shadow_mask` contains the requested bit and shadow exists → return shadow. +//! 2. Else if `primary_mask` contains the requested bit → return primary. +//! 3. Else → return `FinalisedStateError::FeatureUnavailable`. +//! +//! # Shadow lifecycle (migration-only API) +//! +//! The following methods are intended to be called **only** by the migration manager: +//! - `set_shadow(...)` +//! - `extend_shadow_caps(...)` +//! - `promote_shadow()` +//! +//! Promotion performs: +//! - shadow → primary swap, +//! - resets shadow and shadow mask, +//! - updates the primary mask from the promoted backend’s declared capabilities, +//! - returns the old primary backend so the migration can shut it down and delete its files safely. +//! +//! # Trait impls +//! +//! `Router` implements the core DB traits (`DbCore`, `DbRead`, `DbWrite`) by routing READ_CORE/WRITE_CORE +//! to whichever backend currently serves those capabilities. +//! +//! # Development notes +//! +//! - If you introduce a new capability bit, ensure it is: +//! - added to `CapabilityRequest`, +//! - implemented by the relevant DB version(s), +//! - and considered in migration routing policy (whether it can move to shadow incrementally). +//! +//! - When implementing incremental migrations (moving caps before final promotion), ensure the shadow +//! backend is kept consistent with the primary for those capabilities (or restrict such caps to +//! read-only queries that can tolerate lag with explicit semantics). + +use super::{ + capability::{Capability, DbCore, DbMetadata, DbRead, DbWrite}, + db::DbBackend, +}; + +use crate::{ + chain_index::finalised_state::capability::CapabilityRequest, error::FinalisedStateError, + BlockHash, Height, IndexedBlock, StatusType, +}; + +use arc_swap::{ArcSwap, ArcSwapOption}; +use async_trait::async_trait; +use std::sync::{ + atomic::{AtomicU32, Ordering}, + Arc, +}; + +#[derive(Debug)] +/// Capability-based database router. 
+/// +/// `Router` is the internal dispatch layer used by `ZainoDB` to route operations to either: +/// - a **primary** database backend (the active DB), or +/// - an optional **shadow** backend used during major version migrations. +/// +/// Routing is driven by per-backend **capability bitmasks**: +/// - If a requested capability bit is set in the shadow mask and a shadow backend exists, the call +/// is routed to shadow. +/// - Otherwise, if the bit is set in the primary mask, the call is routed to primary. +/// - Otherwise, the feature is reported as unavailable. +/// +/// ## Concurrency model +/// - Backend pointers are stored using `ArcSwap` / `ArcSwapOption` to allow atomic, lock-free swaps. +/// - Capability masks are stored in `AtomicU32` and read using `Acquire` ordering in the hot path. +/// - Promoting shadow to primary is atomic and safe for in-flight calls because callers hold +/// `Arc` clones. +/// +/// ## Intended usage +/// The shadow-related APIs (`set_shadow`, `extend_shadow_caps`, `promote_shadow`) are intended to be +/// used only by the migration manager to support low-downtime rebuild-style migrations. +pub(crate) struct Router { + /// Primary active database backend. + /// + /// This is the default backend used for any capability bit that is not explicitly routed to the + /// shadow backend via [`Router::shadow_mask`]. + /// + /// Stored behind [`ArcSwap`] so it can be replaced atomically during promotion without locking. + primary: ArcSwap, + + /// Shadow database backend (optional). + /// + /// During a major migration, a new-version backend is built and installed here. Individual + /// capability groups can be routed to the shadow by setting bits in [`Router::shadow_mask`]. + /// + /// Outside of migrations this should remain `None`. + shadow: ArcSwapOption, + + /// Capability mask for the primary backend. + /// + /// A bit being set means “this capability may be served by the primary backend”. 
+ /// + /// The mask is initialized from `primary.capability()` and can be restricted/extended during + /// migrations to ensure that requests are only routed to backends that can satisfy them. + primary_mask: AtomicU32, + + /// Capability mask for the shadow backend. + /// + /// A bit being set means “this capability should be served by the shadow backend (if present)”. + /// + /// Routing precedence is: + /// 1. shadow if the bit is set and shadow exists, + /// 2. else primary if the bit is set, + /// 3. else feature unavailable. + shadow_mask: AtomicU32, +} + +/// Database version router. +/// +/// Routes database capabilities to either a primary backend or (during major migrations) an optional +/// shadow backend. +/// +/// ## Routing guarantees +/// - The router only returns a backend if the corresponding capability bit is enabled in the +/// backend’s active mask. +/// - Backend selection is lock-free and safe for concurrent use. +/// - Promotion swaps the primary backend atomically; in-flight operations remain valid because they +/// hold their own `Arc` clones. +impl Router { + // ***** Router creation ***** + + /// Creates a new [`Router`] with `primary` installed as the active backend. + /// + /// The primary capability mask is initialized from `primary.capability()`. The shadow backend is + /// initially unset and must only be configured during major migrations. + /// + /// ## Notes + /// - The router does not validate that `primary.capability()` matches the masks that may later be + /// set by migration code; migration orchestration must keep the masks conservative. + pub(crate) fn new(primary: Arc) -> Self { + let cap = primary.capability(); + Self { + primary: ArcSwap::from(primary), + shadow: ArcSwapOption::empty(), + primary_mask: AtomicU32::new(cap.bits()), + shadow_mask: AtomicU32::new(0), + } + } + + // ***** Capability router ***** + + /// Returns the database backend that should serve `cap`. + /// + /// Routing order: + /// 1. 
If the shadow mask contains the requested bit *and* a shadow backend exists, return shadow. + /// 2. Else if the primary mask contains the requested bit, return primary. + /// 3. Otherwise return [`FinalisedStateError::FeatureUnavailable`]. + /// + /// ## Correctness contract + /// The masks are the source of truth for routing. If migration code enables a bit on the shadow + /// backend before the corresponding data/index is correct there, callers may observe incorrect + /// results. Therefore, migrations must only route a capability to shadow once it is complete and + /// consistent for that capability’s semantics. + #[inline] + pub(crate) fn backend( + &self, + cap: CapabilityRequest, + ) -> Result, FinalisedStateError> { + let bit = cap.as_capability().bits(); + + if self.shadow_mask.load(Ordering::Acquire) & bit != 0 { + if let Some(shadow_db) = self.shadow.load().as_ref() { + return Ok(Arc::clone(shadow_db)); + } + } + if self.primary_mask.load(Ordering::Acquire) & bit != 0 { + return Ok(self.primary.load_full()); + } + + Err(FinalisedStateError::FeatureUnavailable(cap.name())) + } + + // ***** Shadow database control ***** + // + // These methods should only ever be used by the migration manager. + + /// Installs `shadow` as the current shadow backend and sets its routed capability mask to `caps`. + /// + /// This is the entry point for starting a major migration: + /// - spawn/open the new-version backend, + /// - call `set_shadow(new_backend, initial_caps)`, + /// - optionally expand shadow routing incrementally with [`Router::extend_shadow_caps`]. + /// + /// ## Ordering + /// The shadow backend pointer is stored first, then the shadow mask is published with `Release` + /// ordering. Readers use `Acquire` to observe both consistently. 
+ pub(crate) fn set_shadow(&self, shadow: Arc, caps: Capability) { + self.shadow.store(Some(shadow)); + self.shadow_mask.store(caps.bits(), Ordering::Release); + } + + /// Adds additional capabilities to the shadow routing mask. + /// + /// This enables incremental migrations where certain read capabilities can move to the shadow + /// backend once the corresponding indices are complete there. + /// + /// ## Notes + /// - This only changes routing; it does not validate the shadow backend’s correctness. + /// - Use conservative routing policies: prefer moving read-only capabilities first. + pub(crate) fn extend_shadow_caps(&self, caps: Capability) { + self.shadow_mask.fetch_or(caps.bits(), Ordering::AcqRel); + } + + /// Promotes the current shadow backend to become the new primary backend. + /// + /// Promotion performs the following steps: + /// - Removes the shadow backend (`shadow = None`). + /// - Sets `primary_mask` to the promoted backend’s declared capabilities. + /// - Clears `shadow_mask`. + /// - Atomically swaps the `primary` backend pointer to the promoted backend. + /// + /// Returns the old primary backend so the caller (migration manager) can: + /// - wait for all outstanding `Arc` clones to drop, + /// - shut it down, + /// - and finally remove the old on-disk directory safely. + /// + /// # Errors + /// Returns [`FinalisedStateError::Critical`] if no shadow backend is currently installed. + pub(crate) fn promote_shadow(&self) -> Result, FinalisedStateError> { + let Some(new_primary) = self.shadow.swap(None) else { + return Err(FinalisedStateError::Critical( + "shadow not found!".to_string(), + )); + }; + + self.primary_mask + .store(new_primary.capability().bits(), Ordering::Release); + self.shadow_mask.store(0, Ordering::Release); + + Ok(self.primary.swap(new_primary)) + } + + // ***** Primary database capability control ***** + + /// Disables specific capabilities on the primary backend by clearing bits in `primary_mask`. 
+ /// + /// This is primarily used during migrations to prevent routing particular operations to the old + /// backend once the migration wants them served elsewhere. + /// + /// ## Safety + /// This only affects routing. It does not stop in-flight operations already holding an + /// `Arc` clone. + pub(crate) fn limit_primary_caps(&self, caps: Capability) { + self.primary_mask.fetch_and(!caps.bits(), Ordering::AcqRel); + } + + /// Enables specific capabilities on the primary backend by setting bits in `primary_mask`. + /// + /// This can be used to restore routing to the primary backend after temporarily restricting it. + pub(crate) fn extend_primary_caps(&self, caps: Capability) { + self.primary_mask.fetch_or(caps.bits(), Ordering::AcqRel); + } + + /// Overwrites the entire primary capability mask. + /// + /// This is a sharp tool intended for migration orchestration. Prefer incremental helpers + /// (`limit_primary_caps`, `extend_primary_caps`) unless a full reset is required. + pub(crate) fn set_primary_mask(&self, new_mask: Capability) { + self.primary_mask.store(new_mask.bits(), Ordering::Release); + } +} + +// ***** Core DB functionality ***** + +/// Core database façade implementation for the router. +/// +/// `DbCore` methods are routed via capability selection: +/// - `status()` consults the backend that currently serves `READ_CORE`. +/// - `shutdown()` attempts to shut down both primary and shadow backends (if present). +#[async_trait] +impl DbCore for Router { + /// Returns the runtime status of the database system. + /// + /// This is derived from whichever backend currently serves `READ_CORE`. If `READ_CORE` is not + /// available (misconfiguration or partial migration state), this returns [`StatusType::Busy`] + /// as a conservative fallback. 
+ fn status(&self) -> StatusType { + match self.backend(CapabilityRequest::ReadCore) { + Ok(backend) => backend.status(), + Err(_) => StatusType::Busy, + } + } + + /// Shuts down both the primary and shadow backends (if any). + /// + /// Shutdown is attempted for the primary first, then the shadow. If primary shutdown fails, the + /// error is returned immediately (the shadow shutdown result is not returned in that case). + /// + /// ## Migration note + /// During major migrations, the old primary backend may need to stay alive until all outstanding + /// handles are dropped. That waiting logic lives outside the router (typically in the migration + /// manager). + async fn shutdown(&self) -> Result<(), FinalisedStateError> { + let primary_shutdown_result = self.primary.load_full().shutdown().await; + + let shadow_option = self.shadow.load(); + let shadow_shutdown_result = match shadow_option.as_ref() { + Some(shadow_database) => shadow_database.shutdown().await, + None => Ok(()), + }; + + primary_shutdown_result?; + shadow_shutdown_result + } +} + +/// Core write surface routed through `WRITE_CORE`. +/// +/// All writes are delegated to the backend currently selected for [`CapabilityRequest::WriteCore`]. +/// During migrations this allows writers to remain on the old backend until the new backend is ready +/// (or to be switched deliberately by migration orchestration). +#[async_trait] +impl DbWrite for Router { + /// Writes a block via the backend currently serving `WRITE_CORE`. + async fn write_block(&self, blk: IndexedBlock) -> Result<(), FinalisedStateError> { + self.backend(CapabilityRequest::WriteCore)? + .write_block(blk) + .await + } + + /// Deletes the block at height `h` via the backend currently serving `WRITE_CORE`. + async fn delete_block_at_height(&self, h: Height) -> Result<(), FinalisedStateError> { + self.backend(CapabilityRequest::WriteCore)? 
+ .delete_block_at_height(h) + .await + } + + /// Deletes the provided block via the backend currently serving `WRITE_CORE`. + async fn delete_block(&self, blk: &IndexedBlock) -> Result<(), FinalisedStateError> { + self.backend(CapabilityRequest::WriteCore)? + .delete_block(blk) + .await + } + + /// Updates the persisted metadata singleton via the backend currently serving `WRITE_CORE`. + /// + /// This is used by migrations to record progress and completion status. + async fn update_metadata(&self, metadata: DbMetadata) -> Result<(), FinalisedStateError> { + self.backend(CapabilityRequest::WriteCore)? + .update_metadata(metadata) + .await + } +} + +/// Core read surface routed through `READ_CORE`. +/// +/// All reads are delegated to the backend currently selected for [`CapabilityRequest::ReadCore`]. +/// During migrations this allows reads to continue from the old backend unless/until explicitly +/// moved. +#[async_trait] +impl DbRead for Router { + /// Returns the database tip height via the backend currently serving `READ_CORE`. + async fn db_height(&self) -> Result, FinalisedStateError> { + self.backend(CapabilityRequest::ReadCore)?.db_height().await + } + + /// Returns the height for `hash` via the backend currently serving `READ_CORE`. + async fn get_block_height( + &self, + hash: BlockHash, + ) -> Result, FinalisedStateError> { + self.backend(CapabilityRequest::ReadCore)? + .get_block_height(hash) + .await + } + + /// Returns the hash for `h` via the backend currently serving `READ_CORE`. + async fn get_block_hash(&self, h: Height) -> Result, FinalisedStateError> { + self.backend(CapabilityRequest::ReadCore)? + .get_block_hash(h) + .await + } + + /// Returns database metadata via the backend currently serving `READ_CORE`. + /// + /// During migrations, callers should expect `DbMetadata::migration_status` to reflect the state + /// of the active backend selected by routing. 
+ async fn get_metadata(&self) -> Result { + self.backend(CapabilityRequest::ReadCore)? + .get_metadata() + .await + } +} diff --git a/zaino-state/src/chain_index/mempool.rs b/zaino-state/src/chain_index/mempool.rs new file mode 100644 index 000000000..2a8bdc6f5 --- /dev/null +++ b/zaino-state/src/chain_index/mempool.rs @@ -0,0 +1,605 @@ +//! Holds Zaino's mempool implementation. + +use std::{collections::HashSet, sync::Arc}; + +use crate::{ + broadcast::{Broadcast, BroadcastSubscriber}, + chain_index::{ + source::{BlockchainSource, BlockchainSourceError}, + types::db::metadata::MempoolInfo, + }, + error::{MempoolError, StatusError}, + status::{AtomicStatus, StatusType}, + BlockHash, +}; +use tracing::{info, warn}; +use zaino_fetch::jsonrpsee::response::GetMempoolInfoResponse; +use zebra_chain::{block::Hash, transaction::SerializedTransaction}; + +/// Mempool key +/// +/// Holds txid. +/// +/// TODO: Update to hold zebra_chain::Transaction::Hash ( or internal version ) +/// `https://github.com/zingolabs/zaino/issues/661` +#[derive(Debug, Clone, PartialEq, Hash, Eq)] +pub struct MempoolKey { + /// currently txid (as string) - see above TODO, could be stronger type + pub txid: String, +} + +/// Mempool value. +/// +/// Holds zebra_chain::transaction::SerializedTransaction. +#[derive(Debug, Clone, PartialEq)] +pub struct MempoolValue { + /// Stores bytes that are guaranteed to be deserializable into a Transaction (zebra_chain enum). + /// Sorts in lexicographic order of the transaction's serialized data. + pub serialized_tx: Arc, +} + +/// Zcash mempool, uses dashmap for efficient serving of mempool tx. +#[derive(Debug)] +pub struct Mempool { + /// Zcash chain fetch service. + fetcher: T, + /// Wrapper for a dashmap of mempool transactions. + state: Broadcast, + /// The hash of the chain tip for which this mempool is currently serving. + mempool_chain_tip: tokio::sync::watch::Sender, + /// Mempool sync handle. + sync_task_handle: Option>>, + /// mempool status. 
+ status: AtomicStatus, +} + +impl Mempool { + /// Spawns a new [`Mempool`]. + pub async fn spawn( + fetcher: T, + capacity_and_shard_amount: Option<(usize, usize)>, + ) -> Result { + // Wait for mempool in validator to come online. + loop { + match fetcher.get_mempool_txids().await { + Ok(_) => { + break; + } + Err(_) => { + info!(" - Waiting for Validator mempool to come online.."); + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + } + } + } + + let best_block_hash: BlockHash = match fetcher.get_best_block_hash().await { + Ok(block_hash_opt) => match block_hash_opt { + Some(hash) => hash.into(), + None => { + return Err(MempoolError::Critical( + "Error in mempool: Error connecting with validator".to_string(), + )) + } + }, + Err(_e) => { + return Err(MempoolError::Critical( + "Error in mempool: Error connecting with validator".to_string(), + )) + } + }; + + let (chain_tip_sender, _chain_tip_reciever) = tokio::sync::watch::channel(best_block_hash); + + info!("Launching Mempool.."); + let mut mempool = Mempool { + fetcher: fetcher.clone(), + state: match capacity_and_shard_amount { + Some((capacity, shard_amount)) => { + Broadcast::new(Some(capacity), Some(shard_amount)) + } + None => Broadcast::new(None, None), + }, + mempool_chain_tip: chain_tip_sender, + sync_task_handle: None, + status: AtomicStatus::new(StatusType::Spawning), + }; + + loop { + match mempool.get_mempool_transactions().await { + Ok(mempool_transactions) => { + mempool.status.store(StatusType::Ready); + mempool + .state + .insert_filtered_set(mempool_transactions, mempool.status.load()); + break; + } + Err(e) => { + mempool.state.notify(mempool.status.load()); + warn!("{e}"); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + continue; + } + }; + } + + mempool.sync_task_handle = Some(std::sync::Mutex::new(mempool.serve().await?)); + + Ok(mempool) + } + + async fn serve(&self) -> Result, MempoolError> { + let mempool = Self { + fetcher: self.fetcher.clone(), + 
state: self.state.clone(), + mempool_chain_tip: self.mempool_chain_tip.clone(), + sync_task_handle: None, + status: self.status.clone(), + }; + + let state = self.state.clone(); + let status = self.status.clone(); + status.store(StatusType::Spawning); + + let sync_handle = tokio::spawn(async move { + let mut best_block_hash: Hash; + let mut check_block_hash: Hash; + + // Initialise tip. + loop { + match mempool.fetcher.get_best_block_hash().await { + Ok(block_hash_opt) => match block_hash_opt { + Some(hash) => { + mempool.mempool_chain_tip.send_replace(hash.into()); + best_block_hash = hash; + break; + } + None => { + mempool.status.store(StatusType::RecoverableError); + state.notify(status.load()); + warn!("error fetching best_block_hash from validator"); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + continue; + } + }, + Err(e) => { + mempool.status.store(StatusType::RecoverableError); + state.notify(status.load()); + warn!("{e}"); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + continue; + } + } + } + + // Main loop + loop { + // Check chain tip. + match mempool.fetcher.get_best_block_hash().await { + Ok(block_hash_opt) => match block_hash_opt { + Some(hash) => { + check_block_hash = hash; + } + None => { + mempool.status.store(StatusType::RecoverableError); + state.notify(status.load()); + warn!("error fetching best_block_hash from validator"); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + continue; + } + }, + Err(e) => { + state.notify(status.load()); + warn!("{e}"); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + continue; + } + } + + // If chain tip has changed reset mempool. 
+ if check_block_hash != best_block_hash { + status.store(StatusType::Syncing); + state.notify(status.load()); + state.clear(); + + mempool + .mempool_chain_tip + .send_replace(check_block_hash.into()); + best_block_hash = check_block_hash; + + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + continue; + } + + match mempool.get_mempool_transactions().await { + Ok(mempool_transactions) => { + status.store(StatusType::Ready); + state.insert_filtered_set(mempool_transactions, status.load()); + } + Err(e) => { + status.store(StatusType::RecoverableError); + state.notify(status.load()); + warn!("{e}"); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + continue; + } + }; + + if status.load() == StatusType::Closing { + state.notify(status.load()); + return; + } + + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + }); + + Ok(sync_handle) + } + + /// Returns all transactions in the mempool. + async fn get_mempool_transactions( + &self, + ) -> Result, MempoolError> { + let mut transactions = Vec::new(); + + let txids = self.fetcher.get_mempool_txids().await?.ok_or_else(|| { + MempoolError::BlockchainSourceError(BlockchainSourceError::Unrecoverable( + "could not fetch mempool data: mempool txid list was None".to_string(), + )) + })?; + + for txid in txids { + let (transaction, _location) = self + .fetcher + .get_transaction(txid.0.into()) + .await? + .ok_or_else(|| { + MempoolError::BlockchainSourceError( + crate::chain_index::source::BlockchainSourceError::Unrecoverable(format!( + "could not fetch mempool data: transaction not found for txid {txid}" + )), + ) + })?; + + transactions.push(( + MempoolKey { + txid: txid.to_string(), + }, + MempoolValue { + serialized_tx: Arc::new(transaction.into()), + }, + )); + } + + Ok(transactions) + } + + /// Returns a [`MempoolSubscriber`]. 
+ pub fn subscriber(&self) -> MempoolSubscriber { + MempoolSubscriber { + subscriber: self.state.subscriber(), + seen_txids: HashSet::new(), + mempool_chain_tip: self.mempool_chain_tip.subscribe(), + status: self.status.clone(), + } + } + + /// Returns the current tx count + pub async fn size(&self) -> Result { + Ok(self + .fetcher + .get_mempool_txids() + .await? + .map_or(0, |v| v.len())) + } + + /// Returns information about the mempool. Used by the `getmempoolinfo` RPC. + /// Computed from local Broadcast state. + pub async fn get_mempool_info(&self) -> Result { + let map = self.state.get_state(); + + let size = map.len() as u64; + + let mut bytes: u64 = 0; + let mut key_heap_bytes: u64 = 0; + + for entry in map.iter() { + // payload bytes are exact (we store SerializedTransaction) + bytes = + bytes.saturating_add(Self::tx_serialized_len_bytes(&entry.value().serialized_tx)); + + // heap used by the key txid (String) + key_heap_bytes = key_heap_bytes.saturating_add(entry.key().txid.capacity() as u64); + } + + let usage = bytes.saturating_add(key_heap_bytes); + + Ok(GetMempoolInfoResponse { size, bytes, usage }) + } + + #[inline] + fn tx_serialized_len_bytes(tx: &SerializedTransaction) -> u64 { + tx.as_ref().len() as u64 + } + + // TODO knock this out if possible + // private fields in remaining references + // + /// Returns the status of the mempool. + pub fn status(&self) -> StatusType { + self.status.load() + } + + /// Sets the mempool to close gracefully. 
+ pub fn close(&self) { + self.status.store(StatusType::Closing); + self.state.notify(self.status.load()); + if let Some(ref handle) = self.sync_task_handle { + if let Ok(handle) = handle.lock() { + handle.abort(); + } + } + } +} + +impl Drop for Mempool { + fn drop(&mut self) { + self.status.store(StatusType::Closing); + self.state.notify(StatusType::Closing); + if let Some(handle) = self.sync_task_handle.take() { + if let Ok(handle) = handle.lock() { + handle.abort(); + } + } + } +} + +/// A subscriber to a [`Mempool`]. +#[derive(Debug, Clone)] +pub struct MempoolSubscriber { + subscriber: BroadcastSubscriber, + seen_txids: HashSet, + mempool_chain_tip: tokio::sync::watch::Receiver, + status: AtomicStatus, +} + +impl MempoolSubscriber { + /// Returns all tx currently in the mempool. + pub async fn get_mempool(&self) -> Vec<(MempoolKey, MempoolValue)> { + self.subscriber.get_filtered_state(&HashSet::new()) + } + + /// Returns all tx currently in the mempool filtered by `exclude_list`. + /// + /// The transaction IDs in the Exclude list can be shortened to any number of bytes to make the request + /// more bandwidth-efficient; if two or more transactions in the mempool + /// match a shortened txid, they are all sent (none is excluded). Transactions + /// in the exclude list that don't exist in the mempool are ignored. 
+ pub async fn get_filtered_mempool( + &self, + exclude_list: Vec, + ) -> Vec<(MempoolKey, MempoolValue)> { + let mempool_tx = self.subscriber.get_filtered_state(&HashSet::new()); + + let mempool_txids: HashSet = mempool_tx + .iter() + .map(|(mempool_key, _)| mempool_key.txid.clone()) + .collect(); + + let mut txids_to_exclude: HashSet = HashSet::new(); + for exclude_txid in &exclude_list { + let matching_txids: Vec<&String> = mempool_txids + .iter() + .filter(|txid| txid.starts_with(exclude_txid)) + .collect(); + + if matching_txids.len() == 1 { + txids_to_exclude.insert(MempoolKey { + txid: matching_txids[0].clone(), + }); + } + } + + mempool_tx + .into_iter() + .filter(|(mempool_key, _)| !txids_to_exclude.contains(mempool_key)) + .collect() + } + + /// Returns a stream of mempool txids, closes the channel when a new block has been mined. + pub async fn get_mempool_stream( + &mut self, + expected_chain_tip: Option, + ) -> Result< + ( + tokio::sync::mpsc::Receiver>, + tokio::task::JoinHandle<()>, + ), + MempoolError, + > { + let mut subscriber = self.clone(); + subscriber.seen_txids.clear(); + let (channel_tx, channel_rx) = tokio::sync::mpsc::channel(32); + + if let Some(expected_chain_tip_hash) = expected_chain_tip { + if expected_chain_tip_hash != *self.mempool_chain_tip.borrow() { + return Err(MempoolError::IncorrectChainTip { + expected_chain_tip: expected_chain_tip_hash, + current_chain_tip: *self.mempool_chain_tip.borrow(), + }); + } + } + + let streamer_handle = tokio::spawn(async move { + let mempool_result: Result<(), MempoolError> = async { + loop { + let (mempool_status, mempool_updates) = subscriber + .wait_on_mempool_updates(expected_chain_tip) + .await?; + match mempool_status { + StatusType::Ready => { + for (mempool_key, mempool_value) in mempool_updates { + loop { + match channel_tx + .try_send(Ok((mempool_key.clone(), mempool_value.clone()))) + { + Ok(_) => break, + Err(tokio::sync::mpsc::error::TrySendError::Full(_)) => { + 
tokio::time::sleep(std::time::Duration::from_millis( + 100, + )) + .await; + continue; + } + Err(tokio::sync::mpsc::error::TrySendError::Closed(_)) => { + return Ok(()); + } + } + } + } + } + StatusType::Syncing => { + return Ok(()); + } + StatusType::Closing => { + return Err(MempoolError::StatusError(StatusError { + server_status: StatusType::Closing, + })); + } + StatusType::RecoverableError => { + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + continue; + } + status => { + return Err(MempoolError::StatusError(StatusError { + server_status: status, + })); + } + } + if subscriber.status.load() == StatusType::Closing { + return Err(MempoolError::StatusError(StatusError { + server_status: StatusType::Closing, + })); + } + } + } + .await; + + if let Err(mempool_error) = mempool_result { + warn!("Error in mempool stream: {:?}", mempool_error); + match mempool_error { + MempoolError::StatusError(error_status) => { + let _ = channel_tx.send(Err(error_status)).await; + } + _ => { + let _ = channel_tx + .send(Err(StatusError { + server_status: StatusType::RecoverableError, + })) + .await; + } + } + } + }); + + Ok((channel_rx, streamer_handle)) + } + + /// Returns true if mempool contains the given txid. + pub async fn contains_txid(&self, txid: &MempoolKey) -> bool { + self.subscriber.contains_key(txid) + } + + /// Returns transaction by txid if in the mempool, else returns none. + pub async fn get_transaction(&self, txid: &MempoolKey) -> Option> { + self.subscriber.get(txid) + } + + /// Returns information about the mempool. Used by the `getmempoolinfo` RPC. + /// Computed from local Broadcast state. 
+ pub async fn get_mempool_info(&self) -> MempoolInfo { + let mempool_transactions: Vec<(MempoolKey, MempoolValue)> = + self.subscriber.get_filtered_state(&HashSet::new()); + + let size: u64 = mempool_transactions.len() as u64; + + let mut bytes: u64 = 0; + let mut key_heap_bytes: u64 = 0; + + for (mempool_key, mempool_value) in mempool_transactions.iter() { + // payload bytes are exact (we store SerializedTransaction) + bytes = + bytes.saturating_add(mempool_value.serialized_tx.as_ref().as_ref().len() as u64); + + // heap used by the key String (txid) + key_heap_bytes = key_heap_bytes.saturating_add(mempool_key.txid.capacity() as u64); + } + + let usage: u64 = bytes.saturating_add(key_heap_bytes); + + MempoolInfo { size, bytes, usage } + } + + // TODO noted here too + /// Returns the status of the mempool. + pub fn status(&self) -> StatusType { + self.status.load() + } + + /// Returns all tx currently in the mempool and updates seen_txids. + fn get_mempool_and_update_seen(&mut self) -> Vec<(MempoolKey, MempoolValue)> { + let mempool_updates = self.subscriber.get_filtered_state(&HashSet::new()); + for (mempool_key, _) in mempool_updates.clone() { + self.seen_txids.insert(mempool_key); + } + mempool_updates + } + + /// Returns txids not yet seen by the subscriber and updates seen_txids. + fn get_mempool_updates_and_update_seen(&mut self) -> Vec<(MempoolKey, MempoolValue)> { + let mempool_updates = self.subscriber.get_filtered_state(&self.seen_txids); + for (mempool_key, _) in mempool_updates.clone() { + self.seen_txids.insert(mempool_key); + } + mempool_updates + } + + /// Waits on update from mempool and updates the mempool, returning either the new mempool or the mempool updates, along with the mempool status. 
+ async fn wait_on_mempool_updates( + &mut self, + expected_chain_tip: Option, + ) -> Result<(StatusType, Vec<(MempoolKey, MempoolValue)>), MempoolError> { + if expected_chain_tip.is_some() + && expected_chain_tip.unwrap() != *self.mempool_chain_tip.borrow() + { + self.clear_seen(); + return Ok((StatusType::Syncing, self.get_mempool_and_update_seen())); + } + + let update_status = self.subscriber.wait_on_notifier().await?; + match update_status { + StatusType::Ready => Ok(( + StatusType::Ready, + self.get_mempool_updates_and_update_seen(), + )), + StatusType::Syncing => { + self.clear_seen(); + Ok((StatusType::Syncing, self.get_mempool_and_update_seen())) + } + StatusType::Closing => Ok((StatusType::Closing, Vec::new())), + status => Err(MempoolError::StatusError(StatusError { + server_status: status, + })), + } + } + + /// Clears the subscribers seen_txids. + fn clear_seen(&mut self) { + self.seen_txids.clear(); + } + + /// Get the chain tip that the mempool is atop + pub fn mempool_chain_tip(&self) -> BlockHash { + *self.mempool_chain_tip.borrow() + } +} diff --git a/zaino-state/src/chain_index/non_finalised_state.rs b/zaino-state/src/chain_index/non_finalised_state.rs new file mode 100644 index 000000000..884423a8b --- /dev/null +++ b/zaino-state/src/chain_index/non_finalised_state.rs @@ -0,0 +1,761 @@ +use super::{finalised_state::ZainoDB, source::BlockchainSource}; +use crate::{ + chain_index::types::{self, BlockHash, BlockMetadata, BlockWithMetadata, Height, TreeRootData}, + error::FinalisedStateError, + ChainWork, IndexedBlock, +}; +use arc_swap::ArcSwap; +use futures::lock::Mutex; +use primitive_types::U256; +use std::{collections::HashMap, sync::Arc}; +use tokio::sync::mpsc; +use tracing::{info, warn}; +use zebra_chain::{parameters::Network, serialization::BytesInDisplayOrder}; +use zebra_state::HashOrHeight; + +/// Holds the block cache +#[derive(Debug)] +pub struct NonFinalizedState { + /// We need access to the validator's best block hash, as well + /// 
as a source of blocks + pub(super) source: Source, + /// This lock should not be exposed to consumers. Rather, + /// clone the Arc and offer that. This means we can overwrite the arc + /// without interfering with readers, who will hold a stale copy + current: ArcSwap, + /// Used mostly to determine activation heights + pub(crate) network: Network, + /// Listener used to detect non-best-chain blocks, if available + #[allow(clippy::type_complexity)] + nfs_change_listener: Option< + Mutex< + tokio::sync::mpsc::Receiver<(zebra_chain::block::Hash, Arc)>, + >, + >, +} + +#[derive(Clone, Copy, Debug, PartialEq)] +/// created for NonfinalizedBlockCacheSnapshot best_tip field for naming fields +pub struct BestTip { + /// from chain_index types + pub height: Height, + /// from chain_index types + pub blockhash: BlockHash, +} + +#[derive(Debug, Clone)] +/// A snapshot of the nonfinalized state as it existed when this was created. +pub struct NonfinalizedBlockCacheSnapshot { + /// the set of all known blocks < 100 blocks old + /// this includes all blocks on-chain, as well as + /// all blocks known to have been on-chain before being + /// removed by a reorg. Blocks reorged away have no height. + pub blocks: HashMap, + /// hashes indexed by height + /// Hashes in this map are part of the best chain. + pub heights_to_hashes: HashMap, + // Do we need height here? + /// The highest known block + // best_tip is a BestTip, which contains + // a Height, and a BlockHash as named fields. + pub best_tip: BestTip, + + /// if the validator has finalized above the tip + /// of the snapshot, we can use it for some queries + /// and pass through to the validator + pub validator_finalized_height: Height, +} + +#[derive(Debug)] +/// Could not connect to a validator +pub enum NodeConnectionError { + /// The Uri provided was invalid + BadUri(String), + /// Could not connect to the zebrad. + /// This is a network issue. 
+ ConnectionFailure(reqwest::Error), + /// The Zebrad provided invalid or corrupt data. Something has gone wrong + /// and we need to shut down. + UnrecoverableError(Box), +} + +#[derive(Debug)] +struct MissingBlockError(String); + +impl std::fmt::Display for MissingBlockError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "missing block: {}", self.0) + } +} + +impl std::error::Error for MissingBlockError {} + +#[derive(Debug, thiserror::Error)] +/// An error occurred during sync of the NonFinalized State. +pub enum SyncError { + /// The backing validator node returned corrupt, invalid, or incomplete data + /// TODO: This may not be correctly disambibuated from temporary network issues + /// in the fetchservice case. + #[error("failed to connect to validator: {0:?}")] + ValidatorConnectionError(NodeConnectionError), + /// The channel used to store new blocks has been closed. This should only happen + /// during shutdown. + #[error("staging channel closed. Shutdown in progress")] + StagingChannelClosed, + /// Sync has been called multiple times in parallel, or another process has + /// written to the block snapshot. + #[error("multiple sync processes running")] + CompetingSyncProcess, + /// Sync attempted a reorg, and something went wrong. 
+ #[error("reorg failed: {0}")] + ReorgFailure(String), + /// UnrecoverableFinalizedStateError + #[error("error reading nonfinalized state")] + CannotReadFinalizedState(#[from] FinalisedStateError), +} + +impl From for SyncError { + fn from(value: UpdateError) -> Self { + match value { + UpdateError::ReceiverDisconnected => SyncError::StagingChannelClosed, + UpdateError::StaleSnapshot => SyncError::CompetingSyncProcess, + UpdateError::FinalizedStateCorruption => SyncError::CannotReadFinalizedState( + FinalisedStateError::Custom("mystery update failure".to_string()), + ), + UpdateError::DatabaseHole => { + SyncError::ReorgFailure(String::from("could not determine best chain")) + } + UpdateError::ValidatorConnectionError(e) => SyncError::ValidatorConnectionError( + NodeConnectionError::UnrecoverableError(Box::new(MissingBlockError(e.to_string()))), + ), + } + } +} + +#[derive(thiserror::Error, Debug)] +#[error("Genesis block missing in validator")] +struct MissingGenesisBlock; + +#[derive(thiserror::Error, Debug)] +#[error("data from validator invalid: {0}")] +struct InvalidData(String); + +#[derive(Debug, thiserror::Error)] +/// An error occured during initial creation of the NonFinalizedState +pub enum InitError { + #[error("zebra returned invalid data: {0}")] + /// the connected node returned garbage data + InvalidNodeData(Box), + #[error(transparent)] + /// The mempool state failed to initialize + MempoolInitialzationError(#[from] crate::error::MempoolError), + #[error(transparent)] + /// The finalized state failed to initialize + FinalisedStateInitialzationError(#[from] FinalisedStateError), + /// the initial block provided was not on the best chain + #[error("initial block not on best chain")] + InitalBlockMissingHeight, +} + +/// This is the core of the concurrent block cache. 
+impl BestTip { + /// Create a BestTip from an IndexedBlock + fn from_block(block: &IndexedBlock) -> Self { + let height = block.height(); + let blockhash = *block.hash(); + Self { height, blockhash } + } +} + +impl NonfinalizedBlockCacheSnapshot { + /// Create initial snapshot from a single block + fn from_initial_block(block: IndexedBlock, validator_finalized_height: Height) -> Self { + let best_tip = BestTip::from_block(&block); + let hash = *block.hash(); + let height = best_tip.height; + + let mut blocks = HashMap::new(); + let mut heights_to_hashes = HashMap::new(); + + blocks.insert(hash, block); + heights_to_hashes.insert(height, hash); + + Self { + blocks, + heights_to_hashes, + best_tip, + validator_finalized_height, + } + } + + fn add_block_new_chaintip(&mut self, block: IndexedBlock) { + self.best_tip = BestTip { + height: block.height(), + blockhash: *block.hash(), + }; + self.add_block(block) + } + + fn get_block_by_hash_bytes_in_serialized_order(&self, hash: [u8; 32]) -> Option<&IndexedBlock> { + self.blocks + .values() + .find(|block| block.hash_bytes_serialized_order() == hash) + } + + fn remove_finalized_blocks(&mut self, finalized_height: Height) { + // Keep the last finalized block. This means we don't have to check + // the finalized state when the entire non-finalized state is reorged away. + self.blocks + .retain(|_hash, block| block.height() >= finalized_height); + self.heights_to_hashes + .retain(|height, _hash| height >= &finalized_height); + } + + fn add_block(&mut self, block: IndexedBlock) { + self.heights_to_hashes.insert(block.height(), *block.hash()); + self.blocks.insert(*block.hash(), block); + } +} + +impl NonFinalizedState { + /// Create a nonfinalized state, in a coherent initial state + /// + /// TODO: Currently, we can't initate without an snapshot, we need to create a cache + /// of at least one block. Should this be tied to the instantiation of the data structure + /// itself? 
+ pub async fn initialize( + source: Source, + network: Network, + start_block: Option, + ) -> Result { + info!("Initialising non-finalised state."); + + let validator_tip = source + .get_best_block_height() + .await + .map_err(|e| InitError::InvalidNodeData(Box::new(e)))? + .ok_or_else(|| { + InitError::InvalidNodeData(Box::new(MissingBlockError( + "Validator has no best block".to_string(), + ))) + })?; + + // Resolve the initial block (provided or genesis) + let initial_block = Self::resolve_initial_block(&source, &network, start_block).await?; + + // Create initial snapshot from the block + let snapshot = NonfinalizedBlockCacheSnapshot::from_initial_block( + initial_block, + Height(validator_tip.0.saturating_sub(100)), + ); + + // Set up optional listener + let nfs_change_listener = Self::setup_listener(&source).await; + + Ok(Self { + source, + current: ArcSwap::new(Arc::new(snapshot)), + network, + nfs_change_listener, + }) + } + + /// Fetch the genesis block and convert it to IndexedBlock + async fn get_genesis_indexed_block( + source: &Source, + network: &Network, + ) -> Result { + let genesis_block = source + .get_block(HashOrHeight::Height(zebra_chain::block::Height(0))) + .await + .map_err(|e| InitError::InvalidNodeData(Box::new(e)))? + .ok_or_else(|| InitError::InvalidNodeData(Box::new(MissingGenesisBlock)))?; + + let (sapling_root_and_len, orchard_root_and_len) = source + .get_commitment_tree_roots(genesis_block.hash().into()) + .await + .map_err(|e| InitError::InvalidNodeData(Box::new(e)))?; + + let tree_roots = TreeRootData { + sapling: sapling_root_and_len, + orchard: orchard_root_and_len, + }; + + // For genesis block, chainwork is just the block's own work (no previous blocks) + let genesis_work = ChainWork::from(U256::from( + genesis_block + .header + .difficulty_threshold + .to_work() + .ok_or_else(|| { + InitError::InvalidNodeData(Box::new(InvalidData( + "Invalid work field of genesis block".to_string(), + ))) + })? 
+ .as_u128(), + )); + + Self::create_indexed_block_with_optional_roots( + genesis_block.as_ref(), + &tree_roots, + genesis_work, + network.clone(), + ) + .map_err(|e| InitError::InvalidNodeData(Box::new(InvalidData(e)))) + } + + /// Resolve the initial block - either use provided block or fetch genesis + async fn resolve_initial_block( + source: &Source, + network: &Network, + start_block: Option, + ) -> Result { + match start_block { + Some(block) => Ok(block), + None => Self::get_genesis_indexed_block(source, network).await, + } + } + + /// Set up the optional non-finalized change listener + async fn setup_listener( + source: &Source, + ) -> Option< + Mutex< + tokio::sync::mpsc::Receiver<(zebra_chain::block::Hash, Arc)>, + >, + > { + source + .nonfinalized_listener() + .await + .ok() + .flatten() + .map(Mutex::new) + } + + /// sync to the top of the chain, trimming to the finalised tip. + pub(super) async fn sync(&self, finalized_db: Arc) -> Result<(), SyncError> { + let mut initial_state = self.get_snapshot(); + let local_finalized_tip = finalized_db.to_reader().db_height().await?; + if Some(initial_state.best_tip.height) < local_finalized_tip { + self.current.swap(Arc::new( + NonfinalizedBlockCacheSnapshot::from_initial_block( + finalized_db + .to_reader() + .get_chain_block( + local_finalized_tip.expect("known to be some due to above if"), + ) + .await? 
+ .ok_or(FinalisedStateError::DataUnavailable(format!( + "Missing block {}", + local_finalized_tip.unwrap().0 + )))?, + local_finalized_tip.unwrap(), + ), + )); + initial_state = self.get_snapshot() + } + let mut working_snapshot = initial_state.as_ref().clone(); + + // currently this only gets main-chain blocks + // once readstateservice supports serving sidechain data, this + // must be rewritten to match + // + // see https://github.com/ZcashFoundation/zebra/issues/9541 + + while let Some(block) = self + .source + .get_block(HashOrHeight::Height(zebra_chain::block::Height( + u32::from(working_snapshot.best_tip.height) + 1, + ))) + .await + .map_err(|e| { + // TODO: Check error. Determine what kind of error to return, this may be recoverable + SyncError::ValidatorConnectionError(NodeConnectionError::UnrecoverableError( + Box::new(e), + )) + })? + { + let parent_hash = BlockHash::from(block.header.previous_block_hash); + if parent_hash == working_snapshot.best_tip.blockhash { + // Normal chain progression + let prev_block = working_snapshot + .blocks + .get(&working_snapshot.best_tip.blockhash) + .ok_or_else(|| { + SyncError::ReorgFailure(format!( + "found blocks {:?}, expected block {:?}", + working_snapshot + .blocks + .values() + .map(|block| (block.index().hash(), block.index().height())) + .collect::>(), + working_snapshot.best_tip + )) + })?; + let chainblock = self.block_to_chainblock(prev_block, &block).await?; + info!( + "syncing block {} at height {}", + &chainblock.index().hash(), + working_snapshot.best_tip.height + 1 + ); + working_snapshot.add_block_new_chaintip(chainblock); + } else { + self.handle_reorg(&mut working_snapshot, block.as_ref()) + .await?; + // There's been a reorg. The fresh block is the new chaintip + // we need to work backwards from it and update heights_to_hashes + // with it and all its parents. 
+ } + if initial_state.best_tip.height + 100 < working_snapshot.best_tip.height { + self.update(finalized_db.clone(), initial_state, working_snapshot) + .await?; + initial_state = self.current.load_full(); + working_snapshot = initial_state.as_ref().clone(); + } + } + // Handle non-finalized change listener + self.handle_nfs_change_listener(&mut working_snapshot) + .await?; + + self.update(finalized_db.clone(), initial_state, working_snapshot) + .await?; + + Ok(()) + } + + /// Handle a blockchain reorg by finding the common ancestor + async fn handle_reorg( + &self, + working_snapshot: &mut NonfinalizedBlockCacheSnapshot, + block: &impl Block, + ) -> Result { + let prev_block = match working_snapshot + .get_block_by_hash_bytes_in_serialized_order(block.prev_hash_bytes_serialized_order()) + .cloned() + { + Some(prev_block) => { + if !working_snapshot + .heights_to_hashes + .values() + .any(|hash| hash == prev_block.hash()) + { + Box::pin(self.handle_reorg(working_snapshot, &prev_block)).await? + } else { + prev_block + } + } + None => { + let prev_block = self + .source + .get_block(HashOrHeight::Hash( + zebra_chain::block::Hash::from_bytes_in_serialized_order( + block.prev_hash_bytes_serialized_order(), + ), + )) + .await + .map_err(|e| { + SyncError::ValidatorConnectionError( + NodeConnectionError::UnrecoverableError(Box::new(e)), + ) + })? + .ok_or(SyncError::ValidatorConnectionError( + NodeConnectionError::UnrecoverableError(Box::new(MissingBlockError( + "zebrad missing block in best chain".to_string(), + ))), + ))?; + Box::pin(self.handle_reorg(working_snapshot, &*prev_block)).await? 
+ } + }; + let indexed_block = block.to_indexed_block(&prev_block, self).await?; + working_snapshot.add_block_new_chaintip(indexed_block.clone()); + Ok(indexed_block) + } + + /// Handle non-finalized change listener events + async fn handle_nfs_change_listener( + &self, + working_snapshot: &mut NonfinalizedBlockCacheSnapshot, + ) -> Result<(), SyncError> { + let Some(ref listener) = self.nfs_change_listener else { + return Ok(()); + }; + + let Some(mut listener) = listener.try_lock() else { + warn!("Error fetching non-finalized change listener"); + return Err(SyncError::CompetingSyncProcess); + }; + + loop { + match listener.try_recv() { + Ok((hash, block)) => { + if !self + .current + .load() + .blocks + .contains_key(&types::BlockHash(hash.0)) + { + self.add_nonbest_block(working_snapshot, &*block).await?; + } + } + Err(mpsc::error::TryRecvError::Empty) => break, + Err(e @ mpsc::error::TryRecvError::Disconnected) => { + return Err(SyncError::ValidatorConnectionError( + NodeConnectionError::UnrecoverableError(Box::new(e)), + )) + } + } + } + Ok(()) + } + + /// Add all blocks from the staging area, and save a new cache snapshot, trimming block below the finalised tip. + pub(super) async fn update( + &self, + finalized_db: Arc, + initial_state: Arc, + mut new_snapshot: NonfinalizedBlockCacheSnapshot, + ) -> Result<(), UpdateError> { + let finalized_height = finalized_db + .to_reader() + .db_height() + .await + .map_err(|_e| UpdateError::FinalizedStateCorruption)? + .unwrap_or(Height(0)); + + new_snapshot.remove_finalized_blocks(finalized_height); + let best_block = &new_snapshot + .blocks + .values() + .max_by_key(|block| block.chainwork()) + .cloned() + .expect("empty snapshot impossible"); + self.handle_reorg(&mut new_snapshot, best_block) + .await + .map_err(|_e| UpdateError::DatabaseHole)?; + + let validator_tip = self + .source + .get_best_block_height() + .await + .map_err(|e| UpdateError::ValidatorConnectionError(Box::new(e)))? 
+ .ok_or(UpdateError::ValidatorConnectionError(Box::new( + MissingBlockError("no best block height".to_string()), + )))?; + new_snapshot.validator_finalized_height = Height(validator_tip.0.saturating_sub(100)); + + // Need to get best hash at some point in this process + let stored = self + .current + .compare_and_swap(&initial_state, Arc::new(new_snapshot)); + + if Arc::ptr_eq(&stored, &initial_state) { + let stale_best_tip = initial_state.best_tip; + let new_best_tip = stored.best_tip; + + // Log chain tip change + if new_best_tip != stale_best_tip { + if new_best_tip.height > stale_best_tip.height { + info!( + "non-finalized tip advanced: Height: {} -> {}, Hash: {} -> {}", + stale_best_tip.height, + new_best_tip.height, + stale_best_tip.blockhash, + new_best_tip.blockhash, + ); + } else if new_best_tip.height == stale_best_tip.height + && new_best_tip.blockhash != stale_best_tip.blockhash + { + info!( + "non-finalized tip reorg at height {}: Hash: {} -> {}", + new_best_tip.height, stale_best_tip.blockhash, new_best_tip.blockhash, + ); + } else if new_best_tip.height < stale_best_tip.height { + info!( + "non-finalized tip rollback from height {} to {}, Hash: {} -> {}", + stale_best_tip.height, + new_best_tip.height, + stale_best_tip.blockhash, + new_best_tip.blockhash, + ); + } + } + Ok(()) + } else { + Err(UpdateError::StaleSnapshot) + } + } + + /// Get a snapshot of the block cache + pub(super) fn get_snapshot(&self) -> Arc { + self.current.load_full() + } + + async fn block_to_chainblock( + &self, + prev_block: &IndexedBlock, + block: &zebra_chain::block::Block, + ) -> Result { + let tree_roots = self + .get_tree_roots_from_source(block.hash().into()) + .await + .map_err(|e| { + SyncError::ValidatorConnectionError(NodeConnectionError::UnrecoverableError( + Box::new(InvalidData(format!("{}", e))), + )) + })?; + + Self::create_indexed_block_with_optional_roots( + block, + &tree_roots, + *prev_block.chainwork(), + self.network.clone(), + ) + .map_err(|e| { + 
SyncError::ValidatorConnectionError(NodeConnectionError::UnrecoverableError(Box::new( + InvalidData(e), + ))) + }) + } + + /// Get commitment tree roots from the blockchain source + async fn get_tree_roots_from_source( + &self, + block_hash: BlockHash, + ) -> Result { + let (sapling_root_and_len, orchard_root_and_len) = + self.source.get_commitment_tree_roots(block_hash).await?; + + Ok(TreeRootData { + sapling: sapling_root_and_len, + orchard: orchard_root_and_len, + }) + } + + /// Create IndexedBlock with optional tree roots (for genesis/sync cases) + /// + /// TODO: Issue #604 - This uses `unwrap_or_default()` uniformly for both Sapling and Orchard, + /// but they have different activation heights. This masks potential bugs and prevents proper + /// validation based on network upgrade activation. + fn create_indexed_block_with_optional_roots( + block: &zebra_chain::block::Block, + tree_roots: &TreeRootData, + parent_chainwork: ChainWork, + network: Network, + ) -> Result { + let (sapling_root, sapling_size, orchard_root, orchard_size) = + tree_roots.clone().extract_with_defaults(); + + let metadata = BlockMetadata::new( + sapling_root, + sapling_size as u32, + orchard_root, + orchard_size as u32, + parent_chainwork, + network, + ); + + let block_with_metadata = BlockWithMetadata::new(block, metadata); + IndexedBlock::try_from(block_with_metadata) + } + + async fn add_nonbest_block( + &self, + working_snapshot: &mut NonfinalizedBlockCacheSnapshot, + block: &impl Block, + ) -> Result { + let prev_block = match working_snapshot + .get_block_by_hash_bytes_in_serialized_order(block.prev_hash_bytes_serialized_order()) + .cloned() + { + Some(block) => block, + None => { + let prev_block = self + .source + .get_block(HashOrHeight::Hash( + zebra_chain::block::Hash::from_bytes_in_serialized_order( + block.prev_hash_bytes_serialized_order(), + ), + )) + .await + .map_err(|e| { + SyncError::ValidatorConnectionError( + NodeConnectionError::UnrecoverableError(Box::new(e)), + ) 
+ })? + .ok_or(SyncError::ValidatorConnectionError( + NodeConnectionError::UnrecoverableError(Box::new(MissingBlockError( + "zebrad missing block".to_string(), + ))), + ))?; + Box::pin(self.add_nonbest_block(working_snapshot, &*prev_block)).await? + } + }; + let indexed_block = block.to_indexed_block(&prev_block, self).await?; + working_snapshot.add_block(indexed_block.clone()); + Ok(indexed_block) + } +} + +/// Errors that occur during a snapshot update +pub enum UpdateError { + /// The block reciever disconnected. This should only happen during shutdown. + ReceiverDisconnected, + /// The snapshot was already updated by a different process, between when this update started + /// and when it completed. + StaleSnapshot, + + /// Something has gone unrecoverably wrong in the finalized + /// state. A full rebuild is likely needed + FinalizedStateCorruption, + + /// A block in the snapshot is missing + DatabaseHole, + + /// Failed to connect to the backing validator + ValidatorConnectionError(Box), +} + +trait Block { + fn hash_bytes_serialized_order(&self) -> [u8; 32]; + fn prev_hash_bytes_serialized_order(&self) -> [u8; 32]; + async fn to_indexed_block( + &self, + prev_block: &IndexedBlock, + nfs: &NonFinalizedState, + ) -> Result; +} + +impl Block for IndexedBlock { + fn hash_bytes_serialized_order(&self) -> [u8; 32] { + self.hash().0 + } + + fn prev_hash_bytes_serialized_order(&self) -> [u8; 32] { + self.index.parent_hash.0 + } + + async fn to_indexed_block( + &self, + _prev_block: &IndexedBlock, + _nfs: &NonFinalizedState, + ) -> Result { + Ok(self.clone()) + } +} +impl Block for zebra_chain::block::Block { + fn hash_bytes_serialized_order(&self) -> [u8; 32] { + self.hash().bytes_in_serialized_order() + } + + fn prev_hash_bytes_serialized_order(&self) -> [u8; 32] { + self.header.previous_block_hash.bytes_in_serialized_order() + } + + async fn to_indexed_block( + &self, + prev_block: &IndexedBlock, + nfs: &NonFinalizedState, + ) -> Result { + 
nfs.block_to_chainblock(prev_block, self).await + } +} diff --git a/zaino-state/src/chain_index/source.rs b/zaino-state/src/chain_index/source.rs new file mode 100644 index 000000000..3c408fe8a --- /dev/null +++ b/zaino-state/src/chain_index/source.rs @@ -0,0 +1,1010 @@ +//! Traits and types for the blockchain source thats serves zaino, commonly a validator connection. + +use std::{error::Error, str::FromStr as _, sync::Arc}; + +use crate::chain_index::types::{BlockHash, TransactionHash}; +use async_trait::async_trait; +use futures::{future::join, TryFutureExt as _}; +use tower::{Service, ServiceExt as _}; +use zaino_common::Network; +use zaino_fetch::jsonrpsee::{ + connector::{JsonRpSeeConnector, RpcRequestError}, + response::{GetBlockError, GetBlockResponse, GetTransactionResponse, GetTreestateResponse}, +}; +use zcash_primitives::merkle_tree::read_commitment_tree; +use zebra_chain::{block::TryIntoHeight, serialization::ZcashDeserialize}; +use zebra_state::{HashOrHeight, ReadRequest, ReadResponse, ReadStateService}; + +macro_rules! expected_read_response { + ($response:ident, $expected_variant:ident) => { + match $response { + ReadResponse::$expected_variant(inner) => inner, + unexpected => { + unreachable!("Unexpected response from state service: {unexpected:?}") + } + } + }; +} + +/// A trait for accessing blockchain data from different backends. 
+#[async_trait] +pub trait BlockchainSource: Clone + Send + Sync + 'static { + /// Returns a best-chain block by hash or height + async fn get_block( + &self, + id: HashOrHeight, + ) -> BlockchainSourceResult>>; + + /// Returns the block commitment tree data by hash + async fn get_commitment_tree_roots( + &self, + id: BlockHash, + ) -> BlockchainSourceResult<( + Option<(zebra_chain::sapling::tree::Root, u64)>, + Option<(zebra_chain::orchard::tree::Root, u64)>, + )>; + + /// Returns the sapling and orchard treestate by hash + async fn get_treestate( + &self, + id: BlockHash, + ) -> BlockchainSourceResult<(Option>, Option>)>; + + /// Returns the complete list of txids currently in the mempool. + async fn get_mempool_txids( + &self, + ) -> BlockchainSourceResult>>; + + /// Returns the transaction by txid + async fn get_transaction( + &self, + txid: TransactionHash, + ) -> BlockchainSourceResult< + Option<( + Arc, + GetTransactionLocation, + )>, + >; + + /// Returns the hash of the block at the tip of the best chain. + async fn get_best_block_hash(&self) + -> BlockchainSourceResult>; + + /// Returns the height of the block at the tip of the best chain. + async fn get_best_block_height( + &self, + ) -> BlockchainSourceResult>; + + /// Get a listener for new nonfinalized blocks, + /// if supported + async fn nonfinalized_listener( + &self, + ) -> Result< + Option< + tokio::sync::mpsc::Receiver<(zebra_chain::block::Hash, Arc)>, + >, + Box, + >; +} + +/// An error originating from a blockchain source. +#[derive(Debug, thiserror::Error)] +pub enum BlockchainSourceError { + /// Unrecoverable error. + // TODO: Add logic for handling recoverable errors if any are identified + // one candidate may be ephemerable network hiccoughs + #[error("critical error in backing block source: {0}")] + Unrecoverable(String), +} + +/// Error type returned when invalid data is returned by the validator. 
+#[derive(thiserror::Error, Debug)] +#[error("data from validator invalid: {0}")] +pub struct InvalidData(String); + +pub(crate) type BlockchainSourceResult = Result; + +/// ReadStateService based validator connector. +/// +/// Currently the Mempool cannot utilise the mempool change endpoint in the ReadStateService, +/// for this reason the lagacy jsonrpc inteface is used until the Mempool updates required can be implemented. +/// +/// Due to the difference if the mempool inteface provided by the ReadStateService and the Json RPC service +/// two seperate Mempool implementation will likely be required. +#[derive(Clone, Debug)] +pub struct State { + /// Used to fetch chain data. + pub read_state_service: ReadStateService, + /// Temporarily used to fetch mempool data. + pub mempool_fetcher: JsonRpSeeConnector, + /// Current network type being run. + pub network: Network, +} + +/// A connection to a validator. +#[derive(Clone, Debug)] +// TODO: Explore whether State should be Boxed. +#[allow(clippy::large_enum_variant)] +pub enum ValidatorConnector { + /// The connection is via direct read access to a zebrad's data file + /// + /// NOTE: See docs for State struct. + State(State), + /// We are connected to a zebrad, zcashd, or other zainod via JsonRpc ("JsonRpSee") + Fetch(JsonRpSeeConnector), +} + +#[async_trait] +impl BlockchainSource for ValidatorConnector { + async fn get_block( + &self, + id: HashOrHeight, + ) -> BlockchainSourceResult>> { + match self { + ValidatorConnector::State(state) => match state + .read_state_service + .clone() + .call(zebra_state::ReadRequest::Block(id)) + .await + { + Ok(zebra_state::ReadResponse::Block(Some(block))) => Ok(Some(block)), + // Zebra's ReadStateService does not currently serve non-best chain blocks + // so we must fetch using the JsonRpcConnector. 
+ Ok(zebra_state::ReadResponse::Block(None)) => { + match state.mempool_fetcher.get_block(id.to_string(), Some(0)).await + { + Ok(GetBlockResponse::Raw(raw_block)) => Ok(Some(Arc::new( + zebra_chain::block::Block::zcash_deserialize(raw_block.as_ref()) + .map_err(|e| BlockchainSourceError::Unrecoverable(e.to_string()))?, + ))), + Ok(_) => unreachable!(), + Err(e) => match e { + RpcRequestError::Method(GetBlockError::MissingBlock(_)) => Ok(None), + // TODO/FIX: zcashd returns this transport error when a block is requested higher than current chain. is this correct? + RpcRequestError::Transport(zaino_fetch::jsonrpsee::error::TransportError::ErrorStatusCode(500)) => Ok(None), + RpcRequestError::ServerWorkQueueFull => Err(BlockchainSourceError::Unrecoverable("Work queue full. not yet implemented: handling of ephemeral network errors.".to_string())), + _ => Err(BlockchainSourceError::Unrecoverable(e.to_string())), + }, + } + } + Ok(otherwise) => panic!( + "Read Request of Block returned Read Response of {otherwise:#?} \n\ + This should be deterministically unreachable" + ), + Err(e) => Err(BlockchainSourceError::Unrecoverable(e.to_string())), + }, + ValidatorConnector::Fetch(fetch) => { + match fetch + .get_block(id.to_string(), Some(0)) + .await + { + Ok(GetBlockResponse::Raw(raw_block)) => Ok(Some(Arc::new( + zebra_chain::block::Block::zcash_deserialize(raw_block.as_ref()) + .map_err(|e| BlockchainSourceError::Unrecoverable(e.to_string()))?, + ))), + Ok(_) => unreachable!(), + Err(e) => match e { + RpcRequestError::Method(GetBlockError::MissingBlock(_)) => Ok(None), + // TODO/FIX: zcashd returns this transport error when a block is requested higher than current chain. is this correct? + RpcRequestError::Transport(zaino_fetch::jsonrpsee::error::TransportError::ErrorStatusCode(500)) => Ok(None), + RpcRequestError::ServerWorkQueueFull => Err(BlockchainSourceError::Unrecoverable("Work queue full. 
not yet implemented: handling of ephemeral network errors.".to_string())), + _ => Err(BlockchainSourceError::Unrecoverable(e.to_string())), + }, + } + } + } + } + + async fn get_commitment_tree_roots( + &self, + // Sould this be HashOrHeight? + id: BlockHash, + ) -> BlockchainSourceResult<( + Option<(zebra_chain::sapling::tree::Root, u64)>, + Option<(zebra_chain::orchard::tree::Root, u64)>, + )> { + match self { + ValidatorConnector::State(state) => { + let (sapling_tree_response, orchard_tree_response) = + join( + state.read_state_service.clone().call( + zebra_state::ReadRequest::SaplingTree(HashOrHeight::Hash(id.into())), + ), + state.read_state_service.clone().call( + zebra_state::ReadRequest::OrchardTree(HashOrHeight::Hash(id.into())), + ), + ) + .await; + let (sapling_tree, orchard_tree) = match ( + //TODO: Better readstateservice error handling + sapling_tree_response + .map_err(|e| BlockchainSourceError::Unrecoverable(e.to_string()))?, + orchard_tree_response + .map_err(|e| BlockchainSourceError::Unrecoverable(e.to_string()))?, + ) { + (ReadResponse::SaplingTree(saptree), ReadResponse::OrchardTree(orctree)) => { + (saptree, orctree) + } + (_, _) => panic!("Bad response"), + }; + + Ok(( + sapling_tree + .as_deref() + .map(|tree| (tree.root(), tree.count())), + orchard_tree + .as_deref() + .map(|tree| (tree.root(), tree.count())), + )) + } + ValidatorConnector::Fetch(fetch) => { + let tree_responses = fetch + .get_treestate(id.to_string()) + .await + // As MethodError contains a GetTreestateError, which is an enum with no variants, + // we don't need to account for it at all here + .map_err(|e| match e { + RpcRequestError::ServerWorkQueueFull => { + BlockchainSourceError::Unrecoverable( + "Not yet implemented: handle backing validator\ + full queue" + .to_string(), + ) + } + _ => BlockchainSourceError::Unrecoverable(e.to_string()), + })?; + let GetTreestateResponse { + sapling, orchard, .. 
+ } = tree_responses; + let sapling_frontier = sapling + .commitments() + .final_state() + .as_ref() + .map(|final_state| { + read_commitment_tree::(final_state.as_slice()) + }) + .transpose() + .map_err(|e| BlockchainSourceError::Unrecoverable(format!("io error: {e}")))?; + let orchard_frontier = orchard + .commitments() + .final_state() + .as_ref() + .map(|final_state| { + read_commitment_tree::( + final_state.as_slice(), + ) + }) + .transpose() + .map_err(|e| BlockchainSourceError::Unrecoverable(format!("io error: {e}")))?; + let sapling_root = sapling_frontier + .map(|tree| { + zebra_chain::sapling::tree::Root::try_from(tree.root().to_bytes()) + .map(|root| (root, tree.size() as u64)) + }) + .transpose() + .map_err(|e| { + BlockchainSourceError::Unrecoverable(format!("could not deser: {e}")) + })?; + let orchard_root = orchard_frontier + .map(|tree| { + zebra_chain::orchard::tree::Root::try_from(tree.root().to_repr()) + .map(|root| (root, tree.size() as u64)) + }) + .transpose() + .map_err(|e| { + BlockchainSourceError::Unrecoverable(format!("could not deser: {e}")) + })?; + Ok((sapling_root, orchard_root)) + } + } + } + + /// Returns the Sapling and Orchard treestate by blockhash. + async fn get_treestate( + &self, + // Sould this be HashOrHeight? + id: BlockHash, + ) -> BlockchainSourceResult<(Option>, Option>)> { + let hash_or_height: HashOrHeight = HashOrHeight::Hash(zebra_chain::block::Hash(id.into())); + match self { + ValidatorConnector::State(state) => { + let mut state = state.clone(); + let block_header_response = state + .read_state_service + .ready() + .and_then(|service| service.call(ReadRequest::BlockHeader(hash_or_height))) + .await + .map_err(|_e| { + BlockchainSourceError::Unrecoverable( + InvalidData(format!("could not fetch header of block {id}")) + .to_string(), + ) + })?; + let (_header, _hash, height) = match block_header_response { + ReadResponse::BlockHeader { + header, + hash, + height, + .. 
+ } => (header, hash, height), + unexpected => { + unreachable!("Unexpected response from state service: {unexpected:?}") + } + }; + + let sapling = match zebra_chain::parameters::NetworkUpgrade::Sapling + .activation_height(&state.network.to_zebra_network()) + { + Some(activation_height) if height >= activation_height => Some( + state + .read_state_service + .ready() + .and_then(|service| { + service.call(ReadRequest::SaplingTree(hash_or_height)) + }) + .await + .map_err(|_e| { + BlockchainSourceError::Unrecoverable( + InvalidData(format!( + "could not fetch sapling treestate of block {id}" + )) + .to_string(), + ) + })?, + ), + _ => None, + } + .and_then(|sap_response| { + expected_read_response!(sap_response, SaplingTree) + .map(|tree| tree.to_rpc_bytes()) + }); + + let orchard = match zebra_chain::parameters::NetworkUpgrade::Nu5 + .activation_height(&state.network.to_zebra_network()) + { + Some(activation_height) if height >= activation_height => Some( + state + .read_state_service + .ready() + .and_then(|service| { + service.call(ReadRequest::OrchardTree(hash_or_height)) + }) + .await + .map_err(|_e| { + BlockchainSourceError::Unrecoverable( + InvalidData(format!( + "could not fetch orchard treestate of block {id}" + )) + .to_string(), + ) + })?, + ), + _ => None, + } + .and_then(|orch_response| { + expected_read_response!(orch_response, OrchardTree) + .map(|tree| tree.to_rpc_bytes()) + }); + + Ok((sapling, orchard)) + } + ValidatorConnector::Fetch(fetch) => { + let treestate = fetch + .get_treestate(hash_or_height.to_string()) + .await + .map_err(|_e| { + BlockchainSourceError::Unrecoverable( + InvalidData(format!("could not fetch treestate of block {id}")) + .to_string(), + ) + })?; + + let sapling = treestate.sapling.commitments().final_state(); + + let orchard = treestate.orchard.commitments().final_state(); + + Ok((sapling.clone(), orchard.clone())) + } + } + } + + async fn get_mempool_txids( + &self, + ) -> BlockchainSourceResult>> { + let 
mempool_fetcher = match self { + ValidatorConnector::State(state) => &state.mempool_fetcher, + ValidatorConnector::Fetch(fetch) => fetch, + }; + + let txid_strings = mempool_fetcher + .get_raw_mempool() + .await + .map_err(|e| { + BlockchainSourceError::Unrecoverable(format!("could not fetch mempool data: {e}")) + })? + .transactions; + + let txids: Vec = txid_strings + .into_iter() + .map(|txid_str| { + zebra_chain::transaction::Hash::from_str(&txid_str).map_err(|e| { + BlockchainSourceError::Unrecoverable(format!( + "invalid transaction id '{txid_str}': {e}" + )) + }) + }) + .collect::>()?; + + Ok(Some(txids)) + } + + // Returns the transaction, and the height of the block that transaction is in if on the best chain + async fn get_transaction( + &self, + txid: TransactionHash, + ) -> BlockchainSourceResult< + Option<( + Arc, + GetTransactionLocation, + )>, + > { + match self { + ValidatorConnector::State(State { + read_state_service, + mempool_fetcher, + network: _, + }) => { + // Check state for transaction + let mut read_state_service = read_state_service.clone(); + let mempool_fetcher = mempool_fetcher.clone(); + + let zebra_txid: zebra_chain::transaction::Hash = + zebra_chain::transaction::Hash::from(txid.0); + + let response = read_state_service + .ready() + .and_then(|svc| { + svc.call(zebra_state::ReadRequest::AnyChainTransaction(zebra_txid)) + }) + .await + .map_err(|e| { + BlockchainSourceError::Unrecoverable(format!("state read failed: {e}")) + })?; + + if let zebra_state::ReadResponse::AnyChainTransaction(opt) = response { + if let Some(any_chain_tx) = opt { + match any_chain_tx { + zebra_state::AnyTx::Mined(mined_tx) => { + return Ok(Some(( + (mined_tx).tx.clone(), + GetTransactionLocation::BestChain(mined_tx.height), + ))) + } + zebra_state::AnyTx::Side((transaction, _block_hash)) => { + return Ok(Some(( + transaction, + GetTransactionLocation::NonbestChain, + ))) + } + } + } + } else { + unreachable!("unmatched response to a `Transaction` read 
request"); + } + + // Else check mempool for transaction. + let mempool_txids = self.get_mempool_txids().await?.ok_or_else(|| { + BlockchainSourceError::Unrecoverable( + "could not fetch mempool transaction ids: none returned".to_string(), + ) + })?; + if mempool_txids.contains(&zebra_txid) { + let serialized_transaction = if let GetTransactionResponse::Raw( + serialized_transaction, + ) = mempool_fetcher + .get_raw_transaction(zebra_txid.to_string(), Some(0)) + .await + .map_err(|e| { + BlockchainSourceError::Unrecoverable(format!( + "could not fetch transaction data: {e}" + )) + })? { + serialized_transaction + } else { + return Err(BlockchainSourceError::Unrecoverable( + "could not fetch transaction data: non-raw response".to_string(), + )); + }; + let transaction: zebra_chain::transaction::Transaction = + zebra_chain::transaction::Transaction::zcash_deserialize( + std::io::Cursor::new(serialized_transaction.as_ref()), + ) + .map_err(|e| { + BlockchainSourceError::Unrecoverable(format!( + "could not deserialize transaction data: {e}" + )) + })?; + Ok(Some((transaction.into(), GetTransactionLocation::Mempool))) + } else { + Ok(None) + } + } + ValidatorConnector::Fetch(fetch) => { + let transaction_object = if let GetTransactionResponse::Object(transaction_object) = + fetch + .get_raw_transaction(txid.to_string(), Some(1)) + .await + .map_err(|e| { + BlockchainSourceError::Unrecoverable(format!( + "could not fetch transaction data: {e}" + )) + })? 
{ + transaction_object + } else { + return Err(BlockchainSourceError::Unrecoverable( + "could not fetch transaction data: non-obj response".to_string(), + )); + }; + let transaction: zebra_chain::transaction::Transaction = + zebra_chain::transaction::Transaction::zcash_deserialize(std::io::Cursor::new( + transaction_object.hex().as_ref(), + )) + .map_err(|e| { + BlockchainSourceError::Unrecoverable(format!( + "could not deserialize transaction data: {e}" + )) + })?; + let location = match transaction_object.height() { + Some(-1) => GetTransactionLocation::NonbestChain, + None => GetTransactionLocation::Mempool, + Some(n) => { + GetTransactionLocation::BestChain(n.try_into_height().map_err(|_e| { + BlockchainSourceError::Unrecoverable(format!( + "invalid height value {n}" + )) + })?) + } + }; + Ok(Some((transaction.into(), location))) + } + } + } + + async fn get_best_block_hash( + &self, + ) -> BlockchainSourceResult> { + match self { + ValidatorConnector::State(State { + read_state_service, + mempool_fetcher, + network: _, + }) => { + match read_state_service.best_tip() { + Some((_height, hash)) => Ok(Some(hash)), + None => { + // try RPC if state read fails: + Ok(Some( + mempool_fetcher + .get_best_blockhash() + .await + .map_err(|e| { + BlockchainSourceError::Unrecoverable(format!( + "could not fetch best block hash from validator: {e}" + )) + })? + .0, + )) + } + } + } + ValidatorConnector::Fetch(fetch) => Ok(Some( + fetch + .get_best_blockhash() + .await + .map_err(|e| { + BlockchainSourceError::Unrecoverable(format!( + "could not fetch best block hash from validator: {e}" + )) + })? + .0, + )), + } + } + + /// Returns the height of the block at the tip of the best chain. 
+ async fn get_best_block_height( + &self, + ) -> BlockchainSourceResult> { + match self { + ValidatorConnector::State(State { + read_state_service, + mempool_fetcher, + network: _, + }) => { + match read_state_service.best_tip() { + Some((height, _hash)) => Ok(Some(height)), + None => { + // try RPC if state read fails: + Ok(Some( + mempool_fetcher + .get_block_count() + .await + .map_err(|e| { + BlockchainSourceError::Unrecoverable(format!( + "could not fetch best block hash from validator: {e}" + )) + })? + .into(), + )) + } + } + } + ValidatorConnector::Fetch(fetch) => Ok(Some( + fetch + .get_block_count() + .await + .map_err(|e| { + BlockchainSourceError::Unrecoverable(format!( + "could not fetch best block hash from validator: {e}" + )) + })? + .into(), + )), + } + } + + async fn nonfinalized_listener( + &self, + ) -> Result< + Option< + tokio::sync::mpsc::Receiver<(zebra_chain::block::Hash, Arc)>, + >, + Box, + > { + match self { + ValidatorConnector::State(State { + read_state_service, + mempool_fetcher: _, + network: _, + }) => { + match read_state_service + .clone() + .call(zebra_state::ReadRequest::NonFinalizedBlocksListener) + .await + { + Ok(ReadResponse::NonFinalizedBlocksListener(listener)) => { + // NOTE: This is not Option::unwrap, but a custom zebra-defined NonFinalizedBlocksListener::unwrap. + Ok(Some(listener.unwrap())) + } + Ok(_) => unreachable!(), + Err(e) => Err(e), + } + } + ValidatorConnector::Fetch(_fetch) => Ok(None), + } + } +} + +/// The location of a transaction returned by +/// [BlockchainSource::get_transaction] +#[derive(Debug)] +pub enum GetTransactionLocation { + // get_transaction can get the height of the block + // containing the transaction if it's on the best + // chain, but cannot reliably if it isn't. 
+ // + /// The transaction is in the best chain, + /// the block height is returned + BestChain(zebra_chain::block::Height), + /// The transaction is on a non-best chain + NonbestChain, + /// The transaction is in the mempool + Mempool, +} + +#[cfg(test)] +pub(crate) mod test { + use super::*; + use async_trait::async_trait; + use std::sync::{ + atomic::{AtomicU32, Ordering}, + Arc, + }; + use zebra_chain::{block::Block, orchard::tree as orchard, sapling::tree as sapling}; + use zebra_state::HashOrHeight; + + /// A test-only mock implementation of BlockchainReader using ordered lists by height. + #[derive(Clone)] + #[allow(clippy::type_complexity)] + pub(crate) struct MockchainSource { + blocks: Vec>, + roots: Vec<(Option<(sapling::Root, u64)>, Option<(orchard::Root, u64)>)>, + treestates: Vec<(Vec, Vec)>, + hashes: Vec, + active_chain_height: Arc, + } + + impl MockchainSource { + /// Creates a new MockchainSource. + /// All inputs must be the same length, and ordered by ascending height starting from 0. + #[allow(clippy::type_complexity)] + pub(crate) fn new( + blocks: Vec>, + roots: Vec<(Option<(sapling::Root, u64)>, Option<(orchard::Root, u64)>)>, + treestates: Vec<(Vec, Vec)>, + hashes: Vec, + ) -> Self { + assert!( + blocks.len() == roots.len() + && roots.len() == hashes.len() + && hashes.len() == treestates.len(), + "All input vectors must be the same length" + ); + + // len() returns one-indexed length, height is zero-indexed. + let tip_height = blocks.len().saturating_sub(1) as u32; + Self { + blocks, + roots, + treestates, + hashes, + active_chain_height: Arc::new(AtomicU32::new(tip_height)), + } + } + + /// Creates a new MockchainSource, *with* an active chain height. + /// + /// Block will only be served up to the active chain height, with mempool data coming from + /// the *next block in the chain. + /// + /// Blocks must be "mined" to extend the active chain height. 
+ /// + /// All inputs must be the same length, and ordered by ascending height starting from 0. + #[allow(clippy::type_complexity)] + pub(crate) fn new_with_active_height( + blocks: Vec>, + roots: Vec<(Option<(sapling::Root, u64)>, Option<(orchard::Root, u64)>)>, + treestates: Vec<(Vec, Vec)>, + hashes: Vec, + active_chain_height: u32, + ) -> Self { + assert!( + blocks.len() == roots.len() + && roots.len() == hashes.len() + && hashes.len() == treestates.len(), + "All input vectors must be the same length" + ); + + // len() returns one-indexed length, height is zero-indexed. + let max_height = blocks.len().saturating_sub(1) as u32; + assert!( + active_chain_height <= max_height, + "active_chain_height must be in 0..=len-1" + ); + + Self { + blocks, + roots, + treestates, + hashes, + active_chain_height: Arc::new(AtomicU32::new(active_chain_height)), + } + } + + pub(crate) fn mine_blocks(&self, blocks: u32) { + // len() returns one-indexed length, height is zero-indexed. + let max_height = self.max_chain_height(); + let _ = self.active_chain_height.fetch_update( + Ordering::SeqCst, + Ordering::SeqCst, + |current| { + let target = current.saturating_add(blocks).min(max_height); + if target == current { + None + } else { + Some(target) + } + }, + ); + } + + pub(crate) fn max_chain_height(&self) -> u32 { + // len() returns one-indexed length, height is zero-indexed. 
+ self.blocks.len().saturating_sub(1) as u32 + } + + pub(crate) fn active_height(&self) -> u32 { + self.active_chain_height.load(Ordering::SeqCst) + } + + fn valid_height(&self, height: u32) -> Option { + let active_chain_height = self.active_height() as usize; + let valid_height = height as usize; + + if valid_height <= active_chain_height { + Some(valid_height) + } else { + None + } + } + + fn valid_hash(&self, hash: &zebra_chain::block::Hash) -> Option { + let active_chain_height = self.active_height() as usize; + let height_index = self.hashes.iter().position(|h| h.0 == hash.0); + + if height_index.is_some() && height_index.unwrap() <= active_chain_height { + height_index + } else { + None + } + } + } + + #[async_trait] + impl BlockchainSource for MockchainSource { + async fn get_block( + &self, + id: HashOrHeight, + ) -> BlockchainSourceResult>> { + match id { + HashOrHeight::Height(h) => { + let Some(height_index) = self.valid_height(h.0) else { + return Ok(None); + }; + Ok(Some(Arc::clone(&self.blocks[height_index]))) + } + HashOrHeight::Hash(hash) => { + let Some(hash_index) = self.valid_hash(&hash) else { + return Ok(None); + }; + + Ok(Some(Arc::clone(&self.blocks[hash_index]))) + } + } + } + + async fn get_commitment_tree_roots( + &self, + id: BlockHash, + ) -> BlockchainSourceResult<( + Option<(zebra_chain::sapling::tree::Root, u64)>, + Option<(zebra_chain::orchard::tree::Root, u64)>, + )> { + let active_chain_height = self.active_height() as usize; // serve up to active tip + + if let Some(height) = self.hashes.iter().position(|h| h == &id) { + if height <= active_chain_height { + Ok(self.roots[height]) + } else { + Ok((None, None)) + } + } else { + Ok((None, None)) + } + } + + /// Returns the sapling and orchard treestate by hash + async fn get_treestate( + &self, + id: BlockHash, + ) -> BlockchainSourceResult<(Option>, Option>)> { + let active_chain_height = self.active_height() as usize; // serve up to active tip + + if let Some(height) = 
self.hashes.iter().position(|h| h == &id) { + if height <= active_chain_height { + let (sapling_state, orchard_state) = &self.treestates[height]; + Ok((Some(sapling_state.clone()), Some(orchard_state.clone()))) + } else { + Ok((None, None)) + } + } else { + Ok((None, None)) + } + } + + async fn get_mempool_txids( + &self, + ) -> BlockchainSourceResult>> { + let mempool_height = self.active_height() as usize + 1; + + let txids = if mempool_height < self.blocks.len() { + self.blocks[mempool_height] + .transactions + .iter() + .filter(|tx| !tx.is_coinbase()) // <-- exclude coinbase + .map(|tx| tx.hash()) + .collect::>() + } else { + Vec::new() + }; + + Ok(Some(txids)) + } + + async fn get_transaction( + &self, + txid: TransactionHash, + ) -> BlockchainSourceResult< + Option<( + Arc, + GetTransactionLocation, + )>, + > { + let zebra_txid: zebra_chain::transaction::Hash = + zebra_chain::transaction::Hash::from(txid.0); + + let active_chain_height = self.active_height() as usize; + let mempool_height = active_chain_height + 1; + + for height in 0..=active_chain_height { + if height > self.max_chain_height() as usize { + break; + } + if let Some(found) = self.blocks[height] + .transactions + .iter() + .find(|transaction| transaction.hash() == zebra_txid) + { + return Ok(Some(( + Arc::clone(found), + GetTransactionLocation::BestChain(zebra_chain::block::Height( + height as u32, + )), + ))); + } + } + + if mempool_height < self.blocks.len() { + if let Some(found) = self.blocks[mempool_height] + .transactions + .iter() + .find(|transaction| transaction.hash() == zebra_txid) + { + return Ok(Some((Arc::clone(found), GetTransactionLocation::Mempool))); + } + } + + Ok(None) + } + + async fn get_best_block_hash( + &self, + ) -> BlockchainSourceResult> { + let active_chain_height = self.active_height() as usize; + + if self.blocks.is_empty() || active_chain_height > self.max_chain_height() as usize { + return Ok(None); + } + + Ok(Some(self.blocks[active_chain_height].hash())) + } 
+ + async fn get_best_block_height( + &self, + ) -> BlockchainSourceResult> { + let active_chain_height = self.active_height() as usize; + + if self.blocks.is_empty() || active_chain_height > self.max_chain_height() as usize { + return Ok(None); + } + + Ok(Some( + self.blocks[active_chain_height].coinbase_height().unwrap(), + )) + } + + async fn nonfinalized_listener( + &self, + ) -> Result< + Option< + tokio::sync::mpsc::Receiver<( + zebra_chain::block::Hash, + Arc, + )>, + >, + Box, + > { + Ok(None) + } + } +} diff --git a/zaino-state/src/chain_index/tests.rs b/zaino-state/src/chain_index/tests.rs new file mode 100644 index 000000000..49879273a --- /dev/null +++ b/zaino-state/src/chain_index/tests.rs @@ -0,0 +1,557 @@ +//! Zaino-State ChainIndex unit tests. + +pub(crate) mod finalised_state; +pub(crate) mod mempool; +mod proptest_blockgen; +pub(crate) mod vectors; + +pub(crate) fn init_tracing() { + tracing_subscriber::fmt() + .with_env_filter( + tracing_subscriber::EnvFilter::try_from_default_env() + .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")), + ) + .with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339()) + .with_target(true) + .try_init() + .unwrap(); +} + +mod mockchain_tests { + use std::path::PathBuf; + use tempfile::TempDir; + use tokio::time::{sleep, Duration}; + use tokio_stream::StreamExt as _; + use zaino_common::{network::ActivationHeights, DatabaseConfig, Network, StorageConfig}; + use zebra_chain::serialization::ZcashDeserializeInto; + + use crate::{ + chain_index::{ + source::test::MockchainSource, + tests::vectors::{ + build_active_mockchain_source, build_mockchain_source, load_test_vectors, + TestVectorBlockData, + }, + types::{BestChainLocation, TransactionHash}, + ChainIndex, NodeBackedChainIndex, NodeBackedChainIndexSubscriber, + }, + BlockCacheConfig, + }; + + async fn load_test_vectors_and_sync_chain_index( + active_mockchain_source: bool, + ) -> ( + Vec, + NodeBackedChainIndex, + NodeBackedChainIndexSubscriber, + 
MockchainSource, + ) { + super::init_tracing(); + + let blocks = load_test_vectors().unwrap().blocks; + + let source = if active_mockchain_source { + build_active_mockchain_source(150, blocks.clone()) + } else { + build_mockchain_source(blocks.clone()) + }; + + // TODO: the temp_dir is deleted when it goes out of scope + // at the end of this function. + // Somehow, this isn't breaking the database, but I'm confused + // as to how the database works when the directory containing + // it is deleted + let temp_dir: TempDir = tempfile::tempdir().unwrap(); + let db_path: PathBuf = temp_dir.path().to_path_buf(); + + let config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path, + ..Default::default() + }, + ..Default::default() + }, + db_version: 1, + network: Network::Regtest(ActivationHeights::default()), + }; + + let indexer = NodeBackedChainIndex::new(source.clone(), config) + .await + .unwrap(); + let index_reader = indexer.subscriber(); + + loop { + let check_height: u32 = match active_mockchain_source { + true => source.active_height() - 100, + false => 100, + }; + if index_reader.finalized_state.db_height().await.unwrap() + == Some(crate::Height(check_height)) + { + break; + } + tokio::time::sleep(Duration::from_secs(2)).await; + } + + (blocks, indexer, index_reader, source) + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_block_range() { + let (blocks, _indexer, index_reader, _mockchain) = + load_test_vectors_and_sync_chain_index(false).await; + let nonfinalized_snapshot = index_reader.snapshot_nonfinalized_state(); + + let start = crate::Height(0); + + let indexer_blocks = + ChainIndex::get_block_range(&index_reader, &nonfinalized_snapshot, start, None) + .unwrap() + .collect::>() + .await; + + for (i, block) in indexer_blocks.into_iter().enumerate() { + let parsed_block = block + .unwrap() + .zcash_deserialize_into::() + .unwrap(); + + let expected_block = &blocks[i].zebra_block; + 
assert_eq!(&parsed_block, expected_block); + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_raw_transaction() { + let (blocks, _indexer, index_reader, _mockchain) = + load_test_vectors_and_sync_chain_index(false).await; + let nonfinalized_snapshot = index_reader.snapshot_nonfinalized_state(); + for (expected_transaction, height) in blocks.into_iter().flat_map(|block| { + block + .zebra_block + .transactions + .into_iter() + .map(move |transaction| (transaction, block.height)) + }) { + let (transaction, branch_id) = index_reader + .get_raw_transaction( + &nonfinalized_snapshot, + &TransactionHash::from(expected_transaction.hash()), + ) + .await + .unwrap() + .unwrap(); + let zaino_transaction = transaction + .zcash_deserialize_into::() + .unwrap(); + assert_eq!(expected_transaction.as_ref(), &zaino_transaction); + assert_eq!( + branch_id, + if height == 0 { + None + } else if height == 1 { + zebra_chain::parameters::NetworkUpgrade::Canopy + .branch_id() + .map(u32::from) + } else { + zebra_chain::parameters::NetworkUpgrade::Nu6_1 + .branch_id() + .map(u32::from) + } + ); + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_transaction_status() { + let (blocks, _indexer, index_reader, _mockchain) = + load_test_vectors_and_sync_chain_index(false).await; + let nonfinalized_snapshot = index_reader.snapshot_nonfinalized_state(); + + for (expected_transaction, block_hash, block_height) in + blocks.into_iter().flat_map(|block| { + block + .zebra_block + .transactions + .iter() + .cloned() + .map(|transaction| { + ( + transaction, + block.zebra_block.hash(), + block.zebra_block.coinbase_height(), + ) + }) + .collect::>() + .into_iter() + }) + { + let expected_txid = expected_transaction.hash(); + + let (transaction_status_best_chain, transaction_status_nonbest_chain) = index_reader + .get_transaction_status( + &nonfinalized_snapshot, + &TransactionHash::from(expected_txid), + ) + .await + .unwrap(); + 
assert!(transaction_status_nonbest_chain.is_empty()); + assert_eq!( + transaction_status_best_chain.unwrap(), + BestChainLocation::Block( + crate::BlockHash(block_hash.0), + crate::Height(block_height.unwrap().0) + ) + ); + } + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 8)] + async fn sync_blocks_after_startup() { + let (_blocks, _indexer, index_reader, mockchain) = + load_test_vectors_and_sync_chain_index(true).await; + + let indexer_tip = dbg!(&index_reader.snapshot_nonfinalized_state().best_tip) + .height + .0; + let active_mockchain_tip = dbg!(mockchain.active_height()); + assert_eq!(active_mockchain_tip, indexer_tip); + + for _ in 0..20 { + mockchain.mine_blocks(1); + sleep(Duration::from_millis(600)).await; + } + sleep(Duration::from_millis(2000)).await; + + let indexer_tip = dbg!(&index_reader.snapshot_nonfinalized_state().best_tip) + .height + .0; + let active_mockchain_tip = dbg!(mockchain.active_height()); + assert_eq!(active_mockchain_tip, indexer_tip); + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_mempool_transaction() { + let (blocks, _indexer, index_reader, mockchain) = + load_test_vectors_and_sync_chain_index(true).await; + let block_data: Vec = blocks + .iter() + .map(|TestVectorBlockData { zebra_block, .. 
}| zebra_block.clone()) + .collect(); + + sleep(Duration::from_millis(2000)).await; + + let mempool_height = (dbg!(mockchain.active_height()) as usize) + 1; + + let mempool_transactions: Vec<_> = block_data + .get(mempool_height) + .map(|b| { + b.transactions + .iter() + .filter(|tx| !tx.is_coinbase()) + .cloned() + .collect::>() + }) + .unwrap_or_default(); + + let nonfinalized_snapshot = index_reader.snapshot_nonfinalized_state(); + for expected_transaction in mempool_transactions.into_iter() { + let (transaction, branch_id) = index_reader + .get_raw_transaction( + &nonfinalized_snapshot, + &TransactionHash::from(expected_transaction.hash()), + ) + .await + .unwrap() + .unwrap(); + let zaino_transaction = transaction + .zcash_deserialize_into::() + .unwrap(); + assert_eq!(expected_transaction.as_ref(), &zaino_transaction); + assert_eq!( + branch_id, + zebra_chain::parameters::NetworkUpgrade::Nu6_1 + .branch_id() + .map(u32::from) + ); + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_mempool_transaction_status() { + let (blocks, _indexer, index_reader, mockchain) = + load_test_vectors_and_sync_chain_index(true).await; + let block_data: Vec = blocks + .iter() + .map(|TestVectorBlockData { zebra_block, .. 
}| zebra_block.clone()) + .collect(); + + sleep(Duration::from_millis(2000)).await; + + let mempool_height = (dbg!(mockchain.active_height()) as usize) + 1; + + let mempool_transactions: Vec<_> = block_data + .get(mempool_height) + .map(|b| { + b.transactions + .iter() + .filter(|tx| !tx.is_coinbase()) + .cloned() + .collect::>() + }) + .unwrap_or_default(); + + let nonfinalized_snapshot = index_reader.snapshot_nonfinalized_state(); + for expected_transaction in mempool_transactions.into_iter() { + let expected_txid = expected_transaction.hash(); + + let (transaction_status_best_chain, transaction_status_nonbest_chain) = index_reader + .get_transaction_status( + &nonfinalized_snapshot, + &TransactionHash::from(expected_txid), + ) + .await + .unwrap(); + assert_eq!( + transaction_status_best_chain, + Some(BestChainLocation::Mempool( + crate::chain_index::types::Height(mempool_height as u32) + )) + ); + assert!(transaction_status_nonbest_chain.is_empty()); + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_mempool_transactions() { + let (blocks, _indexer, index_reader, mockchain) = + load_test_vectors_and_sync_chain_index(true).await; + let block_data: Vec = blocks + .iter() + .map(|TestVectorBlockData { zebra_block, .. 
}| zebra_block.clone()) + .collect(); + + sleep(Duration::from_millis(2000)).await; + + let mempool_height = (dbg!(mockchain.active_height()) as usize) + 1; + let mut mempool_transactions: Vec<_> = block_data + .get(mempool_height) + .map(|b| { + b.transactions + .iter() + .filter(|tx| !tx.is_coinbase()) + .cloned() + .collect::>() + }) + .unwrap_or_default(); + mempool_transactions.sort_by_key(|a| a.hash()); + + let mut found_mempool_transactions: Vec = + index_reader + .get_mempool_transactions(Vec::new()) + .await + .unwrap() + .iter() + .map(|txn_bytes| { + txn_bytes + .zcash_deserialize_into::() + .unwrap() + }) + .collect(); + found_mempool_transactions.sort_by_key(|a| a.hash()); + assert_eq!( + mempool_transactions + .iter() + .map(|tx| tx.as_ref().clone()) + .collect::>(), + found_mempool_transactions, + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_filtered_mempool_transactions() { + let (blocks, _indexer, index_reader, mockchain) = + load_test_vectors_and_sync_chain_index(true).await; + let block_data: Vec = blocks + .iter() + .map(|TestVectorBlockData { zebra_block, .. 
}| zebra_block.clone()) + .collect(); + + sleep(Duration::from_millis(2000)).await; + + let mempool_height = (dbg!(mockchain.active_height()) as usize) + 1; + let mut mempool_transactions: Vec<_> = block_data + .get(mempool_height) + .map(|b| { + b.transactions + .iter() + .filter(|tx| !tx.is_coinbase()) + .cloned() + .collect::>() + }) + .unwrap_or_default(); + let exclude_tx = mempool_transactions.pop().unwrap(); + let exclude_txid = exclude_tx.hash().to_string(); + mempool_transactions.sort_by_key(|a| a.hash()); + + let mut found_mempool_transactions: Vec = + index_reader + .get_mempool_transactions(vec![exclude_txid]) + .await + .unwrap() + .iter() + .map(|txn_bytes| { + txn_bytes + .zcash_deserialize_into::() + .unwrap() + }) + .collect(); + found_mempool_transactions.sort_by_key(|a| a.hash()); + assert_eq!(mempool_transactions.len(), found_mempool_transactions.len()); + assert_eq!( + mempool_transactions + .iter() + .map(|tx| tx.as_ref().clone()) + .collect::>(), + found_mempool_transactions, + ); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 8)] + async fn get_mempool_stream() { + let (blocks, _indexer, index_reader, mockchain) = + load_test_vectors_and_sync_chain_index(true).await; + + let block_data: Vec = blocks + .iter() + .map(|TestVectorBlockData { zebra_block, .. 
}| zebra_block.clone()) + .collect(); + + sleep(Duration::from_millis(2000)).await; + + let next_mempool_height_index = (dbg!(mockchain.active_height()) as usize) + 1; + let mut mempool_transactions: Vec<_> = block_data + .get(next_mempool_height_index) + .map(|b| { + b.transactions + .iter() + .filter(|tx| !tx.is_coinbase()) + .cloned() + .collect::>() + }) + .unwrap_or_default(); + mempool_transactions.sort_by_key(|transaction| transaction.hash()); + + let mempool_stream_task = tokio::spawn(async move { + let nonfinalized_snapshot = index_reader.snapshot_nonfinalized_state(); + let mut mempool_stream = index_reader + .get_mempool_stream(Some(&nonfinalized_snapshot)) + .expect("failed to create mempool stream"); + + let mut indexer_mempool_transactions: Vec = + Vec::new(); + + while let Some(tx_bytes_res) = mempool_stream.next().await { + let tx_bytes = tx_bytes_res.expect("stream error"); + let tx: zebra_chain::transaction::Transaction = + tx_bytes.zcash_deserialize_into().expect("deserialize tx"); + indexer_mempool_transactions.push(tx); + } + + indexer_mempool_transactions.sort_by_key(|tx| tx.hash()); + indexer_mempool_transactions + }); + + sleep(Duration::from_millis(500)).await; + + mockchain.mine_blocks(1); + + let indexer_mempool_stream_transactions = + mempool_stream_task.await.expect("collector task failed"); + + assert_eq!( + mempool_transactions + .iter() + .map(|tx| tx.as_ref().clone()) + .collect::>(), + indexer_mempool_stream_transactions, + ); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 8)] + async fn get_mempool_stream_for_stale_snapshot() { + let (_blocks, _indexer, index_reader, mockchain) = + load_test_vectors_and_sync_chain_index(true).await; + sleep(Duration::from_millis(2000)).await; + + let stale_nonfinalized_snapshot = index_reader.snapshot_nonfinalized_state(); + + mockchain.mine_blocks(1); + sleep(Duration::from_millis(2000)).await; + + let mempool_stream = 
index_reader.get_mempool_stream(Some(&stale_nonfinalized_snapshot)); + + assert!(mempool_stream.is_none()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_block_height() { + let (blocks, _indexer, index_reader, _mockchain) = + load_test_vectors_and_sync_chain_index(false).await; + let nonfinalized_snapshot = index_reader.snapshot_nonfinalized_state(); + + // Positive cases: every known best-chain block returns its height + for TestVectorBlockData { + height, + zebra_block, + .. + } in blocks.iter() + { + let got = index_reader + .get_block_height( + &nonfinalized_snapshot, + crate::BlockHash(zebra_block.hash().0), + ) + .await + .unwrap(); + assert_eq!(got, Some(crate::Height(*height))); + } + + // Negative case: an unknown hash returns None + let unknown = crate::BlockHash([0u8; 32]); + let got = index_reader + .get_block_height(&nonfinalized_snapshot, unknown) + .await + .unwrap(); + assert_eq!(got, None); + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_treestate() { + let (blocks, _indexer, index_reader, _mockchain) = + load_test_vectors_and_sync_chain_index(false).await; + + for TestVectorBlockData { + zebra_block, + sapling_tree_state, + orchard_tree_state, + .. + } in blocks.into_iter() + { + let (sapling_bytes_opt, orchard_bytes_opt) = index_reader + .get_treestate(&crate::BlockHash(zebra_block.hash().0)) + .await + .unwrap(); + + assert_eq!( + sapling_bytes_opt.as_deref(), + Some(sapling_tree_state.as_slice()) + ); + assert_eq!( + orchard_bytes_opt.as_deref(), + Some(orchard_tree_state.as_slice()) + ); + } + } +} diff --git a/zaino-state/src/chain_index/tests/finalised_state.rs b/zaino-state/src/chain_index/tests/finalised_state.rs new file mode 100644 index 000000000..d1ef4bf35 --- /dev/null +++ b/zaino-state/src/chain_index/tests/finalised_state.rs @@ -0,0 +1,4 @@ +//! Zaino-State ChainIndex Finalised State (ZainoDB) unit tests. 
+mod migrations; +mod v0; +pub(crate) mod v1; diff --git a/zaino-state/src/chain_index/tests/finalised_state/migrations.rs b/zaino-state/src/chain_index/tests/finalised_state/migrations.rs new file mode 100644 index 000000000..376c96845 --- /dev/null +++ b/zaino-state/src/chain_index/tests/finalised_state/migrations.rs @@ -0,0 +1,209 @@ +//! Holds database migration tests. + +use std::path::PathBuf; +use tempfile::TempDir; +use zaino_common::network::ActivationHeights; +use zaino_common::{DatabaseConfig, Network, StorageConfig}; + +use crate::chain_index::finalised_state::capability::DbCore as _; +use crate::chain_index::finalised_state::db::DbBackend; +use crate::chain_index::finalised_state::ZainoDB; +use crate::chain_index::tests::init_tracing; +use crate::chain_index::tests::vectors::{ + build_mockchain_source, load_test_vectors, TestVectorData, +}; +use crate::BlockCacheConfig; + +#[tokio::test(flavor = "multi_thread")] +async fn v0_to_v1_full() { + init_tracing(); + + let TestVectorData { blocks, .. } = load_test_vectors().unwrap(); + + let temp_dir: TempDir = tempfile::tempdir().unwrap(); + let db_path: PathBuf = temp_dir.path().to_path_buf(); + + let v0_config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path.clone(), + ..Default::default() + }, + ..Default::default() + }, + db_version: 0, + network: Network::Regtest(ActivationHeights::default()), + }; + let v1_config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path, + ..Default::default() + }, + ..Default::default() + }, + db_version: 1, + network: Network::Regtest(ActivationHeights::default()), + }; + + let source = build_mockchain_source(blocks.clone()); + + // Build v0 database. 
+ let zaino_db = ZainoDB::spawn(v0_config, source.clone()).await.unwrap(); + crate::chain_index::tests::vectors::sync_db_with_blockdata( + zaino_db.router(), + blocks.clone(), + None, + ) + .await; + + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + dbg!(zaino_db.db_height().await.unwrap()); + dbg!(zaino_db.shutdown().await.unwrap()); + + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + + // Open v1 database and check migration. + let zaino_db_2 = ZainoDB::spawn(v1_config, source).await.unwrap(); + zaino_db_2.wait_until_ready().await; + dbg!(zaino_db_2.status()); + let db_height = dbg!(zaino_db_2.db_height().await.unwrap()).unwrap(); + assert_eq!(db_height.0, 200); + dbg!(zaino_db_2.shutdown().await.unwrap()); +} + +#[tokio::test(flavor = "multi_thread")] +async fn v0_to_v1_interrupted() { + init_tracing(); + + let blocks = load_test_vectors().unwrap().blocks; + + let temp_dir: TempDir = tempfile::tempdir().unwrap(); + let db_path: PathBuf = temp_dir.path().to_path_buf(); + + let v0_config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path.clone(), + ..Default::default() + }, + ..Default::default() + }, + db_version: 0, + network: Network::Regtest(ActivationHeights::default()), + }; + let v1_config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path, + ..Default::default() + }, + ..Default::default() + }, + db_version: 1, + network: Network::Regtest(ActivationHeights::default()), + }; + + let source = build_mockchain_source(blocks.clone()); + + // Build v0 database. 
+ let zaino_db = ZainoDB::spawn(v0_config, source.clone()).await.unwrap(); + crate::chain_index::tests::vectors::sync_db_with_blockdata( + zaino_db.router(), + blocks.clone(), + None, + ) + .await; + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + dbg!(zaino_db.db_height().await.unwrap()); + dbg!(zaino_db.shutdown().await.unwrap()); + + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + + // Partial build v1 database. + let zaino_db = DbBackend::spawn_v1(&v1_config).await.unwrap(); + crate::chain_index::tests::vectors::sync_db_with_blockdata(&zaino_db, blocks.clone(), Some(50)) + .await; + + dbg!(zaino_db.shutdown().await.unwrap()); + + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + + // Open v1 database and check migration. + let zaino_db_2 = ZainoDB::spawn(v1_config, source).await.unwrap(); + zaino_db_2.wait_until_ready().await; + dbg!(zaino_db_2.status()); + let db_height = dbg!(zaino_db_2.db_height().await.unwrap()).unwrap(); + assert_eq!(db_height.0, 200); + dbg!(zaino_db_2.shutdown().await.unwrap()); +} + +#[tokio::test(flavor = "multi_thread")] +async fn v0_to_v1_partial() { + init_tracing(); + + let blocks = load_test_vectors().unwrap().blocks; + + let temp_dir: TempDir = tempfile::tempdir().unwrap(); + let db_path: PathBuf = temp_dir.path().to_path_buf(); + + let v0_config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path.clone(), + ..Default::default() + }, + ..Default::default() + }, + db_version: 0, + network: Network::Regtest(ActivationHeights::default()), + }; + let v1_config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path, + ..Default::default() + }, + ..Default::default() + }, + db_version: 1, + network: Network::Regtest(ActivationHeights::default()), + }; + + let source = build_mockchain_source(blocks.clone()); + + // Build v0 database. 
+ let zaino_db = ZainoDB::spawn(v0_config, source.clone()).await.unwrap(); + crate::chain_index::tests::vectors::sync_db_with_blockdata( + zaino_db.router(), + blocks.clone(), + None, + ) + .await; + + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + dbg!(zaino_db.db_height().await.unwrap()); + dbg!(zaino_db.shutdown().await.unwrap()); + + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + + // Partial build v1 database. + let zaino_db = DbBackend::spawn_v1(&v1_config).await.unwrap(); + crate::chain_index::tests::vectors::sync_db_with_blockdata(&zaino_db, blocks.clone(), None) + .await; + + dbg!(zaino_db.shutdown().await.unwrap()); + + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + + // Open v1 database and check migration. + let zaino_db_2 = ZainoDB::spawn(v1_config, source).await.unwrap(); + zaino_db_2.wait_until_ready().await; + dbg!(zaino_db_2.status()); + let db_height = dbg!(zaino_db_2.db_height().await.unwrap()).unwrap(); + assert_eq!(db_height.0, 200); + dbg!(zaino_db_2.shutdown().await.unwrap()); +} diff --git a/zaino-state/src/chain_index/tests/finalised_state/v0.rs b/zaino-state/src/chain_index/tests/finalised_state/v0.rs new file mode 100644 index 000000000..b2ffef18f --- /dev/null +++ b/zaino-state/src/chain_index/tests/finalised_state/v0.rs @@ -0,0 +1,339 @@ +//! Holds tests for the V0 database. 
+ +use std::path::PathBuf; +use tempfile::TempDir; + +use zaino_common::network::ActivationHeights; +use zaino_common::{DatabaseConfig, Network, StorageConfig}; +use zaino_proto::proto::utils::{compact_block_with_pool_types, PoolTypeFilter}; + +use crate::chain_index::finalised_state::reader::DbReader; +use crate::chain_index::finalised_state::ZainoDB; +use crate::chain_index::source::test::MockchainSource; +use crate::chain_index::tests::init_tracing; +use crate::chain_index::tests::vectors::{ + build_mockchain_source, load_test_vectors, TestVectorBlockData, TestVectorData, +}; +use crate::error::FinalisedStateError; +use crate::{BlockCacheConfig, BlockMetadata, BlockWithMetadata, ChainWork, Height, IndexedBlock}; + +pub(crate) async fn spawn_v0_zaino_db( + source: MockchainSource, +) -> Result<(TempDir, ZainoDB), FinalisedStateError> { + let temp_dir: TempDir = tempfile::tempdir().unwrap(); + let db_path: PathBuf = temp_dir.path().to_path_buf(); + + let config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path, + ..Default::default() + }, + ..Default::default() + }, + db_version: 0, + network: Network::Regtest(ActivationHeights::default()), + }; + + let zaino_db = ZainoDB::spawn(config, source).await.unwrap(); + + Ok((temp_dir, zaino_db)) +} + +pub(crate) async fn load_vectors_and_spawn_and_sync_v0_zaino_db( +) -> (TestVectorData, TempDir, ZainoDB) { + let test_data = load_test_vectors().unwrap(); + + let source = build_mockchain_source(test_data.blocks.clone()); + + let (db_dir, zaino_db) = spawn_v0_zaino_db(source).await.unwrap(); + + crate::chain_index::tests::vectors::sync_db_with_blockdata( + zaino_db.router(), + test_data.blocks.clone(), + None, + ) + .await; + + (test_data, db_dir, zaino_db) +} + +pub(crate) async fn load_vectors_v0db_and_reader( +) -> (TestVectorData, TempDir, std::sync::Arc, DbReader) { + let (test_data, db_dir, zaino_db) = load_vectors_and_spawn_and_sync_v0_zaino_db().await; + + let zaino_db = 
std::sync::Arc::new(zaino_db); + + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + dbg!(zaino_db.db_height().await.unwrap()).unwrap(); + + let db_reader = zaino_db.to_reader(); + dbg!(db_reader.db_height().await.unwrap()).unwrap(); + + (test_data, db_dir, zaino_db, db_reader) +} + +// *** ZainoDB Tests *** + +#[tokio::test(flavor = "multi_thread")] +async fn sync_to_height() { + init_tracing(); + + let blocks = load_test_vectors().unwrap().blocks; + + let source = build_mockchain_source(blocks.clone()); + + let (_db_dir, zaino_db) = spawn_v0_zaino_db(source.clone()).await.unwrap(); + + zaino_db.sync_to_height(Height(200), &source).await.unwrap(); + + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + let built_db_height = dbg!(zaino_db.db_height().await.unwrap()).unwrap(); + + assert_eq!(built_db_height, Height(200)); +} + +#[tokio::test(flavor = "multi_thread")] +async fn add_blocks_to_db_and_verify() { + init_tracing(); + + let (_test_vector, _db_dir, zaino_db) = load_vectors_and_spawn_and_sync_v0_zaino_db().await; + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + dbg!(zaino_db.db_height().await.unwrap()); +} + +#[tokio::test(flavor = "multi_thread")] +async fn delete_blocks_from_db() { + init_tracing(); + + let (_test_vector, _db_dir, zaino_db) = load_vectors_and_spawn_and_sync_v0_zaino_db().await; + + for h in (1..=200).rev() { + // dbg!("Deleting block at height {}", h); + zaino_db + .delete_block_at_height(crate::Height(h)) + .await + .unwrap(); + } + + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + dbg!(zaino_db.db_height().await.unwrap()); +} + +#[tokio::test(flavor = "multi_thread")] +async fn save_db_to_file_and_reload() { + init_tracing(); + + let blocks = load_test_vectors().unwrap().blocks; + + let temp_dir: TempDir = tempfile::tempdir().unwrap(); + let db_path: PathBuf = temp_dir.path().to_path_buf(); + let config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { 
+ path: db_path, + ..Default::default() + }, + ..Default::default() + }, + db_version: 0, + network: Network::Regtest(ActivationHeights::default()), + }; + + let source = build_mockchain_source(blocks.clone()); + let source_clone = source.clone(); + + let blocks_clone = blocks.clone(); + let config_clone = config.clone(); + std::thread::spawn(move || { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async move { + let zaino_db = ZainoDB::spawn(config_clone, source).await.unwrap(); + + crate::chain_index::tests::vectors::sync_db_with_blockdata( + zaino_db.router(), + blocks_clone, + None, + ) + .await; + + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + dbg!(zaino_db.db_height().await.unwrap()); + + dbg!(zaino_db.shutdown().await.unwrap()); + }); + }) + .join() + .unwrap(); + + std::thread::sleep(std::time::Duration::from_millis(1000)); + + std::thread::spawn(move || { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async move { + dbg!(config + .storage + .database + .path + .read_dir() + .unwrap() + .collect::>()); + let zaino_db_2 = ZainoDB::spawn(config, source_clone).await.unwrap(); + + zaino_db_2.wait_until_ready().await; + dbg!(zaino_db_2.status()); + let db_height = dbg!(zaino_db_2.db_height().await.unwrap()).unwrap(); + + assert_eq!(db_height.0, 200); + + dbg!(zaino_db_2.shutdown().await.unwrap()); + }); + }) + .join() + .unwrap(); +} + +#[tokio::test(flavor = "multi_thread")] +async fn create_db_reader() { + init_tracing(); + + let (TestVectorData { blocks, .. 
}, _db_dir, zaino_db, db_reader) = + load_vectors_v0db_and_reader().await; + + let test_vector_block = blocks.last().unwrap(); + let db_height = dbg!(zaino_db.db_height().await.unwrap()).unwrap(); + let db_reader_height = dbg!(db_reader.db_height().await.unwrap()).unwrap(); + + assert_eq!(test_vector_block.height, db_height.0); + assert_eq!(db_height, db_reader_height); +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_compact_blocks() { + init_tracing(); + + let (TestVectorData { blocks, .. }, _db_dir, _zaino_db, db_reader) = + load_vectors_v0db_and_reader().await; + + let mut parent_chain_work = ChainWork::from_u256(0.into()); + + for TestVectorBlockData { + height, + zebra_block, + sapling_root, + sapling_tree_size, + orchard_root, + orchard_tree_size, + .. + } in blocks.iter() + { + let metadata = BlockMetadata::new( + *sapling_root, + *sapling_tree_size as u32, + *orchard_root, + *orchard_tree_size as u32, + parent_chain_work, + zebra_chain::parameters::Network::new_regtest( + zebra_chain::parameters::testnet::ConfiguredActivationHeights { + before_overwinter: Some(1), + overwinter: Some(1), + sapling: Some(1), + blossom: Some(1), + heartwood: Some(1), + canopy: Some(1), + nu5: Some(1), + nu6: Some(1), + // see https://zips.z.cash/#nu6-1-candidate-zips for info on NU6.1 + nu6_1: None, + nu7: None, + } + .into(), + ), + ); + + let chain_block = + IndexedBlock::try_from(BlockWithMetadata::new(zebra_block, metadata)).unwrap(); + + let compact_block = chain_block.to_compact_block(); + + parent_chain_work = *chain_block.index().chainwork(); + + let reader_compact_block_default = db_reader + .get_compact_block(Height(*height), PoolTypeFilter::default()) + .await + .unwrap(); + let default_compact_block = compact_block_with_pool_types( + compact_block.clone(), + &PoolTypeFilter::default().to_pool_types_vector(), + ); + assert_eq!(default_compact_block, reader_compact_block_default); + + let reader_compact_block_all_data = db_reader + 
.get_compact_block(Height(*height), PoolTypeFilter::includes_all()) + .await + .unwrap(); + let all_data_compact_block = compact_block_with_pool_types( + compact_block, + &PoolTypeFilter::includes_all().to_pool_types_vector(), + ); + assert_eq!(all_data_compact_block, reader_compact_block_all_data); + + println!("CompactBlock at height {height} OK"); + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_compact_block_stream() { + use futures::StreamExt; + + init_tracing(); + + let (TestVectorData { blocks, .. }, _db_dir, _zaino_db, db_reader) = + load_vectors_v0db_and_reader().await; + + let start_height = Height(blocks.first().unwrap().height); + let end_height = Height(blocks.last().unwrap().height); + + for pool_type_filter in [PoolTypeFilter::default(), PoolTypeFilter::includes_all()] { + let compact_block_stream = db_reader + .get_compact_block_stream(start_height, end_height, pool_type_filter.clone()) + .await + .unwrap(); + + futures::pin_mut!(compact_block_stream); + + let mut expected_next_height_u32: u32 = start_height.0; + let mut streamed_block_count: usize = 0; + + while let Some(block_result) = compact_block_stream.next().await { + let streamed_compact_block = block_result.unwrap(); + + let streamed_height_u32: u32 = u32::try_from(streamed_compact_block.height).unwrap(); + + assert_eq!(streamed_height_u32, expected_next_height_u32); + + let singular_compact_block = db_reader + .get_compact_block(Height(streamed_height_u32), pool_type_filter.clone()) + .await + .unwrap(); + + assert_eq!(singular_compact_block, streamed_compact_block); + + expected_next_height_u32 = expected_next_height_u32.saturating_add(1); + streamed_block_count = streamed_block_count.saturating_add(1); + } + + let expected_block_count: usize = (end_height + .0 + .saturating_sub(start_height.0) + .saturating_add(1)) as usize; + + assert_eq!(streamed_block_count, expected_block_count); + assert_eq!(expected_next_height_u32, end_height.0.saturating_add(1)); + } +} diff --git 
a/zaino-state/src/chain_index/tests/finalised_state/v1.rs b/zaino-state/src/chain_index/tests/finalised_state/v1.rs new file mode 100644 index 000000000..889290126 --- /dev/null +++ b/zaino-state/src/chain_index/tests/finalised_state/v1.rs @@ -0,0 +1,1198 @@ +//! Holds tests for the V1 database. + +use std::path::PathBuf; +use tempfile::TempDir; + +use zaino_common::network::ActivationHeights; +use zaino_common::{DatabaseConfig, Network, StorageConfig}; +use zaino_proto::proto::utils::{compact_block_with_pool_types, PoolTypeFilter}; + +use crate::chain_index::finalised_state::capability::IndexedBlockExt; +use crate::chain_index::finalised_state::db::DbBackend; +use crate::chain_index::finalised_state::reader::DbReader; +use crate::chain_index::finalised_state::ZainoDB; +use crate::chain_index::source::test::MockchainSource; +use crate::chain_index::tests::init_tracing; +use crate::chain_index::tests::vectors::{ + build_mockchain_source, load_test_vectors, TestVectorBlockData, TestVectorData, +}; + +#[cfg(feature = "transparent_address_history_experimental")] +use crate::chain_index::types::TransactionHash; + +use crate::error::FinalisedStateError; +use crate::{BlockCacheConfig, BlockMetadata, BlockWithMetadata, ChainWork, Height, IndexedBlock}; + +#[cfg(feature = "transparent_address_history_experimental")] +use crate::{AddrScript, Outpoint}; + +pub(crate) async fn spawn_v1_zaino_db( + source: MockchainSource, +) -> Result<(TempDir, ZainoDB), FinalisedStateError> { + let temp_dir: TempDir = tempfile::tempdir().unwrap(); + let db_path: PathBuf = temp_dir.path().to_path_buf(); + + let config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path, + ..Default::default() + }, + ..Default::default() + }, + db_version: 1, + network: Network::Regtest(ActivationHeights::default()), + }; + + let zaino_db = ZainoDB::spawn(config, source).await.unwrap(); + + Ok((temp_dir, zaino_db)) +} + +pub(crate) async fn 
load_vectors_and_spawn_and_sync_v1_zaino_db( +) -> (TestVectorData, TempDir, ZainoDB) { + let test_vector_data = load_test_vectors().unwrap(); + let blocks = test_vector_data.blocks.clone(); + + dbg!(blocks.len()); + + let source = build_mockchain_source(blocks.clone()); + + let (db_dir, zaino_db) = spawn_v1_zaino_db(source).await.unwrap(); + + crate::chain_index::tests::vectors::sync_db_with_blockdata(zaino_db.router(), blocks, None) + .await; + + (test_vector_data, db_dir, zaino_db) +} + +pub(crate) async fn load_vectors_v1db_and_reader( +) -> (TestVectorData, TempDir, std::sync::Arc, DbReader) { + let (test_vector_data, db_dir, zaino_db) = load_vectors_and_spawn_and_sync_v1_zaino_db().await; + + let zaino_db = std::sync::Arc::new(zaino_db); + + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + dbg!(zaino_db.db_height().await.unwrap()).unwrap(); + + let db_reader = zaino_db.to_reader(); + dbg!(db_reader.db_height().await.unwrap()).unwrap(); + + (test_vector_data, db_dir, zaino_db, db_reader) +} + +// *** ZainoDB Tests *** + +#[tokio::test(flavor = "multi_thread")] +async fn sync_to_height() { + init_tracing(); + + let blocks = load_test_vectors().unwrap().blocks; + + let source = build_mockchain_source(blocks.clone()); + + let (_db_dir, zaino_db) = spawn_v1_zaino_db(source.clone()).await.unwrap(); + + zaino_db.sync_to_height(Height(200), &source).await.unwrap(); + + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + let built_db_height = dbg!(zaino_db.db_height().await.unwrap()).unwrap(); + + assert_eq!(built_db_height, Height(200)); +} + +#[tokio::test(flavor = "multi_thread")] +async fn add_blocks_to_db_and_verify() { + init_tracing(); + + let (_test_vector_data, _db_dir, zaino_db) = + load_vectors_and_spawn_and_sync_v1_zaino_db().await; + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + dbg!(zaino_db.db_height().await.unwrap()); +} + +#[tokio::test(flavor = "multi_thread")] +async fn delete_blocks_from_db() { + 
init_tracing(); + + let (_test_vector_data, _db_dir, zaino_db) = + load_vectors_and_spawn_and_sync_v1_zaino_db().await; + + for h in (1..=200).rev() { + // dbg!("Deleting block at height {}", h); + zaino_db + .delete_block_at_height(crate::Height(h)) + .await + .unwrap(); + } + + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + dbg!(zaino_db.db_height().await.unwrap()); +} + +#[tokio::test(flavor = "multi_thread")] +async fn save_db_to_file_and_reload() { + init_tracing(); + + let blocks = load_test_vectors().unwrap().blocks; + + let temp_dir: TempDir = tempfile::tempdir().unwrap(); + let db_path: PathBuf = temp_dir.path().to_path_buf(); + let config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path, + ..Default::default() + }, + ..Default::default() + }, + db_version: 1, + network: Network::Regtest(ActivationHeights::default()), + }; + + let source = build_mockchain_source(blocks.clone()); + let source_clone = source.clone(); + + let config_clone = config.clone(); + std::thread::spawn(move || { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async move { + let zaino_db = ZainoDB::spawn(config_clone, source).await.unwrap(); + + crate::chain_index::tests::vectors::sync_db_with_blockdata( + zaino_db.router(), + blocks.clone(), + None, + ) + .await; + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + dbg!(zaino_db.db_height().await.unwrap()); + + dbg!(zaino_db.shutdown().await.unwrap()); + }); + }) + .join() + .unwrap(); + + std::thread::sleep(std::time::Duration::from_millis(1000)); + + std::thread::spawn(move || { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async move { + dbg!(config + .storage + .database + .path + .read_dir() + .unwrap() + .collect::>()); + let zaino_db_2 = ZainoDB::spawn(config, source_clone).await.unwrap(); + + zaino_db_2.wait_until_ready().await; + dbg!(zaino_db_2.status()); + let db_height = 
dbg!(zaino_db_2.db_height().await.unwrap()).unwrap(); + + assert_eq!(db_height.0, 200); + + dbg!(zaino_db_2.shutdown().await.unwrap()); + }); + }) + .join() + .unwrap(); +} + +#[tokio::test(flavor = "multi_thread")] +async fn load_db_backend_from_file() { + init_tracing(); + + let db_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("src") + .join("chain_index") + .join("tests") + .join("vectors") + .join("v1_test_db"); + let config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path.clone(), + ..Default::default() + }, + ..Default::default() + }, + db_version: 1, + network: Network::Regtest(ActivationHeights::default()), + }; + let finalized_state_backend = DbBackend::spawn_v1(&config).await.unwrap(); + + let mut prev_hash = None; + for height in 0..=100 { + let block = finalized_state_backend + .get_chain_block(Height(height)) + .await + .unwrap() + .unwrap(); + if let Some(prev_hash) = prev_hash { + assert_eq!(prev_hash, block.index().parent_hash); + } + prev_hash = Some(block.index().hash); + assert_eq!(block.index.height, Height(height)); + } + assert!(finalized_state_backend + .get_chain_block(Height(101)) + .await + .unwrap() + .is_none()); + std::fs::remove_file(db_path.join("regtest").join("v1").join("lock.mdb")).unwrap() +} + +#[tokio::test(flavor = "multi_thread")] +async fn try_write_invalid_block() { + init_tracing(); + + let (TestVectorData { blocks, .. }, _db_dir, zaino_db) = + load_vectors_and_spawn_and_sync_v1_zaino_db().await; + + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + dbg!(zaino_db.db_height().await.unwrap()); + + let TestVectorBlockData { + height, + zebra_block, + sapling_root, + sapling_tree_size, + orchard_root, + orchard_tree_size, + .. + } = blocks.last().unwrap().clone(); + + // NOTE: Currently using default here. 
+ let parent_chain_work = ChainWork::from_u256(0.into()); + let metadata = BlockMetadata::new( + sapling_root, + sapling_tree_size as u32, + orchard_root, + orchard_tree_size as u32, + parent_chain_work, + zaino_common::Network::Regtest(ActivationHeights::default()).to_zebra_network(), + ); + + let mut chain_block = + IndexedBlock::try_from(BlockWithMetadata::new(&zebra_block, metadata)).unwrap(); + + chain_block.index.height = crate::Height(height + 1); + dbg!(chain_block.index.height); + + let db_err = dbg!(zaino_db.write_block(chain_block).await); + + // TODO: Update with concrete err type. + assert!(db_err.is_err()); + + dbg!(zaino_db.db_height().await.unwrap()); +} + +#[tokio::test(flavor = "multi_thread")] +async fn try_delete_block_with_invalid_height() { + init_tracing(); + + let (TestVectorData { blocks, .. }, _db_dir, zaino_db) = + load_vectors_and_spawn_and_sync_v1_zaino_db().await; + + zaino_db.wait_until_ready().await; + dbg!(zaino_db.status()); + dbg!(zaino_db.db_height().await.unwrap()); + + let height = blocks.last().unwrap().clone().height; + + let delete_height = height - 1; + + let db_err = dbg!( + zaino_db + .delete_block_at_height(crate::Height(delete_height)) + .await + ); + + // TODO: Update with concrete err type. + assert!(db_err.is_err()); + + dbg!(zaino_db.db_height().await.unwrap()); +} + +#[tokio::test(flavor = "multi_thread")] +async fn create_db_reader() { + let (TestVectorData { blocks, .. }, _db_dir, zaino_db, db_reader) = + load_vectors_v1db_and_reader().await; + + let data_height = blocks.last().unwrap().height; + let db_height = dbg!(zaino_db.db_height().await.unwrap()).unwrap(); + let db_reader_height = dbg!(db_reader.db_height().await.unwrap()).unwrap(); + + assert_eq!(data_height, db_height.0); + assert_eq!(db_height, db_reader_height); +} + +// *** DbReader Tests *** + +#[tokio::test(flavor = "multi_thread")] +async fn get_chain_blocks() { + init_tracing(); + + let (TestVectorData { blocks, .. 
}, _db_dir, _zaino_db, db_reader) = + load_vectors_v1db_and_reader().await; + + let mut parent_chain_work = ChainWork::from_u256(0.into()); + + for TestVectorBlockData { + height, + zebra_block, + sapling_root, + sapling_tree_size, + orchard_root, + orchard_tree_size, + .. + } in blocks.iter() + { + let metadata = BlockMetadata::new( + *sapling_root, + *sapling_tree_size as u32, + *orchard_root, + *orchard_tree_size as u32, + parent_chain_work, + zebra_chain::parameters::Network::new_regtest( + zebra_chain::parameters::testnet::ConfiguredActivationHeights { + before_overwinter: Some(1), + overwinter: Some(1), + sapling: Some(1), + blossom: Some(1), + heartwood: Some(1), + canopy: Some(1), + nu5: Some(1), + nu6: Some(1), + // see https://zips.z.cash/#nu6-1-candidate-zips for info on NU6.1 + nu6_1: None, + nu7: None, + } + .into(), + ), + ); + + let block_with_metadata = BlockWithMetadata::new(zebra_block, metadata); + let chain_block = IndexedBlock::try_from(block_with_metadata).unwrap(); + + parent_chain_work = *chain_block.index().chainwork(); + + let reader_chain_block = db_reader.get_chain_block(Height(*height)).await.unwrap(); + assert_eq!(Some(chain_block), reader_chain_block); + println!("IndexedBlock at height {height} OK"); + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_compact_blocks() { + init_tracing(); + + let (TestVectorData { blocks, .. }, _db_dir, _zaino_db, db_reader) = + load_vectors_v1db_and_reader().await; + + let mut parent_chain_work = ChainWork::from_u256(0.into()); + + for TestVectorBlockData { + height, + zebra_block, + sapling_root, + sapling_tree_size, + orchard_root, + orchard_tree_size, + .. 
+ } in blocks.iter() + { + let metadata = BlockMetadata::new( + *sapling_root, + *sapling_tree_size as u32, + *orchard_root, + *orchard_tree_size as u32, + parent_chain_work, + zebra_chain::parameters::Network::new_regtest( + zebra_chain::parameters::testnet::ConfiguredActivationHeights { + before_overwinter: Some(1), + overwinter: Some(1), + sapling: Some(1), + blossom: Some(1), + heartwood: Some(1), + canopy: Some(1), + nu5: Some(1), + nu6: Some(1), + // see https://zips.z.cash/#nu6-1-candidate-zips for info on NU6.1 + nu6_1: None, + nu7: None, + } + .into(), + ), + ); + + let block_with_metadata = BlockWithMetadata::new(zebra_block, metadata); + let chain_block = IndexedBlock::try_from(block_with_metadata).unwrap(); + let compact_block = chain_block.to_compact_block(); + + parent_chain_work = *chain_block.index().chainwork(); + + let reader_compact_block_default = db_reader + .get_compact_block(Height(*height), PoolTypeFilter::default()) + .await + .unwrap(); + let default_compact_block = compact_block_with_pool_types( + compact_block.clone(), + &PoolTypeFilter::default().to_pool_types_vector(), + ); + assert_eq!(default_compact_block, reader_compact_block_default); + + let reader_compact_block_all_data = db_reader + .get_compact_block(Height(*height), PoolTypeFilter::includes_all()) + .await + .unwrap(); + let all_data_compact_block = compact_block_with_pool_types( + compact_block, + &PoolTypeFilter::includes_all().to_pool_types_vector(), + ); + assert_eq!(all_data_compact_block, reader_compact_block_all_data); + + println!("CompactBlock at height {height} OK"); + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_compact_block_stream() { + use futures::StreamExt; + + init_tracing(); + + let (TestVectorData { blocks, .. 
}, _db_dir, _zaino_db, db_reader) = + load_vectors_v1db_and_reader().await; + + let start_height = Height(blocks.first().unwrap().height); + let end_height = Height(blocks.last().unwrap().height); + + for pool_type_filter in [PoolTypeFilter::default(), PoolTypeFilter::includes_all()] { + let compact_block_stream = db_reader + .get_compact_block_stream(start_height, end_height, pool_type_filter.clone()) + .await + .unwrap(); + + futures::pin_mut!(compact_block_stream); + + let mut expected_next_height_u32: u32 = start_height.0; + let mut streamed_block_count: usize = 0; + + while let Some(block_result) = compact_block_stream.next().await { + let streamed_compact_block = block_result.unwrap(); + + let streamed_height_u32: u32 = u32::try_from(streamed_compact_block.height).unwrap(); + + assert_eq!(streamed_height_u32, expected_next_height_u32); + + let singular_compact_block = db_reader + .get_compact_block(Height(streamed_height_u32), pool_type_filter.clone()) + .await + .unwrap(); + + assert_eq!(singular_compact_block, streamed_compact_block); + + expected_next_height_u32 = expected_next_height_u32.saturating_add(1); + streamed_block_count = streamed_block_count.saturating_add(1); + } + + let expected_block_count: usize = (end_height + .0 + .saturating_sub(start_height.0) + .saturating_add(1)) as usize; + + assert_eq!(streamed_block_count, expected_block_count); + assert_eq!(expected_next_height_u32, end_height.0.saturating_add(1)); + } +} + +#[cfg(feature = "transparent_address_history_experimental")] +#[tokio::test(flavor = "multi_thread")] +async fn get_faucet_txids() { + init_tracing(); + + let (TestVectorData { blocks, faucet, .. 
}, _db_dir, _zaino_db, db_reader) = + load_vectors_v1db_and_reader().await; + + let start = Height(blocks.first().unwrap().height); + let end = Height(blocks.last().unwrap().height); + dbg!(&start, &end); + + let (_faucet_address, _txid, _output_index, faucet_script, _satoshis, _height) = + faucet.utxos.first().unwrap().into_parts(); + let faucet_addr_script = AddrScript::from_script(faucet_script.as_raw_bytes()) + .expect("faucet script must be standard P2PKH or P2SH"); + + let mut parent_chain_work = ChainWork::from_u256(0.into()); + + for TestVectorBlockData { + height, + zebra_block, + sapling_root, + sapling_tree_size, + orchard_root, + orchard_tree_size, + .. + } in blocks.iter() + { + let metadata = BlockMetadata::new( + *sapling_root, + *sapling_tree_size as u32, + *orchard_root, + *orchard_tree_size as u32, + parent_chain_work, + zebra_chain::parameters::Network::new_regtest( + zebra_chain::parameters::testnet::ConfiguredActivationHeights { + before_overwinter: Some(1), + overwinter: Some(1), + sapling: Some(1), + blossom: Some(1), + heartwood: Some(1), + canopy: Some(1), + nu5: Some(1), + nu6: Some(1), + // see https://zips.z.cash/#nu6-1-candidate-zips for info on NU6.1 + nu6_1: None, + nu7: None, + } + .into(), + ), + ); + + let block_with_metadata = BlockWithMetadata::new(zebra_block, metadata); + let chain_block = IndexedBlock::try_from(block_with_metadata).unwrap(); + + parent_chain_work = *chain_block.index().chainwork(); + + println!("Checking faucet txids at height {height}"); + let block_height = Height(*height); + let block_txids: Vec = chain_block + .transactions() + .iter() + .map(|tx_data| tx_data.txid().to_string()) + .collect(); + let filtered_block_txids: Vec = block_txids + .into_iter() + .filter(|txid| faucet.txids.contains(txid)) + .collect(); + dbg!(&filtered_block_txids); + + let reader_faucet_tx_locations = db_reader + .addr_tx_locations_by_range(faucet_addr_script, block_height, block_height) + .await + .unwrap() + 
.unwrap_or_default(); + let mut reader_block_txids = Vec::new(); + for tx_location in reader_faucet_tx_locations { + let txid = db_reader.get_txid(tx_location).await.unwrap(); + reader_block_txids.push(txid.to_string()); + } + dbg!(&reader_block_txids); + + assert_eq!(filtered_block_txids.len(), reader_block_txids.len()); + assert_eq!(filtered_block_txids, reader_block_txids); + } + + println!("Checking full faucet data"); + let reader_faucet_tx_locations = db_reader + .addr_tx_locations_by_range(faucet_addr_script, start, end) + .await + .unwrap() + .unwrap(); + let mut reader_faucet_txids = Vec::new(); + for tx_location in reader_faucet_tx_locations { + let txid = db_reader.get_txid(tx_location).await.unwrap(); + reader_faucet_txids.push(txid.to_string()); + } + + assert_eq!(faucet.txids.len(), reader_faucet_txids.len()); + assert_eq!(faucet.txids, reader_faucet_txids); +} + +#[cfg(feature = "transparent_address_history_experimental")] +#[tokio::test(flavor = "multi_thread")] +async fn get_recipient_txids() { + init_tracing(); + + let ( + TestVectorData { + blocks, recipient, .. + }, + _db_dir, + _zaino_db, + db_reader, + ) = load_vectors_v1db_and_reader().await; + + let start = Height(blocks.first().unwrap().height); + let end = Height(blocks.last().unwrap().height); + + let (_recipient_address, _txid, _output_index, recipient_script, _satoshis, _height) = + recipient.utxos.first().unwrap().into_parts(); + let recipient_addr_script = AddrScript::from_script(recipient_script.as_raw_bytes()) + .expect("faucet script must be standard P2PKH or P2SH"); + + let mut parent_chain_work = ChainWork::from_u256(0.into()); + + for TestVectorBlockData { + height, + zebra_block, + sapling_root, + sapling_tree_size, + orchard_root, + orchard_tree_size, + .. 
+ } in blocks.iter() + { + let metadata = BlockMetadata::new( + *sapling_root, + *sapling_tree_size as u32, + *orchard_root, + *orchard_tree_size as u32, + parent_chain_work, + zebra_chain::parameters::Network::new_regtest( + zebra_chain::parameters::testnet::ConfiguredActivationHeights { + before_overwinter: Some(1), + overwinter: Some(1), + sapling: Some(1), + blossom: Some(1), + heartwood: Some(1), + canopy: Some(1), + nu5: Some(1), + nu6: Some(1), + // see https://zips.z.cash/#nu6-1-candidate-zips for info on NU6.1 + nu6_1: None, + nu7: None, + } + .into(), + ), + ); + + let block_with_metadata = BlockWithMetadata::new(zebra_block, metadata); + let chain_block = IndexedBlock::try_from(block_with_metadata).unwrap(); + + parent_chain_work = *chain_block.index().chainwork(); + + println!("Checking recipient txids at height {height}"); + let block_height = Height(*height); + let block_txids: Vec = chain_block + .transactions() + .iter() + .map(|tx_data| tx_data.txid().to_string()) + .collect(); + + // Get block txids that are relevant to recipient. 
+ let filtered_block_txids: Vec = block_txids + .into_iter() + .filter(|txid| recipient.txids.contains(txid)) + .collect(); + dbg!(&filtered_block_txids); + + let reader_recipient_tx_locations = match db_reader + .addr_tx_locations_by_range(recipient_addr_script, block_height, block_height) + .await + .unwrap() + { + Some(v) => v, + None => continue, + }; + let mut reader_block_txids = Vec::new(); + for tx_location in reader_recipient_tx_locations { + let txid = db_reader.get_txid(tx_location).await.unwrap(); + reader_block_txids.push(txid.to_string()); + } + dbg!(&reader_block_txids); + + assert_eq!(filtered_block_txids.len(), reader_block_txids.len()); + assert_eq!(filtered_block_txids, reader_block_txids); + } + + println!("Checking full faucet data"); + let reader_recipient_tx_locations = db_reader + .addr_tx_locations_by_range(recipient_addr_script, start, end) + .await + .unwrap() + .unwrap(); + + let mut reader_recipient_txids = Vec::new(); + for tx_location in reader_recipient_tx_locations { + let txid = db_reader.get_txid(tx_location).await.unwrap(); + reader_recipient_txids.push(txid.to_string()); + } + + assert_eq!(recipient.txids.len(), reader_recipient_txids.len()); + assert_eq!(recipient.txids, reader_recipient_txids); +} + +#[cfg(feature = "transparent_address_history_experimental")] +#[tokio::test(flavor = "multi_thread")] +async fn get_faucet_utxos() { + init_tracing(); + + let (TestVectorData { blocks, faucet, .. 
}, _db_dir, _zaino_db, db_reader) = + load_vectors_v1db_and_reader().await; + + let start = Height(blocks.first().unwrap().height); + let end = Height(blocks.last().unwrap().height); + + let (_faucet_address, _txid, _output_index, faucet_script, _satoshis, _height) = + faucet.utxos.first().unwrap().into_parts(); + let faucet_addr_script = AddrScript::from_script(faucet_script.as_raw_bytes()) + .expect("faucet script must be standard P2PKH or P2SH"); + + let mut cleaned_utxos = Vec::new(); + for utxo in faucet.utxos.iter() { + let (_faucet_address, txid, output_index, _faucet_script, satoshis, _height) = + utxo.into_parts(); + cleaned_utxos.push((txid.to_string(), output_index.index(), satoshis)); + } + + let reader_faucet_utxo_indexes = db_reader + .addr_utxos_by_range(faucet_addr_script, start, end) + .await + .unwrap() + .unwrap(); + + let mut reader_faucet_utxos = Vec::new(); + + for (tx_location, vout, value) in reader_faucet_utxo_indexes { + let txid = db_reader.get_txid(tx_location).await.unwrap().to_string(); + reader_faucet_utxos.push((txid, vout as u32, value)); + } + + assert_eq!(cleaned_utxos.len(), reader_faucet_utxos.len()); + assert_eq!(cleaned_utxos, reader_faucet_utxos); +} + +#[cfg(feature = "transparent_address_history_experimental")] +#[tokio::test(flavor = "multi_thread")] +async fn get_recipient_utxos() { + init_tracing(); + + let ( + TestVectorData { + blocks, recipient, .. 
+ }, + _db_dir, + _zaino_db, + db_reader, + ) = load_vectors_v1db_and_reader().await; + + let start = Height(blocks.first().unwrap().height); + let end = Height(blocks.last().unwrap().height); + + let (_recipient_address, _txid, _output_index, recipient_script, _satoshis, _height) = + recipient.utxos.first().unwrap().into_parts(); + let recipient_addr_script = AddrScript::from_script(recipient_script.as_raw_bytes()) + .expect("faucet script must be standard P2PKH or P2SH"); + + let mut cleaned_utxos = Vec::new(); + for utxo in recipient.utxos.iter() { + let (_recipient_address, txid, output_index, _recipient_script, satoshis, _height) = + utxo.into_parts(); + cleaned_utxos.push((txid.to_string(), output_index.index(), satoshis)); + } + + let reader_recipient_utxo_indexes = db_reader + .addr_utxos_by_range(recipient_addr_script, start, end) + .await + .unwrap() + .unwrap(); + + let mut reader_recipient_utxos = Vec::new(); + + for (tx_location, vout, value) in reader_recipient_utxo_indexes { + let txid = db_reader.get_txid(tx_location).await.unwrap().to_string(); + reader_recipient_utxos.push((txid, vout as u32, value)); + } + + assert_eq!(cleaned_utxos.len(), reader_recipient_utxos.len()); + assert_eq!(cleaned_utxos, reader_recipient_utxos); +} + +#[cfg(feature = "transparent_address_history_experimental")] +#[tokio::test(flavor = "multi_thread")] +async fn get_balance() { + init_tracing(); + + let (test_vector_data, _db_dir, _zaino_db, db_reader) = load_vectors_v1db_and_reader().await; + + let start = Height(test_vector_data.blocks.first().unwrap().height); + let end = Height(test_vector_data.blocks.last().unwrap().height); + + // Check faucet + + let (_faucet_address, _txid, _output_index, faucet_script, _satoshis, _height) = + test_vector_data.faucet.utxos.first().unwrap().into_parts(); + let faucet_addr_script = AddrScript::from_script(faucet_script.as_raw_bytes()) + .expect("faucet script must be standard P2PKH or P2SH"); + + let reader_faucet_balance = 
dbg!(db_reader + .addr_balance_by_range(faucet_addr_script, start, end) + .await + .unwrap()) as u64; + + assert_eq!(test_vector_data.faucet.balance, reader_faucet_balance); + + // Check recipient + + let (_recipient_address, _txid, _output_index, recipient_script, _satoshis, _height) = + test_vector_data + .recipient + .utxos + .first() + .unwrap() + .into_parts(); + let recipient_addr_script = AddrScript::from_script(recipient_script.as_raw_bytes()) + .expect("faucet script must be standard P2PKH or P2SH"); + + let reader_recipient_balance = dbg!(db_reader + .addr_balance_by_range(recipient_addr_script, start, end) + .await + .unwrap()) as u64; + + assert_eq!(test_vector_data.recipient.balance, reader_recipient_balance); +} + +#[cfg(feature = "transparent_address_history_experimental")] +#[tokio::test(flavor = "multi_thread")] +async fn check_faucet_spent_map() { + init_tracing(); + + let (TestVectorData { blocks, faucet, .. }, _db_dir, _zaino_db, db_reader) = + load_vectors_v1db_and_reader().await; + + let (_faucet_address, _txid, _output_index, faucet_script, _satoshis, _height) = + faucet.utxos.first().unwrap().into_parts(); + let faucet_addr_script = AddrScript::from_script(faucet_script.as_raw_bytes()) + .expect("faucet script must be standard P2PKH or P2SH"); + + // collect faucet outpoints + let mut faucet_outpoints = Vec::new(); + let mut faucet_ouptpoints_spent_status = Vec::new(); + + let mut parent_chain_work = ChainWork::from_u256(0.into()); + + for TestVectorBlockData { + zebra_block, + sapling_root, + sapling_tree_size, + orchard_root, + orchard_tree_size, + .. 
+ } in blocks.iter() + { + let metadata = BlockMetadata::new( + *sapling_root, + *sapling_tree_size as u32, + *orchard_root, + *orchard_tree_size as u32, + parent_chain_work, + zebra_chain::parameters::Network::new_regtest( + zebra_chain::parameters::testnet::ConfiguredActivationHeights { + before_overwinter: Some(1), + overwinter: Some(1), + sapling: Some(1), + blossom: Some(1), + heartwood: Some(1), + canopy: Some(1), + nu5: Some(1), + nu6: Some(1), + // see https://zips.z.cash/#nu6-1-candidate-zips for info on NU6.1 + nu6_1: None, + nu7: None, + } + .into(), + ), + ); + + let block_with_metadata = BlockWithMetadata::new(zebra_block, metadata); + let chain_block = IndexedBlock::try_from(block_with_metadata).unwrap(); + + parent_chain_work = *chain_block.index().chainwork(); + + for tx in chain_block.transactions() { + let txid = tx.txid().0; + let outputs = tx.transparent().outputs(); + for (vout_idx, output) in outputs.iter().enumerate() { + if output.script_hash() == faucet_addr_script.hash() { + let outpoint = Outpoint::new(txid, vout_idx as u32); + + let spender = db_reader.get_outpoint_spender(outpoint).await.unwrap(); + + faucet_outpoints.push(outpoint); + faucet_ouptpoints_spent_status.push(spender); + } + } + } + } + + // collect faucet txids holding utxos + let mut faucet_utxo_indexes = Vec::new(); + for utxo in faucet.utxos.iter() { + let (_faucet_address, txid, output_index, _faucet_script, _satoshis, _height) = + utxo.into_parts(); + faucet_utxo_indexes.push((txid.to_string(), output_index.index())); + } + + // check full spent outpoints map + let faucet_spent_map = db_reader + .get_outpoint_spenders(faucet_outpoints.clone()) + .await + .unwrap(); + assert_eq!(&faucet_ouptpoints_spent_status, &faucet_spent_map); + + for (outpoint, spender_option) in faucet_outpoints + .iter() + .zip(faucet_ouptpoints_spent_status.iter()) + { + let outpoint_tuple = ( + TransactionHash::from(*outpoint.prev_txid()).to_string(), + outpoint.prev_index(), + ); + match 
spender_option { + Some(spender_index) => { + let spender_tx = blocks.iter().find_map( + |TestVectorBlockData { + zebra_block, + sapling_root, + sapling_tree_size, + orchard_root, + orchard_tree_size, + .. + }| { + // NOTE: Currently using default here. + let parent_chain_work = ChainWork::from_u256(0.into()); + let metadata = BlockMetadata::new( + *sapling_root, + *sapling_tree_size as u32, + *orchard_root, + *orchard_tree_size as u32, + parent_chain_work, + zaino_common::Network::Regtest(ActivationHeights::default()) + .to_zebra_network(), + ); + let chain_block = + IndexedBlock::try_from(BlockWithMetadata::new(zebra_block, metadata)) + .unwrap(); + + chain_block + .transactions() + .iter() + .find(|tx| { + let (block_height, tx_idx) = + (spender_index.block_height(), spender_index.tx_index()); + chain_block.index().height() == Height(block_height) + && tx.index() == tx_idx as u64 + }) + .cloned() + }, + ); + assert!( + spender_tx.is_some(), + "Spender transaction not found in blocks!" + ); + + let spender_tx = spender_tx.unwrap(); + let matches = spender_tx.transparent().inputs().iter().any(|input| { + input.prevout_txid() == outpoint.prev_txid() + && input.prevout_index() == outpoint.prev_index() + }); + assert!( + matches, + "Spender transaction does not actually spend the outpoint: {outpoint:?}" + ); + + assert!( + !faucet_utxo_indexes.contains(&outpoint_tuple), + "Spent outpoint should NOT be in UTXO set, but found: {outpoint_tuple:?}" + ); + } + None => { + assert!( + faucet_utxo_indexes.contains(&outpoint_tuple), + "Unspent outpoint should be in UTXO set, but NOT found: {outpoint_tuple:?}" + ); + } + } + } +} + +#[cfg(feature = "transparent_address_history_experimental")] +#[tokio::test(flavor = "multi_thread")] +async fn check_recipient_spent_map() { + init_tracing(); + + let ( + TestVectorData { + blocks, recipient, .. 
+ }, + _db_dir, + _zaino_db, + db_reader, + ) = load_vectors_v1db_and_reader().await; + + let (_recipient_address, _txid, _output_index, recipient_script, _satoshis, _height) = + recipient.utxos.first().unwrap().into_parts(); + let recipient_addr_script = AddrScript::from_script(recipient_script.as_raw_bytes()) + .expect("faucet script must be standard P2PKH or P2SH"); + + // collect faucet outpoints + let mut recipient_outpoints = Vec::new(); + let mut recipient_ouptpoints_spent_status = Vec::new(); + + let mut parent_chain_work = ChainWork::from_u256(0.into()); + + for TestVectorBlockData { + zebra_block, + sapling_root, + sapling_tree_size, + orchard_root, + orchard_tree_size, + .. + } in blocks.iter() + { + let metadata = BlockMetadata::new( + *sapling_root, + *sapling_tree_size as u32, + *orchard_root, + *orchard_tree_size as u32, + parent_chain_work, + zebra_chain::parameters::Network::new_regtest( + zebra_chain::parameters::testnet::ConfiguredActivationHeights { + before_overwinter: Some(1), + overwinter: Some(1), + sapling: Some(1), + blossom: Some(1), + heartwood: Some(1), + canopy: Some(1), + nu5: Some(1), + nu6: Some(1), + // see https://zips.z.cash/#nu6-1-candidate-zips for info on NU6.1 + nu6_1: None, + nu7: None, + } + .into(), + ), + ); + + let block_with_metadata = BlockWithMetadata::new(zebra_block, metadata); + let chain_block = IndexedBlock::try_from(block_with_metadata).unwrap(); + + parent_chain_work = *chain_block.index().chainwork(); + + for tx in chain_block.transactions() { + let txid = tx.txid().0; + let outputs = tx.transparent().outputs(); + for (vout_idx, output) in outputs.iter().enumerate() { + if output.script_hash() == recipient_addr_script.hash() { + let outpoint = Outpoint::new(txid, vout_idx as u32); + + let spender = db_reader.get_outpoint_spender(outpoint).await.unwrap(); + + recipient_outpoints.push(outpoint); + recipient_ouptpoints_spent_status.push(spender); + } + } + } + } + + // collect faucet txids holding utxos + let mut 
recipient_utxo_indexes = Vec::new(); + for utxo in recipient.utxos.iter() { + let (_recipient_address, txid, output_index, _recipient_script, _satoshis, _height) = + utxo.into_parts(); + recipient_utxo_indexes.push((txid.to_string(), output_index.index())); + } + + // check full spent outpoints map + let recipient_spent_map = db_reader + .get_outpoint_spenders(recipient_outpoints.clone()) + .await + .unwrap(); + assert_eq!(&recipient_ouptpoints_spent_status, &recipient_spent_map); + + for (outpoint, spender_option) in recipient_outpoints + .iter() + .zip(recipient_ouptpoints_spent_status.iter()) + { + let outpoint_tuple = ( + TransactionHash::from(*outpoint.prev_txid()).to_string(), + outpoint.prev_index(), + ); + match spender_option { + Some(spender_index) => { + let spender_tx = blocks.iter().find_map( + |TestVectorBlockData { + zebra_block, + sapling_root, + sapling_tree_size, + orchard_root, + orchard_tree_size, + .. + }| { + // NOTE: Currently using default here. + let parent_chain_work = ChainWork::from_u256(0.into()); + let metadata = BlockMetadata::new( + *sapling_root, + *sapling_tree_size as u32, + *orchard_root, + *orchard_tree_size as u32, + parent_chain_work, + zaino_common::Network::Regtest(ActivationHeights::default()) + .to_zebra_network(), + ); + let chain_block = + IndexedBlock::try_from(BlockWithMetadata::new(zebra_block, metadata)) + .unwrap(); + + chain_block + .transactions() + .iter() + .find(|tx| { + let (block_height, tx_idx) = + (spender_index.block_height(), spender_index.tx_index()); + chain_block.index().height() == Height(block_height) + && tx.index() == tx_idx as u64 + }) + .cloned() + }, + ); + assert!( + spender_tx.is_some(), + "Spender transaction not found in blocks!" 
+ ); + + let spender_tx = spender_tx.unwrap(); + let matches = spender_tx.transparent().inputs().iter().any(|input| { + input.prevout_txid() == outpoint.prev_txid() + && input.prevout_index() == outpoint.prev_index() + }); + assert!( + matches, + "Spender transaction does not actually spend the outpoint: {outpoint:?}" + ); + + assert!( + !recipient_utxo_indexes.contains(&outpoint_tuple), + "Spent outpoint should NOT be in UTXO set, but found: {outpoint_tuple:?}" + ); + } + None => { + assert!( + recipient_utxo_indexes.contains(&outpoint_tuple), + "Unspent outpoint should be in UTXO set, but NOT found: {outpoint_tuple:?}" + ); + } + } + } +} diff --git a/zaino-state/src/chain_index/tests/mempool.rs b/zaino-state/src/chain_index/tests/mempool.rs new file mode 100644 index 000000000..347ba0e89 --- /dev/null +++ b/zaino-state/src/chain_index/tests/mempool.rs @@ -0,0 +1,335 @@ +//! Zaino-State ChainIndex Mempool unit tests. + +use std::{collections::HashMap, io::Cursor, str::FromStr as _}; +use tokio::time::{sleep, timeout, Duration}; +use zebra_chain::serialization::ZcashDeserialize as _; + +use crate::{ + chain_index::{ + mempool::MempoolSubscriber, + source::test::MockchainSource, + tests::vectors::{build_active_mockchain_source, load_test_vectors, TestVectorBlockData}, + }, + Mempool, MempoolKey, MempoolValue, +}; + +async fn spawn_mempool_and_mockchain() -> ( + Mempool, + MempoolSubscriber, + MockchainSource, + Vec, +) { + let blocks = load_test_vectors().unwrap().blocks; + + let mockchain = build_active_mockchain_source(0, blocks.clone()); + + let mempool = Mempool::spawn(mockchain.clone(), None).await.unwrap(); + + let subscriber = mempool.subscriber(); + + let block_data = blocks + .iter() + .map(|TestVectorBlockData { zebra_block, .. 
}| zebra_block.clone()) + .collect(); + + (mempool, subscriber, mockchain, block_data) +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_mempool() { + let (_mempool, subscriber, mockchain, block_data) = spawn_mempool_and_mockchain().await; + + let mut active_chain_height = dbg!(mockchain.active_height()); + assert_eq!(active_chain_height, 0); + let max_chain_height = mockchain.max_chain_height(); + + for _ in 0..=max_chain_height { + let mempool_index = (active_chain_height as usize) + 1; + let mempool_transactions = block_data + .get(mempool_index) + .map(|b| { + b.transactions + .iter() + .filter(|tx| !tx.is_coinbase()) + .cloned() + .collect::>() + }) + .unwrap_or_default(); + + let subscriber_tx = subscriber.get_mempool().await; + + for transaction in mempool_transactions.into_iter() { + let transaction_hash = dbg!(transaction.hash()); + + let (subscriber_tx_hash, subscriber_tx) = subscriber_tx + .iter() + .find(|(k, _)| k.txid == transaction_hash.to_string()) + .map( + |(MempoolKey { txid: s }, MempoolValue { serialized_tx: tx })| { + ( + zebra_chain::transaction::Hash::from_str(s).unwrap(), + tx.clone(), + ) + }, + ) + .unwrap(); + + let subscriber_transaction = zebra_chain::transaction::Transaction::zcash_deserialize( + Cursor::new(subscriber_tx.as_ref()), + ) + .unwrap(); + + assert_eq!(transaction_hash, subscriber_tx_hash); + assert_eq!(*transaction, subscriber_transaction); + } + + if active_chain_height < max_chain_height { + mockchain.mine_blocks(10); + active_chain_height = dbg!(mockchain.active_height()); + + sleep(Duration::from_millis(2000)).await; + } + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_filtered_mempool() { + let (_mempool, subscriber, mockchain, block_data) = spawn_mempool_and_mockchain().await; + + mockchain.mine_blocks(150); + let active_chain_height = mockchain.active_height(); + + sleep(Duration::from_millis(2000)).await; + + let mempool_index = (active_chain_height as usize) + 1; + let 
mempool_transactions = block_data + .get(mempool_index) + .map(|b| b.transactions.clone()) + .unwrap_or_default(); + + let exclude_hash = mempool_transactions[0].hash(); + + let subscriber_tx = subscriber + .get_filtered_mempool(vec![exclude_hash.to_string()]) + .await; + + println!("Checking transactions.."); + + for transaction in mempool_transactions.into_iter() { + let transaction_hash = transaction.hash(); + if transaction_hash == exclude_hash { + // check tx is *not* in mempool transactions + let maybe_subscriber_tx = subscriber_tx + .iter() + .find(|(k, _)| k.txid == transaction_hash.to_string()) + .map( + |(MempoolKey { txid: s }, MempoolValue { serialized_tx: tx })| { + ( + zebra_chain::transaction::Hash::from_str(s).unwrap(), + tx.clone(), + ) + }, + ); + + assert!(maybe_subscriber_tx.is_none()); + } else { + let (subscriber_tx_hash, subscriber_tx) = subscriber_tx + .iter() + .find(|(k, _)| k.txid == transaction_hash.to_string()) + .map( + |(MempoolKey { txid: s }, MempoolValue { serialized_tx: tx })| { + ( + zebra_chain::transaction::Hash::from_str(s).unwrap(), + tx.clone(), + ) + }, + ) + .unwrap(); + + let subscriber_transaction = zebra_chain::transaction::Transaction::zcash_deserialize( + Cursor::new(subscriber_tx.as_ref()), + ) + .unwrap(); + + assert_eq!(transaction_hash, subscriber_tx_hash); + assert_eq!(*transaction, subscriber_transaction); + } + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_mempool_transaction() { + let (_mempool, subscriber, mockchain, block_data) = spawn_mempool_and_mockchain().await; + + mockchain.mine_blocks(150); + let active_chain_height = dbg!(mockchain.active_height()); + + sleep(Duration::from_millis(2000)).await; + + let mempool_index = (active_chain_height as usize) + 1; + + let mempool_transactions: Vec<_> = block_data + .get(mempool_index) + .map(|b| { + b.transactions + .iter() + .filter(|tx| !tx.is_coinbase()) + .cloned() + .collect::>() + }) + .unwrap_or_default(); + + let target_transaction = 
mempool_transactions + .first() + .expect("expected at least one non-coinbase mempool transaction"); + let target_hash = target_transaction.hash(); + + let subscriber_tx = subscriber + .get_transaction(&MempoolKey { + txid: target_hash.to_string(), + }) + .await + .unwrap() + .serialized_tx + .clone(); + + let subscriber_transaction = zebra_chain::transaction::Transaction::zcash_deserialize( + Cursor::new(subscriber_tx.as_ref()), + ) + .unwrap(); + + assert_eq!(*mempool_transactions[0], subscriber_transaction); +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_mempool_info() { + let (_mempool, subscriber, mockchain, block_data) = spawn_mempool_and_mockchain().await; + + mockchain.mine_blocks(150); + let active_chain_height = dbg!(mockchain.active_height()); + + sleep(Duration::from_millis(2000)).await; + + let mempool_index = (active_chain_height as usize) + 1; + + // 1) Take the “next block” as a mempool proxy, but: + // - exclude coinbase + // - dedupe by txid (mempool is keyed by txid) + let mut seen = std::collections::HashSet::new(); + let mempool_transactions: Vec<_> = block_data + .get(mempool_index) + .map(|b| { + b.transactions + .iter() + .filter(|tx| !tx.is_coinbase()) + .filter(|tx| seen.insert(tx.hash())) // returns true only on first insert + .cloned() + .collect::>() + }) + .unwrap_or_default(); + + let subscriber_mempool_info = subscriber.get_mempool_info().await; + + let expected_size: u64 = mempool_transactions.len() as u64; + + let expected_bytes: u64 = mempool_transactions + .iter() + .map(|tx| { + // Mempool stores SerializedTransaction, so mirror that here. + let st: zebra_chain::transaction::SerializedTransaction = tx.as_ref().into(); + st.as_ref().len() as u64 + }) + .sum(); + + let expected_key_heap_bytes: u64 = mempool_transactions + .iter() + .map(|tx| { + // Keys are hex txid strings; measure heap capacity like the implementation. 
+ tx.hash().to_string().capacity() as u64 + }) + .sum(); + + let expected_usage: u64 = expected_bytes + expected_key_heap_bytes; + + assert_eq!(subscriber_mempool_info.size, expected_size, "size mismatch"); + assert_eq!( + subscriber_mempool_info.bytes, expected_bytes, + "bytes mismatch" + ); + assert_eq!( + subscriber_mempool_info.usage, expected_usage, + "usage mismatch" + ); +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_mempool_stream() { + let (_mempool, subscriber, mockchain, block_data) = spawn_mempool_and_mockchain().await; + let mut subscriber = subscriber; + + mockchain.mine_blocks(150); + let active_chain_height = dbg!(mockchain.active_height()); + + sleep(Duration::from_millis(2000)).await; + + let mempool_index = (active_chain_height as usize) + 1; + + let mempool_transactions: Vec<_> = block_data + .get(mempool_index) + .map(|b| { + b.transactions + .iter() + .filter(|tx| !tx.is_coinbase()) + .cloned() + .collect::>() + }) + .unwrap_or_default(); + + let (mut rx, handle) = subscriber.get_mempool_stream(None).await.unwrap(); + + let expected_count = mempool_transactions.len(); + let mut received: HashMap> = HashMap::new(); + + let collect_deadline = Duration::from_secs(2); + + timeout(collect_deadline, async { + while received.len() < expected_count { + match rx.recv().await { + Some(Ok((MempoolKey { txid: k }, MempoolValue { serialized_tx: v }))) => { + received.insert(k, v.as_ref().as_ref().to_vec()); + } + Some(Err(e)) => panic!("stream yielded error: {e:?}"), + None => break, + } + } + }) + .await + .expect("timed out waiting for initial mempool stream items"); + + let expected: HashMap> = mempool_transactions + .iter() + .map(|tx| { + let key = tx.hash().to_string(); + let st: zebra_chain::transaction::SerializedTransaction = tx.as_ref().into(); + (key, st.as_ref().to_vec()) + }) + .collect(); + + assert_eq!(received.len(), expected.len(), "entry count mismatch"); + for (k, bytes) in expected.iter() { + let got = received + .get(k) + 
.unwrap_or_else(|| panic!("missing tx {k} in stream")); + assert_eq!(got, bytes, "bytes mismatch for {k}"); + } + + mockchain.mine_blocks(1); + + timeout(Duration::from_secs(5), async { + while let Some(_msg) = rx.recv().await {} + }) + .await + .expect("mempool stream did not close after mining a new block"); + + handle.await.unwrap(); +} diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs new file mode 100644 index 000000000..89d3f730e --- /dev/null +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -0,0 +1,789 @@ +use std::{sync::Arc, time::Duration}; + +use futures::stream::FuturesUnordered; +use proptest::{ + prelude::{Arbitrary as _, BoxedStrategy, Just}, + strategy::Strategy, +}; +use rand::seq::SliceRandom; +use tokio_stream::StreamExt as _; +use tonic::async_trait; +use zaino_common::{network::ActivationHeights, DatabaseConfig, Network, StorageConfig}; +use zebra_chain::{ + block::arbitrary::{self, LedgerStateOverride}, + fmt::SummaryDebug, + serialization::ZcashSerialize, + transaction::SerializedTransaction, + LedgerState, +}; +use zebra_state::{FromDisk, HashOrHeight, IntoDisk as _}; + +use crate::{ + chain_index::{ + source::{BlockchainSourceResult, GetTransactionLocation}, + tests::{init_tracing, proptest_blockgen::proptest_helpers::add_segment}, + types::BestChainLocation, + NonFinalizedSnapshot, + }, + BlockCacheConfig, BlockHash, BlockchainSource, ChainIndex, NodeBackedChainIndex, + NodeBackedChainIndexSubscriber, NonfinalizedBlockCacheSnapshot, TransactionHash, +}; + +/// Handle all the boilerplate for a passthrough +fn passthrough_test( + // The actual assertions. 
Takes as args: + test: impl AsyncFn( + // The mockchain, to use a a source of truth + &ProptestMockchain, + // The subscriber to test against + NodeBackedChainIndexSubscriber, + // A snapshot, which will have only the genesis block + Arc, + ), +) { + init_tracing(); + let network = Network::Regtest(ActivationHeights::default()); + // Long enough to have some finalized blocks to play with + let segment_length = 120; + // No need to worry about non-best chains for this test + let branch_count = 1; + + // from this line to `runtime.block_on(async {` are all + // copy-pasted. Could a macro get rid of some of this boilerplate? + proptest::proptest!(proptest::test_runner::Config::with_cases(1), |(segments in make_branching_chain(branch_count, segment_length, network))| { + let runtime = tokio::runtime::Builder::new_multi_thread().worker_threads(2).enable_time().build().unwrap(); + runtime.block_on(async { + let (genesis_segment, branching_segments) = segments; + let mockchain = ProptestMockchain { + genesis_segment, + branching_segments, + // This number can be played with. 
We want to slow down + // sync enough to trigger passthrough without + // slowing down passthrough more than we need to + delay: Some(Duration::from_secs(1)), + }; + let temp_dir: tempfile::TempDir = tempfile::tempdir().unwrap(); + let db_path: std::path::PathBuf = temp_dir.path().to_path_buf(); + + let config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path, + ..Default::default() + }, + ..Default::default() + }, + db_version: 1, + network, + + }; + + let indexer = NodeBackedChainIndex::new(mockchain.clone(), config) + .await + .unwrap(); + tokio::time::sleep(Duration::from_secs(5)).await; + let index_reader = indexer.subscriber(); + let snapshot = index_reader.snapshot_nonfinalized_state(); + // 101 instead of 100 as heights are 0-indexed + assert_eq!(snapshot.validator_finalized_height.0 as usize, (2 * segment_length) - 101); + assert_eq!(snapshot.best_tip.height.0, 0); + + + test(&mockchain, index_reader, snapshot).await; + + + + + }); + }) +} + +#[test] +fn passthrough_find_fork_point() { + // TODO: passthrough_test handles a good chunck of boilerplate, but there's + // still a lot more inside of the closures being passed to passthrough_test. + // Can we DRY out more of it? 
+ passthrough_test(async |mockchain, index_reader, snapshot| { + // We use a futures-unordered instead of only a for loop + // as this lets us call all the get_raw_transaction requests + // at the same time and wait for them in parallel + // + // This allows the artificial delays to happen in parallel + let mut parallel = FuturesUnordered::new(); + // As we only have one branch, arbitrary branch order is fine + for (height, hash) in mockchain + .all_blocks_arb_branch_order() + .map(|block| (block.coinbase_height().unwrap(), block.hash())) + { + let index_reader = index_reader.clone(); + let snapshot = snapshot.clone(); + parallel.push(async move { + let fork_point = index_reader + .find_fork_point(&snapshot, &hash.into()) + .await + .unwrap(); + + if height <= snapshot.validator_finalized_height { + // passthrough fork point can only ever be the requested block + // as we don't passthrough to nonfinalized state + assert_eq!(hash, fork_point.unwrap().0); + assert_eq!(height, fork_point.unwrap().1); + } else { + assert!(fork_point.is_none()); + } + }) + } + while let Some(_success) = parallel.next().await {} + }); +} + +#[test] +fn passthrough_get_transaction_status() { + passthrough_test(async |mockchain, index_reader, snapshot| { + // We use a futures-unordered instead of only a for loop + // as this lets us call all the get_raw_transaction requests + // at the same time and wait for them in parallel + // + // This allows the artificial delays to happen in parallel + let mut parallel = FuturesUnordered::new(); + // As we only have one branch, arbitrary branch order is fine + for (height, txid) in mockchain + .all_blocks_arb_branch_order() + .map(|block| { + block + .transactions + .iter() + .map(|transaction| (block.coinbase_height().unwrap(), transaction.hash())) + .collect::>() + }) + .flatten() + { + let index_reader = index_reader.clone(); + let snapshot = snapshot.clone(); + parallel.push(async move { + let transaction_status = index_reader + 
.get_transaction_status(&snapshot, &txid.into()) + .await + .unwrap(); + + if height <= snapshot.validator_finalized_height { + // passthrough transaction status can only ever be on the best + // chain as we don't passthrough to nonfinalized state + let Some(BestChainLocation::Block(_block_hash, transaction_height)) = + transaction_status.0 + else { + panic!("expected best chain location") + }; + assert_eq!(height, transaction_height); + } else { + assert!(transaction_status.0.is_none()); + } + assert!(transaction_status.1.is_empty()); + }) + } + while let Some(_success) = parallel.next().await {} + }); +} + +#[test] +fn passthrough_get_raw_transaction() { + passthrough_test(async |mockchain, index_reader, snapshot| { + // We use a futures-unordered instead of only a for loop + // as this lets us call all the get_raw_transaction requests + // at the same time and wait for them in parallel + // + // This allows the artificial delays to happen in parallel + let mut parallel = FuturesUnordered::new(); + // As we only have one branch, arbitrary branch order is fine + for (expected_transaction, height) in mockchain + .all_blocks_arb_branch_order() + .map(|block| { + block + .transactions + .iter() + .map(|transaction| (transaction, block.coinbase_height().unwrap())) + .collect::>() + }) + .flatten() + { + let index_reader = index_reader.clone(); + let snapshot = snapshot.clone(); + parallel.push(async move { + let actual_transaction = index_reader + .get_raw_transaction( + &snapshot, + &TransactionHash::from(expected_transaction.hash()), + ) + .await + .unwrap(); + let Some((raw_transaction, _branch_id)) = actual_transaction else { + panic!("missing transaction at height {}", height.0) + }; + assert_eq!( + raw_transaction, + SerializedTransaction::from(expected_transaction.clone()).as_ref() + ) + }) + } + while let Some(_success) = parallel.next().await {} + }); +} + +#[test] +fn passthrough_best_chaintip() { + passthrough_test(async |mockchain, index_reader, snapshot| 
{ + let tip = index_reader.best_chaintip(&snapshot).await.unwrap(); + assert_eq!( + tip.height.0, + mockchain + .best_branch() + .last() + .unwrap() + .coinbase_height() + .unwrap() + .0 + .saturating_sub(100) + ); + }) +} + +#[test] +fn passthrough_get_block_height() { + passthrough_test(async |mockchain, index_reader, snapshot| { + // We use a futures-unordered instead of only a for loop + // as this lets us call all the get_raw_transaction requests + // at the same time and wait for them in parallel + // + // This allows the artificial delays to happen in parallel + let mut parallel = FuturesUnordered::new(); + + for (expected_height, hash) in mockchain + .all_blocks_arb_branch_order() + .map(|block| (block.coinbase_height().unwrap(), block.hash())) + { + let index_reader = index_reader.clone(); + let snapshot = snapshot.clone(); + parallel.push(async move { + let height = index_reader + .get_block_height(&snapshot, hash.into()) + .await + .unwrap(); + if expected_height <= snapshot.validator_finalized_height { + assert_eq!(height, Some(expected_height.into())); + } else { + assert_eq!(height, None); + } + }); + } + while let Some(_success) = parallel.next().await {} + }) +} + +#[test] +fn passthrough_get_block_range() { + passthrough_test(async |mockchain, index_reader, snapshot| { + // We use a futures-unordered instead of only a for loop + // as this lets us call all the get_raw_transaction requests + // at the same time and wait for them in parallel + // + // This allows the artificial delays to happen in parallel + let mut parallel = FuturesUnordered::new(); + + for expected_start_height in mockchain + .all_blocks_arb_branch_order() + .map(|block| block.coinbase_height().unwrap()) + { + let expected_end_height = (expected_start_height + 9).unwrap(); + if expected_end_height.0 as usize <= mockchain.all_blocks_arb_branch_order().count() { + let index_reader = index_reader.clone(); + let snapshot = snapshot.clone(); + parallel.push(async move { + let 
block_range_stream = index_reader.get_block_range( + &snapshot, + expected_start_height.into(), + Some(expected_end_height.into()), + ); + if expected_start_height <= snapshot.validator_finalized_height { + let mut block_range_stream = Box::pin(block_range_stream.unwrap()); + let mut num_blocks_in_stream = 0; + while let Some(block) = block_range_stream.next().await { + let expected_block = mockchain + .all_blocks_arb_branch_order() + .nth(expected_start_height.0 as usize + num_blocks_in_stream) + .unwrap() + .zcash_serialize_to_vec() + .unwrap(); + assert_eq!(block.unwrap(), expected_block); + num_blocks_in_stream += 1; + } + assert_eq!( + num_blocks_in_stream, + // expect 10 blocks + 10.min( + // unless the provided range overlaps the finalized boundary. + // in that case, expect all blocks between start height + // and finalized height, (+1 for inclusive range) + snapshot + .validator_finalized_height + .0 + .saturating_sub(expected_start_height.0) + + 1 + ) as usize + ); + } else { + assert!(block_range_stream.is_none()) + } + }); + } + } + while let Some(_success) = parallel.next().await {} + }) +} + +#[test] +fn make_chain() { + init_tracing(); + let network = Network::Regtest(ActivationHeights::default()); + let segment_length = 12; + + let branch_count = 2; + + // default is 256. As each case takes multiple seconds, this seems too many. + // TODO: this should be higher than 1. 
Currently set to 1 for ease of iteration + proptest::proptest!(proptest::test_runner::Config::with_cases(1), |(segments in make_branching_chain(branch_count, segment_length, network))| { + let runtime = tokio::runtime::Builder::new_multi_thread().worker_threads(2).enable_time().build().unwrap(); + runtime.block_on(async { + let (genesis_segment, branching_segments) = segments; + let mockchain = ProptestMockchain { + genesis_segment, + branching_segments, + delay: None + }; + let temp_dir: tempfile::TempDir = tempfile::tempdir().unwrap(); + let db_path: std::path::PathBuf = temp_dir.path().to_path_buf(); + + let config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path, + ..Default::default() + }, + ..Default::default() + }, + db_version: 1, + network, + + }; + + let indexer = NodeBackedChainIndex::new(mockchain.clone(), config) + .await + .unwrap(); + tokio::time::sleep(Duration::from_secs(5)).await; + let index_reader = indexer.subscriber(); + let snapshot = index_reader.snapshot_nonfinalized_state(); + let best_tip_hash = snapshot.best_tip.blockhash; + let best_tip_block = snapshot + .get_chainblock_by_hash(&best_tip_hash) + .unwrap(); + for (hash, block) in &snapshot.blocks { + if hash != &best_tip_hash { + assert!(block.chainwork().to_u256() <= best_tip_block.chainwork().to_u256()); + if snapshot.heights_to_hashes.get(&block.height()) == Some(block.hash()) { + assert_eq!(index_reader.find_fork_point(&snapshot, hash).await.unwrap().unwrap().0, *hash); + } else { + assert_ne!(index_reader.find_fork_point(&snapshot, hash).await.unwrap().unwrap().0, *hash); + } + } + } + assert_eq!(snapshot.heights_to_hashes.len(), (segment_length * 2) ); + assert_eq!( + snapshot.blocks.len(), + segment_length * (branch_count + 1) + ); + }); + }); +} + +#[derive(Clone)] +struct ProptestMockchain { + genesis_segment: ChainSegment, + branching_segments: Vec, + delay: Option, +} + +impl ProptestMockchain { + fn best_branch(&self) -> 
SummaryDebug>> { + let mut best_branch_and_work = None; + for branch in self.branching_segments.clone() { + let branch_chainwork: u128 = branch + .iter() + .map(|block| { + block + .header + .difficulty_threshold + .to_work() + .unwrap() + .as_u128() + }) + .sum(); + match best_branch_and_work { + Some((ref _b, w)) => { + if w < branch_chainwork { + best_branch_and_work = Some((branch, branch_chainwork)) + } + } + None => best_branch_and_work = Some((branch, branch_chainwork)), + } + } + let mut combined = self.genesis_segment.clone(); + combined.append(&mut best_branch_and_work.unwrap().0.clone()); + combined + } + + fn all_blocks_arb_branch_order(&self) -> impl Iterator> { + self.genesis_segment.iter().chain( + self.branching_segments + .iter() + .flat_map(|branch| branch.iter()), + ) + } + + fn get_block_and_all_preceeding( + &self, + // This probably doesn't need to allow FnMut closures (Fn should suffice) + // but there's no cost to allowing it + mut block_identifier: impl FnMut(&zebra_chain::block::Block) -> bool, + ) -> std::option::Option>> { + let mut blocks = Vec::new(); + for block in self.genesis_segment.iter() { + blocks.push(block); + if block_identifier(block) { + return Some(blocks); + } + } + for branch in self.branching_segments.iter() { + let mut branch_blocks = Vec::new(); + for block in branch.iter() { + branch_blocks.push(block); + if block_identifier(block) { + blocks.extend_from_slice(&branch_blocks); + return Some(blocks); + } + } + } + + None + } +} + +#[async_trait] +impl BlockchainSource for ProptestMockchain { + /// Returns the block by hash or height + async fn get_block( + &self, + id: HashOrHeight, + ) -> BlockchainSourceResult>> { + if let Some(delay) = self.delay { + tokio::time::sleep(delay).await; + } + match id { + HashOrHeight::Hash(hash) => { + let matches_hash = |block: &&Arc| block.hash() == hash; + Ok(self + .genesis_segment + .iter() + .find(matches_hash) + .or_else(|| { + self.branching_segments + .iter() + 
.flat_map(|vec| vec.iter()) + .find(matches_hash) + }) + .cloned()) + } + // This implementation selects a block from a random branch instead + // of the best branch. This is intended to simulate reorgs + HashOrHeight::Height(height) => Ok(self + .genesis_segment + .iter() + .find(|block| block.coinbase_height().unwrap() == height) + .cloned() + .or_else(|| { + self.branching_segments + .choose(&mut rand::thread_rng()) + .unwrap() + .iter() + .find(|block| block.coinbase_height().unwrap() == height) + .cloned() + })), + } + } + + /// Returns the block commitment tree data by hash + async fn get_commitment_tree_roots( + &self, + id: BlockHash, + ) -> BlockchainSourceResult<( + Option<(zebra_chain::sapling::tree::Root, u64)>, + Option<(zebra_chain::orchard::tree::Root, u64)>, + )> { + if let Some(delay) = self.delay { + tokio::time::sleep(delay).await; + } + let Some(chain_up_to_block) = + self.get_block_and_all_preceeding(|block| block.hash().0 == id.0) + else { + return Ok((None, None)); + }; + + let (sapling, orchard) = + chain_up_to_block + .iter() + .fold((None, None), |(mut sapling, mut orchard), block| { + for transaction in &block.transactions { + for sap_commitment in transaction.sapling_note_commitments() { + let sap_commitment = + sapling_crypto::Node::from_bytes(sap_commitment.to_bytes()) + .unwrap(); + + sapling = Some(sapling.unwrap_or_else(|| { + incrementalmerkletree::frontier::Frontier::<_, 32>::empty() + })); + + sapling = sapling.map(|mut tree| { + tree.append(sap_commitment); + tree + }); + } + for orc_commitment in transaction.orchard_note_commitments() { + let orc_commitment = + zebra_chain::orchard::tree::Node::from(*orc_commitment); + + orchard = Some(orchard.unwrap_or_else(|| { + incrementalmerkletree::frontier::Frontier::<_, 32>::empty() + })); + + orchard = orchard.map(|mut tree| { + tree.append(orc_commitment); + tree + }); + } + } + (sapling, orchard) + }); + Ok(( + sapling.map(|sap_front| { + ( + 
zebra_chain::sapling::tree::Root::from_bytes(sap_front.root().to_bytes()), + sap_front.tree_size(), + ) + }), + orchard.map(|orc_front| { + ( + zebra_chain::orchard::tree::Root::from_bytes(orc_front.root().as_bytes()), + orc_front.tree_size(), + ) + }), + )) + } + + /// Returns the sapling and orchard treestate by hash + async fn get_treestate( + &self, + _id: BlockHash, + ) -> BlockchainSourceResult<(Option>, Option>)> { + // I don't think this is used for sync? + unimplemented!() + } + + /// Returns the complete list of txids currently in the mempool. + async fn get_mempool_txids( + &self, + ) -> BlockchainSourceResult>> { + if let Some(delay) = self.delay { + tokio::time::sleep(delay).await; + } + Ok(Some(Vec::new())) + } + + /// Returns the transaction by txid + async fn get_transaction( + &self, + txid: TransactionHash, + ) -> BlockchainSourceResult< + Option<( + Arc, + GetTransactionLocation, + )>, + > { + if let Some(delay) = self.delay { + tokio::time::sleep(delay).await; + } + Ok(self.all_blocks_arb_branch_order().find_map(|block| { + block + .transactions + .iter() + .find(|transaction| transaction.hash() == txid.into()) + .cloned() + .zip(Some(if self.best_branch().contains(block) { + GetTransactionLocation::BestChain(block.coinbase_height().unwrap()) + } else { + GetTransactionLocation::NonbestChain + })) + })) + } + + /// Returns the hash of the block at the tip of the best chain. + async fn get_best_block_hash( + &self, + ) -> BlockchainSourceResult> { + if let Some(delay) = self.delay { + tokio::time::sleep(delay).await; + } + Ok(Some(self.best_branch().last().unwrap().hash())) + } + + /// Returns the hash of the block at the tip of the best chain. 
+ async fn get_best_block_height( + &self, + ) -> BlockchainSourceResult> { + if let Some(delay) = self.delay { + tokio::time::sleep(delay).await; + } + Ok(Some( + self.best_branch() + .last() + .unwrap() + .coinbase_height() + .unwrap(), + )) + } + + /// Get a listener for new nonfinalized blocks, + /// if supported + async fn nonfinalized_listener( + &self, + ) -> Result< + Option< + tokio::sync::mpsc::Receiver<(zebra_chain::block::Hash, Arc)>, + >, + Box, + > { + let (sender, receiver) = tokio::sync::mpsc::channel(1_000); + let self_clone = self.clone(); + tokio::task::spawn(async move { + for block in self_clone.all_blocks_arb_branch_order() { + sender.send((block.hash(), block.clone())).await.unwrap() + } + // don't drop the sender + std::mem::forget(sender); + }) + .await + .unwrap(); + Ok(Some(receiver)) + } +} + +type ChainSegment = SummaryDebug>>; + +fn make_branching_chain( + // The number of separate branches, after the branching point at the tip + // of the initial segment. + num_branches: usize, + // The length of the initial segment, and of the branches + // TODO: it would be useful to allow branches of different lengths. 
+ chain_size: usize, + network_override: Network, +) -> BoxedStrategy<(ChainSegment, Vec)> { + let network_override = Some(network_override.to_zebra_network()); + add_segment( + SummaryDebug(Vec::new()), + network_override.clone(), + chain_size, + ) + .prop_flat_map(move |segment| { + ( + Just(segment.clone()), + LedgerState::arbitrary_with(LedgerStateOverride { + height_override: segment.last().unwrap().coinbase_height().unwrap() + 1, + previous_block_hash_override: Some(segment.last().unwrap().hash()), + network_upgrade_override: None, + transaction_version_override: None, + transaction_has_valid_network_upgrade: true, + always_has_coinbase: true, + network_override: network_override.clone(), + }), + ) + }) + .prop_flat_map(move |(segment, ledger)| { + ( + Just(segment), + std::iter::repeat_with(|| { + zebra_chain::block::Block::partial_chain_strategy( + ledger.clone(), + chain_size, + arbitrary::allow_all_transparent_coinbase_spends, + true, + ) + }) + .take(num_branches) + .collect::>(), + ) + }) + .boxed() +} + +mod proptest_helpers { + + use proptest::prelude::{Arbitrary, BoxedStrategy, Strategy}; + use zebra_chain::{ + block::{ + arbitrary::{allow_all_transparent_coinbase_spends, LedgerStateOverride}, + Block, Height, + }, + parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH}, + LedgerState, + }; + + use super::ChainSegment; + + pub(super) fn add_segment( + previous_chain: ChainSegment, + network_override: Option, + segment_length: usize, + ) -> BoxedStrategy { + LedgerState::arbitrary_with(LedgerStateOverride { + height_override: Some( + previous_chain + .last() + .map(|block| (block.coinbase_height().unwrap() + 1).unwrap()) + .unwrap_or(Height(0)), + ), + previous_block_hash_override: Some( + previous_chain + .last() + .map(|block| block.hash()) + .unwrap_or(GENESIS_PREVIOUS_BLOCK_HASH), + ), + network_upgrade_override: None, + transaction_version_override: None, + transaction_has_valid_network_upgrade: true, + always_has_coinbase: true, + network_override, 
+ }) + .prop_flat_map(move |ledger| { + Block::partial_chain_strategy( + ledger, + segment_length, + allow_all_transparent_coinbase_spends, + true, + ) + }) + .prop_map(move |new_segment| { + let mut full_chain = previous_chain.clone(); + full_chain.extend_from_slice(&new_segment); + full_chain + }) + .boxed() + } +} diff --git a/zaino-state/src/chain_index/tests/vectors.rs b/zaino-state/src/chain_index/tests/vectors.rs new file mode 100644 index 000000000..eaf3daa67 --- /dev/null +++ b/zaino-state/src/chain_index/tests/vectors.rs @@ -0,0 +1,347 @@ +//! Test vector creation and validity tests, MockchainSource creation. + +use core2::io::{self, Read}; +use std::io::BufReader; +use std::path::Path; +use std::sync::Arc; +use std::{fs::File, path::PathBuf}; +use zebra_chain::serialization::ZcashDeserialize as _; + +use zebra_rpc::methods::GetAddressUtxos; + +use crate::chain_index::source::test::MockchainSource; +use crate::{ + read_u32_le, read_u64_le, BlockHash, BlockMetadata, BlockWithMetadata, ChainWork, CompactSize, + IndexedBlock, +}; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TestVectorData { + pub blocks: Vec, + pub faucet: TestVectorClientData, + pub recipient: TestVectorClientData, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TestVectorBlockData { + pub height: u32, + pub zebra_block: zebra_chain::block::Block, + pub sapling_root: zebra_chain::sapling::tree::Root, + pub sapling_tree_size: u64, + pub sapling_tree_state: Vec, + pub orchard_root: zebra_chain::orchard::tree::Root, + pub orchard_tree_size: u64, + pub orchard_tree_state: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TestVectorClientData { + pub txids: Vec, + pub utxos: Vec, + pub balance: u64, +} + +pub async fn sync_db_with_blockdata( + db: &impl crate::chain_index::finalised_state::capability::DbWrite, + vector_data: Vec, + height_limit: Option, +) { + let mut parent_chain_work = ChainWork::from_u256(0.into()); + for TestVectorBlockData { + height, + 
zebra_block, + sapling_root, + sapling_tree_size, + orchard_root, + orchard_tree_size, + .. + } in vector_data + { + if let Some(h) = height_limit { + if height > h { + break; + } + } + let metadata = BlockMetadata::new( + sapling_root, + sapling_tree_size as u32, + orchard_root, + orchard_tree_size as u32, + parent_chain_work, + zebra_chain::parameters::Network::new_regtest( + zebra_chain::parameters::testnet::ConfiguredActivationHeights { + before_overwinter: Some(1), + overwinter: Some(1), + sapling: Some(1), + blossom: Some(1), + heartwood: Some(1), + canopy: Some(1), + nu5: Some(1), + nu6: Some(1), + // see https://zips.z.cash/#nu6-1-candidate-zips for info on NU6.1 + nu6_1: None, + nu7: None, + } + .into(), + ), + ); + + let block_with_metadata = BlockWithMetadata::new(&zebra_block, metadata); + let chain_block = IndexedBlock::try_from(block_with_metadata).unwrap(); + parent_chain_work = *chain_block.chainwork(); + + db.write_block(chain_block).await.unwrap(); + } +} + +// TODO: Add custom MockChain block data structs to simplify unit test interface +// and add getter methods for commonly used types. 
+pub fn read_vectors_from_file>(base_dir: P) -> io::Result { + let base = base_dir.as_ref(); + + // zebra_blocks.dat + let mut zebra_blocks = Vec::<(u32, zebra_chain::block::Block)>::new(); + { + let mut r = BufReader::new(File::open(base.join("zcash_blocks.dat"))?); + loop { + let height = match read_u32_le(&mut r) { + Ok(h) => h, + Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => break, + Err(e) => return Err(e), + }; + + let len: usize = CompactSize::read_t(&mut r)?; + let mut buf = vec![0u8; len]; + r.read_exact(&mut buf)?; + + let zcash_block = zebra_chain::block::Block::zcash_deserialize(&*buf) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + zebra_blocks.push((height, zcash_block)); + } + } + + // tree_roots.dat + let mut blocks_and_roots = Vec::with_capacity(zebra_blocks.len()); + { + let mut r = BufReader::new(File::open(base.join("tree_roots.dat"))?); + for (height, zebra_block) in zebra_blocks { + let h2 = read_u32_le(&mut r)?; + if height != h2 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "height mismatch in tree_roots.dat", + )); + } + let mut sapling_bytes = [0u8; 32]; + r.read_exact(&mut sapling_bytes)?; + let sapling_root = zebra_chain::sapling::tree::Root::try_from(sapling_bytes) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + let sapling_size = read_u64_le(&mut r)?; + + let mut orchard_bytes = [0u8; 32]; + r.read_exact(&mut orchard_bytes)?; + let orchard_root = zebra_chain::orchard::tree::Root::try_from(orchard_bytes) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + let orchard_size = read_u64_le(&mut r)?; + + blocks_and_roots.push(( + height, + zebra_block, + (sapling_root, sapling_size, orchard_root, orchard_size), + )); + } + } + + // tree_states.dat + let mut blocks = Vec::with_capacity(blocks_and_roots.len()); + { + let mut r = BufReader::new(File::open(base.join("tree_states.dat"))?); + for ( + height, + zebra_block, + (sapling_root, sapling_tree_size, 
orchard_root, orchard_tree_size), + ) in blocks_and_roots + { + let h2 = read_u32_le(&mut r)?; + if height != h2 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "height mismatch in tree_states.dat", + )); + } + + let sapling_len: usize = CompactSize::read_t(&mut r)?; + let mut sapling_tree_state = vec![0u8; sapling_len]; + r.read_exact(&mut sapling_tree_state)?; + + let orchard_len: usize = CompactSize::read_t(&mut r)?; + let mut orchard_tree_state = vec![0u8; orchard_len]; + r.read_exact(&mut orchard_tree_state)?; + + blocks.push(TestVectorBlockData { + height, + zebra_block, + sapling_root, + sapling_tree_size, + sapling_tree_state, + orchard_root, + orchard_tree_size, + orchard_tree_state, + }); + } + } + + // faucet_data.json + let faucet = { + let (txids, utxos, balance) = + serde_json::from_reader(File::open(base.join("faucet_data.json"))?) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + TestVectorClientData { + txids, + utxos, + balance, + } + }; + + // recipient_data.json + let recipient = { + let (txids, utxos, balance) = + serde_json::from_reader(File::open(base.join("recipient_data.json"))?) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + TestVectorClientData { + txids, + utxos, + balance, + } + }; + + Ok(TestVectorData { + blocks, + faucet, + recipient, + }) +} + +// TODO: Remove IndexedBlocks and Compact blocks as they are no longer used, +// `zebra_chain::block::block`s are used as the single source of block data. +// +// TODO: Create separate load methods for block_data and transparent_wallet_data. 
+#[allow(clippy::type_complexity)] +pub(crate) fn load_test_vectors() -> io::Result { + // /zaino-state/src/chain_index/tests/vectors + let base_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("src") + .join("chain_index") + .join("tests") + .join("vectors"); + read_vectors_from_file(&base_dir) +} + +#[allow(clippy::type_complexity)] +pub(crate) fn build_mockchain_source( + // the input data for this function could be reduced for wider use + // but is more simple to pass all test block data here. + blockchain_data: Vec, +) -> MockchainSource { + let (mut heights, mut zebra_blocks, mut block_roots, mut block_hashes, mut block_treestates) = + (Vec::new(), Vec::new(), Vec::new(), Vec::new(), Vec::new()); + + for block in blockchain_data.clone() { + heights.push(block.height); + block_hashes.push(BlockHash::from(block.zebra_block.hash())); + zebra_blocks.push(Arc::new(block.zebra_block)); + + block_roots.push(( + Some((block.sapling_root, block.sapling_tree_size)), + Some((block.orchard_root, block.orchard_tree_size)), + )); + + block_treestates.push((block.sapling_tree_state, block.orchard_tree_state)); + } + + MockchainSource::new(zebra_blocks, block_roots, block_treestates, block_hashes) +} + +#[allow(clippy::type_complexity)] +pub(crate) fn build_active_mockchain_source( + loaded_chain_height: u32, + // the input data for this function could be reduced for wider use + // but is more simple to pass all test block data here. 
blockchain_data: Vec, +) -> MockchainSource { + let (mut heights, mut zebra_blocks, mut block_roots, mut block_hashes, mut block_treestates) = + (Vec::new(), Vec::new(), Vec::new(), Vec::new(), Vec::new()); + + for TestVectorBlockData { + height, + zebra_block, + sapling_root, + sapling_tree_size, + sapling_tree_state, + orchard_root, + orchard_tree_size, + orchard_tree_state, + } in blockchain_data.clone() + { + heights.push(height); + block_hashes.push(BlockHash::from(zebra_block.hash())); + zebra_blocks.push(Arc::new(zebra_block)); + + block_roots.push(( + Some((sapling_root, sapling_tree_size)), + Some((orchard_root, orchard_tree_size)), + )); + + block_treestates.push((sapling_tree_state, orchard_tree_state)); + } + + MockchainSource::new_with_active_height( + zebra_blocks, + block_roots, + block_treestates, + block_hashes, + loaded_chain_height, + ) +} + +// ***** Tests ***** + +#[tokio::test(flavor = "multi_thread")] +async fn vectors_can_be_loaded_and_deserialised() { + let TestVectorData { + blocks, + faucet, + recipient, + } = load_test_vectors().unwrap(); + + // Check block data. + assert!( + !blocks.is_empty(), + "expected at least one block in test-vectors" + ); + let mut expected_height: u32 = 0; + for TestVectorBlockData { height, .. } in &blocks { + // println!("Checking block at height {h}"); + + assert_eq!( + expected_height, *height, + "Chain height continuity check failed at height {height}" + ); + expected_height = *height + 1; + } + + // check taddrs. 
+ + println!("\nFaucet UTXO address:"); + let (addr, _hash, _outindex, _script, _value, _height) = faucet.utxos[0].into_parts(); + println!("addr: {addr}"); + + println!("\nRecipient UTXO address:"); + let (addr, _hash, _outindex, _script, _value, _height) = recipient.utxos[0].into_parts(); + println!("addr: {addr}"); +} diff --git a/zaino-state/src/chain_index/tests/vectors/faucet_data.json b/zaino-state/src/chain_index/tests/vectors/faucet_data.json new file mode 100644 index 000000000..fcf3511da --- /dev/null +++ b/zaino-state/src/chain_index/tests/vectors/faucet_data.json @@ -0,0 +1,1211 @@ +[ + [ + "74a24d18167f1b0bb6205c511af465f6be584d694936ddb8348284be9659f4d7", + "63944c38c191ca318079e25f0f00e1a2adc2dc8f486702a875aa800a8a0592fa", + "ab1595f0ce4a41008f8d35d0ed4d1e1d78fcb802afe1f901b93730d78b22a13d", + "69aacfe630a7750134e44fdec22b242c04fe8803bb2cbfab9f503fa0029cd925", + "56a9237474c678bcc9e5495505bdd4587c9daadba52648081d79f41edd5ee1fd", + "e0e7cd8052c9691faf72ae35db3371b5f0e6cab8b23bdc3084b8742d468c3448", + "884e220856cc303d42a872777068704ac2138a720099ca7538e880e763c0d193", + "6445378586661bab00261fd7d8cc908ef047206ca363d2e84ed8dd9a3d64d066", + "cc4bf3694d20eb2118cf3d098bda72ce3ecec33dfd01f0ed1618488e935ab79e", + "b59f2fbfb95d6ba681f9695f64ce6f306bebd3e76f9ef4176659d65722718c9e", + "a2ab86d27b5737b8333072c886c31225979adeebc2a7472d3a1c653098009367", + "825981354bb2e8794288037cac89f6de78d4c21ab095b9df44d46e98d73b46b3", + "3f92dd2c2e90ca6c75b26290e53b30bdeeb8a170111861161404dd8b26bb8c79", + "d14f3dc459cd6328118325f9401bb26200138257c50207bbbfe1ce37a59c49e9", + "9d10e8514e6c3384354a86bc7517915c5073310df41197c23361601e63d26ea4", + "d53436703f659bd88a628867cce294dc331f37eb18a9f19f274870d2b17699bd", + "e100815f310c45030aab88a7a1b4cdd6823265609aa133329dbfd42189babc05", + "e8e96714b73b2816d6867613806061ac6366c3cd0646e093bc6079d9a8feabd5", + "b69d34dbc75615ebe550fd1dfcc7d33567dff59bb148330f4c74efbbdbfecfd7", + 
"5d6f5d49f214a2ff5ef1936d8b249d454305e1ffe9949d8f02fda6503bbe9611", + "1738df6bdcbe4a1f8ea2f5169555ba6f1d74597efa7cd12135f6c736d8478a2e", + "4ce6af5211eb0025f5d389ec5476ce6a78dbe9241449406cef4fd8bd2f705448", + "40a846e2eeda17a94b99712e4cc4a632b65687b6f9c9d8ea7c321ca4c8cef6b4", + "b580f2340a917a240412d4ee1a13ba52cc896e2614d71fa949a6779f03daff49", + "31c59b87e20a7d9eea7dd876b7127ae6c65ff2e6ff0b68ef6e6bf5600727919f", + "c36a9d4fb88476217981b22d2cd14368126a272b7e0a9f03ec724ace4dcf48e5", + "9cdfa2f8c9377d0f64fcb789bb4632c073703c039fba23392370975b0186dc25", + "904150a25d22355eb29b4cd3b6f7fdcd45465bf04f030effde02174f901e06b0", + "55f6ef72421e10a40a3be6a188ae600cdbb5088039787d48d559fcbc847c630b", + "c080862f3d01f1e48db3f6c18931e56df6192ef2c997317e835941a374395ee1", + "033f3edbc2769190f78513d537168b7d04ebf5ce624eed0344c8d156ef46b0f4", + "37438c23a48325938a7e6ebdc192c30c9a1d9aeb4c91ff3fb3b6401d7e1dd22f", + "7d2cd05a3414012fcae2a7982c8c03e1bbeeab88579d1a04babf8d0330cd9948", + "82c8dfa4d6902bd5deb4911dbe68626fa5691729d353aa20901f40acfb5b2be7", + "0c99fd295116cf5ba090ba83b96bea50b3cc9c528732485047684e6c38a69a91", + "d5bab49570e148d3d284f2e0d05abd4b18e9121d46d3103ff8cfdbf5cbd22196", + "f7292e6d8639fcbb2a5edc854898a1074fbd1ec28c3d787e8914ad6aa205ada7", + "1cb37ae1a4292a8eedf3d5fe3e403e05f71fc5711408e20a44041a7e1fb06c79", + "b541b8b70520588b3e9d040e0e83500bcfd87199fcbf3a5b37603e11eec69789", + "29444d61bce416f08d61c1da13c239f435c6954a091c7fe067defe23088c805e", + "856b01e8ea1240e64f5fa0f68608c56cf198de3732e90250f8b59b2608b38c43", + "35a5de4d713008b261f432ddcaf861455c2cfc32ad85a70418c058100f9bdde2", + "f5fe8dd25306f8406c1397b1ee3e5c733879373454bee5d41d0fc85aacd9fe96", + "7b04fd3789677371c5f134f7e980e68870942e8c26666d4d932bd13d76650cd3", + "a473b476c7d3ad12ae619bf94a45495cc440e0eb2b0933776de2f2d09232e7a5", + "ff0931c67c86b15d856d7f0945d472215573fc974eadd9d8afb9dbfdacdba0c7", + "f7b22ebf727a455206f48dbb85a99754360d5645baeb0ac175f591f43da390f7", + 
"8eea126c0a71d6e7d2d65753923ea89dbd12a542f62059c6248deed0189d9064", + "16c9e3d1243d3e786b344f94f0cf0506c44ac3ce782d881202d07fa6930a6361", + "5ce423ba27239618d709719c06c7582cd8efd469f33ca8b5d844991355107351", + "a6e63a259a4211af4c5204eda4222c8bbfa4773552880dd90c1a552a7b97c546", + "f24f2d4d48ce07e28121f2eb459718cd8cfe3b414ce38c7d03bd387908cbc293", + "f2441b23abe4126fd71e226a6352850f23fe2d76f7a74ff3d34ff3f0e3714a1a", + "ab5e8f54a40f606c1cc4efb9a5b44d74e3b9679c759fed96e1879cff4a9c8620", + "c095639233b65337c904785e36a5013aa1e2bf6829a6cb51cf8b84e81165489a", + "651f32811c0445c7046a40d91fd4414960c96339614767713259282b89d0157d", + "442dc3e6bae0f5e21c83b58aa8dad1dca8e8c8bda741ca9a578206b58b75f28a", + "30c8fea7eac0bfd97fefc3d04ce8f001c6b66bfbb43697e59e94b4bc9314ddef", + "5c2f87e971ad3245e181fd29e00a7643162e3197be039078dd436a87af1a99ca", + "5bf4e82dafe36acd1a34765425df476e65a24fbc8262dce647e8609444e1099b", + "b94cbf1ef71906197929836a928b383c181b384b1218b3b3d2154b188016e5d8", + "6df7ec2eb3fa6090567012f60658694d45d673a073db7af3f2d7842ca5553686", + "80dcceae9be45037ce8488dc836d8b6c6935b3c66b352715a9cca81507e65deb", + "d67a455c2664f5c43c4bfe16901177da9cfe671199b1643854184948a12a84f9", + "6422cb55836e0cd6fd98f6b8cad603df897c552196acda50ca353d96137f9c07", + "733bf6173023348fab7951a02e8a9289d8fb1a8e8e06048ff7ca5caa681f3a50", + "060a50dbd6fb8b661be95551145601a9902f26bb8d2f27103c6dae5fed2af483", + "1b4cb9c282060004fac317c87f18170389950e5c7fdfcc645cf56c763723edfd", + "8ea00e15c3187710d92c556455b68268e7e23041aeaa012c9d73ad2c8e38371d", + "00b61cc8c87f401e03ee56a16e1cd1c3dbe481622f8f91dd47fced54ee498d66", + "1cb4e511d6d4329226a52ae7128da312a94c1d6e9701a818acfd71f06cb6aa5e", + "68b8588b59281c84ce6b150db0a59ea4aa6652285eab48b8a2c91a1d81071c22", + "a8fa47a5413512e7f8fca9a8fe5a5045d09092d6c37eb951f95fbbe9656fbffc", + "348dbb638471040c8973e5102b9f332850868a8ccef6bb5e001cdee172866dab", + "dfd8214d623ff7094b54a03ee11726878e5da69d93f002fe213d45709c9bcaba", + 
"2fe214cd23e2c766ae2cc0b0a761008b898c0c59a98f427d6720b6baad837be4", + "f3170915da1fa4af58eca87da18d684b5eecfe272031e23f6d140a025c5b67ca", + "ae260b73b7b506a4262d73adc4083f379b49856a1a240c4b0a0c4984e1e6ca4f", + "db790a5a4c8c459d14a392425c3457de182c4b1d35d0176bce811417a015da3f", + "8370a43a058366d7b31f3e0525a964fa8532abe46612407113cc359a81993769", + "a4ef1bc346cddf1fcd1973f73ab4d43238b39e5288cdba8a50f015ce8a042920", + "8c2067083a79e55cf4ce3b171ecd72ff192a2afdeba6a4be31aa14242f29936a", + "f3dc2311e4a454c0d640f73027ca95be96e2bef1a6f4fb7425c4e5640e540779", + "452604eeafb64e76bd97fd7a93e7f096e66f9f21b7b87eb1ca81cfc8a4e47431", + "d4e72e501004cb43c71e75686610a6723f45302c4f123fa0e990fda12a29a55b", + "e0b335bd55c527734b273fdea05f6db4e3a09ea4a432bf46e7b60ff0149220f9", + "ddef8df34db0b8077ccc7d31f218db90334fcec4cbc66fdd819d4809184d818c", + "053645a3a042b04cf8b696f54a507f8807eb5c1c23470dc813f45f737f5825c2", + "56988a04d9b4f01beb6709990af8b67bf53a47de14870b1c230bd1ddfe10f2e5", + "95829b4d626bda5c9c5c7ffe584e1850d82af4871da42c6f0dbf053681434613", + "7fc91dff4416e0263c818cd8e8ec02e03e0c8875208ba4d6344cc4707adbf522", + "38f966ef82cf8647dc2704c795f93e2a6ccb08c6a35161a3c5b79583021f3487", + "4d84a49248e1093ee977f7ff244a51e95bfbc2ca2c29e40e92160290ade2d239", + "42b13a118059f14a0c7f7ffc8c85515fc86fa90d860acdd8facc0c327ea0953f", + "bb862fe9626db6003e132ee146fb1e0159e055efac261d1498bade34787aed78", + "8a5032a2956481de299ab84a83510bc7200c05a373b4d547f82e590ec2b4a276", + "fc055fac789a08bc6402c4f955ec5d4d03747a484121268051d73115ff843a60", + "6387ee687f253612c81b5532c332f549422be8805fccd468495eb8c2e220a6cf", + "93169868b3daae4a7724dfa724b57a29a6b49f423dc503bae926e615f403feb9", + "ce9b91cda233efb580ccd93b8ba76ed4a97c6ff2843889329ae147e78ff50777", + "4c0faad407b6853dab663a3b4fad530fcbf2c14d8c0fa3854421081860019d20", + "fffe935c4ca56a89a4fd7164444b987446f58a7bc29ac666ac6b3c9211fa9e56", + "a9576b2108807d57a5a40ec1797f891ade09683439f118cd2fa01daba6febf9a", + 
"3bb2281c3030e339361a48d7a416e326a84cce1db1de6dd2c257491cc1181973", + "d9dabb271cc9a6f8a9078fd7ddd14e815a59fe76c986f390ed80a3d239003c9a", + "8d77d96c8dbc39d8dee1b7290e749a29461cd2b542a2c9e465e9a6ea99226159", + "785eeb3e0e1f67893e1ab847d33c5c0656dac8da5d20265bf560748ee80a908a", + "cf9b8777d09b28eb11eab78081b6a2ab71cffaf2ff26760df9c7c24ede4bc073", + "62b9ca94970f943f86c13bda31bfd742d0d2c45b69e882ec5a80c7434afa99d7", + "d3796afa78e766c7668c10a2e7e8a485c45556d41fbb055617a5f2b728208a48", + "1ef2d13f389e578936f371bffd675ced98f1000f2f62fbe0c0ae78ef5663802a", + "191c77439137c198a98ffd4a9286a9bc9445a97d95a3b87c8dae1c2e26d8a05b", + "40cef9c3f909ab32e20db329b9fb354726272873a4200bdfd82cc032dad970f6", + "000a40c25bac003cc1e9bbf617a171e6e410a2c6ff8aba1570ad398ffef72e0f", + "387287ec117ff614b8d8aeddbf39ad6de062e653f894b982080b7b63728aa5e4", + "cca083fd03cc3e1f07f70358427d9ac87a05f0a4095580268d41017d2d5271aa", + "a6de202a7d5d73abd7cdde540556bbd5800ceda9984f19ef9ef3ccace6a57da8", + "5e94331f55fedcd5cffbb2f2ea9942fd66bb1d9e534df9c5305c16085874d1a0", + "493ac33e412de2d3328bea7c3597bb2b0cabb59b777c435d714a7c1297942a65", + "adc81aedcbbdc74716fdcb94e654f0feae4b4ca66723e9ee3c3e72adf8e0f2c7", + "905635a80754d6cca458287a550a8a76d3ee665dc6bdd334fd770902a2b99fed", + "a578e5afc0299bdc5351e14ccd179214f3c7412c0f80006b3fd328d45f397bba", + "c1a8558a37e0a33e00c599b8649f0c5050908e708906747c60640bb7473a2a8a", + "d308f431ce06c097cb15fc95a198c1a5cee986809fa78e4210aba94aa6d7ff7c", + "2c3d9844f392b3ee4925b833c0cdec0eb6878b1b6dd79f701a83ce3e265d89ec", + "8edb94697cb3238caf3ef17de3a8c05d0b8995a874dc7a58445560d8b47489c3", + "d3a29204a81625db10d7a7fe883ef718b40d1205ac7115f5d3f79a04707dfe68", + "470e41a1353a326f9e44470082d017af6caf086d42bfc335038fd1bfb6df4aaa", + "92e6cac01dc183fa152b33ee05aecff48d53f95e9f2e67c4248d54bcf4d4cd18", + "15fc5d28ca87de0cf206e75fd6767557c619e9bc7da3261f52edf52645d10e0c", + "c920bb6501ac7fc3ecfd42ba0f13dba44e432c4bdd154600c1b113af48dbe7fe", + 
"bfec29af25b1f98f53b17b5994e058b820dd3f98a6990035906481a5b87230d2", + "b5b32b48d61f485e3ddd63cb24dc698e8f7b1fa7c6f0ed7ef9f896418495e9d5", + "ddb3bedf601ee04e42af4574ae95fc2b19b86c85feda1d2dafb72dba7eae434d", + "f5b84997f4c7bdfc76c1b8b1c9b33aa57552df7709452421020156e6f1c6f54d", + "d2822a4b05340a9c32e5ffa0d7463432a50ed4702fa3c6a3d8fbbacc895ca430", + "9ef788dae4c93773cd620689f4e12fd598cd426be35c7f713d5d01a9445696f4", + "27a8ac6ca8d605f91ad7da0271f1096e41cc5eaaebe7c9d05ed05840b819fe23", + "43a90bea479f9c1baab75ecb8d2cdd5de6b73edb8764eb8f5238436e7374b978", + "49e33f662521e783c65a32026d1cdf852eed5ff9541b9c50681017918b005982", + "f48ee090cad6e4f19e0fc3e10b8d4a950d60aaff2d6a1cc1070e8349d1600138", + "3273aac4445b6a38e3ccb2bdb3d9dabbd48c27bd20f776b9f719ee34c909c441", + "3b94f81dd04fdbf91bc82cf85ac1de6c7d643c4f2ccec3e666f40ae08e8996f0", + "a762a83278d8a6c2fb7de51e386d4608fe702b439bdff93d0995aeab6788ac26", + "b229317462d6941917db27261b492f6c29291da997bd8957f28ed91d246c9fad", + "a67294f48096e5fc8dff51112d898902bb0f844106b197dbd27c851f8fd1b91c", + "4e00da423f3e67b95637728e00a13d4c066c09ca63810e366a14eb9f55aadd81", + "7a60e2d44c550b2c31855ac00fabd8d8c8b7d8a6654b0803254079a45fbcf4ba", + "e7c9961626c22a8c142e403f74888e5eb5404586148b289fc9d5dbdc147a8268", + "783bb955c875d8dc3acd2d2d6df51d3a213b80e8637c04a113e7d40bd2e2a415", + "245fd3fb6d36cee8265aa82fe19cad6d105cb70151171d8a83e732b22f47269a", + "5caf262c3c32020bf373cecec9afa63d3f520d1e1c7cd5f57e40088685c3f11f", + "4ed2a77addd31832c8bb39874ae6c107d2d829fb41563c5a05b68bde3cafedd1", + "dbbb96ecf5a2e27f4973d302e10af2265461c81aa8cebc97a9a19471a8580941", + "1009f2f1a33afc230e7c44fb1d8605b47b62a86df5186c0e0d0d9aab0fc1a835", + "4bb7bf892ba4a91d936dbd7f23fd956c69ad5d6a96d7388665520b323da885aa", + "625a02a9620ce0e5cfa1692f343b7c247c117bc9cf9d61f5d32bc70bbac2ac10", + "2c29bcac4528ff00e5ebe57dacb0eecc7a286912ba0db439e5665abfba276bc1", + "ceb601bd413e9def225146ea800e8f38a33fde698b28a8421a54cfda33094020", + 
"1402565db0deb5f3a177b3e1de493c4d5e7d13844538d8d95fe8c33a9ba43f3d", + "1331e9ee8c8d04e7ff4d5e6268c218df34a81a73c857ac173be605e3f9857a5c", + "9ab89e7c6bab05b718f6b0b1fe969a12e76640d9a9414336d0221def2647068a", + "8cea8c57adf00fb3215c49b891d00e233096b14816afb7f2fa3da98c07ed79a7", + "e14c15391bf28e686a8db8a7b7140e55bd3b8421f025fa660c7e99eaa098486e", + "f3e4bc50dcb72d696ba2c1de381817f3223a056a72f579af54c12ba19eb72463", + "f5d1cb99734127a99c8a8b2ba21e94d82871164381c40fedf22496c0022932aa", + "b9558e6ae52d06ed811c7677d5f32651732c00de3e6c277a2ac53140e7770ee2", + "a5688be5a80583a6a42cdb636f65d0593557fd30f00945ad03c2c6e284730e17", + "ec320d940224068ff1614b9ade9149ee0a8a10795d9ce63a5f80a49ef0b2cbb9", + "2d6ff628330c61bcbf3dee526ce4d8513dc21ed96baa804808e93b19e2ee0ad7", + "298d6065676aec0bf788b20a5cc2da601f755227495ca1ae11db38b44dda4064", + "8593bf8b177d432e9d41e375e86590b189b259c3ff03fa36c8f1b46463821ccc", + "bb5f78cf944c8006a65b3203341f1ad6d9cb733273e2e71230579e25bc9af65c", + "ed6d2f940607f76274a44dc604002c5e026738f283a21d2e010229f5dbb8a8c3", + "b498a3747009490a2021d34d62193af7d45a5d8023e9fe3fb4da15ad47593208", + "39b61f69db691f9b2aa0010688091734f2bb4d766b1216a230dd433aa61f7af4", + "0bcd16711aee2f32e5e20ef8dfc086719d27668a758da40ca2b0a1b95c027410", + "f8624f75f38e5acf740d50934f1618331d326d618175a8a9ac5d47b316939d75", + "5901f153aae897c5079b348a3b5fa7718689ecf9df297e5ef0421f9f17ef86ef", + "397145d95e0ee2cac2558e5b5c13f44fbc28c65db46198674597257433a20bd5", + "5ebdb3b26ec28bdfdaeaf5eed95461b4de58f330827e3d98772fc2b2b2fcab7d", + "fab7112a5e94bb5289919aea4bd441216f12747c20bb3a19d7c8f625cd762031", + "e124e3e7aa1b168ef280d324d0aae9fd03518aaea00ca71fb6183a60cd5bb98f", + "968a995808b5e4ee42a463bcd88fea9dea947f038da6b0d7d867b75cfcf04f47", + "d154f86dbaf25aa5310be9cb71f5e8ae6180dc10380812fa759d7f27e70b12b2", + "61211d9e05b17aabc146a49f0556f5e25217ef10b5bf9cfa4e7210b1df18fb7f", + "a4e43e250c9a7dbadf1c37b9a2912ac91ad44eac9567863a45230367ada151a7", + 
"a1f41f45724be444f241bef3d7578572c8ab390ca15cea8cf7dca543a3246eed", + "b7654992d5cc49f0df52af585f7a3b530542c3f5b775738e5cbedfa744082931", + "ce9893af233000c1f1df4f79f4a5d20cb9b9e9e4bc34b82ca8ded260df4d7fa1", + "109799d0ded1706fc46c84a7cf391ff2cc81c341a2649eeb291d43a08ecdea1b", + "d989bb4f24bb8adae66c7ff63db290ac11abc951dd18e9d925bbfc7c8002953e", + "165777c6c81acaecc482d231c6554d49849bf0cf51dc15b891537526c4264987", + "a1e6f07a29b61bb9b88dd8bc812699e8a2a9bcaca017e2aa97e263eb1b4c1ce1", + "35405f29c9f6ef4137c3b5303ab67b71fefe90d5a738564ec3b4edd5261a081c", + "de0d62aa243209d4690a52ca04b7137de63057f6fe4f6388ce41f8fe41909dba", + "9d29d1006c53b6383d94b7ad484ad2f374a053cb32677ae530c3883bdbfd1011", + "94143a8432743eb5ba258d4fb904d7937f8fd9b674bab0c9c6ee6740bd5fad92", + "5e22a38af98aa8300c383116f9c4af8f17275a02a06ec28b33675b5b81c1293e", + "71b56947ed7052847bc1e937b20f103df9024ee272349080529f5bca1ba87bff", + "ecb7d002f83f116f6bce10702eb3bb0d406945139f6a747dd6e6e06424ab53ac", + "0113ce276025ba9c6b0c5b2a99e850a8d18ae816aa4093203ac1e4c52e34955c", + "78a2e443fa4b69481ed9b9d4b50e6be3788e1b6e3a71da63b85b13ce670399d8", + "8e4d3b6f3b2119dc4a69687bff5346886fe51f28b9bb5a1901a5d11c7ba2b1b0", + "f7c2199722cd9a8aae0f3ad44d24e11bc045144849b690c360076ef28b5d94fe", + "3a3cccbd3c104f12ea2644d4c202e73b41ec9cefd63811cf82b550540969dc60", + "0863e6921184c392935509493fcb2d77160ea947ad0a3a194ce47732d12fb494", + "d3331d33d26b2e284d8fafdd380ce54b5187f037a1bce5caaafa876f104624e0", + "f4d1de043d889b86d488f0d0a5cfec0144105c580b5d9a0c8f4b9d410d40d8ab", + "3fd41ebfc4a9ff6d973371519bfd7a890f46dabf6f47c3ff7833ddd9ce2e3d35", + "9a5813358bf8320f45fa4e661aa4d76d5b38d39dcecedbdaaefeb27ee1a08ac3", + "ee89e490eb7169ea97b0cb6591efa226fb2da854b210a13279a8ab886ac481d3", + "c54b5b6ffb899ee5ddeff85936eb44c3bfc36a7bd613bfe6e88aaef0503bcff0", + "8e99540fd112cce61a836a202f105a05c116e4f7db59e91b178bccbb26156089", + "b681d3ace71b736e98de4e1eae044916f8af829f95c37fd366a9199d931da8bf", + 
"ff160ca506746163e0dddc5cb1976016f18e131e792d6158bdcd3db0d6a6de53", + "3b6e1683c0152ea10066ef99a9daa3e6daffccc4af4033aea72121840b067ff2", + "4863b9b6084cc339aab54c6836df30c96f8642085f1700bfde472d652e07430d", + "d2eeee1385e2e2918908fcadabba1d4764141ec0546f1db3303d6f1d3a527eeb", + "4395a65fb0eea0c7d0ae2629449f6435e79ac8df757d170f61542efda6e016c8", + "8341767afdbe82e00e472c9b9dc3492c9ac664d0055c5cfbb19553a750885007", + "081a090d76f6390637dd0b9d9f5de729f472c9106697dde4196e1153fca405b3", + "45d2384d04db98225a943bf7a062e4015dd00385ebaa0055b74a980d75f4f233", + "7c2f9a960f1f2c8e5243d0fec19f2f2f04f0ac0242d39488a6aa4f7e6d074c4d", + "5386b6dd9b619d0d0095579bde7733420689e63697f94390460327dcb11990d4", + "ef3f3daa2750674c8d5a3fb706576cbc4fd1a4c1af7fa3c413a95930f35da9f4", + "0eae700292a6530a2fae3127669c4b4a23b54b81d359301a92134370a6352b78", + "d048d276e8fed08fe4f77d23a99e4e73a5e112bac231e151c4349422dc4e73d9", + "468404f085a91213483b0255679c563c3e2363785bbc39253d491e2014c53a71", + "fd2eab624294dcb1e6943c888efb4347ec164d36592f74f98b47007d2bf11f18", + "efec3677e3b4b9d08bb7d808f38cecda6effa9a25a616935e5f76452edc21a1b", + "1160d99bddd2d845baea71193ba1505e2a17458721782080e00ebfc76671b6f0", + "db08dae2901b9f22a79b110dfcc69f0930b380ba088d365c374f32d8d82997ed", + "5d4afe6cf5b17943bd21481382b437bd25ce5f24f5d8d7a529b42737e748ff68", + "e0caf13bbdbab0d94accbb5959eafee6becf2af3b43bd78c7fb4d02a5ce7687c", + "c81d0c7a82ad54c5be31aafe974509006791941d589dc0820108c172fb96dc79", + "14c4a43dd265c2708e7ba681cedbad205bf019550c01c6a5005ea5e0f8fe8131", + "eb883bc090d7e2d0bcf37c7386c2459bd7e187a2c0dbf1798e815a3228361f85", + "9819bb504d50dd56be85acdae8d4da5ac0cdf547d9bf062fc126ec5a54e67c2c", + "b789e09a2ca71ad85c5037542fd3344e3679415a433b7844d26dfc15dcffdd2b", + "2563915fb1200f27c4de2b87604d6c62a2d69d0b8aa77419ccdde2951484c090", + "311338be8ad6ca3a6925c8fee1f1e18f60c8fc84dcebc5385ddb8aad4c78ee95", + "685a252dbaf34b64a28bd217d06db171622f471bc4da06719ec56931245ec924", + 
"a622ff261c67525ef497ec5d573c536152204e3c6864dc196e66d14daf6f1332", + "2e5299fe8889179f7fede54e3f0eeeb28e5a0c804aa5d365b92176939046d0ff", + "90360ecf21a857f16953cc1353f032fc253cbc9260ec30cceba829f7f89c5155", + "a62f7495d0848b2150e3c6e7f6bd9167c5ceebd366e7c2fe18ac56ff724ec37e", + "1d5442b2afc3e5357e1ed16e140740a50b22c7a67a3d8e7922397b0adee874f1", + "ec7286760e289adef8af6b6f7a2fb66341f441ccced213867d2c38dd7d89660d", + "69122646bda3a4f4e8f8b3fbd3ca3be27310ea85645fdbb100d21f06e0de19d4", + "7cbbda1677c2ef334bec9c6bac9786a0fd0e8bb27a5fc6bdea57d388fad04b5d", + "33d1f4d17479079d3b949d1bc801229ee3ebb9fb2c572bec62934df76cff764e", + "2ada52f3187626128267391cf5c89aafa22ce454b5f4627e312fcc2a80a7007e", + "94b0cbcc77fe8300b054778b3fe4f4bb7ea1a4ba514207b2429a3d404d733752", + "1c50e12d7084168448b5cbaa953e3b49e1587a10423a3e427620b63546d479e8", + "23b5b2ebf2448c5629dedd3aa6dc6aed5aa0626525174ea92c3d3e1d50cc57dc", + "94d4645b7e6c6f5f7daf1a4240b728eba12915b427757b16e3f2c517f88e6efc", + "d6f64867fb31fbc2525772b0a62013960f59380aa51496c6d403e4b159fec73b", + "2f75a4d56e681292a9e901bccc6e74d9949a727869d6a989f71a0f3d56ab35db", + "33fb123e0065c2a8289f9caaf3392c8bfe8fb3a282985ad3de5c6dd722112752", + "6ce4babaaaa3e3aa039ca74b08709634e9f45e3c1730ba0b6699c67eeac2d787", + "ac320bbce9a23a9eefb6b96b2f7341a1923c996d95c06db3da492b61750007fe", + "0bf0efc6d660c4e9ea36460b34d1e9877461abc6bc420fafbc342968716360f8", + "dcadfa6d28946cd85ef52ab8e40c91308d35cd4e8d5643edfdc01e6c56cbbe11", + "0910bb2736c171acfaed7a916eb446fb9f8024ed5e2b45f7e5430450b660f9a0", + "f9dcd4e076192b66735f3cb762d2d6e8b3890e6d99a68ec110fb6e010058f5aa", + "6ff588c911446a6cf7d221181bf88ba7bca9281caa9a91a2c014413b9df7c2ae", + "42e5fd84e9e800af97e98fd1266dccc09ac107eb06ceddc5a106aa5795f4b8b7", + "3cd3072974701ad1e980753825287f33b6d7cd1d945f0776512f8bafd160556a", + "2e3fcdd1b04dcc5863664592955c5a2eeeca28186ff476c38a211f53fb56a591", + "a38e0a7821a8be35dbceb547b44310d935b4912fc07bea1d3acc0f97da1760fb", + 
"413a7734369e0ae9e3f215fd8cbe9bf64beb687a13a6f4b798a59a5935506de7", + "89e00ca81c1ed8c4af988ef668d24927c37749b2306145ce99391f171fbff67a", + "9ed4400cc7c50cdf98ae4150621e5da0b00e2cfcea9bb4bd4b53a0c4864296d3", + "4fdfa67ee90ed2ebcc3f2eb2b22206cc49e3587ebe75939f8efe17e6979da08d", + "f68e033cb77c9469fef52db65a1089dd4fa3544822007900a5af0c68d0182ee3", + "3fc590ae80dcb8d0044f2562be2b1eeefc7fa41d718a9b11f678559d509a8771", + "2685812b9981c3fee7005d19851fd7e64fd53b3faf96c6d934610e7002a13d71", + "1635a98919b9945bf220f6844aa99158b05138eb1280e1ba2850d883a841c0a2", + "0b185b098400d6bc21ae1325ce55d55875456f196b4906f281519764e72bb478", + "5870966d284b8765c0c83f0a703fb8167f6df58f393f1dcea7a7f0e6bd7e9bcb", + "f8381e5cda4d42c547eaa2ae8a7befa181a1ed6efac7e5b81c62b8dc9d91bf5f", + "8f56d97e2e4597d1625a60ff5128bc651b7ce0e3011230eb0940058582cbcb94", + "aea567dd91acfb6ecfbe044b5813ff7998e3f09a6c979633f89ca87326c33bcc", + "d84a45c8ef1819720032d2418158160e70ceb3a0210e20118b696bf38e47b49a", + "1a9fd4a3e05d4c0bb3120fd6566c0a872ba0545f572bf91a5491a94a961f2c20", + "932194ea92839347096a508d7bf749ef16e556dcc3df01b6b6fdd8d4cf832115", + "653c7071fd044cc189a5e0be3c4f4c064544e197168e823b9e5f9b4642a19cdf", + "1dee24997db1fb7d28ad4946c6e8277d431df9cef8d3b7c9afc6486382060157", + "9986214e1678f39c66c1fe9f804f9ddf4366d82498d12c060e8c2a144e814d95", + "53b1e0f3ed43064668beb0f6f15d188f8cacdcd005a120cacfb83e6ab59ac5d6", + "5c0a9fef39fed6dab42eb079f4cd7a0fae9c4ec511b0c95683913c6e5ad3384e", + "8294a7cb795d87249f4c36add2c25c8af443f012dd96efe7065826e12031401d", + "3ef631d22d6d248f65be4c5092c570870f83ca6e4b054df37b84e7459266f5a4", + "1401d5579a239edc79d90224ffc9f8613a8d0a6403241b14d95145b91d2ef5f4", + "ba5b156071e627610b0f511b43f6edea4c5af98693b2f0a105f0732a99b220f4", + "655045e8f67afa264226d8002c3aa3dcfe2d84d21a17716661494ed8e7976920", + "33e461946ab5b529e6fd259b78cd64900642f6984f8c0da5264b3feed82b7bcd", + "764de014b2e71bf6b446ce553ac246a7864ecbea2a38f38b7f8daf9ac05378be", + 
"7309af5cf438850209da1fd517c43ac0a6d101757a0d4ec30ee6cf90d864e4fd", + "bf8d2d00c1d2da84addb4eae10be7bf6ca530f93c035ee9f206ffda1df235c32", + "9d58cd4eb0320f2577f8a7ee5bc9193b47476214eb4f33264f2f4a7255eb0e77", + "ba446276c123d0fa218487e3afeb5bf61ee57b88b812b6f56b0d09a894ed8085", + "5ca743dca9e5be23323fca0ad91905231150b78915036af80a2d7f2198e21060", + "cf0064f10f136e8676919c452e82aa8328c854ef5a6f4e46fa02f89fb6adeed3", + "85fc3a69ff0baf312f72a952123f896a0cc6babdbd3c91b768358c138212f07a", + "cab103f59270e579e9307d2d405e449aa3774f112b49d360560892c352da0148", + "a617b10384eb5b1182d24fa2cd5af6f250ed5881db2977a4c97b1319cee48374", + "1d24dc3e969a792cfe0f445345e611f7d3356a952e8c6db1b95cb2245ef58269", + "208ff7ec648f5159fbd61f27aacc8aa1fd3d03f3d618bd10b580d61183b93ef5", + "05ae9f011b591065a3d52d764f02eb2d92c1bf211405077988373a1c5ed383a6", + "d15aecc3d33dc504394201f3a42e3cf806af58643395d8dccfb7615dbaa8bd53", + "7d2f25c3bccbf3216569f6a8441a9cc0d0ef2ff069f8e526f36ebc8ebe380348", + "e4f394b0f8272f150db057268c14522083b412a4c504df549e60cfb4af320ffa", + "6f2ed8491e2145678358a72a4dc1ffc37fa3868d0b891f93cd8eebf56488672f", + "f44f2077c211c8f69eb4714b8e00d3b396629190e5a7436088e4bb0349730c70", + "cc4ce9a5de0a4c5cc3be673ba8f84c446cc8f68629bd668c4705986cac4550d7", + "d88d0cc5a953a70c1555922b9bd4e9404298c9a69d385959873654d94c970d24", + "d360116e4c02a8aa8d375eb34b7ee64eb9b85ec8ddeee9a7fcf573fffdd785d1", + "26dddeab628b0fc106b1229fbf421e8ea8b2c0868c038486192c8aa71d8eac9a", + "869bd38a962850fcc231c042ad192900e2b65460a39cff72afe4d22b8fe64b1c", + "d8f8dc115b050e87e25635c7b98a1804c8b3345ae22fdb752e4ea42310818b13", + "5705182b73877c798ea4176c9f9c5592da0f72aff28e5885243807965705f515", + "976d8170e09d4d4c350fd7b9e750c251d4a214526ba4b5ecd891fbc8948055af", + "b07bde6399b997d3fd0141c78cdf1784af65297b380e9dc183c5921a568006c6", + "f3f8f72775ede0961d8407f0191860cc4821c8104160fa6769af7d91d9d0568c", + "ab2af0ac848e0a3c5ce5edbefb3f4146bf86722beefa4735f06572595d74362c", + 
"cc341721edb67da414ca76b38aed117d359377fa9f0ce907a332c7f6fcb23302", + "8bd21c8cca1e4ba4dd3b76cf4fa7e28cab59a178895bbd896004ef474eed58f4", + "3f8a78b15b0bb469beb98c7e8bd68b0343a7f056b0571fac182f8e1f66283f1d", + "428006caa988f2658a958cf893f2c4d8af70eaaea06f910edcb8c327643ce5ed", + "532e12bcd28cb56c7ebbdca59d3985028d1f35c6a97eec88641ead07848d4075", + "fd79418daafe5c13c605c2797b5539825943d48c15dc4863fe112ab616daa15d", + "0fff3746e5d4c07da546d2b1071debfe2063f50f61f6808a784e327b56c8d070", + "319872e7f31021f05aa93cad3a6b6bac7a00fdabaf0be5862a3265302dd351cd", + "1b3adc7d29ead359b26a359aca218483b9f3435bd43cb8d9e250076efbd66b89", + "6602462c6486b0268899403af0d8519df418074cb2f6c0233601ea033e19e0f5", + "a9d4064ad87c8d840cabc380beb9ad092c1e1a6cc9a0dd98d1f4fe5d8ceec9a1", + "e005bbbfd96868822856f7e009004fc9d537a1d9109da791e382f8fa2a5d865a", + "5ae43975821a6f0ff2b800b7b87d0bcc6985c2714d25e3ce567d2d8548121a85", + "dd1a28b7b004099288e292a57849087316cd60075ea94117984d23d6c6933384", + "837167a760ce5e01946446d5819815f35ff7da30079f2a0b3abaed09d2c79ba3", + "3f627a8b5c67f7b2741aaa9c004c99b0426720ea403cedfbb2ced065f4983660", + "6c9d3dd42199cd6ed8f92ec19cbc2fc5c68b195a62f2d4d49f65fc3208f4fe48", + "e26d22007e7360db1d292b6554e38642abd8a4fa2f888a40e82dfe403c3278c8", + "114a504aa911c366381e486d078ef74f95922b18a4111f432e85138120b52b43", + "0e21560349c1337a984a5d0bfb8700479ad59c22e720015d5527a0e8b42253c7", + "6a83409c96fadd64aaf0587e0b22be0c67408f2213c6d25ceb242b2269da25bb", + "a0d870e47760be8e8def067e363a3679188cd8ad72a59ce9dce9bf48535c18c5", + "a7f88b7e3df21ed0c22d4fddecb400debac3ab2db2c2b46a351d1c6ecf4d2342", + "15e584faa29095def7669368181a1ccde1999312426e68e1ddabc99a24b4c1d4", + "88537a719513b967533cbecf06b41ba0ac490a31b2ec6be0a1b90c910d9de6bc", + "9d753e21e19985ad48f84f3858e0e2cbece2fb1e74e2976612e23f792273150d", + "d9f9d796c08689e7c5ae5fe543a78e47fb839c2aaa00b67b1942164148c3f58d", + "5d3dfac8713f442e3807453299fc0f12cbd63b8c5ce355c195ba0e0ae1b9321f", + 
"cb4e9e48c738b003e1799f4d18ce9ac87ccd25856b460ce3aa69c6ae826a9d3a", + "2a977d2afbeb0582b46ad317b515cc6f7710c88def23d19f9614af246e3e969b", + "7db5c857be96dbbab3e1ffa8f8c6794352a65d553ab08e55aa67a40700e7ec18", + "c4fd1be23f96c7556a9a103d53c6fdf2a83c0a2913364730402cb34ca27eba02", + "f735efce404a987fbae9953cfe8be7552c3563326594ffdf28f17b99578f3d76", + "092970014dc09de9d345c083d1b501e725d4e7bf6bb159b1d5cc1fad44e118e0", + "849040588f027d4d67a62767597b09a2211e7f6086cc9ef9fe196686996d5100", + "a45987f4c9f6aee3baaec2733eeb44410d7bfb23de67fd57889f1031be7673ed", + "be9264d65ba0c6e76b08999c1d2dd604e8dec08873f4e4261e79782a2f1d1fb5", + "6946da053e4fbeee4149f5a83f63c120ca57764534487e33a795a4319cab9c76", + "a1311e97d1d133eaf1d05bdc3d9738317dcdc460d58dbd0a2eefa5ca2e37b8ea", + "d4d888626fbb77d3f5bf42b52fd1bb6e86c17b321a1843bb1df3ad348ff97246", + "5fca363aa912ac25d78a00d7fb02abed92fba0b2a095270e167d8e5872c21837", + "d60ed57b6fe38b8821ea3a3f37c0c3b16e8bd84c413a50c0e6cafb3c9a5eb341", + "20efc31d723ea58356a3febe1050fb641d42342a4312fec07682f4b268a89a73", + "a8c4a4e68421d0801563467979111324bd54ad56f1e06cb66019ade4c03ec953", + "6bfb6b3d9cb4d152c3dd7ba13349e7c0b903b406a7cfb605e74d99525634d923", + "9a2bbde294ae39b7730a120e9a00ff0c382cdf3662c6f3f41bd40e464e8c8404", + "8b89149ddab9420ec6d6b6b4c6352155d96b8cdb1e267a93fed6efff38dfa319", + "ef715279cdf8cac271dd6c7586d3a5830dee9ad33c943580a54ccfd30437a85f", + "1ce1651fb62294813dd1f4a4f4b0cd17a1cf6ade23506978b40c258f9c4f636e", + "315006029c09e56a671bdf6c45a00cc79c473dc70d77c744aaf6f687f47dce2f", + "c16c6ad222d8d156635a7908487cd443454933f666fb5b6853023090f3c35537", + "59a6332f0b08a33d0a0cb03286ba0bbc01521cb520f48afafa5b1553a4c5f912", + "645be8302c55a7c9287741593466f27db8af3bffb00450550e4533152b8e269b", + "95edf48ea94af5979799d7d9b48d2dcbceaad5dfc99870df4028f9f5e4a30e3c", + "e6f47fa7733d725a26b3c3108ce9c2b5ed2ff5f210734e2109feec229adadbc0", + "55e6ab14882f548efff73148a55bd847b0e33c6b50b030966d639d147db5e200", + 
"b9add2a98c184431c9cef686fc72c7dcd037d975599f00a9c2349ff0a282d49b", + "9128ebef94c863688c1ecea4a9554913c8786ffbb1c7e13240b07c695ac79322", + "910767887c965d5942fa3c0015200023aac854bc4d5c0c50ea67e05eac087d3f", + "1d966e2da82440b2b3372cd91fe526610414385095d5d9e61cf33e19fb1aa6ef", + "3bbec50c04f95f2437b835a98eaece17cb97f8c8194aaa8f4dbcbc598209ab38", + "c06aa4ee9903e7e5b7dd36420887029a7d8be866c47b3b7a5166e93d93f84a2e", + "42141b4a59498a9b3fe299642216790e4ccc5272768bc22f9beaf057be291f80", + "984030374dbf47d12426fa569dafb20786472f63a4e648f43651c6abaefe2e21", + "c125146b67130e1e768f2d37a32f562ef9ab6e7ad77d3260ef755b71524b4213", + "fe2f59a39de867ddfb94890776b627b2b5c5d7062a0ccfa8c523dd5c35c9fc69", + "93f9f3c0283692bea14116ae2ad824eb2aeece5bd320eafe8dfc4c570716afc7", + "010a5a37a598c1b360f2ebada446b92433905be6773f706ef09190b8dc32950f", + "175fa286e2daa34ec6541e8b2628c169269631e90a0b8d849cebae7c83ef91b2" + ], + [ + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "4c0faad407b6853dab663a3b4fad530fcbf2c14d8c0fa3854421081860019d20", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625000000, + "height": 101 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "fffe935c4ca56a89a4fd7164444b987446f58a7bc29ac666ac6b3c9211fa9e56", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625020000, + "height": 102 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "3bb2281c3030e339361a48d7a416e326a84cce1db1de6dd2c257491cc1181973", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625025000, + "height": 103 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "8d77d96c8dbc39d8dee1b7290e749a29461cd2b542a2c9e465e9a6ea99226159", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625055000, + "height": 104 + }, + { + "address": 
"tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "62b9ca94970f943f86c13bda31bfd742d0d2c45b69e882ec5a80c7434afa99d7", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625075000, + "height": 105 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "191c77439137c198a98ffd4a9286a9bc9445a97d95a3b87c8dae1c2e26d8a05b", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625085000, + "height": 106 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "387287ec117ff614b8d8aeddbf39ad6de062e653f894b982080b7b63728aa5e4", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625115000, + "height": 107 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "5e94331f55fedcd5cffbb2f2ea9942fd66bb1d9e534df9c5305c16085874d1a0", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 108 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "905635a80754d6cca458287a550a8a76d3ee665dc6bdd334fd770902a2b99fed", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 109 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "d308f431ce06c097cb15fc95a198c1a5cee986809fa78e4210aba94aa6d7ff7c", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 110 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "d3a29204a81625db10d7a7fe883ef718b40d1205ac7115f5d3f79a04707dfe68", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 111 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "15fc5d28ca87de0cf206e75fd6767557c619e9bc7da3261f52edf52645d10e0c", + "outputIndex": 0, 
+ "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 112 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "b5b32b48d61f485e3ddd63cb24dc698e8f7b1fa7c6f0ed7ef9f896418495e9d5", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 113 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "d2822a4b05340a9c32e5ffa0d7463432a50ed4702fa3c6a3d8fbbacc895ca430", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 114 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "43a90bea479f9c1baab75ecb8d2cdd5de6b73edb8764eb8f5238436e7374b978", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 115 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "3273aac4445b6a38e3ccb2bdb3d9dabbd48c27bd20f776b9f719ee34c909c441", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 116 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "b229317462d6941917db27261b492f6c29291da997bd8957f28ed91d246c9fad", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 117 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "7a60e2d44c550b2c31855ac00fabd8d8c8b7d8a6654b0803254079a45fbcf4ba", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 118 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "245fd3fb6d36cee8265aa82fe19cad6d105cb70151171d8a83e732b22f47269a", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 119 + }, + { + "address": 
"tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "dbbb96ecf5a2e27f4973d302e10af2265461c81aa8cebc97a9a19471a8580941", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 120 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "625a02a9620ce0e5cfa1692f343b7c247c117bc9cf9d61f5d32bc70bbac2ac10", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 121 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "1402565db0deb5f3a177b3e1de493c4d5e7d13844538d8d95fe8c33a9ba43f3d", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 122 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "8cea8c57adf00fb3215c49b891d00e233096b14816afb7f2fa3da98c07ed79a7", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 123 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "f5d1cb99734127a99c8a8b2ba21e94d82871164381c40fedf22496c0022932aa", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 124 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "ec320d940224068ff1614b9ade9149ee0a8a10795d9ce63a5f80a49ef0b2cbb9", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 125 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "8593bf8b177d432e9d41e375e86590b189b259c3ff03fa36c8f1b46463821ccc", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 126 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "b498a3747009490a2021d34d62193af7d45a5d8023e9fe3fb4da15ad47593208", + "outputIndex": 0, 
+ "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 127 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "f8624f75f38e5acf740d50934f1618331d326d618175a8a9ac5d47b316939d75", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 128 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "5ebdb3b26ec28bdfdaeaf5eed95461b4de58f330827e3d98772fc2b2b2fcab7d", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 129 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "968a995808b5e4ee42a463bcd88fea9dea947f038da6b0d7d867b75cfcf04f47", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 130 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "a4e43e250c9a7dbadf1c37b9a2912ac91ad44eac9567863a45230367ada151a7", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 131 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "ce9893af233000c1f1df4f79f4a5d20cb9b9e9e4bc34b82ca8ded260df4d7fa1", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 132 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "165777c6c81acaecc482d231c6554d49849bf0cf51dc15b891537526c4264987", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 133 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "de0d62aa243209d4690a52ca04b7137de63057f6fe4f6388ce41f8fe41909dba", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 134 + }, + { + "address": 
"tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "5e22a38af98aa8300c383116f9c4af8f17275a02a06ec28b33675b5b81c1293e", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 135 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "0113ce276025ba9c6b0c5b2a99e850a8d18ae816aa4093203ac1e4c52e34955c", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 136 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "f7c2199722cd9a8aae0f3ad44d24e11bc045144849b690c360076ef28b5d94fe", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 137 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "d3331d33d26b2e284d8fafdd380ce54b5187f037a1bce5caaafa876f104624e0", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 138 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "9a5813358bf8320f45fa4e661aa4d76d5b38d39dcecedbdaaefeb27ee1a08ac3", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 139 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "8e99540fd112cce61a836a202f105a05c116e4f7db59e91b178bccbb26156089", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 140 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "3b6e1683c0152ea10066ef99a9daa3e6daffccc4af4033aea72121840b067ff2", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 141 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "4395a65fb0eea0c7d0ae2629449f6435e79ac8df757d170f61542efda6e016c8", + "outputIndex": 0, 
+ "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 142 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "45d2384d04db98225a943bf7a062e4015dd00385ebaa0055b74a980d75f4f233", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 143 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "ef3f3daa2750674c8d5a3fb706576cbc4fd1a4c1af7fa3c413a95930f35da9f4", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 144 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "468404f085a91213483b0255679c563c3e2363785bbc39253d491e2014c53a71", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 145 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "1160d99bddd2d845baea71193ba1505e2a17458721782080e00ebfc76671b6f0", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 146 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "e0caf13bbdbab0d94accbb5959eafee6becf2af3b43bd78c7fb4d02a5ce7687c", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 147 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "eb883bc090d7e2d0bcf37c7386c2459bd7e187a2c0dbf1798e815a3228361f85", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 148 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "2563915fb1200f27c4de2b87604d6c62a2d69d0b8aa77419ccdde2951484c090", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 149 + }, + { + "address": 
"tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "a622ff261c67525ef497ec5d573c536152204e3c6864dc196e66d14daf6f1332", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 150 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "a62f7495d0848b2150e3c6e7f6bd9167c5ceebd366e7c2fe18ac56ff724ec37e", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 151 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "69122646bda3a4f4e8f8b3fbd3ca3be27310ea85645fdbb100d21f06e0de19d4", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 152 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "2ada52f3187626128267391cf5c89aafa22ce454b5f4627e312fcc2a80a7007e", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 153 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "23b5b2ebf2448c5629dedd3aa6dc6aed5aa0626525174ea92c3d3e1d50cc57dc", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 154 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "2f75a4d56e681292a9e901bccc6e74d9949a727869d6a989f71a0f3d56ab35db", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 155 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "ac320bbce9a23a9eefb6b96b2f7341a1923c996d95c06db3da492b61750007fe", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 156 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "0910bb2736c171acfaed7a916eb446fb9f8024ed5e2b45f7e5430450b660f9a0", + "outputIndex": 0, 
+ "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 157 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "42e5fd84e9e800af97e98fd1266dccc09ac107eb06ceddc5a106aa5795f4b8b7", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 158 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "a38e0a7821a8be35dbceb547b44310d935b4912fc07bea1d3acc0f97da1760fb", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 159 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "9ed4400cc7c50cdf98ae4150621e5da0b00e2cfcea9bb4bd4b53a0c4864296d3", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 160 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "3fc590ae80dcb8d0044f2562be2b1eeefc7fa41d718a9b11f678559d509a8771", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 161 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "0b185b098400d6bc21ae1325ce55d55875456f196b4906f281519764e72bb478", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 162 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "8f56d97e2e4597d1625a60ff5128bc651b7ce0e3011230eb0940058582cbcb94", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 163 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "1a9fd4a3e05d4c0bb3120fd6566c0a872ba0545f572bf91a5491a94a961f2c20", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 164 + }, + { + "address": 
"tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "1dee24997db1fb7d28ad4946c6e8277d431df9cef8d3b7c9afc6486382060157", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 165 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "5c0a9fef39fed6dab42eb079f4cd7a0fae9c4ec511b0c95683913c6e5ad3384e", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 166 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "1401d5579a239edc79d90224ffc9f8613a8d0a6403241b14d95145b91d2ef5f4", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 167 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "33e461946ab5b529e6fd259b78cd64900642f6984f8c0da5264b3feed82b7bcd", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 168 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "bf8d2d00c1d2da84addb4eae10be7bf6ca530f93c035ee9f206ffda1df235c32", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 169 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "5ca743dca9e5be23323fca0ad91905231150b78915036af80a2d7f2198e21060", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 170 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "cab103f59270e579e9307d2d405e449aa3774f112b49d360560892c352da0148", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 171 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "208ff7ec648f5159fbd61f27aacc8aa1fd3d03f3d618bd10b580d61183b93ef5", + "outputIndex": 0, 
+ "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 172 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "7d2f25c3bccbf3216569f6a8441a9cc0d0ef2ff069f8e526f36ebc8ebe380348", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 173 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "f44f2077c211c8f69eb4714b8e00d3b396629190e5a7436088e4bb0349730c70", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 174 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "d360116e4c02a8aa8d375eb34b7ee64eb9b85ec8ddeee9a7fcf573fffdd785d1", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 175 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "d8f8dc115b050e87e25635c7b98a1804c8b3345ae22fdb752e4ea42310818b13", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 176 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "b07bde6399b997d3fd0141c78cdf1784af65297b380e9dc183c5921a568006c6", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 177 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "cc341721edb67da414ca76b38aed117d359377fa9f0ce907a332c7f6fcb23302", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 178 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "428006caa988f2658a958cf893f2c4d8af70eaaea06f910edcb8c327643ce5ed", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 179 + }, + { + "address": 
"tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "0fff3746e5d4c07da546d2b1071debfe2063f50f61f6808a784e327b56c8d070", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 180 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "6602462c6486b0268899403af0d8519df418074cb2f6c0233601ea033e19e0f5", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 181 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "5ae43975821a6f0ff2b800b7b87d0bcc6985c2714d25e3ce567d2d8548121a85", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 182 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "3f627a8b5c67f7b2741aaa9c004c99b0426720ea403cedfbb2ced065f4983660", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 183 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "114a504aa911c366381e486d078ef74f95922b18a4111f432e85138120b52b43", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 184 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "a0d870e47760be8e8def067e363a3679188cd8ad72a59ce9dce9bf48535c18c5", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 185 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "88537a719513b967533cbecf06b41ba0ac490a31b2ec6be0a1b90c910d9de6bc", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 186 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "5d3dfac8713f442e3807453299fc0f12cbd63b8c5ce355c195ba0e0ae1b9321f", + "outputIndex": 0, 
+ "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 187 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "7db5c857be96dbbab3e1ffa8f8c6794352a65d553ab08e55aa67a40700e7ec18", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 188 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "092970014dc09de9d345c083d1b501e725d4e7bf6bb159b1d5cc1fad44e118e0", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 189 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "be9264d65ba0c6e76b08999c1d2dd604e8dec08873f4e4261e79782a2f1d1fb5", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 190 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "d4d888626fbb77d3f5bf42b52fd1bb6e86c17b321a1843bb1df3ad348ff97246", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 191 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "20efc31d723ea58356a3febe1050fb641d42342a4312fec07682f4b268a89a73", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 192 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "9a2bbde294ae39b7730a120e9a00ff0c382cdf3662c6f3f41bd40e464e8c8404", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 193 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "1ce1651fb62294813dd1f4a4f4b0cd17a1cf6ade23506978b40c258f9c4f636e", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 194 + }, + { + "address": 
"tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "59a6332f0b08a33d0a0cb03286ba0bbc01521cb520f48afafa5b1553a4c5f912", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 195 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "e6f47fa7733d725a26b3c3108ce9c2b5ed2ff5f210734e2109feec229adadbc0", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 196 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "9128ebef94c863688c1ecea4a9554913c8786ffbb1c7e13240b07c695ac79322", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 197 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "3bbec50c04f95f2437b835a98eaece17cb97f8c8194aaa8f4dbcbc598209ab38", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 198 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "984030374dbf47d12426fa569dafb20786472f63a4e648f43651c6abaefe2e21", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625125000, + "height": 199 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "93f9f3c0283692bea14116ae2ad824eb2aeece5bd320eafe8dfc4c570716afc7", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 625095000, + "height": 200 + }, + { + "address": "tmBsTi2xWTjUdEXnuTceL7fecEQKeWaPDJd", + "txid": "175fa286e2daa34ec6541e8b2628c169269631e90a0b8d849cebae7c83ef91b2", + "outputIndex": 0, + "script": "76a91417a95184b6b158dcb8d576fc182d9b7f14377d0488ac", + "satoshis": 200000, + "height": 200 + } + ], + 62510790000 +] \ No newline at end of file diff --git a/zaino-state/src/chain_index/tests/vectors/recipient_data.json 
b/zaino-state/src/chain_index/tests/vectors/recipient_data.json new file mode 100644 index 000000000..56a07db65 --- /dev/null +++ b/zaino-state/src/chain_index/tests/vectors/recipient_data.json @@ -0,0 +1,208 @@ +[ + [ + "baa5078cb901ad86ec45ccdbdc7430928dd40499b44f2ea79a87338fe752758a", + "287ed62eb559789ed8ef5f31c52387c291db780aaba7d1421c2ef7ef46c235fe", + "89f227b0b1ba668674d200f41923b8438cb47f1a1a33cd0f2024ed0ea6d6a351", + "7ffadff6b74b6e543aad982ccc58f379f1d12f9252cd36dd34c61d034c4df091", + "0230ab53a9a5f44241b109d07462657c3afc186645cffae5a38dcecf364759a0", + "02e1703b3c885729968972c1e4ee359f43ba20e6e4d29ccb0a5396a66378b388", + "87617186f2015de3eb83b9fdf0c4150ecf07ca1d05df67f866ee8d068def906e", + "1faf0c3ff547b0a035ec97864db5ee9d40b72fa057647f0fdc8acbcbe608dc5b", + "1d12181718d15ee6d053da8d4ecd26c0e151389493418cfab5c47b274b035e2c", + "64fe37d26f9c4012f6148cc1a321ddaddd19c4f3c95b166363fc44ef45062302", + "fd7ae37ac436c0288ca570e6b1a0533ed3a85e745aca54955d688be696b5ef1d", + "892700bdf747f743fa7d0f563d6d87fe1ee34e6a3e0469995bbddfd15467f432", + "1e7315c331b68709370fed053f625bc45da1a3b3e52190af9e7097afbae5303b", + "490452f9bba38c5dd78420f82ed753b6707a65ec1761def3e2d9d060023f55c6", + "f331dc04d6b4fcca4d6cae89228ad716e462377ac5873abe7c9bcbb31d80c5d0", + "9fbe2008e31ab125e25352b5bb2058f7e1d725a28301c0f2e2eb071be8101294", + "429e596a59351ee00d3a63c80b963d3be8f254fac31b8f0561f2e8ea80b64aa3", + "af44ff9674fc22994952a42fa7eeaa788574503483035c192f736eaa944d12c7", + "e2e53ee2e40b7659a24e552f326e4922ddb96e6000f8e8add98aa9681d9efe79", + "9b1188c4330fb0060d248fc517543468d6e3739aa774800f99b359482d27e4f4", + "c204b841e02539e04e8ec3462f99ad7f22a91f1caa14427f33c2c4099efdcec7", + "1cab750becc21dcb8f65d2bdd57127b7533707ca5d8ccd1626d1da70b208e6c2", + "0cef5898150e90271973901dd171ba5d4946be4f2e193c08debe57d403c74c51", + "826a13510e6e4fe141d91fcc449db8a90dfcc832dbb4f8397d9af1cc2f703c19", + "ae5dffc9ad2613e7118c4df4ac1720a1d5caeb0f302c14fe82a01a83b6c35aff", + 
"e33759b0132a2188fd8c338addecba0d02c48373e02dce1a525c35a287a686a9", + "801a7ca69c01d5b7144307750d071c6ef246604cd4544e0d64e13f45bfd14014", + "fef050fdc5f0ac92f885c8b5e4e2fbb984a85363013c841bbd23bd32899c13de", + "9f56f0c0eb09fb0543cbc17336094f10950d3e28e44c845c62ed313c996230a4", + "23c84bdc6a26388132ca04cff1f2f5b605a45af01775cddd63cebe2b5b7d7edd", + "ea5907ee0d6c6f78dbdf16f5af74089a51a26beb3d25949feec3aa7c733ce0d9", + "649bd25dc3530d206f1db56ef2709f4cd7f427a326135d5e09c433b6a30e7d5e", + "115b72b19e496da04e49c35311656153136822e22ce08ab69689207dc23668a8", + "14e64606eff175f5fea414210c4f791ecf2da6548a66ab0f7900dff1df1df460", + "e7f195e5360e09074f8d5e8cba1786b6bacc6cbf34d1623ec73f22515794b149", + "6c03169ee7fb2d96669d51437d7fdadd5b049fe3864e71b53382bc6b8daef96b", + "fd73c3fe9ca2c19ce484f25a7809136d56d01e66d70f9f68824f78ce539c05ab", + "755679352cd7fe2fe6abd3a67faf05ed371c9032b82e8cdbaa50e8fd24a46db1", + "cef6a8994812287e8def0f147da6b2f3f8d7282ae78e170a2c2c08d6e916c16d", + "1d3e56c3d1ffd270d3611e1de1baec4c49ff5ec35e18bd8d579a0c3f7b991393", + "0af3c5b96176618e911887736000cfc9f2e32ff173edcc533a67bb62216f45c5", + "be94e2ed6858b5a60f3b5ccbae9d0e8b7de34e6e6298ab54169c7845f3d917f1", + "dd42e48df9e01a6d2dc6578d6428cb57bd67571aa57c9c26c2efc67b434ca0db", + "36950bb09a68b9aa132d2b1ee9df7bb54fb914311b2f208845cbc7aa8b43c055", + "135db2759ce033ccfbc7b6f671671f92def0f33db91cb77df5b9a0dc730f4336", + "6c8d57821cee7f14c186305042d182cc1af535f1276f83d072564469c11fc96d", + "3cfabf94db46464b72b6efb3ecae7e99844084de3a49a23b9e919eb19d7c7bc7", + "f0be959ed1bdce4c7c2a5d584142624e557631d96e3105d596cbef17482af06e", + "8f6286d9b85688dc2349818a9eaeca0596be02f75117212ecc68aeb06328c2c7", + "8fcbee837176d3d932afd4a5b58f230ddfdcc3915f362abbf21922a74d3dfce8", + "74bda8ddb833ef3b38b4d3369be3752d981324476b35732f770788ff54eaeb53", + "68064111e152e0a832e6f7876670503e329640e49ba1505d3a821e236ccb12f8", + "63d7a2067fd352d187007727aafddfedd59a924b223b4c7303e22109e952baa4", + 
"092008493ebf851404a84cd31bd961a083b26b38d0c8c03ce9b543ee1bd2df57", + "6ceea3830f283a529455928d22debbd9b23f00bc21432d2a127d89db1715314d", + "3b289f5e107fd78664390fb45709d88327ce54e5fb094a157806ea21e04c7513", + "e928850785300c2db2f311e07c2af2c237106fe1202db0000175e6c6486cd1cd", + "d186278ea5b89de81be99bb8d0b4291b0050eb155e27289f807490a87cfeabe7", + "c07210f70e2c1e0d00ac121671313aced75e3dc39bfb65ec55c39daf6463dc2a", + "b22363ca89dcbbbb3928cdc885e2351d276fa5401dbaacd09cd376f9b72b6849", + "38b043e90d648c5cbb01846784634f8c8526dd233351ef8a2c8edb5c1d7ad92b", + "be103d50c4dcb75ffa7bb545da3b0d6eea8dda4a971093f460e1eb8011b5a0c4", + "090b81b391add90593208e7bfea2fd7f6ca4acbdb6753d6715dbb9acf78dbf89", + "e9beb463bb2ab4fe5bbc3ba2c5b8fbde5954e8022bfb74248cf2f7c42771d3ef", + "32a3298675592c4f765baf43c94f96a5a4ea330b9b8f6f44aa46a0406080c891", + "cbfe35073e4531d000c8ff0b091de58141a7d37ed2c3a7dc56585b2fb71ee5f6", + "4f8a0dfab0ff3cb8468863458754b014542ea0a51f1485a36f1b415d9a5846f9", + "a7e541efea88424fb40eab9d6ece3c45392f9ef6fcd35c738b837eaf6872c3b9", + "24aca1d451a9459fe1c427068c7f6f584c3d4f839c4343cabb0ce5a7df7bf151", + "8a8fd68af9fe17b370397d0712b3978b7385e96fb1bd17bc9a9115552a026b55", + "3076c4a84c69c1411fc9d174ec15029a2ae5ee866137aeec5303ebb4495a3dcb", + "8f95a0c876bc077901de48b72ce60441515a1019c127f26e4b96ecdeb49da27e", + "ecd129648fe8e624e4f401e88327b642f038ef3c21058888f794a672449a5a79", + "7e84d25e55a169c9f12799ceb2d577e73bc05180f7f2dbd7bfde55bb6685624e", + "9c8c9445f412e0189be56f432678bd19a15185d6dd90fc9dd9edb8fc5220a7cd", + "f1f106076971704f0272d19bf8d67771954dbb42146da5bb33cb7ea64d83e72b", + "42834b5feeefb23fff5b596b5f4507ebb02ae09109c98dfe5fd18068e47446f5", + "75b8cfc81997b91f2fc3679b5244bd9e24a02f7ee10a63e2a820174a23fd55ae", + "50be1dad1bdfe0d4a0a8e43036195b1ebc8f26ff5edc8778886eb5c25264fd62", + "16b6b00ef22a156524dae7e1562b67824cac493973f444a2d369d585b8e21db6", + "c8c10949bf44df7e6dc70febbc947be906b7fab2815daeb87cb282c56c857665", + 
"5e1b824cf301b7da59017862cf1ee106c62946f8c77d21039fb11f6a932f66e3", + "2465a7ce25d0d0b5b956737d73995e5315b2c3b728d5cc0e0d9e9b4bcceb67f8", + "ca8d92f28cc1cea30155c80f044f9df4f5a60b81d65d5978ba7ee5847b43c2de", + "fb7cab38953cf73d014a2e8970c0c944742273146d7d3c562c710b7514dc9da7", + "2b4653c0087604317953dc1b8f79f83c23b7338cf23ea98896ac65e41268a727", + "6803252e7aa9befeb84de46ec3d52360cc70b215592dd267cd3e1bdfbd80b023", + "99aca78756ffa763bd68b5666377acc33e8b60182e3dff501c29ea18c636c2f5", + "2b2cd864dbf1c55846166f32f0047a048c5f686a8a00c522953f28f957cf2870", + "8f2977002de9064480f2154e65c696ed136f076d16d0d80b3d9547e3d218b222", + "d672b0cc92c8a0de059b2a4091bcf47e736d6b7651df31daca37fb702ea59748", + "728315618c9311c1e31eecba49e444eb1e1f73b02b5f2307bfa6a356cc53d355", + "ee52d20b5166dea32f0c18fd62d749d3eb0469e874e7ea23ac395ad7f17e915f", + "9158a3cc7f3b410b9a34b44da33c35e2e9541bd406a01f35ff068e30e4f0dd00", + "a5cda871cd617cd436739610fc9a6a2be73e631d60f2c8e2ad980c3ac207a9d2", + "cfeef00f02e5fbc8ab68270129387ac488e6a9f047f178dacde63c1218511a31", + "0326aac066e7409e8b5f5ce2206671ef82bb6f9849f0387deb4fcbbebaf4031e", + "9db1ff33616464ca1c1964703bf3d5582320b1531421518e081c1ba7f6204eb4", + "526c4addd9f53581504a76d2a50a6ab92bd069fff220be439360f9f8b50c3600", + "ab83be06998ac0581397698c9b09ffa791c269974c120bd8d9f94ab0debb445f", + "dd2bbb2a2250df2c96569cbbd851017091a233cf5bc068e522d0b4a6f1cb01df", + "79363d3702425166601e7fde3178290c7316d5010b0468e0aa9dce57527ac01d", + "29bb40b4d953d0819d8b3eaa172101fa125a8fa969d974a14fda14359c11e729", + "957e56ac3a5d1c92bcc818c8ea4d739656c7ee769ec764842df0b3aa15b3fd02", + "64b2aa98fc418dba0a8afd29455f968babf5b278c03aedd602904ab9ab682a13", + "95fcd1b1f961a96a43e975e40f772d830f311ecc961f77e67ed744a720ae6075", + "4308d0ea8db708cd338352aca85d822727362b9a80f18811ad429275aefb678d", + "dc8a565fce45752ae164cb70d0fd3a22d6d3e71a0e9d4d6b47e47181b83c3aae", + "184c272dbd0bcc014ee0c6ca97e0236537b1a731e7baf0f41e0381d68cc403d9", + 
"1a9d3fa9497953e2eded1e58e0fc406db1b219450b2ab3a03babc1407d50af86", + "954b141bc57bd08ec87627403446a001255c06ac2da9204bff3e0275637ca06e", + "1d50a9f8b4463648f3d860dfe30befe215c519f2a88ea06a2db68b10550f6657", + "cc44946d3223700304ad2ac88a3fda07f4a9b5cfb05bd6ae82b0cda0bc68332b", + "defdc0bd777badbda8a6811b43822cf33bd19e8bff93a85999d963be42beabda", + "bda02b8b94374a58d88bab0b39c6ab55a513bab3b3ba6eaf367aacd73facdf2c", + "6a9f2bf2dc10e814a505405baaa9b209d6334bab5d0183c9a7d83893ec96f84d", + "85f23473ded81ef4b4543613f463d2ae5f1d562ad4afea6615e5cc7274806c2c", + "d56233019bab63ca08b4da53130e6cf29e7cb244501d2cb6ebb4beee7dfedfbf", + "9d0cf47b8342c2e782cf0857b8a9cf4c0e3962080ec5c960bfe93e1ca24e669d", + "15226a777d10f470bdebbadfdc445503f7c0d533f75831ec41b9648d7c3e9454", + "38f18d7cc3beebee5ca0e4d63ed0b692832e5b33290e2d67c3c77bf81d12b821", + "5a3f49c0edfbf7526d08569acfa75152dba25574e7bf2eb38a83c11d6c77b38e", + "14f970e7caab5b842feab54715a1c15930d9c0034f0f3ea6f5a0962ec2de3841", + "0640e5922c48577e82f2cf282dbf28621d8c98d2b6a9b954f8afc36639b1630e", + "ad39f8b222db05d4f04a198c97bdfbd226bad3e623a6aa0863841d280cb1749f", + "229896d06327dafd3a7933178e12782043e22de3899a39af828b013d381c3889", + "5828458c45d60b3a11b4cabf0e262d28c5682e7d68c3d2ad62d6d628e2986f84", + "b0f73f9f7b163d66c779d79fef45b48583d172600b0efc1277d5c22356ffc5f4", + "a423bdbd6e642c65ce7467331d2bfc204efce5a0a39a47f19e0d80f3812f24fc", + "0193fe8ed5fcfdb0986d74dada1f614c697be3fe328d63a76353c3d1f85a1f41", + "7e26e634ecb4e75d307eaa3bfb967eaeaa29f7080966e35787fd9b4000c74237", + "81b7d031b51420d9c1133fdb4b5dc6d3ce970d7abb3c9ab4537e5f60908002dd", + "6fba4bd6cb17030847e9126f6f24a2946be7bdcc7ef38fe820d4928178a5e11e", + "5f60e545224dacedcb254a9455ab2c42b96f67d4a120c8bda8cea59b61a126fb", + "098a6fca8963de176bab39b5df6e0dab47f5dcd55a2ab636d231d5463b047c92", + "62af4319c12e0842d7c5c8e1cf256640bb732b9b1197d63372a51121f95be8b7", + "452a3117e73aef45dda5bc77af50ef7af577d3ebeb30f599f3937a0ca75fd3b9", + 
"76c890c384c9673ca33529ff12f9a290140d96b8e9b9c4ce0464a2782ce8d792", + "4de6e590407eec9518485635d759db78eab493f9aa4f358dcb14312dd6b09aeb", + "e58cdc78c20c393f6d56f3db75906870579dd8f4699e15c5788b1d65ea7f6330", + "fbedea0845316d65aa0fd2b15c82d1fa4e8c14338568682651436268af6bf271", + "b6efbe1aba97c1cb4992a80a68c44c448a311fe7500edfc9d658bebebbf73eab", + "6cbddaf2054d5abb661eb2a457ba5211c874e2d79978cb5f6ddba6dd5c805b40", + "5d5cf561c6ec1390555e1f206d3ae1ce3078ee3319ef7e004db1967498a5d05d", + "417a7cdc1fd0db6d27563767e0a88dff0f541cfe9e263623ad244032aa84642f", + "f1a5e4412b9e82e934d41880c5b65cd39376eff5b0d0d9376726cd4dbebf44b0", + "6e914866330a0b9fb24c452de8fd2a94a5edee31a374a5f159fb959c7b96856a", + "1cf5dba506c316a5f73a0b4af3e9f57a66d705c5b03f75d547a8b694ef0e08dd", + "aec00b0138d91e262fa40d661a9bcdca40fd84bdb2b320f0ce9b37eb7e85a256", + "7ec4f9bbb6803adbd3585930c0a7436def2d91011dc8f4cdb4ba0e5d00db959d", + "9937db6fb2363cbf0e144e399447394ec87e304abda68b998de0bd4b0f418682", + "23289241a0d7eb31a0db21d91cd7feb9876645f5b6a46b54fca3c248b146f510", + "3754eb5d2b07615bb618f07210214831c5d293ef58ecc668457ec25e5d237825", + "e01734457910756bfd59702cab370ae80be7bb2d7fe2ce37553f67c8d1319f79", + "84a528d37c76ca8575122c4b0582bf6cae5789d3e5f966210905fd3ffc35cd61", + "e94c5eab5b49b7c32753d1c10d2389cd6b80320f55d6c20bff445cce67c82dc0", + "dd8199589e614cc918880263bd1d1aaae1a57ce26f3ea75726e004d494ab27e8", + "b768411ee024c81ae5c3f293e7b4de073347b97741faad1676a66ee65c0d0a31", + "53a6896246def01f67c72fd9b8051fdd6695db2ad06c9e6462c4f65532539b97", + "c13aa7fbc172e3d652b0cb04f91c737d843c58ac354ee80f399f19a924ab54c0", + "06d5f94dba30fefa2250fdc5ff381ce4e3be8210d79108ffcb7cd16d9cc21cbd", + "10edd9219805c08061abd5dc32e6f518a7a1ef42d9ca2c0e09e9b1bfeaa4c047", + "5c51f0ccbb3e425ef02d2df2c59420f0f471f60823155e4712f618866df41830", + "1c44db60f6448a903386d99fdfe5b5d06231524e9188ab94a54faa72f995f087", + "ab79d90f2e60463edfe54b0a77e18cd7f5bda814f83c6069282f3e4ab9546615", + 
"e35cff3debb72863d2972614a4552e7a473e12e31bb75c5fb83a42479928ed2f", + "71f9e8c9f49ac6ddc87c463325953a8daf001d91ee98c5fd8e98d9b346b8a923", + "6cee151c5061ca271a434020c76fcfe91443ef742eedd73d93500bd3c27fd4d4", + "12608fe7d16d3ee9825180e8d853e733d3fdce16d3ec859183562284b863f350", + "3ca99ce8349e7611182ef894c033c509295e675bebb09af4812ef209d7e4af68", + "933db62967d0368560dd0b77ea4fe075a3bfd28921ce96e335fbbc227e101e0f", + "3daf30ef2e8478161bd1f211ddb2efd2730c69cd6cd86778fd1d7fb3877d494f", + "c35474be844c2c8d80474290105ece9c7f2cd41755d014618b169a83f2b1a518", + "51859d01179da27263353a8835b7ac3207f8faaee6ca4209bc81a204d670dd23", + "e2357192ab221164a664937faa4a52558d0aebba463f7703a04c516001649184", + "4c563af9fd1d3bbef1cac6ea2acebb568217d09778214c13d37e7a5896c1b91c", + "904c35fc81dfeb5976c7cec6501d7ad2e972ab395fa11d964a004cbe9b1c8ffc", + "04bacc4ee6cdbc93174a8ae509f2455e181599894808f96f33e295ad16dd4b74", + "88379d71cc15c4973af55ece940b43fa1391e695145c1273485f371bef4bc0c3", + "0147d548eb16778496c2a51970db1b1e8e769caa528e3c297612f6c72f013301", + "71ffc92a278168d882c0bd26f24e84fd4e7bc1cbd9db45da736973c951e8d1be", + "792c6fd67861e0662a2d595118a04a3164d9e13951a0f945d5934bfd1bd3f0ca", + "8c74688de9ece5b21b092bb77ef2628a16b03f4e854530388be032c574e1bc78", + "70a011e3947c3214e37edeb491265de721d098d096ef75cf52e2d3b629e66004", + "bb05b032cd1d59dca4775bedbacfe5df5f684451a8291798f188c47dca6985f3", + "d2672c113772de73f91429f4bf1724393a9bcaaac795be362de0e4b2d6822c63", + "d50d77b844614381884c0000324cae8a73fdd336218872e32cf4175f6000c56d", + "572c2934bbe8c1b88abc90f24baa120db2e8e105bab458f54327fe447c8594da", + "7408fc30d0e58ab7d678d1c3ce6d3cd0cc906baf9e35c99133a1c3e0619d13bf", + "52f0cc264cdce65fd21f1f9dece9bf8217d273c37bc6c08c9e3e76d6cb209951", + "1f07b921300beaffeea8feba2cc064d8f52e683b3a05a0fc4a3d13b0535e9fa2", + "68bb3de6e7fe44b52a7ad0c478c85deaa160a02d927fc581efca9c0c48fe6bde", + "9791caf8af690ca7ad598a0c1171210bcd3d3071d32c3447cc214ab725d1f004" + ], + [ + { + "address": 
"tmFLszfkjgim4zoUMAXpuohnFBAKy99rr2i", + "txid": "9791caf8af690ca7ad598a0c1171210bcd3d3071d32c3447cc214ab725d1f004", + "outputIndex": 0, + "script": "76a9143dc1334a351e672794415cc30a4fba1b9303871f88ac", + "satoshis": 250000, + "height": 200 + } + ], + 250000 +] \ No newline at end of file diff --git a/zaino-state/src/chain_index/tests/vectors/tree_roots.dat b/zaino-state/src/chain_index/tests/vectors/tree_roots.dat new file mode 100644 index 000000000..f174e56e8 Binary files /dev/null and b/zaino-state/src/chain_index/tests/vectors/tree_roots.dat differ diff --git a/zaino-state/src/chain_index/tests/vectors/tree_states.dat b/zaino-state/src/chain_index/tests/vectors/tree_states.dat new file mode 100644 index 000000000..6eaab94a8 Binary files /dev/null and b/zaino-state/src/chain_index/tests/vectors/tree_states.dat differ diff --git a/zaino-state/src/chain_index/tests/vectors/v1_test_db/regtest/v1/data.mdb b/zaino-state/src/chain_index/tests/vectors/v1_test_db/regtest/v1/data.mdb new file mode 100644 index 000000000..3a6871381 Binary files /dev/null and b/zaino-state/src/chain_index/tests/vectors/v1_test_db/regtest/v1/data.mdb differ diff --git a/zaino-state/src/chain_index/tests/vectors/zcash_blocks.dat b/zaino-state/src/chain_index/tests/vectors/zcash_blocks.dat new file mode 100644 index 000000000..ddc6a6691 Binary files /dev/null and b/zaino-state/src/chain_index/tests/vectors/zcash_blocks.dat differ diff --git a/zaino-state/src/chain_index/types.rs b/zaino-state/src/chain_index/types.rs new file mode 100644 index 000000000..e2316e34a --- /dev/null +++ b/zaino-state/src/chain_index/types.rs @@ -0,0 +1,46 @@ +//! Type definitions for the chain index. +//! +//! This module provides types for blockchain indexing, organized into two main categories: +//! +//! ## Database Types +//! Types that implement `ZainoVersionedSerde` for database persistence. +//! These types follow strict versioning rules and require migrations for any changes. +//! +//! 
Currently organized in `db/legacy.rs` (pending refactoring into focused modules): +//! - Block types: BlockHash, BlockIndex, BlockData, IndexedBlock, etc. +//! - Transaction types: TransactionHash, CompactTxData, TransparentCompactTx, etc. +//! - Address types: AddrScript, Outpoint, AddrHistRecord, etc. +//! - Shielded types: SaplingCompactTx, OrchardCompactTx, etc. +//! - Primitives: Height, ChainWork, ShardIndex, etc. +//! +//! ## Helper Types +//! Non-database types for in-memory operations and conversions: +//! - BestChainLocation, NonBestChainLocation - Transaction location tracking +//! - TreeRootData - Commitment tree roots wrapper +//! - BlockMetadata, BlockWithMetadata - Block construction helpers +//! +//! ## Module Organization Rules +//! +//! **Database Types (`db` module):** +//! 1. Must implement `ZainoVersionedSerde` +//! 2. Never use external types as fields directly - store fundamental data +//! 3. Never change without implementing a new version and database migration +//! 4. Follow stringent versioning rules for backward compatibility +//! +//! **Helper Types (`helpers` module):** +//! 1. Do NOT implement `ZainoVersionedSerde` +//! 2. Used for in-memory operations, conversions, and coordination +//! 3. Can be changed more freely as they're not persisted + +pub mod db; +pub mod helpers; +pub mod primitives; + +// Re-export database types for backward compatibility +pub use db::legacy::*; +pub use db::{CommitmentTreeData, CommitmentTreeRoots, CommitmentTreeSizes}; + +// Re-export helper types +pub use helpers::{ + BestChainLocation, BlockMetadata, BlockWithMetadata, NonBestChainLocation, TreeRootData, +}; diff --git a/zaino-state/src/chain_index/types/db.rs b/zaino-state/src/chain_index/types/db.rs new file mode 100644 index 000000000..a9cd09488 --- /dev/null +++ b/zaino-state/src/chain_index/types/db.rs @@ -0,0 +1,31 @@ +//! Database-serializable types for the chain index. +//! +//! 
This module contains all types that implement `ZainoVersionedSerde` and are used +//! for database persistence. These types follow strict versioning rules to maintain +//! backward compatibility across database schema changes. +//! +//! ## Rules for Types in This Module +//! +//! 1. **Never use external types as fields directly** +//! - Store fundamental data in the struct +//! - Implement `From`/`Into` or getters/setters for external type conversions +//! +//! 2. **Must implement ZainoVersionedSerde** +//! - Follow stringent versioning rules outlined in the trait +//! - Ensure backward compatibility +//! +//! 3. **Never change structs without proper migration** +//! - Implement a new version when changes are needed +//! - Update ZainoDB and implement necessary migrations + +pub mod address; +pub mod block; +pub mod commitment; +pub mod legacy; +pub mod metadata; +pub mod primitives; +pub mod shielded; +pub mod transaction; + +pub use commitment::{CommitmentTreeData, CommitmentTreeRoots, CommitmentTreeSizes}; +pub use legacy::*; diff --git a/zaino-state/src/chain_index/types/db/address.rs b/zaino-state/src/chain_index/types/db/address.rs new file mode 100644 index 000000000..f9d0212c9 --- /dev/null +++ b/zaino-state/src/chain_index/types/db/address.rs @@ -0,0 +1,7 @@ +//! Address-related database-serializable types. +//! +//! Contains types for address and UTXO data that implement `ZainoVersionedSerde`: +//! - AddrScript +//! - Outpoint +//! - AddrHistRecord +//! - AddrEventBytes diff --git a/zaino-state/src/chain_index/types/db/block.rs b/zaino-state/src/chain_index/types/db/block.rs new file mode 100644 index 000000000..1a4be05a3 --- /dev/null +++ b/zaino-state/src/chain_index/types/db/block.rs @@ -0,0 +1,10 @@ +//! Block-related database-serializable types. +//! +//! Contains types for block data that implement `ZainoVersionedSerde`: +//! - BlockHash +//! - BlockIndex +//! - BlockData +//! - BlockHeaderData +//! - IndexedBlock +//! - EquihashSolution +//! 
- ChainWork diff --git a/zaino-state/src/chain_index/types/db/commitment.rs b/zaino-state/src/chain_index/types/db/commitment.rs new file mode 100644 index 000000000..9bb0e9730 --- /dev/null +++ b/zaino-state/src/chain_index/types/db/commitment.rs @@ -0,0 +1,176 @@ +//! Commitment tree types and utilities. +//! +//! This module contains types for managing Zcash commitment tree state, including +//! Merkle tree roots for Sapling and Orchard pools and combined tree metadata structures. +//! +//! Commitment trees track the existence of shielded notes in the Sapling and Orchard +//! shielded pools, enabling efficient zero-knowledge proofs and wallet synchronization. + +use core2::io::{self, Read, Write}; + +use crate::chain_index::encoding::{ + read_fixed_le, read_u32_le, version, write_fixed_le, write_u32_le, FixedEncodedLen, + ZainoVersionedSerde, +}; + +/// Holds commitment tree metadata (roots and sizes) for a block. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct CommitmentTreeData { + roots: CommitmentTreeRoots, + sizes: CommitmentTreeSizes, +} + +impl CommitmentTreeData { + /// Returns a new CommitmentTreeData instance. + pub fn new(roots: CommitmentTreeRoots, sizes: CommitmentTreeSizes) -> Self { + Self { roots, sizes } + } + + /// Returns the commitment tree roots for the block. + pub fn roots(&self) -> &CommitmentTreeRoots { + &self.roots + } + + /// Returns the commitment tree sizes for the block. 
+ pub fn sizes(&self) -> &CommitmentTreeSizes { + &self.sizes + } +} + +impl ZainoVersionedSerde for CommitmentTreeData { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + self.roots.serialize(&mut w)?; // carries its own tag + self.sizes.serialize(&mut w) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + let roots = CommitmentTreeRoots::deserialize(&mut r)?; + let sizes = CommitmentTreeSizes::deserialize(&mut r)?; + Ok(CommitmentTreeData::new(roots, sizes)) + } +} + +/// CommitmentTreeData: 74 bytes total +impl FixedEncodedLen for CommitmentTreeData { + // 1 byte tag + 64 body for roots + // + 1 byte tag + 8 body for sizes + const ENCODED_LEN: usize = + (CommitmentTreeRoots::ENCODED_LEN + 1) + (CommitmentTreeSizes::ENCODED_LEN + 1); +} + +/// Commitment tree roots for shielded transactions, enabling shielded wallet synchronization. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct CommitmentTreeRoots { + /// Sapling note-commitment tree root (anchor) at this block. + sapling: [u8; 32], + /// Orchard note-commitment tree root at this block. + orchard: [u8; 32], +} + +impl CommitmentTreeRoots { + /// Reutns a new CommitmentTreeRoots instance. + pub fn new(sapling: [u8; 32], orchard: [u8; 32]) -> Self { + Self { sapling, orchard } + } + + /// Returns sapling commitment tree root. + pub fn sapling(&self) -> &[u8; 32] { + &self.sapling + } + + /// returns orchard commitment tree root. 
+ pub fn orchard(&self) -> &[u8; 32] { + &self.orchard + } +} + +impl ZainoVersionedSerde for CommitmentTreeRoots { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + write_fixed_le::<32, _>(&mut w, &self.sapling)?; + write_fixed_le::<32, _>(&mut w, &self.orchard) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + let sapling = read_fixed_le::<32, _>(&mut r)?; + let orchard = read_fixed_le::<32, _>(&mut r)?; + Ok(CommitmentTreeRoots::new(sapling, orchard)) + } +} + +/// CommitmentTreeRoots: 64 bytes total +impl FixedEncodedLen for CommitmentTreeRoots { + /// 32 byte hash + 32 byte hash. + const ENCODED_LEN: usize = 32 + 32; +} + +/// Sizes of commitment trees, indicating total number of shielded notes created. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct CommitmentTreeSizes { + /// Total notes in Sapling commitment tree. + sapling: u32, + /// Total notes in Orchard commitment tree. + orchard: u32, +} + +impl CommitmentTreeSizes { + /// Creates a new CompactSaplingSizes instance. 
+ pub fn new(sapling: u32, orchard: u32) -> Self { + Self { sapling, orchard } + } + + /// Returns sapling commitment tree size + pub fn sapling(&self) -> u32 { + self.sapling + } + + /// Returns orchard commitment tree size + pub fn orchard(&self) -> u32 { + self.orchard + } +} + +impl ZainoVersionedSerde for CommitmentTreeSizes { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + write_u32_le(&mut w, self.sapling)?; + write_u32_le(&mut w, self.orchard) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + let sapling = read_u32_le(&mut r)?; + let orchard = read_u32_le(&mut r)?; + Ok(CommitmentTreeSizes::new(sapling, orchard)) + } +} + +/// CommitmentTreeSizes: 8 bytes total +impl FixedEncodedLen for CommitmentTreeSizes { + /// 4 byte LE int32 + 4 byte LE int32 + const ENCODED_LEN: usize = 4 + 4; +} diff --git a/zaino-state/src/chain_index/types/db/legacy.rs b/zaino-state/src/chain_index/types/db/legacy.rs new file mode 100644 index 000000000..a46d859e6 --- /dev/null +++ b/zaino-state/src/chain_index/types/db/legacy.rs @@ -0,0 +1,3050 @@ +//! Type definitions for the chain index. +//! +//! MODULE RULES: These rules must **always** be followed with no exeptions. +//! - structs in this module must never use external types as fields directly, +//! instead fundamental data should be saved into the struct, and from / into +//! (or appropriate getters / setters) should be implemented. +//! +//! - structs in this module must implement ZainoVersionedSerialize and abide by +//! the stringent version rules outlined in that trait. +//! +//! - structs in this module must never be changed without implementing a new version +//! and implementing the necessary ZainoDB updates and migrations. +//! +//! This module is currently in transition from a large monolithic file to well-organized +//! submodules. 
The organized types have been moved to focused modules: +//! +//! ## Organized Modules +//! - [`super::primitives`] - Foundational types (hashes, heights, tree sizes, etc.) +//! - [`super::commitment`] - Commitment tree data structures and utilities +//! +//! ## Planned Module Organization +//! The remaining types in this file will be migrated to: +//! - `block.rs` - Block-related structures (BlockIndex, BlockData, IndexedBlock) +//! - `transaction.rs` - Transaction types (CompactTxData, TransparentCompactTx, etc.) +//! - `address.rs` - Address and UTXO types (AddrScript, Outpoint, etc.) +//! - `shielded.rs` - Shielded pool types (SaplingCompactTx, OrchardCompactTx, etc.) + +// ============================================================================= +// IMPORTS +// ============================================================================= + +use core2::io::{self, Read, Write}; +use hex::{FromHex, ToHex}; +use primitive_types::U256; +use std::{fmt, io::Cursor}; +use zebra_chain::serialization::BytesInDisplayOrder as _; + +use crate::chain_index::encoding::{ + read_fixed_le, read_i64_le, read_option, read_u16_be, read_u32_be, read_u32_le, read_u64_le, + read_vec, version, write_fixed_le, write_i64_le, write_option, write_u16_be, write_u32_be, + write_u32_le, write_u64_le, write_vec, FixedEncodedLen, ZainoVersionedSerde, +}; + +use super::commitment::{CommitmentTreeData, CommitmentTreeRoots, CommitmentTreeSizes}; + +// ============================================================================= +// LEGACY TYPES AWAITING MIGRATION +// ============================================================================= +// The types below should be extracted to their appropriate modules. +// Each section should be migrated as a complete unit to maintain clean git history. + +/// Block hash (SHA256d hash of the block header). 
+#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct BlockHash(pub [u8; 32]); + +impl BlockHash { + /// Return the hash bytes in big-endian byte-order suitable for printing out byte by byte. + pub fn bytes_in_display_order(&self) -> [u8; 32] { + let mut reversed_bytes = self.0; + reversed_bytes.reverse(); + reversed_bytes + } + + /// Convert bytes in big-endian byte-order into a [`self::BlockHash`]. + pub fn from_bytes_in_display_order(bytes_in_display_order: &[u8; 32]) -> BlockHash { + let mut internal_byte_order = *bytes_in_display_order; + internal_byte_order.reverse(); + + BlockHash(internal_byte_order) + } +} + +impl fmt::Display for BlockHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(&self.encode_hex::()) + } +} + +impl fmt::Debug for BlockHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "BlockHash({})", self.encode_hex::()) + } +} + +impl ToHex for &BlockHash { + fn encode_hex>(&self) -> T { + self.bytes_in_display_order().encode_hex() + } + + fn encode_hex_upper>(&self) -> T { + self.bytes_in_display_order().encode_hex_upper() + } +} + +impl ToHex for BlockHash { + fn encode_hex>(&self) -> T { + (&self).encode_hex() + } + + fn encode_hex_upper>(&self) -> T { + (&self).encode_hex_upper() + } +} + +impl FromHex for BlockHash { + type Error = <[u8; 32] as FromHex>::Error; + + fn from_hex>(hex: T) -> Result { + let hash = <[u8; 32]>::from_hex(hex)?; + + Ok(Self::from_bytes_in_display_order(&hash)) + } +} + +impl From<[u8; 32]> for BlockHash { + fn from(bytes: [u8; 32]) -> Self { + BlockHash(bytes) + } +} + +impl From for [u8; 32] { + fn from(hash: BlockHash) -> Self { + hash.0 + } +} + +impl PartialEq for zebra_chain::block::Hash { + fn eq(&self, other: &BlockHash) -> bool { + self.0 == other.0 + } +} + +impl PartialEq for BlockHash { + fn eq(&self, other: &zebra_chain::block::Hash) -> bool { + self.0 == other.0 + } +} 
+ +impl From for zebra_chain::block::Hash { + fn from(hash: BlockHash) -> Self { + zebra_chain::block::Hash(hash.0) + } +} + +impl From for BlockHash { + fn from(hash: zebra_chain::block::Hash) -> Self { + BlockHash(hash.0) + } +} + +impl From for zcash_primitives::block::BlockHash { + fn from(hash: BlockHash) -> Self { + zcash_primitives::block::BlockHash(hash.0) + } +} + +impl From for BlockHash { + fn from(hash: zcash_primitives::block::BlockHash) -> Self { + BlockHash(hash.0) + } +} + +impl ZainoVersionedSerde for BlockHash { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + write_fixed_le::<32, _>(w, &self.0) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let bytes = read_fixed_le::<32, _>(r)?; + Ok(BlockHash(bytes)) + } +} + +/// Hash = 32-byte body. +impl FixedEncodedLen for BlockHash { + /// 32 bytes, LE + const ENCODED_LEN: usize = 32; +} + +/// Transaction hash. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct TransactionHash(pub [u8; 32]); + +impl TransactionHash { + /// Return the hash bytes in big-endian byte-order suitable for printing out byte by byte. + pub fn bytes_in_display_order(&self) -> [u8; 32] { + let mut reversed_bytes = self.0; + reversed_bytes.reverse(); + reversed_bytes + } + + /// Convert bytes in big-endian byte-order into a [`self::TransactionHash`]. 
+ pub fn from_bytes_in_display_order(bytes_in_display_order: &[u8; 32]) -> TransactionHash { + let mut internal_byte_order = *bytes_in_display_order; + internal_byte_order.reverse(); + + TransactionHash(internal_byte_order) + } +} + +impl fmt::Display for TransactionHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(&self.encode_hex::()) + } +} + +impl ToHex for &TransactionHash { + fn encode_hex>(&self) -> T { + self.bytes_in_display_order().encode_hex() + } + + fn encode_hex_upper>(&self) -> T { + self.bytes_in_display_order().encode_hex_upper() + } +} + +impl ToHex for TransactionHash { + fn encode_hex>(&self) -> T { + (&self).encode_hex() + } + + fn encode_hex_upper>(&self) -> T { + (&self).encode_hex_upper() + } +} + +impl FromHex for TransactionHash { + type Error = <[u8; 32] as FromHex>::Error; + + fn from_hex>(hex: T) -> Result { + let hash = <[u8; 32]>::from_hex(hex)?; + + Ok(Self::from_bytes_in_display_order(&hash)) + } +} + +impl From<[u8; 32]> for TransactionHash { + fn from(bytes: [u8; 32]) -> Self { + TransactionHash(bytes) + } +} + +impl From for [u8; 32] { + fn from(hash: TransactionHash) -> Self { + hash.0 + } +} + +impl From for zebra_chain::transaction::Hash { + fn from(hash: TransactionHash) -> Self { + zebra_chain::transaction::Hash(hash.0) + } +} + +impl From for TransactionHash { + fn from(hash: zebra_chain::transaction::Hash) -> Self { + TransactionHash(hash.0) + } +} + +impl From for zcash_primitives::transaction::TxId { + fn from(hash: TransactionHash) -> Self { + zcash_primitives::transaction::TxId::from_bytes(hash.0) + } +} + +impl From for TransactionHash { + fn from(hash: zcash_primitives::transaction::TxId) -> Self { + TransactionHash(hash.into()) + } +} + +impl ZainoVersionedSerde for TransactionHash { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + write_fixed_le::<32, _>(w, &self.0) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + 
+ fn decode_v1(r: &mut R) -> io::Result { + let bytes = read_fixed_le::<32, _>(r)?; + Ok(TransactionHash(bytes)) + } +} + +/// Hash = 32-byte body. +impl FixedEncodedLen for TransactionHash { + /// 32 bytes, LE + const ENCODED_LEN: usize = 32; +} + +/// Block height. +/// +/// NOTE: Encoded as 4-byte big-endian byte-string to ensure height ordering +/// for keys in Lexicographically sorted B-Tree. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct Height(pub(crate) u32); + +impl PartialOrd for Height { + fn partial_cmp(&self, other: &zebra_chain::block::Height) -> Option { + Some(self.0.cmp(&other.0)) + } +} + +impl PartialOrd for zebra_chain::block::Height { + fn partial_cmp(&self, other: &Height) -> Option { + Some(self.0.cmp(&other.0)) + } +} + +impl PartialEq for Height { + fn eq(&self, other: &zebra_chain::block::Height) -> bool { + self.0 == other.0 + } +} + +impl PartialEq for zebra_chain::block::Height { + fn eq(&self, other: &Height) -> bool { + self.0 == other.0 + } +} + +/// The first block +pub const GENESIS_HEIGHT: Height = Height(0); + +impl TryFrom for Height { + type Error = &'static str; + + fn try_from(height: u32) -> Result { + // Zebra enforces Height <= 2^31 - 1 + if height <= zebra_chain::block::Height::MAX.0 { + Ok(Self(height)) + } else { + Err("height must be ≤ 2^31 - 1") + } + } +} + +impl From for u32 { + fn from(h: Height) -> Self { + h.0 + } +} + +impl std::ops::Add for Height { + type Output = Self; + + fn add(self, rhs: u32) -> Self::Output { + Height(self.0 + rhs) + } +} + +impl std::ops::Sub for Height { + type Output = Self; + + fn sub(self, rhs: u32) -> Self::Output { + Height(self.0 - rhs) + } +} + +impl std::fmt::Display for Height { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl std::str::FromStr for Height { + type Err = &'static str; + + fn from_str(s: &str) -> Result 
{ + let h = s.parse::().map_err(|_| "invalid u32")?; + Self::try_from(h) + } +} + +impl From for zebra_chain::block::Height { + fn from(h: Height) -> Self { + zebra_chain::block::Height(h.0) + } +} + +impl From for Height { + // Zebra checks heights to ensure they're not above + // height::MAX as we do. We should trust zebra heights + // to be valid + fn from(h: zebra_chain::block::Height) -> Self { + Height(h.0) + } +} + +impl From for zcash_protocol::consensus::BlockHeight { + fn from(h: Height) -> Self { + zcash_protocol::consensus::BlockHeight::from(h.0) + } +} + +impl TryFrom for Height { + type Error = &'static str; + + fn try_from(h: zcash_protocol::consensus::BlockHeight) -> Result { + Height::try_from(u32::from(h)) + } +} + +impl ZainoVersionedSerde for Height { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + // Height must sort lexicographically - write **big-endian** + write_u32_be(w, self.0) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let raw = read_u32_be(r)?; + Height::try_from(raw).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + } +} + +/// Height = 4-byte big-endian body. +impl FixedEncodedLen for Height { + /// 4 bytes, BE + const ENCODED_LEN: usize = 4; +} + +/// Numerical index of subtree / shard roots. +/// +/// NOTE: Encoded as 4-byte big-endian byte-string to ensure height ordering +/// for keys in Lexicographically sorted B-Tree. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct ShardIndex(pub u32); + +impl ZainoVersionedSerde for ShardIndex { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + // Index must sort lexicographically - write **big-endian** + write_u32_be(w, self.0) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let raw = read_u32_be(r)?; + Ok(ShardIndex(raw)) + } +} + +/// Index = 4-byte big-endian body. +impl FixedEncodedLen for ShardIndex { + /// 4 bytes (BE u32) + const ENCODED_LEN: usize = 4; +} + +/// A 20-byte hash160 *plus* a 1-byte ScriptType tag. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct AddrScript { + hash: [u8; 20], + script_type: u8, +} + +impl AddrScript { + /// Create from raw 20-byte hash + type byte. + pub fn new(hash: [u8; 20], script_type: u8) -> Self { + Self { hash, script_type } + } + + /// Borrow the 20-byte hash. + pub fn hash(&self) -> &[u8; 20] { + &self.hash + } + + /// The raw type byte (0x00 = P2PKH, 0x01 = P2SH, 0xFF = NonStandard). + pub fn script_type(&self) -> u8 { + self.script_type + } + + /// Serialize into exactly 21 bytes: [hash‖type]. + pub fn to_raw_bytes(&self) -> [u8; 21] { + let mut b = [0u8; 21]; + b[..20].copy_from_slice(&self.hash); + b[20] = self.script_type; + b + } + + /// Parse from exactly 21 raw bytes. + pub fn from_raw_bytes(b: &[u8; 21]) -> Self { + let mut hash = [0u8; 20]; + hash.copy_from_slice(&b[..20]); + let script_type = b[20]; + Self { hash, script_type } + } + + /// Try to extract an AddrScript (20-byte hash + type) from a full locking script. 
+ pub fn from_script(script: &[u8]) -> Option { + parse_standard_script(script).map(|(hash, stype)| AddrScript::new(hash, stype as u8)) + } + + /// Rebuild the canonical P2PKH or P2SH scriptPubKey bytes for this AddrScript. + pub fn to_script_pubkey(&self) -> Option> { + let stype = ScriptType::try_from(self.script_type).ok()?; + build_standard_script(self.hash, stype) + } +} + +impl fmt::Display for AddrScript { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&self.encode_hex::()) + } +} + +impl ToHex for &AddrScript { + fn encode_hex>(&self) -> T { + self.to_raw_bytes().encode_hex() + } + fn encode_hex_upper>(&self) -> T { + self.to_raw_bytes().encode_hex_upper() + } +} +impl ToHex for AddrScript { + fn encode_hex>(&self) -> T { + (&self).encode_hex() + } + fn encode_hex_upper>(&self) -> T { + (&self).encode_hex_upper() + } +} + +impl FromHex for AddrScript { + type Error = <[u8; 21] as FromHex>::Error; + + fn from_hex>(hex: T) -> Result { + let raw: [u8; 21] = <[u8; 21]>::from_hex(hex)?; + Ok(AddrScript::from_raw_bytes(&raw)) + } +} + +impl From<[u8; 21]> for AddrScript { + fn from(raw: [u8; 21]) -> Self { + AddrScript::from_raw_bytes(&raw) + } +} + +impl From for [u8; 21] { + fn from(a: AddrScript) -> Self { + a.to_raw_bytes() + } +} + +impl ZainoVersionedSerde for AddrScript { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + write_fixed_le::<20, _>(&mut *w, &self.hash)?; + w.write_all(&[self.script_type]) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let hash = read_fixed_le::<20, _>(&mut *r)?; + let mut buf = [0u8; 1]; + r.read_exact(&mut buf)?; + Ok(AddrScript { + hash, + script_type: buf[0], + }) + } +} + +/// AddrScript = 21 bytes of body data. +impl FixedEncodedLen for AddrScript { + /// 20 bytes, LE + 1 byte script type + const ENCODED_LEN: usize = 21; +} + +/// Reference to a spent transparent UTXO. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct Outpoint { + /// Transaction ID of the UTXO being spent. + prev_txid: [u8; 32], + /// Index of that output in the previous transaction. + prev_index: u32, +} + +impl Outpoint { + /// Construct a new outpoint. + pub fn new(prev_txid: [u8; 32], prev_index: u32) -> Self { + Self { + prev_txid, + prev_index, + } + } + + /// Build from a *display-order* txid. + pub fn new_from_be(txid_be: &[u8; 32], index: u32) -> Self { + let le = TransactionHash::from_bytes_in_display_order(txid_be).0; + Self::new(le, index) + } + + /// Returns the txid of the transaction being spent. + pub fn prev_txid(&self) -> &[u8; 32] { + &self.prev_txid + } + + /// Returns the outpoint index within the transaction. + pub fn prev_index(&self) -> u32 { + self.prev_index + } +} + +impl ZainoVersionedSerde for Outpoint { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + write_fixed_le::<32, _>(&mut w, &self.prev_txid)?; + write_u32_le(&mut w, self.prev_index) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + let txid = read_fixed_le::<32, _>(&mut r)?; + let index = read_u32_le(&mut r)?; + Ok(Outpoint::new(txid, index)) + } +} + +/// Outpoint = 32-byte txid + 4-byte LE u32 index = 36 bytes +impl FixedEncodedLen for Outpoint { + /// 32 byte txid + 4 byte tx index. + const ENCODED_LEN: usize = 32 + 4; +} + +// *** Block Level Objects *** + +/// Metadata about the block used to identify and navigate the blockchain. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct BlockIndex { + /// The hash identifying this block uniquely. + pub hash: BlockHash, + /// The hash of this block's parent block (previous block in chain). 
+ pub parent_hash: BlockHash, + /// The cumulative proof-of-work of the blockchain up to this block, used for chain selection. + pub chainwork: ChainWork, + /// The height of this block. + pub height: Height, +} + +impl BlockIndex { + /// Constructs a new `BlockIndex`. + pub fn new( + hash: BlockHash, + parent_hash: BlockHash, + chainwork: ChainWork, + height: Height, + ) -> Self { + Self { + hash, + parent_hash, + chainwork, + height, + } + } + + /// Returns the hash of this block. + pub fn hash(&self) -> &BlockHash { + &self.hash + } + + /// Returns the hash of the parent block. + pub fn parent_hash(&self) -> &BlockHash { + &self.parent_hash + } + + /// Returns the cumulative chainwork up to this block. + pub fn chainwork(&self) -> &ChainWork { + &self.chainwork + } + + /// Returns the height of this block if it’s part of the best chain. + pub fn height(&self) -> Height { + self.height + } +} + +impl ZainoVersionedSerde for BlockIndex { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + + self.hash.serialize(&mut w)?; + self.parent_hash.serialize(&mut w)?; + self.chainwork.serialize(&mut w)?; + + write_option(&mut w, &Some(self.height), |w, h| h.serialize(w)) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + let hash = BlockHash::deserialize(&mut r)?; + let parent_hash = BlockHash::deserialize(&mut r)?; + let chainwork = ChainWork::deserialize(&mut r)?; + let height = read_option(&mut r, |r| Height::deserialize(r))?; + + Ok(BlockIndex::new( + hash, + parent_hash, + chainwork, + height.expect("blocks always have height"), + )) + } +} + +/// Cumulative proof-of-work of the chain, +/// stored as a **big-endian** 256-bit unsigned integer. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct ChainWork([u8; 32]); + +impl ChainWork { + /// Returns ChainWork as a U256. + pub fn to_u256(&self) -> U256 { + U256::from_big_endian(&self.0) + } + + /// Builds a ChainWork from a U256. + pub fn from_u256(value: U256) -> Self { + let buf: [u8; 32] = value.to_big_endian(); + ChainWork(buf) + } + + /// Adds 2 ChainWorks. + pub fn add(&self, other: &Self) -> Self { + Self::from_u256(self.to_u256() + other.to_u256()) + } + + /// Subtract one ChainWork from another. + pub fn sub(&self, other: &Self) -> Self { + Self::from_u256(self.to_u256() - other.to_u256()) + } + + /// Returns ChainWork bytes. + pub fn as_bytes(&self) -> &[u8; 32] { + &self.0 + } +} + +impl From for ChainWork { + fn from(value: U256) -> Self { + Self::from_u256(value) + } +} + +impl From for U256 { + fn from(value: ChainWork) -> Self { + value.to_u256() + } +} + +impl fmt::Display for ChainWork { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.to_u256().fmt(f) + } +} + +impl ZainoVersionedSerde for ChainWork { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + write_fixed_le::<32, _>(w, &self.0) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let bytes = read_fixed_le::<32, _>(r)?; + Ok(ChainWork(bytes)) + } +} + +/// 32 byte body. +impl FixedEncodedLen for ChainWork { + /// 32 bytes, stored big-endian (see ChainWork type docs) + const ENCODED_LEN: usize = 32; +} + +/// Essential block header fields required for chain validation and serving block header data. 
+/// +/// NOTE: Optional fields may be added for: +/// - hashLightClientRoot (FlyClient proofs) +/// - hashAuthDataRoot (ZIP-244 witness commitments) +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct BlockData { + /// Version number of the block format (protocol upgrades). + pub version: u32, + /// Unix timestamp of when the block was mined (seconds since epoch). + pub time: i64, + /// Merkle root hash of all transaction IDs in the block (used for quick tx inclusion proofs). + pub merkle_root: [u8; 32], + /// Digest representing the block-commitments Merkle root (commitment to note states). + /// - < V4: `hashFinalSaplingRoot` - Sapling note commitment tree root. + /// - => V4: `hashBlockCommitments` - digest over hashLightClientRoot and hashAuthDataRoot. + pub block_commitments: [u8; 32], + /// Compact difficulty target used for proof-of-work and difficulty calculation. + pub bits: u32, + /// Equihash nonce. + pub nonce: [u8; 32], + /// Equihash solution. + pub solution: EquihashSolution, +} + +impl BlockData { + /// Creates a new BlockData instance. 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + version: u32, + time: i64, + merkle_root: [u8; 32], + block_commitments: [u8; 32], + bits: u32, + nonse: [u8; 32], + solution: EquihashSolution, + ) -> Self { + Self { + version, + time, + merkle_root, + block_commitments, + bits, + nonce: nonse, + solution, + } + } + + /// Convert zebra block commitment to 32-byte array + pub fn commitment_to_bytes(commitment: zebra_chain::block::Commitment) -> [u8; 32] { + match commitment { + zebra_chain::block::Commitment::PreSaplingReserved(bytes) => bytes, + zebra_chain::block::Commitment::FinalSaplingRoot(root) => root.into(), + zebra_chain::block::Commitment::ChainHistoryActivationReserved => [0; 32], + zebra_chain::block::Commitment::ChainHistoryRoot(chain_history_mmr_root_hash) => { + chain_history_mmr_root_hash.bytes_in_serialized_order() + } + zebra_chain::block::Commitment::ChainHistoryBlockTxAuthCommitment( + chain_history_block_tx_auth_commitment_hash, + ) => chain_history_block_tx_auth_commitment_hash.bytes_in_serialized_order(), + } + } + + /// Returns block Version. + pub fn version(&self) -> u32 { + self.version + } + + /// Returns block time. + pub fn time(&self) -> i64 { + self.time + } + + /// Returns block merkle root. + pub fn merkle_root(&self) -> &[u8; 32] { + &self.merkle_root + } + + /// Returns block finalSaplingRoot or authDataRoot depending on version. + pub fn block_commitments(&self) -> &[u8; 32] { + &self.block_commitments + } + + /// Returns nbits. + pub fn bits(&self) -> u32 { + self.bits + } + + /// Converts compact bits field into the full target as a 256-bit integer. + pub fn target(&self) -> U256 { + Self::compact_to_target_u256(self.bits) + } + + /// Returns the block work as 2^256 / (target + 1) + pub fn work(&self) -> U256 { + let target = self.target(); + if target.is_zero() { + U256::zero() + } else { + (U256::one() << 256) / (target + 1) + } + } + + /// Returns difficulty as ratio of the genesis target to this block's target. 
+ pub fn difficulty(&self) -> f64 { + let max_target = Self::compact_to_target_u256(0x1d00ffff); // Zcash genesis + let target = self.target(); + Self::u256_to_f64(max_target) / Self::u256_to_f64(target) + } + + /// Used to convert bits to target. + fn compact_to_target_u256(bits: u32) -> U256 { + let exponent = (bits >> 24) as usize; + let mantissa = bits & 0x007fffff; + + if exponent <= 3 { + U256::from(mantissa) >> (8 * (3 - exponent)) + } else { + U256::from(mantissa) << (8 * (exponent - 3)) + } + } + + /// Converts a `U256` to `f64` lossily (sufficient for difficulty comparison). + fn u256_to_f64(value: U256) -> f64 { + let mut result = 0.0f64; + for (i, word) in value.0.iter().enumerate() { + result += (*word as f64) * 2f64.powi(64 * i as i32); + } + result + } + + /// Returns the Equihash nonce. + pub fn nonse(&self) -> [u8; 32] { + self.nonce + } + + /// Returns the Equihash solution. + pub fn solution(&self) -> EquihashSolution { + self.solution + } +} + +impl ZainoVersionedSerde for BlockData { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; // re-borrow + + write_u32_le(&mut w, self.version)?; + write_i64_le(&mut w, self.time)?; + + write_fixed_le::<32, _>(&mut w, &self.merkle_root)?; + write_fixed_le::<32, _>(&mut w, &self.block_commitments)?; + + write_u32_le(&mut w, self.bits)?; + write_fixed_le::<32, _>(&mut w, &self.nonce)?; + + self.solution.serialize(&mut w) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + + let version = read_u32_le(&mut r)?; + let time = read_i64_le(&mut r)?; + + let merkle_root = read_fixed_le::<32, _>(&mut r)?; + let block_commitments = read_fixed_le::<32, _>(&mut r)?; + + let bits = read_u32_le(&mut r)?; + let nonse = read_fixed_le::<32, _>(&mut r)?; + + let solution = EquihashSolution::deserialize(&mut r)?; + + Ok(BlockData::new( + version, + time, + merkle_root, + block_commitments, + 
bits, + nonse, + solution, + )) + } +} + +/// Equihash solution as it appears in a block header. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +// NOTE: if memory usage becomes too large we could move this data to the heap. +#[allow(clippy::large_enum_variant)] +pub enum EquihashSolution { + /// 200-9 solution (mainnet / testnet). + #[cfg_attr(test, serde(with = "serde_arrays"))] + Standard([u8; 1344]), + /// 48-5 solution (regtest). + #[cfg_attr(test, serde(with = "serde_arrays"))] + Regtest([u8; 36]), +} + +impl From for EquihashSolution { + fn from(value: zebra_chain::work::equihash::Solution) -> Self { + match value { + zebra_chain::work::equihash::Solution::Common(array) => Self::Standard(array), + zebra_chain::work::equihash::Solution::Regtest(array) => Self::Regtest(array), + } + } +} + +impl EquihashSolution { + /// Return a slice view (convenience). + pub fn as_bytes(&self) -> &[u8] { + match self { + Self::Standard(b) => b, + Self::Regtest(b) => b, + } + } +} + +impl TryFrom> for EquihashSolution { + type Error = &'static str; + + fn try_from(bytes: Vec) -> Result { + Self::try_from(bytes.as_slice()) + } +} + +impl<'a> TryFrom<&'a [u8]> for EquihashSolution { + type Error = &'static str; + + fn try_from(bytes: &'a [u8]) -> Result { + match bytes.len() { + 1344 => { + let mut arr = [0u8; 1344]; + arr.copy_from_slice(bytes); + Ok(EquihashSolution::Standard(arr)) + } + 36 => { + let mut arr = [0u8; 36]; + arr.copy_from_slice(bytes); + Ok(EquihashSolution::Regtest(arr)) + } + _ => Err("invalid Equihash solution length (expected 36 or 1344 bytes)"), + } + } +} + +impl ZainoVersionedSerde for EquihashSolution { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + + match self { + Self::Standard(bytes) => { + w.write_all(&[0])?; + write_fixed_le::<1344, _>(&mut w, bytes) + } + Self::Regtest(bytes) => { + w.write_all(&[1])?; + 
write_fixed_le::<36, _>(&mut w, bytes) + } + } + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + + let mut tag = [0u8; 1]; + r.read_exact(&mut tag)?; + match tag[0] { + 0 => { + let bytes = read_fixed_le::<1344, _>(&mut r)?; + Ok(EquihashSolution::Standard(bytes)) + } + 1 => { + let bytes = read_fixed_le::<36, _>(&mut r)?; + Ok(EquihashSolution::Regtest(bytes)) + } + other => Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("unknown Equihash variant tag {other}"), + )), + } + } +} + +/// Represents the indexing data of a single compact Zcash block used internally by Zaino. +/// Provides efficient indexing for blockchain state queries and updates. +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct IndexedBlock { + /// Metadata and indexing information for this block. + pub index: BlockIndex, + /// Essential header and metadata information for the block. + pub data: BlockData, + /// Compact representations of transactions in this block. + pub transactions: Vec, + /// Sapling and orchard commitment tree data for the chain + /// *after this block has been applied. + pub commitment_tree_data: CommitmentTreeData, +} + +impl IndexedBlock { + /// Creates a new `IndexedBlock`. + pub fn new( + index: BlockIndex, + data: BlockData, + tx: Vec, + commitment_tree_data: CommitmentTreeData, + ) -> Self { + Self { + index, + data, + transactions: tx, + commitment_tree_data, + } + } + + /// Returns a reference to the block index metadata. + pub fn index(&self) -> &BlockIndex { + &self.index + } + + /// Returns a reference to the header and auxiliary block data. + pub fn data(&self) -> &BlockData { + &self.data + } + + /// Returns a reference to the compact transactions in this block. + pub fn transactions(&self) -> &[CompactTxData] { + &self.transactions + } + + /// Returns the commitment tree data for this block. 
+ pub fn commitment_tree_data(&self) -> &CommitmentTreeData { + &self.commitment_tree_data + } + + /// Returns the block hash. + pub fn hash(&self) -> &BlockHash { + self.index.hash() + } + + /// Returns the block height if available. + pub fn height(&self) -> Height { + self.index.height() + } + + /// Returns the cumulative chainwork. + pub fn chainwork(&self) -> &ChainWork { + self.index.chainwork() + } + + /// Returns the raw work value (targeted work contribution). + pub fn work(&self) -> U256 { + self.data.work() + } + + /// Converts this `IndexedBlock` into a CompactBlock protobuf message using proto v4 format. + /// + /// NOTE: This method currently includes transparent tx data in the compact block produced, + /// `zaino-state::local_cache::compact_block_with_pool_types` should be used to selectively + /// remove tx data by pool type. Alternatively this method could be updated to take a + /// `zaino-proto::proto::utils::PoolTypeFilter` could be added as an input to this method, + /// with tx data being added selectively here. + pub fn to_compact_block(&self) -> zaino_proto::proto::compact_formats::CompactBlock { + // NOTE: Returns u64::MAX if the block is not in the best chain. 
+ let height: u64 = self.height().0.into(); + + let hash = self.hash().0.to_vec(); + let prev_hash = self.index().parent_hash().0.to_vec(); + + let vtx: Vec = self + .transactions() + .iter() + .map(|tx| tx.to_compact_tx(None)) + .collect(); + + let sapling_commitment_tree_size = self.commitment_tree_data().sizes().sapling(); + let orchard_commitment_tree_size = self.commitment_tree_data().sizes().orchard(); + + zaino_proto::proto::compact_formats::CompactBlock { + proto_version: 4, + height, + hash, + prev_hash, + time: self.data().time() as u32, + header: vec![], + vtx, + chain_metadata: Some(zaino_proto::proto::compact_formats::ChainMetadata { + sapling_commitment_tree_size, + orchard_commitment_tree_size, + }), + } + } +} + +impl ZainoVersionedSerde for IndexedBlock { + const VERSION: u8 = version::V1; + + fn encode_body(&self, mut w: &mut W) -> io::Result<()> { + self.index.serialize(&mut w)?; + self.data.serialize(&mut w)?; + write_vec(&mut w, &self.transactions, |w, tx| tx.serialize(w))?; + self.commitment_tree_data.serialize(&mut w) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + let index = BlockIndex::deserialize(&mut r)?; + let data = BlockData::deserialize(&mut r)?; + let tx = read_vec(&mut r, |r| CompactTxData::deserialize(r))?; + let ctd = CommitmentTreeData::deserialize(&mut r)?; + + Ok(IndexedBlock::new(index, data, tx, ctd)) + } +} +/// TryFrom inputs: +/// - FullBlock: +/// - Holds block data. +/// - parent_block_chain_work: +/// - Used to calculate cumulative chain work. +/// - Final sapling root: +/// - Must be fetched from separate RPC. +/// - Final orchard root: +/// - Must be fetched from separate RPC. +/// - parent_block_sapling_tree_size: +/// - Used to calculate sapling tree size. +/// - parent_block_orchard_tree_size: +/// - Used to calculate sapling tree size. 
+impl + TryFrom<( + zaino_fetch::chain::block::FullBlock, + ChainWork, + [u8; 32], + [u8; 32], + u32, + u32, + )> for IndexedBlock +{ + type Error = String; + + fn try_from( + ( + full_block, + parent_chainwork, + final_sapling_root, + final_orchard_root, + parent_sapling_size, + parent_orchard_size, + ): ( + zaino_fetch::chain::block::FullBlock, + ChainWork, + [u8; 32], + [u8; 32], + u32, + u32, + ), + ) -> Result { + // --- Block Header Info --- + let header = full_block.header(); + let height = Height::try_from(full_block.height() as u32) + .map_err(|e| format!("Invalid block height: {e}"))?; + + let hash: [u8; 32] = header + .cached_hash() + .try_into() + .map_err(|_| "Block hash must be 32 bytes")?; + let parent_hash: [u8; 32] = header + .hash_prev_block() + .try_into() + .map_err(|_| "Parent block hash must be 32 bytes")?; + + let merkle_root: [u8; 32] = header + .hash_merkle_root() + .try_into() + .map_err(|v: Vec| format!("merkle root must be 32 bytes, got {}", v.len()))?; + + let block_commitments: [u8; 32] = header + .final_sapling_root() + .try_into() + .map_err(|v: Vec| format!("block commitment must be 32 bytes, got {}", v.len()))?; + + let n_bits_bytes = header.n_bits_bytes(); + if n_bits_bytes.len() != 4 { + return Err("nBits must be 4 bytes".to_string()); + } + let bits = u32::from_le_bytes(n_bits_bytes.try_into().unwrap()); + + let nonse: [u8; 32] = header + .nonce() + .try_into() + .map_err(|v: Vec| format!("nonce must be 32 bytes, got {}", v.len()))?; + + let solution = EquihashSolution::try_from(header.solution()).map_err(|_| { + format!( + "solution must be 36 or 1344 bytes, got {}", + header.solution().len() + ) + })?; + + // --- Convert transactions --- + let mut sapling_note_count = 0; + let mut orchard_note_count = 0; + + let full_transactions = full_block.transactions(); + let mut tx = Vec::with_capacity(full_transactions.len()); + + for (i, ftx) in full_transactions.into_iter().enumerate() { + let txdata = CompactTxData::try_from((i as 
u64, ftx)) + .map_err(|e| format!("TxData conversion failed at index {i}: {e}"))?; + + sapling_note_count += txdata.sapling().outputs().len(); + orchard_note_count += txdata.orchard().actions().len(); + + tx.push(txdata); + } + + // --- Compute commitment trees --- + let sapling_root = final_sapling_root; + let orchard_root = final_orchard_root; + + let commitment_tree_data = CommitmentTreeData::new( + CommitmentTreeRoots::new(sapling_root, orchard_root), + CommitmentTreeSizes::new( + parent_sapling_size + sapling_note_count as u32, + parent_orchard_size + orchard_note_count as u32, + ), + ); + + // --- Compute chainwork --- + let block_data = BlockData::new( + header.version() as u32, + header.time() as i64, + merkle_root, + block_commitments, + bits, + nonse, + solution, + ); + + let chainwork = parent_chainwork.add(&ChainWork::from(block_data.work())); + + // --- Final index and block data --- + let index = BlockIndex::new( + BlockHash::from(hash), + BlockHash::from(parent_hash), + chainwork, + height, + ); + + Ok(IndexedBlock::new( + index, + block_data, + tx, + commitment_tree_data, + )) + } +} + +/// Tree root data from blockchain source +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct CompactTxData { + /// The index (position) of this transaction within its block (0-based). + index: u64, + /// Unique identifier (hash) of the transaction, used for lookup and indexing. + txid: TransactionHash, + /// Compact representation of transparent inputs/outputs in the transaction. + transparent: TransparentCompactTx, + /// Compact representation of Sapling shielded data. + sapling: SaplingCompactTx, + /// Compact representation of Orchard actions (shielded pool transactions). + orchard: OrchardCompactTx, +} + +impl CompactTxData { + /// Creates a new TxData instance. 
+ pub fn new( + index: u64, + txid: TransactionHash, + transparent: TransparentCompactTx, + sapling: SaplingCompactTx, + orchard: OrchardCompactTx, + ) -> Self { + Self { + index, + txid, + transparent, + sapling, + orchard, + } + } + + /// Returns transactions index within block. + pub fn index(&self) -> u64 { + self.index + } + + /// Returns transaction ID. + pub fn txid(&self) -> &TransactionHash { + &self.txid + } + + /// Returns sapling and orchard value balances. + pub fn balances(&self) -> (Option, Option) { + (self.sapling.value, self.orchard.value) + } + + /// Returns compact transparent tx data. + pub fn transparent(&self) -> &TransparentCompactTx { + &self.transparent + } + + /// Returns compact sapling tx data. + pub fn sapling(&self) -> &SaplingCompactTx { + &self.sapling + } + + /// Returns compact orchard tx data. + pub fn orchard(&self) -> &OrchardCompactTx { + &self.orchard + } + + /// Converts this `TxData` into a `CompactTx` protobuf message with an optional fee. + pub fn to_compact_tx( + &self, + fee: Option, + ) -> zaino_proto::proto::compact_formats::CompactTx { + let fee = fee.unwrap_or(0); + + let spends = self + .sapling() + .spends() + .iter() + .map( + |s| zaino_proto::proto::compact_formats::CompactSaplingSpend { + nf: s.nullifier().to_vec(), + }, + ) + .collect(); + + let outputs = self + .sapling() + .outputs() + .iter() + .map( + |o| zaino_proto::proto::compact_formats::CompactSaplingOutput { + cmu: o.cmu().to_vec(), + ephemeral_key: o.ephemeral_key().to_vec(), + ciphertext: o.ciphertext().to_vec(), + }, + ) + .collect(); + + let actions = self + .orchard() + .actions() + .iter() + .map( + |a| zaino_proto::proto::compact_formats::CompactOrchardAction { + nullifier: a.nullifier().to_vec(), + cmx: a.cmx().to_vec(), + ephemeral_key: a.ephemeral_key().to_vec(), + ciphertext: a.ciphertext().to_vec(), + }, + ) + .collect(); + + let vout = self.transparent().compact_vout(); + + let vin = self.transparent().compact_vin(); + + 
zaino_proto::proto::compact_formats::CompactTx { + index: self.index(), + txid: self.txid().0.to_vec(), + fee, + spends, + outputs, + actions, + vin, + vout, + } + } +} + +/// TryFrom inputs: +/// - Transaction Index +/// - Full Transaction +impl TryFrom<(u64, zaino_fetch::chain::transaction::FullTransaction)> for CompactTxData { + type Error = String; + + fn try_from( + (index, tx): (u64, zaino_fetch::chain::transaction::FullTransaction), + ) -> Result { + let txid_vec = tx.tx_id(); + // NOTE: Is this byte order correct? + let txid: [u8; 32] = txid_vec + .try_into() + .map_err(|_| "txid must be 32 bytes".to_string())?; + + let (sapling_balance, orchard_balance) = tx.value_balances(); + + let vin: Vec = tx + .transparent_inputs() + .into_iter() + .map(|(prev_txid, prev_index, _)| { + let prev_txid_arr: [u8; 32] = prev_txid + .try_into() + .map_err(|_| "prev_txid must be 32 bytes".to_string())?; + Ok::<_, String>(TxInCompact::new(prev_txid_arr, prev_index)) + }) + .collect::>()?; + + //TODO: We should error handle on these, a failure here should probably be + // reacted to + let vout: Vec = tx + .transparent_outputs() + .into_iter() + .filter_map(|(value, script)| { + if let Some((hash20, stype)) = parse_standard_script(&script) { + TxOutCompact::new(value, hash20, stype as u8) + } else { + let mut fallback = [0u8; 20]; + let copy_len = script.len().min(20); + fallback[..copy_len].copy_from_slice(&script[..copy_len]); + TxOutCompact::new(value, fallback, ScriptType::NonStandard as u8) + } + }) + .collect(); + + let transparent = TransparentCompactTx::new(vin, vout); + + let spends: Vec = tx + .shielded_spends() + .into_iter() + .map(|nf| { + let arr: [u8; 32] = nf + .try_into() + .map_err(|_| "sapling nullifier must be 32 bytes".to_string())?; + Ok::<_, String>(CompactSaplingSpend::new(arr)) + }) + .collect::>()?; + + let outputs: Vec = tx + .shielded_outputs() + .into_iter() + .map(|(cmu, epk, ct)| { + let cmu: [u8; 32] = cmu + .try_into() + .map_err(|_| "cmu must 
be 32 bytes".to_string())?; + let epk: [u8; 32] = epk + .try_into() + .map_err(|_| "ephemeral_key must be 32 bytes".to_string())?; + let ct: [u8; 52] = ct + .get(..52) + .ok_or("ciphertext must be at least 52 bytes")? + .try_into() + .map_err(|_| "ciphertext must be 52 bytes".to_string())?; + Ok::<_, String>(CompactSaplingOutput::new(cmu, epk, ct)) + }) + .collect::>()?; + + let sapling = SaplingCompactTx::new(sapling_balance, spends, outputs); + + let actions: Vec = tx + .orchard_actions() + .into_iter() + .map(|(nf, cmx, epk, ct)| { + let nf: [u8; 32] = nf + .try_into() + .map_err(|_| "orchard nullifier must be 32 bytes".to_string())?; + let cmx: [u8; 32] = cmx + .try_into() + .map_err(|_| "orchard cmx must be 32 bytes".to_string())?; + let epk: [u8; 32] = epk + .try_into() + .map_err(|_| "orchard ephemeral_key must be 32 bytes".to_string())?; + let ct: [u8; 52] = ct + .get(..52) + .ok_or("orchard ciphertext must be at least 52 bytes")? + .try_into() + .map_err(|_| "orchard ciphertext must be 52 bytes".to_string())?; + Ok::<_, String>(CompactOrchardAction::new(nf, cmx, epk, ct)) + }) + .collect::>()?; + + let orchard = OrchardCompactTx::new(orchard_balance, actions); + + Ok(CompactTxData::new( + index, + // NOTE: do we need to use from_bytes_in_display_order here? 
+ txid.into(), + transparent, + sapling, + orchard, + )) + } +} + +impl ZainoVersionedSerde for CompactTxData { + const VERSION: u8 = version::V1; + + fn encode_body(&self, mut w: &mut W) -> io::Result<()> { + write_u64_le(&mut w, self.index)?; + + self.txid.serialize(&mut w)?; + self.transparent.serialize(&mut w)?; + self.sapling.serialize(&mut w)?; + self.orchard.serialize(&mut w) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + let index = read_u64_le(&mut r)?; + + let txid = TransactionHash::deserialize(&mut r)?; + let transparent = TransparentCompactTx::deserialize(&mut r)?; + let sapling = SaplingCompactTx::deserialize(&mut r)?; + let orchard = OrchardCompactTx::deserialize(&mut r)?; + + Ok(CompactTxData::new( + index, + txid, + transparent, + sapling, + orchard, + )) + } +} + +/// Compact transaction inputs and outputs for transparent (unshielded) transactions. +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct TransparentCompactTx { + /// Transaction inputs (spent outputs from previous transactions). + vin: Vec, + /// Transaction outputs (newly created UTXOs). + vout: Vec, +} + +impl ZainoVersionedSerde for TransparentCompactTx { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + + write_vec(&mut w, &self.vin, |w, txin| txin.serialize(w))?; + write_vec(&mut w, &self.vout, |w, txout| txout.serialize(w)) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + + let vin = read_vec(&mut r, |r| TxInCompact::deserialize(r))?; + let vout = read_vec(&mut r, |r| TxOutCompact::deserialize(r))?; + + Ok(TransparentCompactTx::new(vin, vout)) + } +} + +impl TransparentCompactTx { + /// Creates a new TransparentCompactTx instance. 
+ pub fn new(vin: Vec, vout: Vec) -> Self { + Self { vin, vout } + } + + /// Returns transparent inputs. + pub fn inputs(&self) -> &[TxInCompact] { + &self.vin + } + + /// Returns transparent outputs. + pub fn outputs(&self) -> &[TxOutCompact] { + &self.vout + } + + /// Returns Proto CompactTxIn values, omitting the null prevout used by coinbase. + pub fn compact_vin(&self) -> Vec { + self.inputs() + .iter() + .filter(|txin| !txin.is_null_prevout()) + .map(|txin| txin.to_compact()) + .collect() + } + + /// Returns Proto TxOut values. + pub fn compact_vout(&self) -> Vec { + self.outputs() + .iter() + .map(|txout| txout.to_compact()) + .collect() + } +} + +/// A compact reference to a previously created transparent UTXO being spent. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct TxInCompact { + /// Transaction ID of the output being spent. + prevout_txid: [u8; 32], + /// Index (position) of the output in the previous transaction being spent. + prevout_index: u32, +} + +impl TxInCompact { + /// Creates a new TxInCompact instance. + pub fn new(prevout_txid: [u8; 32], prevout_index: u32) -> Self { + Self { + prevout_txid, + prevout_index, + } + } + + /// Constructs a canonical "null prevout" (coinbase marker). + pub fn null_prevout() -> Self { + Self { + prevout_txid: [0u8; 32], + prevout_index: u32::MAX, + } + } + + /// Returns txid of the transaction that holds the output being sent. + pub fn prevout_txid(&self) -> &[u8; 32] { + &self.prevout_txid + } + + /// Returns the index of the output being sent within the transaction. + pub fn prevout_index(&self) -> u32 { + self.prevout_index + } + + /// `true` if this input is the special “null” out-point used by a + /// coinbase transaction (all-zero txid, index 0xffff_ffff). + pub fn is_null_prevout(&self) -> bool { + self.prevout_txid == [0u8; 32] && self.prevout_index == u32::MAX + } + + /// Creates a Proto CompactTxIn from this record. 
+ pub fn to_compact(&self) -> zaino_proto::proto::compact_formats::CompactTxIn { + zaino_proto::proto::compact_formats::CompactTxIn { + prevout_txid: self.prevout_txid.to_vec(), + prevout_index: self.prevout_index, + } + } +} + +impl ZainoVersionedSerde for TxInCompact { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + write_fixed_le::<32, _>(&mut w, &self.prevout_txid)?; + write_u32_le(&mut w, self.prevout_index) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + let txid = read_fixed_le::<32, _>(&mut r)?; + let idx = read_u32_le(&mut r)?; + Ok(TxInCompact::new(txid, idx)) + } +} + +/// TxInCompact = 36 bytes +impl FixedEncodedLen for TxInCompact { + /// 32-byte txid + 4-byte LE index + const ENCODED_LEN: usize = 32 + 4; +} + +/// Identifies the type of transparent transaction output script. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub enum ScriptType { + /// Standard pay-to-public-key-hash (P2PKH) address (`t1...`). + P2PKH = 0x00, + /// Standard pay-to-script-hash (P2SH) address (`t3...`). + P2SH = 0x01, + /// Non-standard output script (rare). + NonStandard = 0xFF, +} + +impl TryFrom for ScriptType { + type Error = (); + + fn try_from(value: u8) -> Result { + match value { + 0x00 => Ok(ScriptType::P2PKH), + 0x01 => Ok(ScriptType::P2SH), + 0xFF => Ok(ScriptType::NonStandard), + _ => Err(()), + } + } +} + +impl ScriptType { + /// Returns ScriptType as a String. 
+ pub fn as_str(&self) -> &'static str { + match self { + ScriptType::P2PKH => "P2PKH", + ScriptType::P2SH => "P2SH", + ScriptType::NonStandard => "NonStandard", + } + } +} + +impl ZainoVersionedSerde for ScriptType { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + w.write_all(&[*self as u8]) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut b = [0u8; 1]; + r.read_exact(&mut b)?; + ScriptType::try_from(b[0]) + .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "unknown ScriptType")) + } +} + +/// ScriptType = 1 byte +impl FixedEncodedLen for ScriptType { + /// 1 byte + const ENCODED_LEN: usize = 1; +} + +/// Try to recognise a standard P2PKH / P2SH locking script. +/// Returns (payload-hash, ScriptType) on success. +pub(crate) fn parse_standard_script(script: &[u8]) -> Option<([u8; 20], ScriptType)> { + // P2PKH 76 a9 14 <20-B hash> 88 ac + const P2PKH_PREFIX: &[u8] = &[0x76, 0xa9, 0x14]; + const P2PKH_SUFFIX: &[u8] = &[0x88, 0xac]; + + // P2SH a9 14 <20-B hash> 87 + const P2SH_PREFIX: &[u8] = &[0xa9, 0x14]; + const P2SH_SUFFIX: &[u8] = &[0x87]; + + if script.starts_with(P2PKH_PREFIX) && script.ends_with(P2PKH_SUFFIX) && script.len() == 25 { + let mut hash = [0u8; 20]; + hash.copy_from_slice(&script[3..23]); + return Some((hash, ScriptType::P2PKH)); + } + if script.starts_with(P2SH_PREFIX) && script.ends_with(P2SH_SUFFIX) && script.len() == 23 { + let mut hash = [0u8; 20]; + hash.copy_from_slice(&script[2..22]); + return Some((hash, ScriptType::P2SH)); + } + None +} + +/// Reconstruct the canonical P2PKH or P2SH scriptPubKey for a 20-byte payload. +/// Returns `None` if given `ScriptType::NonStandard` (or any other unknown type). 
+pub(crate) fn build_standard_script(hash: [u8; 20], stype: ScriptType) -> Option> { + const P2PKH_PREFIX: &[u8] = &[0x76, 0xa9, 0x14]; + const P2PKH_SUFFIX: &[u8] = &[0x88, 0xac]; + const P2PKH_LEN: usize = 25; + + const P2SH_PREFIX: &[u8] = &[0xa9, 0x14]; + const P2SH_SUFFIX: u8 = 0x87; + const P2SH_LEN: usize = 23; + + match stype { + ScriptType::P2PKH => { + let mut script = Vec::with_capacity(P2PKH_LEN); + script.extend_from_slice(P2PKH_PREFIX); + script.extend_from_slice(&hash); + script.extend_from_slice(P2PKH_SUFFIX); + debug_assert!(script.len() == P2PKH_LEN); + Some(script) + } + ScriptType::P2SH => { + let mut script = Vec::with_capacity(P2SH_LEN); + script.extend_from_slice(P2SH_PREFIX); + script.extend_from_slice(&hash); + script.push(P2SH_SUFFIX); + debug_assert!(script.len() == P2SH_LEN); + Some(script) + } + ScriptType::NonStandard => None, + } +} + +/// Compact representation of a transparent output, optimized for indexing and efficient querying. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct TxOutCompact { + /// Amount of ZEC sent to this output (in zatoshis). + value: u64, + /// 20-byte hash representation of the script or address this output pays to. + script_hash: [u8; 20], + /// Type indicator for the output's script/address type, enabling efficient address reconstruction. + script_type: u8, +} + +impl TxOutCompact { + /// Creates a new TxOutCompact instance. + pub fn new(value: u64, script_hash: [u8; 20], script_type: u8) -> Option { + if ScriptType::try_from(script_type).is_ok() { + Some(Self { + value, + script_hash, + script_type, + }) + } else { + None + } + } + + /// Returns the valuse in zatoshi sent in this output. + pub fn value(&self) -> u64 { + self.value + } + + /// Returns script hash. + pub fn script_hash(&self) -> &[u8; 20] { + &self.script_hash + } + + /// Returns script type u8. 
+ pub fn script_type(&self) -> u8 { + self.script_type + } + + /// Returns script type Enum. + pub fn script_type_enum(&self) -> Option { + ScriptType::try_from(self.script_type).ok() + } + + /// Creates a Proto TxOut from this record. + /// + /// Note: this reconstructs standard P2PKH / P2SH scripts. For NonStandard outputs, + /// this returns an empty script_pub_key. + pub fn to_compact(&self) -> zaino_proto::proto::compact_formats::TxOut { + let script_pub_key = self + .script_type_enum() + .and_then(|script_type| build_standard_script(self.script_hash, script_type)) + .unwrap_or_default(); + + zaino_proto::proto::compact_formats::TxOut { + value: self.value, + script_pub_key, + } + } +} + +impl> TryFrom<(u64, T)> for TxOutCompact { + type Error = (); + + fn try_from((value, script): (u64, T)) -> Result { + let script_bytes = script.as_ref(); + + if let Some(addr) = AddrScript::from_script(script_bytes) { + TxOutCompact::new(value, *addr.hash(), addr.script_type()).ok_or(()) + } else if script_bytes.len() == 21 { + let script_type = script_bytes[0]; + let mut hash_bytes = [0u8; 20]; + hash_bytes.copy_from_slice(&script_bytes[1..]); + TxOutCompact::new(value, hash_bytes, script_type).ok_or(()) + } else { + // fallback for nonstandard scripts + let mut fallback = [0u8; 20]; + let usable_len = script_bytes.len().min(20); + fallback[..usable_len].copy_from_slice(&script_bytes[..usable_len]); + TxOutCompact::new(value, fallback, ScriptType::NonStandard as u8).ok_or(()) + } + } +} + +impl ZainoVersionedSerde for TxOutCompact { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + write_u64_le(&mut w, self.value)?; + write_fixed_le::<20, _>(&mut w, &self.script_hash)?; + w.write_all(&[self.script_type]) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + let value = read_u64_le(&mut r)?; + let script_hash = read_fixed_le::<20, 
_>(&mut r)?; + + let mut b = [0u8; 1]; + r.read_exact(&mut b)?; + TxOutCompact::new(value, script_hash, b[0]) + .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "invalid script_type")) + } +} + +/// TxOutCompact = 29 bytes +impl FixedEncodedLen for TxOutCompact { + /// 8-byte LE value + 20-byte script hash + 1-byte type + const ENCODED_LEN: usize = 8 + 20 + 1; +} + +/// Compact representation of Sapling shielded transaction data for wallet scanning. +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct SaplingCompactTx { + /// Net Sapling value balance (before fees); `None` if no Sapling component. + value: Option, + /// Shielded spends (notes being consumed). + spends: Vec, + /// Shielded outputs (new notes created). + outputs: Vec, +} + +impl SaplingCompactTx { + /// Creates a new SaplingCompactTx instance. + pub fn new( + value: Option, + spends: Vec, + outputs: Vec, + ) -> Self { + Self { + value, + spends, + outputs, + } + } + + /// Returns the net sapling value balance (before fees); `None` if no sapling component. + pub fn value(&self) -> Option { + self.value + } + + /// Returns sapling spends. 
+ pub fn spends(&self) -> &[CompactSaplingSpend] { + &self.spends + } + + /// Returns sapling outputs + pub fn outputs(&self) -> &[CompactSaplingOutput] { + &self.outputs + } +} + +impl ZainoVersionedSerde for SaplingCompactTx { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + + write_option(&mut w, &self.value, |w, v| write_i64_le(w, *v))?; + write_vec(&mut w, &self.spends, |w, s| s.serialize(w))?; + write_vec(&mut w, &self.outputs, |w, o| o.serialize(w)) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + + let value = read_option(&mut r, |r| read_i64_le(r))?; + let spends = read_vec(&mut r, |r| CompactSaplingSpend::deserialize(r))?; + let outputs = read_vec(&mut r, |r| CompactSaplingOutput::deserialize(r))?; + + Ok(SaplingCompactTx::new(value, spends, outputs)) + } +} + +/// Compact representation of a Sapling shielded spend (consuming a previous shielded note). +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct CompactSaplingSpend { + /// Nullifier of the Sapling note being spent, prevents double spends. + nf: [u8; 32], +} + +impl CompactSaplingSpend { + /// Creates a new CompactSaplingSpend instance. + pub fn new(nf: [u8; 32]) -> Self { + Self { nf } + } + + /// Returns sapling nullifier. + pub fn nullifier(&self) -> &[u8; 32] { + &self.nf + } + + /// Creates a Proto CompactSaplingSpend from this record. 
+ pub fn into_compact(&self) -> zaino_proto::proto::compact_formats::CompactSaplingSpend { + zaino_proto::proto::compact_formats::CompactSaplingSpend { + nf: self.nf.to_vec(), + } + } +} + +impl ZainoVersionedSerde for CompactSaplingSpend { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + write_fixed_le::<32, _>(w, &self.nf) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + Ok(CompactSaplingSpend::new(read_fixed_le::<32, _>(r)?)) + } +} + +/// 32-byte nullifier +impl FixedEncodedLen for CompactSaplingSpend { + /// 32 bytes + const ENCODED_LEN: usize = 32; +} + +/// Compact representation of a newly created Sapling shielded note output. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct CompactSaplingOutput { + /// Commitment of the newly created shielded note. + cmu: [u8; 32], + /// Ephemeral public key used by receivers to detect/decrypt the note. + ephemeral_key: [u8; 32], + /// Encrypted note ciphertext (minimal required portion). + #[cfg_attr(test, serde(with = "serde_arrays"))] + ciphertext: [u8; 52], +} + +impl CompactSaplingOutput { + /// Creates a new CompactSaplingOutput instance. + pub fn new(cmu: [u8; 32], ephemeral_key: [u8; 32], ciphertext: [u8; 52]) -> Self { + Self { + cmu, + ephemeral_key, + ciphertext, + } + } + + /// Returns cmu. + pub fn cmu(&self) -> &[u8; 32] { + &self.cmu + } + + /// Returns ephemeral key. + pub fn ephemeral_key(&self) -> &[u8; 32] { + &self.ephemeral_key + } + + /// Returns ciphertext. + pub fn ciphertext(&self) -> &[u8; 52] { + &self.ciphertext + } + + /// Creates a Proto CompactSaplingOutput from this record. 
+ pub fn into_compact(&self) -> zaino_proto::proto::compact_formats::CompactSaplingOutput { + zaino_proto::proto::compact_formats::CompactSaplingOutput { + cmu: self.cmu.to_vec(), + ephemeral_key: self.ephemeral_key.to_vec(), + ciphertext: self.ciphertext.to_vec(), + } + } +} + +impl ZainoVersionedSerde for CompactSaplingOutput { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + write_fixed_le::<32, _>(&mut w, &self.cmu)?; + write_fixed_le::<32, _>(&mut w, &self.ephemeral_key)?; + write_fixed_le::<52, _>(&mut w, &self.ciphertext) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + let cmu = read_fixed_le::<32, _>(&mut r)?; + let epk = read_fixed_le::<32, _>(&mut r)?; + let ciphertext = read_fixed_le::<52, _>(&mut r)?; + Ok(CompactSaplingOutput::new(cmu, epk, ciphertext)) + } +} + +/// 116 bytes +impl FixedEncodedLen for CompactSaplingOutput { + /// 32-byte cmu + 32-byte ephemeral_key + 52-byte ciphertext + const ENCODED_LEN: usize = 32 + 32 + 52; +} + +/// Compact summary of all shielded activity in a transaction. +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct OrchardCompactTx { + /// Net Orchard value balance (before fees); `None` if no Orchard component. + value: Option, + /// Orchard actions (may be empty). + actions: Vec, +} + +impl OrchardCompactTx { + /// Creates a new CompactOrchardTx instance. + pub fn new(value: Option, actions: Vec) -> Self { + Self { value, actions } + } + + /// Returns the net orchard value balance (before fees); `None` if no Orchard component. + pub fn value(&self) -> Option { + self.value + } + + /// Returns the orchard actions in this transaction. 
+ pub fn actions(&self) -> &[CompactOrchardAction] { + &self.actions + } +} + +impl ZainoVersionedSerde for OrchardCompactTx { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + + write_option(&mut w, &self.value, |w, v| write_i64_le(w, *v))?; + write_vec(&mut w, &self.actions, |w, a| a.serialize(w)) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + + let value = read_option(&mut r, |r| read_i64_le(r))?; + let actions = read_vec(&mut r, |r| CompactOrchardAction::deserialize(r))?; + + Ok(OrchardCompactTx::new(value, actions)) + } +} + +/// Compact representation of Orchard shielded action (note spend or output). +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct CompactOrchardAction { + /// Nullifier preventing double spends of the Orchard note. + nullifier: [u8; 32], + /// Commitment of the new Orchard note created. + cmx: [u8; 32], + /// Ephemeral public key for detecting and decrypting Orchard notes. + ephemeral_key: [u8; 32], + /// Encrypted ciphertext of the Orchard note (minimal required portion). + #[cfg_attr(test, serde(with = "serde_arrays"))] + ciphertext: [u8; 52], +} + +impl CompactOrchardAction { + /// Creates a new CompactOrchardAction instance. + pub fn new( + nullifier: [u8; 32], + cmx: [u8; 32], + ephemeral_key: [u8; 32], + ciphertext: [u8; 52], + ) -> Self { + Self { + nullifier, + cmx, + ephemeral_key, + ciphertext, + } + } + + /// Returns orchard nullifier. + pub fn nullifier(&self) -> &[u8; 32] { + &self.nullifier + } + + /// Returns cmx. + pub fn cmx(&self) -> &[u8; 32] { + &self.cmx + } + + /// Returns ephemeral key. + pub fn ephemeral_key(&self) -> &[u8; 32] { + &self.ephemeral_key + } + + /// Returns ciphertext. 
+ pub fn ciphertext(&self) -> &[u8; 52] { + &self.ciphertext + } + + /// Creates a Proto CompactOrchardAction from this record. + pub fn into_compact(&self) -> zaino_proto::proto::compact_formats::CompactOrchardAction { + zaino_proto::proto::compact_formats::CompactOrchardAction { + nullifier: self.nullifier.to_vec(), + cmx: self.cmx.to_vec(), + ephemeral_key: self.ephemeral_key.to_vec(), + ciphertext: self.ciphertext.to_vec(), + } + } +} + +impl ZainoVersionedSerde for CompactOrchardAction { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + write_fixed_le::<32, _>(&mut w, &self.nullifier)?; + write_fixed_le::<32, _>(&mut w, &self.cmx)?; + write_fixed_le::<32, _>(&mut w, &self.ephemeral_key)?; + write_fixed_le::<52, _>(&mut w, &self.ciphertext) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + let nf = read_fixed_le::<32, _>(&mut r)?; + let cmx = read_fixed_le::<32, _>(&mut r)?; + let epk = read_fixed_le::<32, _>(&mut r)?; + let ctxt = read_fixed_le::<52, _>(&mut r)?; + Ok(CompactOrchardAction::new(nf, cmx, epk, ctxt)) + } +} + +// CompactOrchardAction = 148 bytes +impl FixedEncodedLen for CompactOrchardAction { + /// 32-byte nullifier + 32-byte cmx + 32-byte ephemeral_key + 52-byte ciphertext + const ENCODED_LEN: usize = 32 + 32 + 32 + 52; +} + +/// Identifies a transaction's location by block height and transaction index. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct TxLocation { + /// Block height in chain. + block_height: u32, + /// Transaction index in block. + tx_index: u16, +} + +impl TxLocation { + /// Creates a new TxLocation instance. + pub fn new(block_height: u32, tx_index: u16) -> Self { + Self { + block_height, + tx_index, + } + } + + /// Returns the block height held in the TxLocation. 
+ pub fn block_height(&self) -> u32 { + self.block_height + } + + /// Returns the transaction index held in the TxLocation. + pub fn tx_index(&self) -> u16 { + self.tx_index + } +} + +impl ZainoVersionedSerde for TxLocation { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + write_u32_be(&mut *w, self.block_height)?; + write_u16_be(&mut *w, self.tx_index) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let block_height = read_u32_be(&mut *r)?; + let tx_index = read_u16_be(&mut *r)?; + Ok(TxLocation::new(block_height, tx_index)) + } +} + +/// 6 bytes, BE encoded. +impl FixedEncodedLen for TxLocation { + /// 4-byte big-endian block_index + 2-byte big-endian tx_index + const ENCODED_LEN: usize = 4 + 2; +} + +/// Single transparent-address activity record (input or output). +/// +/// Note when flag is set to IS_INPUT, out_index is actually the index of the input event. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct AddrHistRecord { + tx_location: TxLocation, + out_index: u16, + flags: u8, + value: u64, +} + +/* ----- flag helpers ----- */ +impl AddrHistRecord { + /// Flag mask for is_mined. + pub const FLAG_MINED: u8 = 0b00000001; + + /// Flag mask for is_spent. + pub const FLAG_SPENT: u8 = 0b00000010; + + /// Flag mask for is_input. + pub const FLAG_IS_INPUT: u8 = 0b00000100; + + /// Creatues a new AddrHistRecord instance. + pub fn new(tx_location: TxLocation, out_index: u16, value: u64, flags: u8) -> Self { + Self { + tx_location, + out_index, + flags, + value, + } + } + + /// Returns the TxLocation in this record. + pub fn tx_location(&self) -> TxLocation { + self.tx_location + } + + /// Returns the out index of this record. + pub fn out_index(&self) -> u16 { + self.out_index + } + + /// Returns the value of this record. 
+ pub fn value(&self) -> u64 { + self.value + } + + /// Returns the flag byte of this record. + pub fn flags(&self) -> u8 { + self.flags + } + + /// Returns true if this record is from a mined block. + pub fn is_mined(&self) -> bool { + self.flags & Self::FLAG_MINED != 0 + } + + /// Returns true if this record is a spend. + pub fn is_spent(&self) -> bool { + self.flags & Self::FLAG_SPENT != 0 + } + + /// Returns true if this record is an input. + pub fn is_input(&self) -> bool { + self.flags & Self::FLAG_IS_INPUT != 0 + } +} + +impl ZainoVersionedSerde for AddrHistRecord { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + self.tx_location.serialize(&mut *w)?; + write_u16_be(&mut *w, self.out_index)?; + w.write_all(&[self.flags])?; + write_u64_le(&mut *w, self.value) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let tx_location = TxLocation::deserialize(&mut *r)?; + let out_index = read_u16_be(&mut *r)?; + let mut flag = [0u8; 1]; + r.read_exact(&mut flag)?; + let value = read_u64_le(&mut *r)?; + + Ok(AddrHistRecord::new(tx_location, out_index, value, flag[0])) + } +} + +/// 18 byte total +impl FixedEncodedLen for AddrHistRecord { + /// 1 byte: TxLocation tag + /// +6 bytes: TxLocation body (4 BE block_index + 2 BE tx_index) + /// +2 bytes: out_index (BE) + /// +8 bytes: value (LE) + /// +1 byte : flags + /// =18 bytes + const ENCODED_LEN: usize = (TxLocation::ENCODED_LEN + 1) + 2 + 8 + 1; +} + +/// AddrHistRecord database byte array. +/// +/// Layout (all big-endian except `value`): +/// ```text +/// [0..4] height +/// [4..6] tx_index +/// [6..8] vout +/// [8] flags +/// [9..17] value (little-endian, matches Zcashd) +/// ``` +/// +/// Note when flag is set to IS_INPUT, vout is actually the index of the input event. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[allow(dead_code)] +pub(crate) struct AddrEventBytes([u8; 17]); + +#[allow(dead_code)] +impl AddrEventBytes { + const LEN: usize = 17; + + /// Flag mask for is_mined. + pub const FLAG_MINED: u8 = 0x01; + + /// Flag mask for is_spent. + pub const FLAG_SPENT: u8 = 0x02; + + /// Flag mask for is_input. + pub const FLAG_IS_INPUT: u8 = 0x04; + + /// Create an [`AddrEventBytes`] from an [`AddrHistRecord`], + /// returning an I/O error if any write fails. + #[allow(dead_code)] + pub(crate) fn from_record(rec: &AddrHistRecord) -> io::Result { + let mut buf = [0u8; Self::LEN]; + let mut c = Cursor::new(&mut buf[..]); + + write_u32_be(&mut c, rec.tx_location.block_height)?; + write_u16_be(&mut c, rec.tx_location.tx_index)?; + write_u16_be(&mut c, rec.out_index)?; + c.write_all(&[rec.flags])?; + write_u64_le(&mut c, rec.value)?; + + Ok(AddrEventBytes(buf)) + } + + /// Create an [`AddrHistRecord`] from an [`AddrEventBytes`], + /// returning an I/O error if any read fails or data is invalid. 
+ #[allow(dead_code)] + pub(crate) fn as_record(&self) -> io::Result { + let mut c = Cursor::new(&self.0[..]); + + let block_height = read_u32_be(&mut c)?; + let tx_index = read_u16_be(&mut c)?; + let out_index = read_u16_be(&mut c)?; + let mut flag = [0u8; 1]; + c.read_exact(&mut flag)?; + let value = read_u64_le(&mut c)?; + + Ok(AddrHistRecord::new( + TxLocation::new(block_height, tx_index), + out_index, + value, + flag[0], + )) + } +} + +impl ZainoVersionedSerde for AddrEventBytes { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + write_fixed_le::<17, _>(w, &self.0) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + Ok(AddrEventBytes(read_fixed_le::<17, _>(r)?)) + } +} + +/// 17 byte body: +/// +/// ```text +/// [0..4] block_height (BE u32) | Block height +/// [4..6] tx_index (BE u16) | Transaction index within block +/// [6..8] vout (BE u16) | Input/output index within transaction +/// [8] flags ( u8 ) | Bitflags (mined/spent/input masks) +/// [9..17] value (LE u64) | Amount in zatoshi, little-endian +/// ``` +impl FixedEncodedLen for AddrEventBytes { + const ENCODED_LEN: usize = 17; +} + +// *** Sharding *** + +/// Root commitment for a state shard. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct ShardRoot { + /// Shard commitment tree root (256-bit digest) + hash: [u8; 32], + /// Hash of the final block in this shard + final_block_hash: [u8; 32], + /// Height of the final block in this shard + final_block_height: u32, +} + +impl ShardRoot { + /// Creates a new ShardRoot instance. + pub fn new(hash: [u8; 32], final_block_hash: [u8; 32], final_block_height: u32) -> Self { + Self { + hash, + final_block_hash, + final_block_height, + } + } + + /// Returns commitment tree root. 
+ pub fn hash(&self) -> &[u8; 32] { + &self.hash + } + + /// Returns the hash of the final block in this shard. + pub fn final_block_hash(&self) -> &[u8; 32] { + &self.final_block_hash + } + + /// Returns the Height of the final block in this shard. + pub fn final_block_height(&self) -> u32 { + self.final_block_height + } +} + +impl ZainoVersionedSerde for ShardRoot { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + write_fixed_le::<32, _>(&mut w, &self.hash)?; + write_fixed_le::<32, _>(&mut w, &self.final_block_hash)?; + write_u32_le(&mut w, self.final_block_height) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + let hash = read_fixed_le::<32, _>(&mut r)?; + let final_block_hash = read_fixed_le::<32, _>(&mut r)?; + let final_block_height = read_u32_le(&mut r)?; + Ok(ShardRoot::new(hash, final_block_hash, final_block_height)) + } +} + +/// 68 byte body. +impl FixedEncodedLen for ShardRoot { + /// 32 byte hash + 32 byte hash + 4 byte block height + const ENCODED_LEN: usize = 32 + 32 + 4; +} + +// *** Wrapper Objects *** + +/// Holds full block header data, +/// split into chain indexeing data and additional header data. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct BlockHeaderData { + /// Chain indexing data + index: BlockIndex, + /// Block header data + data: BlockData, +} + +impl BlockHeaderData { + /// Constructs a new `BlockHeaderData`. + pub fn new(index: BlockIndex, data: BlockData) -> Self { + Self { index, data } + } + + /// Returns the stored [`BlockIndex`]. + pub fn index(&self) -> &BlockIndex { + &self.index + } + + /// Returns the stored [`BlockData`]. 
+ pub fn data(&self) -> &BlockData { + &self.data + } +} + +impl ZainoVersionedSerde for BlockHeaderData { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + self.index.serialize(&mut *w)?; + self.data.serialize(w) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let index = BlockIndex::deserialize(&mut *r)?; + let data = BlockData::deserialize(r)?; + Ok(BlockHeaderData::new(index, data)) + } +} + +/// Database wrapper for `Vec`. +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct TxidList { + /// Txids. + txids: Vec, +} + +impl TxidList { + /// Creates a new `TxidList`. + pub fn new(tx: Vec) -> Self { + Self { txids: tx } + } + + /// Returns a slice of the contained txids. + pub fn txids(&self) -> &[TransactionHash] { + &self.txids + } +} + +impl ZainoVersionedSerde for TxidList { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + write_vec(w, &self.txids, |w, h| h.serialize(w)) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let tx = read_vec(r, |r| TransactionHash::deserialize(r))?; + Ok(TxidList::new(tx)) + } +} + +/// Wrapper for the list of transparent components of each transaction. +/// +/// Each entry is `Some(TransparentCompactTx)` when the transaction **has** +/// a transparent part, and `None` when it does not. +/// +/// This ensures 1-to-1 indexing with `TxidList`: element *i* matches txid *i*. +/// `None` keeps the index when the tx lacks this pool. 
+/// +/// **Serialization layout for `TransparentTxList` (implements `ZainoVersionedSerde`)** +/// +/// ┌──────────── byte 0 ─────────────┬────────── CompactSize ─────────────┬──────────── entries ───────────────┐ +/// │ TransparentTxList version tag │ num_txs (CompactSize) = N │ [`Option`; N]│ +/// └─────────────────────────────────┴────────────────────────────────────┴────────────────────────────────────┘ +/// +/// Each `Option` is serialized as: +/// +/// ┌── 1 byte ──┬────────── TransparentCompactTx ─────────────┐ +/// │ 0 or 1 │ If Some: 1-byte version + body │ +/// └────────────┴─────────────────────────────────────────────┘ +/// +/// TransparentCompactTx: +/// ┌── version ─┬──── CompactSize vin_len ─┬──── vin entries ─────┬──── CompactSize vout_len ──┬──── vout entries ────┐ +/// │ 0x01 │ N1 (CompactSize) │ [TxInCompact; N1] │ N2 (CompactSize) │ [TxOutCompact; N2] │ +/// └────────────┴──────────────────────────┴──────────────────────┴────────────────────────────┴──────────────────────┘ +/// +/// Each `TxInCompact` is serialized as: +/// ┌── version ─┬────────────── 36 bytes body ──────────────┐ +/// │ 0x01 │ 32-byte txid + 4-byte LE prevout_index │ +/// └────────────┴───────────────────────────────────────────┘ +/// +/// Each `TxOutCompact` is serialized as: +/// ┌── version ─┬────────────── 29 bytes body ──────────────┐ +/// │ 0x01 │ 8-byte LE value + 20-byte script_hash │ +/// │ │ + 1-byte script_type │ +/// └────────────┴───────────────────────────────────────────┘ +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct TransparentTxList { + /// Transparent transaction data. + tx: Vec>, +} + +impl TransparentTxList { + /// Creates a new `TransparentTxList`. + pub fn new(tx: Vec>) -> Self { + Self { tx } + } + + /// Returns the slice of optional transparent tx fragments. 
+ pub fn tx(&self) -> &[Option] { + &self.tx + } +} + +impl ZainoVersionedSerde for TransparentTxList { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + write_vec(w, &self.tx, |w, opt| { + write_option(w, opt, |w, t| t.serialize(w)) + }) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let tx = read_vec(r, |r| { + read_option(r, |r| TransparentCompactTx::deserialize(r)) + })?; + Ok(TransparentTxList::new(tx)) + } +} + +/// List of the Sapling component (if any) of every transaction in a block. +/// +/// * Each element is `Some(SaplingCompactTx)` when that transaction **does** +/// contain Sapling data, or `None` when it does not. +/// +/// This ensures 1-to-1 indexing with `TxidList`: element *i* matches txid *i*. +/// `None` keeps the index when the tx lacks this pool. +/// +/// **Serialization layout for `SaplingTxList` (implements `ZainoVersionedSerde`)** +/// +/// ┌──────────── byte 0 ─────────────┬────────── CompactSize ─────────────┬──────────── entries ───────────────┐ +/// │ SaplingTxList version tag = 1 │ num_txs (CompactSize) = N │ [`Option`; N] │ +/// └─────────────────────────────────┴────────────────────────────────────┴────────────────────────────────────┘ +/// +/// Each `Option` is serialized as: +/// +/// ┌── 1 byte ──┬────────────── SaplingCompactTx ──────────────┐ +/// │ 0 or 1 │ If Some: 1-byte version + body │ +/// └────────────┴──────────────────────────────────────────────┘ +/// +/// SaplingCompactTx: +/// ┌── version ─┬──── 1 byte opt ─────┬──── CompactSize ──┬──── spend entries ─────────┬──── CompactSize ───┬──── output entries ─────────┐ +/// │ 0x01 │ 0 or 1 + i64 (value)│ N1 = num_spends │ `[CompactSaplingSpend;N1]` │ N2 = num_outputs │ `[CompactSaplingOutput;N2]` │ +/// └────────────┴─────────────────────┴───────────────────┴────────────────────────────┴────────────────────┴─────────────────────────────┘ +/// +/// - The 
**Sapling value** is encoded as an `Option` using: +/// - 0 = None +/// - 1 = Some followed by 8-byte little-endian i64 +/// +/// Each `CompactSaplingSpend` is serialized as: +/// +/// ┌── version ─┬────────────── 32 bytes ──────────────┐ +/// │ 0x01 │ 32-byte nullifier │ +/// └────────────┴──────────────────────────────────────┘ +/// +/// Each `CompactSaplingOutput` is serialized as: +/// +/// ┌── version ─┬────────────── 116 bytes ─────────────────────────────────────────────┐ +/// │ 0x01 │ 32-byte cmu + 32-byte ephemeral_key + 52-byte ciphertext │ +/// └────────────┴──────────────────────────────────────────────────────────────────────┘ +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct SaplingTxList { + tx: Vec>, +} + +impl SaplingTxList { + /// Creates a new [`SaplingTxList`] + pub fn new(tx: Vec>) -> Self { + Self { tx } + } + + /// Returns transactions in this item. + pub fn tx(&self) -> &[Option] { + &self.tx + } +} + +impl ZainoVersionedSerde for SaplingTxList { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + write_vec(w, &self.tx, |w, opt| { + write_option(w, opt, |w, t| t.serialize(w)) + }) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let tx = read_vec(r, |r| read_option(r, |r| SaplingCompactTx::deserialize(r)))?; + Ok(SaplingTxList::new(tx)) + } +} + +/// List of the Orchard component (if any) of every transaction in a block. +/// +/// * Each element is `Some(OrchardCompactTx)` when that transaction **does** +/// contain Sapling data, or `None` when it does not. +/// +/// This ensures 1-to-1 indexing with `TxidList`: element *i* matches txid *i*. +/// `None` keeps the index when the tx lacks this pool. 
+/// +/// **Serialization layout for `OrchardTxList` (implements `ZainoVersionedSerde`)** +/// +/// ┌──────────── byte 0 ─────────────┬────────── CompactSize ─────────────┬──────────── entries ───────────────┐ +/// │ OrchardTxList version tag = 1 │ num_txs (CompactSize) = N │ [`Option`; N] │ +/// └─────────────────────────────────┴────────────────────────────────────┴────────────────────────────────────┘ +/// +/// Each `Option` is serialized as: +/// +/// ┌── 1 byte ──┬────────────── OrchardCompactTx ───────────────┐ +/// │ 0 or 1 │ If Some: 1-byte version + body │ +/// └────────────┴───────────────────────────────────────────────┘ +/// +/// OrchardCompactTx: +/// ┌── version ─┬──── 1 byte opt ─────┬──── CompactSize ──────┬────────── action entries ─────────┐ +/// │ 0x01 │ 0 or 1 + i64 (value)│ N = num_actions │ [CompactOrchardAction; N] │ +/// └────────────┴─────────────────────┴───────────────────────┴───────────────────────────────────┘ +/// +/// - The **Orchard value** is encoded as an `Option` using: +/// - 0 = None +/// - 1 = Some followed by 8-byte little-endian i64 +/// +/// Each `CompactOrchardAction` is serialized as: +/// +/// ┌── version ─┬──────────── 148 bytes ─────────────────────────────────────────────────────────────┐ +/// │ 0x01 │ 32-byte nullifier + 32-byte cmx + 32-byte ephemeral_key + 52-byte ciphertext │ +/// └────────────┴────────────────────────────────────────────────────────────────────────────────────┘ +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +pub struct OrchardTxList { + tx: Vec>, +} + +impl OrchardTxList { + /// Creates a new [`OrchardTxList`] + pub fn new(tx: Vec>) -> Self { + Self { tx } + } + + /// Returns transactions in this item. 
+ pub fn tx(&self) -> &[Option] { + &self.tx + } +} + +impl ZainoVersionedSerde for OrchardTxList { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + write_vec(w, &self.tx, |w, opt| { + write_option(w, opt, |w, t| t.serialize(w)) + }) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let tx = read_vec(r, |r| read_option(r, |r| OrchardCompactTx::deserialize(r)))?; + Ok(OrchardTxList::new(tx)) + } +} + +// *** Custom serde based debug serialisation *** + +#[cfg(test)] +/// utilities for serializing/deserializing nonstandard-sized arrays +pub mod serde_arrays { + use serde::{Deserialize, Deserializer, Serializer}; + + /// Serialze an arbirtary fixed-size array + pub fn serialize(val: &[u8; N], s: S) -> Result + where + S: Serializer, + { + s.serialize_bytes(val) + } + + /// Deserialze an arbirtary fixed-size array + pub fn deserialize<'de, const N: usize, D>(d: D) -> Result<[u8; N], D::Error> + where + D: Deserializer<'de>, + { + let v: &[u8] = Deserialize::deserialize(d)?; + v.try_into() + .map_err(|_| serde::de::Error::custom(format!("invalid length for [u8; {N}]"))) + } +} diff --git a/zaino-state/src/chain_index/types/db/metadata.rs b/zaino-state/src/chain_index/types/db/metadata.rs new file mode 100644 index 000000000..e58ee26f7 --- /dev/null +++ b/zaino-state/src/chain_index/types/db/metadata.rs @@ -0,0 +1,65 @@ +//! Metadata objects + +use core2::io::{self, Read, Write}; + +use crate::{read_u64_le, version, write_u64_le, FixedEncodedLen, ZainoVersionedSerde}; + +/// Holds information about the mempool state. 
+#[derive(Clone, Debug, PartialEq, Eq)] +pub struct MempoolInfo { + /// Current tx count + pub size: u64, + /// Sum of all tx sizes + pub bytes: u64, + /// Total memory usage for the mempool + pub usage: u64, +} + +impl ZainoVersionedSerde for MempoolInfo { + const VERSION: u8 = version::V1; + + fn encode_body(&self, w: &mut W) -> io::Result<()> { + let mut w = w; + write_u64_le(&mut w, self.size)?; + write_u64_le(&mut w, self.bytes)?; + write_u64_le(&mut w, self.usage) + } + + fn decode_latest(r: &mut R) -> io::Result { + Self::decode_v1(r) + } + + fn decode_v1(r: &mut R) -> io::Result { + let mut r = r; + let size = read_u64_le(&mut r)?; + let bytes = read_u64_le(&mut r)?; + let usage = read_u64_le(&mut r)?; + Ok(MempoolInfo { size, bytes, usage }) + } +} + +/// 24 byte body. +impl FixedEncodedLen for MempoolInfo { + /// 8 byte size + 8 byte bytes + 8 byte usage + const ENCODED_LEN: usize = 8 + 8 + 8; +} + +impl From for MempoolInfo { + fn from(resp: zaino_fetch::jsonrpsee::response::GetMempoolInfoResponse) -> Self { + MempoolInfo { + size: resp.size, + bytes: resp.bytes, + usage: resp.usage, + } + } +} + +impl From for zaino_fetch::jsonrpsee::response::GetMempoolInfoResponse { + fn from(info: MempoolInfo) -> Self { + zaino_fetch::jsonrpsee::response::GetMempoolInfoResponse { + size: info.size, + bytes: info.bytes, + usage: info.usage, + } + } +} diff --git a/zaino-state/src/chain_index/types/db/primitives.rs b/zaino-state/src/chain_index/types/db/primitives.rs new file mode 100644 index 000000000..da658f7e4 --- /dev/null +++ b/zaino-state/src/chain_index/types/db/primitives.rs @@ -0,0 +1,7 @@ +//! Primitive database-serializable types. +//! +//! Contains basic primitive types that implement `ZainoVersionedSerde`: +//! - Height +//! - ShardIndex +//! - ScriptType +//! 
- ShardRoot diff --git a/zaino-state/src/chain_index/types/db/shielded.rs b/zaino-state/src/chain_index/types/db/shielded.rs new file mode 100644 index 000000000..6e135434d --- /dev/null +++ b/zaino-state/src/chain_index/types/db/shielded.rs @@ -0,0 +1,10 @@ +//! Shielded pool database-serializable types. +//! +//! Contains types for Sapling and Orchard shielded pool data that implement `ZainoVersionedSerde`: +//! - SaplingCompactTx +//! - CompactSaplingSpend +//! - CompactSaplingOutput +//! - OrchardCompactTx +//! - CompactOrchardAction +//! - SaplingTxList +//! - OrchardTxList diff --git a/zaino-state/src/chain_index/types/db/transaction.rs b/zaino-state/src/chain_index/types/db/transaction.rs new file mode 100644 index 000000000..8b757a36a --- /dev/null +++ b/zaino-state/src/chain_index/types/db/transaction.rs @@ -0,0 +1,11 @@ +//! Transaction-related database-serializable types. +//! +//! Contains types for transaction data that implement `ZainoVersionedSerde`: +//! - TransactionHash +//! - CompactTxData +//! - TransparentCompactTx +//! - TxInCompact +//! - TxOutCompact +//! - TxLocation +//! - TxidList +//! - TransparentTxList diff --git a/zaino-state/src/chain_index/types/helpers.rs b/zaino-state/src/chain_index/types/helpers.rs new file mode 100644 index 000000000..99c88caff --- /dev/null +++ b/zaino-state/src/chain_index/types/helpers.rs @@ -0,0 +1,329 @@ +//! Helper types for chain index operations. +//! +//! This module contains non-database types used for in-memory operations, +//! conversions, and coordination between database types. These types do NOT +//! implement `ZainoVersionedSerde` and are not persisted to disk. +//! +//! Types in this module: +//! - BestChainLocation - Transaction location in best chain +//! - NonBestChainLocation - Transaction location not in best chain +//! - TreeRootData - Commitment tree roots wrapper +//! - BlockMetadata - Block metadata for construction +//! 
- BlockWithMetadata - Block with associated metadata + +use primitive_types::U256; + +use super::db::legacy::*; +use crate::ChainWork; + +/// The location of a transaction in the best chain +#[derive(Debug, PartialEq, Eq, Hash)] +pub enum BestChainLocation { + /// the block containing the transaction + Block(BlockHash, Height), + /// If the transaction is in the mempool and the mempool + /// matches the snapshot's chaintip + /// Return the target height, which is known to be a block above + /// the provided snapshot's chaintip and is returned for convenience + Mempool(Height), +} + +/// The location of a transaction not in the best chain +#[derive(Debug, PartialEq, Eq, Hash)] +pub enum NonBestChainLocation { + /// the block containing the transaction + Block(BlockHash, Height), + /// if the transaction is in the mempool + /// but the mempool does not match the + /// snapshot's chaintip, return the target height if known + /// + /// This likely means that the provided + /// snapshot is out-of-date + Mempool(Option), +} + +/// Wrapper for optional commitment tree roots from blockchain source +#[derive(Clone)] +pub struct TreeRootData { + /// Sapling tree root and size + pub sapling: Option<(zebra_chain::sapling::tree::Root, u64)>, + /// Orchard tree root and size + pub orchard: Option<(zebra_chain::orchard::tree::Root, u64)>, +} + +impl TreeRootData { + /// Create new tree root data + pub fn new( + sapling: Option<(zebra_chain::sapling::tree::Root, u64)>, + orchard: Option<(zebra_chain::orchard::tree::Root, u64)>, + ) -> Self { + Self { sapling, orchard } + } + + /// Extract with defaults for genesis/sync use case + pub fn extract_with_defaults( + self, + ) -> ( + zebra_chain::sapling::tree::Root, + u64, + zebra_chain::orchard::tree::Root, + u64, + ) { + let (sapling_root, sapling_size) = self.sapling.unwrap_or_default(); + let (orchard_root, orchard_size) = self.orchard.unwrap_or_default(); + (sapling_root, sapling_size, orchard_root, orchard_size) + } +} + +/// 
Intermediate type to hold block metadata separate from the block itself +#[derive(Debug, Clone)] +pub struct BlockMetadata { + /// Sapling commitment tree root + pub sapling_root: zebra_chain::sapling::tree::Root, + /// Sapling tree size + pub sapling_size: u32, + /// Orchard commitment tree root + pub orchard_root: zebra_chain::orchard::tree::Root, + /// Orchard tree size + pub orchard_size: u32, + /// Parent block's chainwork + pub parent_chainwork: ChainWork, + /// Network for block validation + pub network: zebra_chain::parameters::Network, +} + +impl BlockMetadata { + /// Create new block metadata + pub fn new( + sapling_root: zebra_chain::sapling::tree::Root, + sapling_size: u32, + orchard_root: zebra_chain::orchard::tree::Root, + orchard_size: u32, + parent_chainwork: ChainWork, + network: zebra_chain::parameters::Network, + ) -> Self { + Self { + sapling_root, + sapling_size, + orchard_root, + orchard_size, + parent_chainwork, + network, + } + } +} + +/// Intermediate type combining a block with its metadata +#[derive(Debug, Clone)] +pub struct BlockWithMetadata<'a> { + /// The zebra block + pub block: &'a zebra_chain::block::Block, + /// Additional metadata needed for IndexedBlock creation + pub metadata: BlockMetadata, +} + +impl<'a> BlockWithMetadata<'a> { + /// Create a new block with metadata + pub fn new(block: &'a zebra_chain::block::Block, metadata: BlockMetadata) -> Self { + Self { block, metadata } + } + + /// Extract block header data + fn extract_block_data(&self) -> Result { + let block = self.block; + let network = &self.metadata.network; + + Ok(BlockData { + version: block.header.version, + time: block.header.time.timestamp(), + merkle_root: block.header.merkle_root.0, + bits: u32::from_be_bytes(block.header.difficulty_threshold.bytes_in_display_order()), + block_commitments: BlockData::commitment_to_bytes( + block + .commitment(network) + .map_err(|_| "Block commitment could not be computed".to_string())?, + ), + nonce: *block.header.nonce, 
+ solution: block.header.solution.into(), + }) + } + + /// Extract and process all transactions in the block + fn extract_transactions(&self) -> Result, String> { + let mut transactions = Vec::new(); + + for (i, txn) in self.block.transactions.iter().enumerate() { + let transparent = self.extract_transparent_data(txn)?; + let sapling = self.extract_sapling_data(txn); + let orchard = self.extract_orchard_data(txn); + + let txdata = + CompactTxData::new(i as u64, txn.hash().into(), transparent, sapling, orchard); + transactions.push(txdata); + } + + Ok(transactions) + } + + /// Extract transparent transaction data (inputs and outputs) + fn extract_transparent_data( + &self, + txn: &zebra_chain::transaction::Transaction, + ) -> Result { + let inputs: Vec = txn + .inputs() + .iter() + .map(|input| match input.outpoint() { + Some(outpoint) => TxInCompact::new(outpoint.hash.0, outpoint.index), + None => TxInCompact::null_prevout(), + }) + .collect(); + + let outputs = txn + .outputs() + .iter() + .map(|output| { + let value = u64::from(output.value); + let script_bytes = output.lock_script.as_raw_bytes(); + + let addr = AddrScript::from_script(script_bytes).unwrap_or_else(|| { + let mut fallback = [0u8; 20]; + let usable = script_bytes.len().min(20); + fallback[..usable].copy_from_slice(&script_bytes[..usable]); + AddrScript::new(fallback, ScriptType::NonStandard as u8) + }); + + TxOutCompact::new(value, *addr.hash(), addr.script_type()) + .ok_or_else(|| "TxOutCompact conversion failed".to_string()) + }) + .collect::, _>>()?; + + Ok(TransparentCompactTx::new(inputs, outputs)) + } + + /// Extract sapling transaction data + fn extract_sapling_data( + &self, + txn: &zebra_chain::transaction::Transaction, + ) -> SaplingCompactTx { + let sapling_value = { + let val = txn.sapling_value_balance().sapling_amount(); + if val == 0 { + None + } else { + Some(i64::from(val)) + } + }; + + SaplingCompactTx::new( + sapling_value, + txn.sapling_nullifiers() + .map(|nf| 
CompactSaplingSpend::new(*nf.0)) + .collect(), + txn.sapling_outputs() + .map(|output| { + let cipher: [u8; 52] = <[u8; 580]>::from(output.enc_ciphertext)[..52] + .try_into() + .unwrap(); // TODO: Remove unwrap + CompactSaplingOutput::new( + output.cm_u.to_bytes(), + <[u8; 32]>::from(output.ephemeral_key), + cipher, + ) + }) + .collect::>(), + ) + } + + /// Extract orchard transaction data + fn extract_orchard_data( + &self, + txn: &zebra_chain::transaction::Transaction, + ) -> OrchardCompactTx { + let orchard_value = { + let val = txn.orchard_value_balance().orchard_amount(); + if val == 0 { + None + } else { + Some(i64::from(val)) + } + }; + + OrchardCompactTx::new( + orchard_value, + txn.orchard_actions() + .map(|action| { + let cipher: [u8; 52] = <[u8; 580]>::from(action.enc_ciphertext)[..52] + .try_into() + .unwrap(); // TODO: Remove unwrap + CompactOrchardAction::new( + <[u8; 32]>::from(action.nullifier), + <[u8; 32]>::from(action.cm_x), + <[u8; 32]>::from(action.ephemeral_key), + cipher, + ) + }) + .collect::>(), + ) + } + + /// Create block index from block and metadata + fn create_block_index(&self) -> Result { + let block = self.block; + let hash = BlockHash::from(block.hash()); + let parent_hash = BlockHash::from(block.header.previous_block_hash); + let height = block + .coinbase_height() + .map(|height| Height(height.0)) + .ok_or_else(|| String::from("Any valid block has a coinbase height"))?; + + let block_work = block.header.difficulty_threshold.to_work().ok_or_else(|| { + "Failed to calculate block work from difficulty threshold".to_string() + })?; + let chainwork = self + .metadata + .parent_chainwork + .add(&ChainWork::from(U256::from(block_work.as_u128()))); + + Ok(BlockIndex { + hash, + parent_hash, + chainwork, + height, + }) + } + + /// Create commitment tree data from metadata + fn create_commitment_tree_data(&self) -> super::db::CommitmentTreeData { + let commitment_tree_roots = super::db::CommitmentTreeRoots::new( + <[u8; 
32]>::from(self.metadata.sapling_root), + <[u8; 32]>::from(self.metadata.orchard_root), + ); + + let commitment_tree_size = super::db::CommitmentTreeSizes::new( + self.metadata.sapling_size, + self.metadata.orchard_size, + ); + + super::db::CommitmentTreeData::new(commitment_tree_roots, commitment_tree_size) + } +} + +// Clean TryFrom implementation using the intermediate types +impl TryFrom> for IndexedBlock { + type Error = String; + + fn try_from(block_with_metadata: BlockWithMetadata<'_>) -> Result { + let data = block_with_metadata.extract_block_data()?; + let transactions = block_with_metadata.extract_transactions()?; + let index = block_with_metadata.create_block_index()?; + let commitment_tree_data = block_with_metadata.create_commitment_tree_data(); + + Ok(IndexedBlock { + index, + data, + transactions, + commitment_tree_data, + }) + } +} diff --git a/zaino-state/src/chain_index/types/primitives.rs b/zaino-state/src/chain_index/types/primitives.rs new file mode 100644 index 000000000..1935a2649 --- /dev/null +++ b/zaino-state/src/chain_index/types/primitives.rs @@ -0,0 +1,11 @@ +//! Foundational primitive types for the chain index. +//! +//! This module contains the basic building blocks that other types depend on. +//! +//! ## Current Types +//! +//! ## Planned Migrations +//! The following types should be extracted from types.rs: +//! - Block and transaction hashes +//! - Block heights and chain work +//! 
- Basic indexing primitives diff --git a/zaino-state/src/chain_index_passthrough.mmd b/zaino-state/src/chain_index_passthrough.mmd new file mode 100644 index 000000000..d0645078e --- /dev/null +++ b/zaino-state/src/chain_index_passthrough.mmd @@ -0,0 +1,21 @@ +flowchart TD + m_end_return_it[Return it] + m_start(Receive call) + m_start --> m_step_1 + m_step_1["STEP 1:Is the requested data available in cached Mempool?"] + m_step_1 --> |Yes| m_end_return_it + m_step_1 --> |No| m_step_2 + m_step_2["STEP 2:Is the requested data available in the NONFINALIZED SnapShot "] + m_step_2 --> |Yes| m_step_3 + m_step_2 --> |No| m_step_4 + m_step_3["STEP 3:Is that data on the ``BestChain``?, or is it acceptable on a ``NonBestChain``?"] + m_step_3 --> |Yes| m_end_return_it + m_step_3 --> |No| m_end_return_none + m_step_4["STEP 4: Is the requested data in the FINALIZED index?"] + m_step_4 -->|Yes| m_end_return_it + m_step_4 --> |No| m_step_5 + m_step_5["STEP 5:Is the requested data available and FINALIZED in the validator (passthrough)?"] + m_step_5 --> |Yes| m_end_return_it + m_step_5 --> |No| m_end_return_none + m_end_return_none[Return None] + diff --git a/zaino-state/src/config.rs b/zaino-state/src/config.rs new file mode 100644 index 000000000..7b14d8d8c --- /dev/null +++ b/zaino-state/src/config.rs @@ -0,0 +1,186 @@ +//! Holds config data for Zaino-State services. + +use std::path::PathBuf; +use zaino_common::{Network, ServiceConfig, StorageConfig}; + +/// Type of backend to be used. +/// +/// Determines how Zaino fetches blockchain data from the validator. +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, serde::Deserialize, serde::Serialize)] +#[serde(rename_all = "lowercase")] +pub enum BackendType { + /// Uses Zebra's ReadStateService for direct state access. + /// + /// More efficient but requires running on the same machine as Zebra. + State, + /// Uses JSON-RPC client to fetch data. + /// + /// Compatible with Zcashd, Zebra, or another Zaino instance. 
+ #[default] + Fetch, +} + +/// Unified backend configuration enum. +#[derive(Debug, Clone)] +#[allow(deprecated)] +pub enum BackendConfig { + /// StateService config. + State(StateServiceConfig), + /// Fetchservice config. + Fetch(FetchServiceConfig), +} + +/// Holds config data for [crate::StateService]. +#[derive(Debug, Clone)] +// #[deprecated] +pub struct StateServiceConfig { + /// Zebra [`zebra_state::ReadStateService`] config data + pub validator_state_config: zebra_state::Config, + /// Validator JsonRPC address (supports hostname:port or ip:port format). + pub validator_rpc_address: String, + /// Validator gRPC address (requires ip:port format for Zebra state sync). + pub validator_grpc_address: std::net::SocketAddr, + /// Validator cookie auth. + pub validator_cookie_auth: bool, + /// Enable validator rpc cookie authentification with Some: Path to the validator cookie file. + pub validator_cookie_path: Option, + /// Validator JsonRPC user. + pub validator_rpc_user: String, + /// Validator JsonRPC password. + pub validator_rpc_password: String, + /// Service-level configuration (timeout, channel size) + pub service: ServiceConfig, + /// Storage configuration (cache and database) + pub storage: StorageConfig, + /// Network type. + pub network: Network, +} + +#[allow(deprecated)] +impl StateServiceConfig { + /// Returns a new instance of [`StateServiceConfig`]. + #[allow(clippy::too_many_arguments)] + // TODO: replace with struct-literal init only? 
+ pub fn new( + validator_state_config: zebra_state::Config, + validator_rpc_address: String, + validator_grpc_address: std::net::SocketAddr, + validator_cookie_auth: bool, + validator_cookie_path: Option, + validator_rpc_user: Option, + validator_rpc_password: Option, + service: ServiceConfig, + storage: StorageConfig, + network: Network, + ) -> Self { + tracing::trace!( + "State service expecting NU activations:\n{:?}", + network.to_zebra_network().full_activation_list() + ); + StateServiceConfig { + validator_state_config, + validator_rpc_address, + validator_grpc_address, + validator_cookie_auth, + validator_cookie_path, + validator_rpc_user: validator_rpc_user.unwrap_or("xxxxxx".to_string()), + validator_rpc_password: validator_rpc_password.unwrap_or("xxxxxx".to_string()), + service, + storage, + network, + } + } +} + +/// Holds config data for [crate::FetchService]. +#[derive(Debug, Clone)] +#[deprecated] +pub struct FetchServiceConfig { + /// Validator JsonRPC address (supports hostname:port or ip:port format). + pub validator_rpc_address: String, + /// Enable validator rpc cookie authentification with Some: path to the validator cookie file. + pub validator_cookie_path: Option, + /// Validator JsonRPC user. + pub validator_rpc_user: String, + /// Validator JsonRPC password. + pub validator_rpc_password: String, + /// Service-level configuration (timeout, channel size) + pub service: ServiceConfig, + /// Storage configuration (cache and database) + pub storage: StorageConfig, + /// Network type. + pub network: Network, +} + +#[allow(deprecated)] +impl FetchServiceConfig { + /// Returns a new instance of [`FetchServiceConfig`]. 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + validator_rpc_address: String, + validator_cookie_path: Option, + validator_rpc_user: Option, + validator_rpc_password: Option, + service: ServiceConfig, + storage: StorageConfig, + network: Network, + ) -> Self { + FetchServiceConfig { + validator_rpc_address, + validator_cookie_path, + validator_rpc_user: validator_rpc_user.unwrap_or("xxxxxx".to_string()), + validator_rpc_password: validator_rpc_password.unwrap_or("xxxxxx".to_string()), + service, + storage, + network, + } + } +} + +/// Holds config data for `[ZainoDb]`. +/// TODO: Rename to *ZainoDbConfig* when ChainIndex update is complete **and** remove legacy fields. +#[derive(Debug, Clone)] +pub struct BlockCacheConfig { + /// Storage configuration (cache and database) + pub storage: StorageConfig, + /// Database version selected to be run. + pub db_version: u32, + /// Network type. + pub network: Network, +} + +impl BlockCacheConfig { + /// Returns a new instance of [`BlockCacheConfig`]. + #[allow(dead_code)] + pub fn new(storage: StorageConfig, db_version: u32, network: Network, _no_sync: bool) -> Self { + BlockCacheConfig { + storage, + db_version, + network, + } + } +} + +#[allow(deprecated)] +impl From for BlockCacheConfig { + fn from(value: StateServiceConfig) -> Self { + Self { + storage: value.storage, + // TODO: update zaino configs to include db version. + db_version: 1, + network: value.network, + } + } +} + +#[allow(deprecated)] +impl From for BlockCacheConfig { + fn from(value: FetchServiceConfig) -> Self { + Self { + storage: value.storage, + // TODO: update zaino configs to include db version. + db_version: 1, + network: value.network, + } + } +} diff --git a/zaino-state/src/error.rs b/zaino-state/src/error.rs new file mode 100644 index 000000000..6181523a4 --- /dev/null +++ b/zaino-state/src/error.rs @@ -0,0 +1,719 @@ +#![allow(deprecated)] +//! Holds error types for Zaino-state. 
+ +// Needs to be module level due to the thiserror::Error macro + +use crate::BlockHash; + +use std::{any::type_name, fmt::Display}; + +use zaino_fetch::jsonrpsee::connector::RpcRequestError; +use zaino_proto::proto::utils::GetBlockRangeError; + +/// Errors related to the `StateService`. +// #[deprecated] +#[derive(Debug, thiserror::Error)] +pub enum StateServiceError { + /// Critical Errors, Restart Zaino. + #[error("Critical error: {0}")] + Critical(String), + + /// An rpc-specific error we haven't accounted for + #[error("unhandled fallible RPC call {0}")] + UnhandledRpcError(String), + /// Custom Errors. *Remove before production. + #[error("Custom error: {0}")] + Custom(String), + + /// Error from a Tokio JoinHandle. + #[error("Join error: {0}")] + JoinError(#[from] tokio::task::JoinError), + + /// Error from JsonRpcConnector. + #[error("JsonRpcConnector error: {0}")] + JsonRpcConnectorError(#[from] zaino_fetch::jsonrpsee::error::TransportError), + + /// RPC error in compatibility with zcashd. + #[error("RPC error: {0:?}")] + RpcError(#[from] zaino_fetch::jsonrpsee::connector::RpcError), + + /// Chain index error. + #[error("Chain index error: {0}")] + ChainIndexError(#[from] ChainIndexError), + + /// Error from the block cache. + #[error("Mempool error: {0}")] + BlockCacheError(#[from] BlockCacheError), + + /// Error from the mempool. + #[error("Mempool error: {0}")] + MempoolError(#[from] MempoolError), + + /// Tonic gRPC error. + #[error("Tonic status error: {0}")] + TonicStatusError(#[from] tonic::Status), + + /// Serialization error. + #[error("Serialization error: {0}")] + SerializationError(#[from] zebra_chain::serialization::SerializationError), + + /// Integer conversion error. + #[error("Integer conversion error: {0}")] + TryFromIntError(#[from] std::num::TryFromIntError), + + /// std::io::Error + #[error("IO error: {0}")] + IoError(#[from] std::io::Error), + + /// A generic boxed error. 
+ #[error("Generic error: {0}")] + Generic(#[from] Box), + + /// The zebrad version and zebra library version do not align + #[error( + "zebrad version mismatch. this build of zaino requires a \ + version of {expected_zebrad_version}, but the connected zebrad \ + is version {connected_zebrad_version}" + )] + ZebradVersionMismatch { + /// The version string or commit hash we specify in Cargo.lock + expected_zebrad_version: String, + /// The version string of the zebrad, plus its git describe + /// information if applicable + connected_zebrad_version: String, + }, +} + +impl From for StateServiceError { + fn from(value: GetBlockRangeError) -> Self { + match value { + GetBlockRangeError::StartHeightOutOfRange => { + Self::TonicStatusError(tonic::Status::out_of_range( + "Error: Start height out of range. Failed to convert to u32.", + )) + } + GetBlockRangeError::NoStartHeightProvided => { + Self::TonicStatusError(tonic::Status::out_of_range("Error: No start height given")) + } + GetBlockRangeError::EndHeightOutOfRange => { + Self::TonicStatusError(tonic::Status::out_of_range( + "Error: End height out of range. 
Failed to convert to u32.", + )) + } + GetBlockRangeError::NoEndHeightProvided => { + Self::TonicStatusError(tonic::Status::out_of_range("Error: No end height given.")) + } + GetBlockRangeError::PoolTypeArgumentError(_) => { + Self::TonicStatusError(tonic::Status::invalid_argument("Error: invalid pool type")) + } + } + } +} + +#[allow(deprecated)] +impl From for tonic::Status { + fn from(error: StateServiceError) -> Self { + match error { + StateServiceError::Critical(message) => tonic::Status::internal(message), + StateServiceError::Custom(message) => tonic::Status::internal(message), + StateServiceError::JoinError(err) => { + tonic::Status::internal(format!("Join error: {err}")) + } + StateServiceError::JsonRpcConnectorError(err) => { + tonic::Status::internal(format!("JsonRpcConnector error: {err}")) + } + StateServiceError::RpcError(err) => { + tonic::Status::internal(format!("RPC error: {err:?}")) + } + StateServiceError::ChainIndexError(err) => match err.kind { + ChainIndexErrorKind::InternalServerError => tonic::Status::internal(err.message), + ChainIndexErrorKind::InvalidSnapshot => { + tonic::Status::failed_precondition(err.message) + } + }, + StateServiceError::BlockCacheError(err) => { + tonic::Status::internal(format!("BlockCache error: {err:?}")) + } + StateServiceError::MempoolError(err) => { + tonic::Status::internal(format!("Mempool error: {err:?}")) + } + StateServiceError::TonicStatusError(err) => err, + StateServiceError::SerializationError(err) => { + tonic::Status::internal(format!("Serialization error: {err}")) + } + StateServiceError::TryFromIntError(err) => { + tonic::Status::internal(format!("Integer conversion error: {err}")) + } + StateServiceError::IoError(err) => tonic::Status::internal(format!("IO error: {err}")), + StateServiceError::Generic(err) => { + tonic::Status::internal(format!("Generic error: {err}")) + } + ref err @ StateServiceError::ZebradVersionMismatch { .. 
} => { + tonic::Status::internal(err.to_string()) + } + StateServiceError::UnhandledRpcError(e) => tonic::Status::internal(e.to_string()), + } + } +} + +impl From> for StateServiceError { + fn from(value: RpcRequestError) -> Self { + match value { + RpcRequestError::Transport(transport_error) => { + Self::JsonRpcConnectorError(transport_error) + } + RpcRequestError::Method(e) => Self::UnhandledRpcError(format!( + "{}: {}", + std::any::type_name::(), + e.to_string() + )), + RpcRequestError::JsonRpc(error) => Self::Custom(format!("bad argument: {error}")), + RpcRequestError::InternalUnrecoverable(e) => Self::Custom(e.to_string()), + RpcRequestError::ServerWorkQueueFull => { + Self::Custom("Server queue full. Handling for this not yet implemented".to_string()) + } + RpcRequestError::UnexpectedErrorResponse(error) => Self::Custom(format!("{error}")), + } + } +} + +/// Errors related to the `FetchService`. +#[deprecated] +#[derive(Debug, thiserror::Error)] +pub enum FetchServiceError { + /// Critical Errors, Restart Zaino. + #[error("Critical error: {0}")] + Critical(String), + + /// Error from JsonRpcConnector. + #[error("JsonRpcConnector error: {0}")] + JsonRpcConnectorError(#[from] zaino_fetch::jsonrpsee::error::TransportError), + + /// Chain index error. + #[error("Chain index error: {0}")] + ChainIndexError(#[from] ChainIndexError), + + /// RPC error in compatibility with zcashd. + #[error("RPC error: {0:?}")] + RpcError(#[from] zaino_fetch::jsonrpsee::connector::RpcError), + + /// Tonic gRPC error. + #[error("Tonic status error: {0}")] + TonicStatusError(#[from] tonic::Status), + + /// Serialization error. 
+ #[error("Serialization error: {0}")] + SerializationError(#[from] zebra_chain::serialization::SerializationError), +} + +impl From for tonic::Status { + fn from(error: FetchServiceError) -> Self { + match error { + FetchServiceError::Critical(message) => tonic::Status::internal(message), + FetchServiceError::JsonRpcConnectorError(err) => { + tonic::Status::internal(format!("JsonRpcConnector error: {err}")) + } + FetchServiceError::ChainIndexError(err) => match err.kind { + ChainIndexErrorKind::InternalServerError => tonic::Status::internal(err.message), + ChainIndexErrorKind::InvalidSnapshot => { + tonic::Status::failed_precondition(err.message) + } + }, + FetchServiceError::RpcError(err) => { + tonic::Status::internal(format!("RPC error: {err:?}")) + } + FetchServiceError::TonicStatusError(err) => err, + FetchServiceError::SerializationError(err) => { + tonic::Status::internal(format!("Serialization error: {err}")) + } + } + } +} + +impl From> for FetchServiceError { + fn from(value: RpcRequestError) -> Self { + match value { + RpcRequestError::Transport(transport_error) => { + FetchServiceError::JsonRpcConnectorError(transport_error) + } + RpcRequestError::JsonRpc(error) => { + FetchServiceError::Critical(format!("argument failed to serialze: {error}")) + } + RpcRequestError::InternalUnrecoverable(e) => { + FetchServiceError::Critical(format!("Internal unrecoverable error: {e}")) + } + RpcRequestError::ServerWorkQueueFull => FetchServiceError::Critical( + "Server queue full. 
Handling for this not yet implemented".to_string(), + ), + RpcRequestError::Method(e) => FetchServiceError::Critical(format!( + "unhandled rpc-specific {} error: {}", + type_name::(), + e.to_string() + )), + RpcRequestError::UnexpectedErrorResponse(error) => { + FetchServiceError::Critical(format!( + "unhandled rpc-specific {} error: {}", + type_name::(), + error + )) + } + } + } +} + +impl From for FetchServiceError { + fn from(value: GetBlockRangeError) -> Self { + match value { + GetBlockRangeError::StartHeightOutOfRange => { + FetchServiceError::TonicStatusError(tonic::Status::out_of_range( + "Error: Start height out of range. Failed to convert to u32.", + )) + } + GetBlockRangeError::NoStartHeightProvided => FetchServiceError::TonicStatusError( + tonic::Status::out_of_range("Error: No start height given"), + ), + GetBlockRangeError::EndHeightOutOfRange => { + FetchServiceError::TonicStatusError(tonic::Status::out_of_range( + "Error: End height out of range. Failed to convert to u32.", + )) + } + GetBlockRangeError::NoEndHeightProvided => FetchServiceError::TonicStatusError( + tonic::Status::out_of_range("Error: No end height given."), + ), + GetBlockRangeError::PoolTypeArgumentError(_) => FetchServiceError::TonicStatusError( + tonic::Status::invalid_argument("Error: invalid pool type"), + ), + } + } +} + +/// These aren't the best conversions, but the MempoolError should go away +/// in favor of a new type with the new chain cache is complete +impl From> for MempoolError { + fn from(value: RpcRequestError) -> Self { + match value { + RpcRequestError::Transport(transport_error) => { + MempoolError::JsonRpcConnectorError(transport_error) + } + RpcRequestError::JsonRpc(error) => { + MempoolError::Critical(format!("argument failed to serialze: {error}")) + } + RpcRequestError::InternalUnrecoverable(e) => { + MempoolError::Critical(format!("Internal unrecoverable error: {e}")) + } + RpcRequestError::ServerWorkQueueFull => MempoolError::Critical( + "Server queue 
full. Handling for this not yet implemented".to_string(), + ), + RpcRequestError::Method(e) => MempoolError::Critical(format!( + "unhandled rpc-specific {} error: {}", + type_name::(), + e.to_string() + )), + RpcRequestError::UnexpectedErrorResponse(error) => MempoolError::Critical(format!( + "unhandled rpc-specific {} error: {}", + type_name::(), + error + )), + } + } +} + +/// Errors related to the `Mempool`. +#[derive(Debug, thiserror::Error)] +pub enum MempoolError { + /// Critical Errors, Restart Zaino. + #[error("Critical error: {0}")] + Critical(String), + + /// Incorrect expected chain tip given from client. + #[error( + "Incorrect chain tip (expected {expected_chain_tip:?}, current {current_chain_tip:?})" + )] + IncorrectChainTip { + expected_chain_tip: BlockHash, + current_chain_tip: BlockHash, + }, + + /// Error from JsonRpcConnector. + #[error("JsonRpcConnector error: {0}")] + JsonRpcConnectorError(#[from] zaino_fetch::jsonrpsee::error::TransportError), + + /// Errors originating from the BlockchainSource in use. + #[error("blockchain source error: {0}")] + BlockchainSourceError(#[from] crate::chain_index::source::BlockchainSourceError), + + /// Error from a Tokio Watch Receiver. + #[error("Join error: {0}")] + WatchRecvError(#[from] tokio::sync::watch::error::RecvError), + + /// Unexpected status-related error. + #[error("Status error: {0:?}")] + StatusError(StatusError), +} + +/// Errors related to the `BlockCache`. +#[derive(Debug, thiserror::Error)] +pub enum BlockCacheError { + /// Custom Errors. *Remove before production. + #[error("Custom error: {0}")] + Custom(String), + + /// Critical Errors, Restart Zaino. + #[error("Critical error: {0}")] + Critical(String), + + /// Errors from the NonFinalisedState. + #[error("NonFinalisedState Error: {0}")] + NonFinalisedStateError(#[from] NonFinalisedStateError), + + /// Errors from the FinalisedState. 
+ #[error("FinalisedState Error: {0}")] + FinalisedStateError(#[from] FinalisedStateError), + + /// Error from JsonRpcConnector. + #[error("JsonRpcConnector error: {0}")] + JsonRpcConnectorError(#[from] zaino_fetch::jsonrpsee::error::TransportError), + + /// Chain parse error. + #[error("Chain parse error: {0}")] + ChainParseError(#[from] zaino_fetch::chain::error::ParseError), + + /// Serialization error. + #[error("Serialization error: {0}")] + SerializationError(#[from] zebra_chain::serialization::SerializationError), + + /// UTF-8 conversion error. + #[error("UTF-8 conversion error: {0}")] + Utf8Error(#[from] std::str::Utf8Error), + + /// Integer parsing error. + #[error("Integer parsing error: {0}")] + ParseIntError(#[from] std::num::ParseIntError), + + /// Integer conversion error. + #[error("Integer conversion error: {0}")] + TryFromIntError(#[from] std::num::TryFromIntError), +} + +/// Errors related to the `NonFinalisedState`. +#[derive(Debug, thiserror::Error)] +pub enum NonFinalisedStateError { + /// Custom Errors. *Remove before production. + #[error("Custom error: {0}")] + Custom(String), + + /// Required data is missing from the non-finalised state. + #[error("Missing data: {0}")] + MissingData(String), + + /// Critical Errors, Restart Zaino. + #[error("Critical error: {0}")] + Critical(String), + + /// Error from JsonRpcConnector. + #[error("JsonRpcConnector error: {0}")] + JsonRpcConnectorError(#[from] zaino_fetch::jsonrpsee::error::TransportError), + + /// Unexpected status-related error. 
+ #[error("Status error: {0:?}")] + StatusError(StatusError), +} + +/// These aren't the best conversions, but the NonFinalizedStateError should go away +/// in favor of a new type with the new chain cache is complete +impl From> for NonFinalisedStateError { + fn from(value: RpcRequestError) -> Self { + match value { + RpcRequestError::Transport(transport_error) => { + NonFinalisedStateError::JsonRpcConnectorError(transport_error) + } + RpcRequestError::JsonRpc(error) => { + NonFinalisedStateError::Custom(format!("argument failed to serialze: {error}")) + } + RpcRequestError::InternalUnrecoverable(e) => { + NonFinalisedStateError::Custom(format!("Internal unrecoverable error: {e}")) + } + RpcRequestError::ServerWorkQueueFull => NonFinalisedStateError::Custom( + "Server queue full. Handling for this not yet implemented".to_string(), + ), + RpcRequestError::Method(e) => NonFinalisedStateError::Custom(format!( + "unhandled rpc-specific {} error: {}", + type_name::(), + e.to_string() + )), + RpcRequestError::UnexpectedErrorResponse(error) => { + NonFinalisedStateError::Custom(format!( + "unhandled rpc-specific {} error: {}", + type_name::(), + error + )) + } + } + } +} + +/// Errors related to the `FinalisedState`. +// TODO: Update name to DbError when ZainoDB replaces legacy finalised state. +#[derive(Debug, thiserror::Error)] +pub enum FinalisedStateError { + /// Custom Errors. + // TODO: Remove before production + #[error("Custom error: {0}")] + Custom(String), + + /// Requested data is missing from the finalised state. + /// + /// This could be due to the databae not yet being synced or due to a bad request input. + /// + /// We could split this into 2 distinct types if needed. + #[error("Missing data: {0}")] + DataUnavailable(String), + + /// A block is present on disk but failed internal validation. 
+ /// + /// *Typically means: checksum mismatch, corrupt CBOR, Merkle check + /// failed, etc.* The caller should fetch the correct data and + /// overwrite the faulty block. + #[error("invalid block @ height {height} (hash {hash}): {reason}")] + InvalidBlock { + height: u32, + hash: BlockHash, + reason: String, + }, + + /// Returned when a caller asks for a feature that the + /// currently-opened database version does not advertise. + #[error("feature unavailable: {0}")] + FeatureUnavailable(&'static str), + + /// Errors originating from the BlockchainSource in use. + #[error("blockchain source error: {0}")] + BlockchainSourceError(#[from] crate::chain_index::source::BlockchainSourceError), + + /// Critical Errors, Restart Zaino. + #[error("Critical error: {0}")] + Critical(String), + + /// Error from the LMDB database. + // NOTE: Should this error type be here or should we handle all LMDB errors internally? + #[error("LMDB database error: {0}")] + LmdbError(#[from] lmdb::Error), + + /// Serde Json serialisation / deserialisation errors. + // TODO: Remove when ZainoDB replaces legacy finalised state. + #[error("LMDB database error: {0}")] + SerdeJsonError(#[from] serde_json::Error), + + /// Unexpected status-related error. + #[error("Status error: {0:?}")] + StatusError(StatusError), + + /// Error from JsonRpcConnector. + // TODO: Remove when ZainoDB replaces legacy finalised state. 
+ #[error("JsonRpcConnector error: {0}")] + JsonRpcConnectorError(#[from] zaino_fetch::jsonrpsee::error::TransportError), + + /// std::io::Error + #[error("IO error: {0}")] + IoError(#[from] std::io::Error), +} + +/// These aren't the best conversions, but the FinalizedStateError should go away +/// in favor of a new type with the new chain cache is complete +impl From> for FinalisedStateError { + fn from(value: RpcRequestError) -> Self { + match value { + RpcRequestError::Transport(transport_error) => { + FinalisedStateError::JsonRpcConnectorError(transport_error) + } + RpcRequestError::JsonRpc(error) => { + FinalisedStateError::Custom(format!("argument failed to serialze: {error}")) + } + RpcRequestError::InternalUnrecoverable(e) => { + FinalisedStateError::Custom(format!("Internal unrecoverable error: {e}")) + } + RpcRequestError::ServerWorkQueueFull => FinalisedStateError::Custom( + "Server queue full. Handling for this not yet implemented".to_string(), + ), + RpcRequestError::Method(e) => FinalisedStateError::Custom(format!( + "unhandled rpc-specific {} error: {}", + type_name::(), + e.to_string() + )), + RpcRequestError::UnexpectedErrorResponse(error) => { + FinalisedStateError::Custom(format!( + "unhandled rpc-specific {} error: {}", + type_name::(), + error + )) + } + } + } +} + +/// A general error type to represent error StatusTypes. 
+#[derive(Debug, Clone, thiserror::Error)] +#[error("Unexpected status error: {server_status:?}")] +pub struct StatusError { + pub server_status: crate::status::StatusType, +} + +#[derive(Debug, thiserror::Error)] +#[error("{kind}: {message}")] +/// The set of errors that can occur during the public API calls +/// of a NodeBackedChainIndex +pub struct ChainIndexError { + pub(crate) kind: ChainIndexErrorKind, + pub(crate) message: String, + pub(crate) source: Option>, +} + +#[derive(Debug, Copy, Clone)] +#[non_exhaustive] +/// The high-level kinds of thing that can fail +pub enum ChainIndexErrorKind { + /// Zaino is in some way nonfunctional + InternalServerError, + /// The given snapshot contains invalid data. + // This variant isn't used yet...it should indicate + // that the provided snapshot contains information unknown to Zebra + // Unlike an internal server error, generating a new snapshot may solve + // whatever went wrong + #[allow(dead_code)] + InvalidSnapshot, +} + +impl Display for ChainIndexErrorKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(match self { + ChainIndexErrorKind::InternalServerError => "internal server error", + ChainIndexErrorKind::InvalidSnapshot => "invalid snapshot", + }) + } +} + +impl ChainIndexError { + /// The error kind + pub fn kind(&self) -> ChainIndexErrorKind { + self.kind + } + pub(crate) fn backing_validator(value: impl std::error::Error + Send + Sync + 'static) -> Self { + Self { + kind: ChainIndexErrorKind::InternalServerError, + message: "InternalServerError: error receiving data from backing node".to_string(), + source: Some(Box::new(value)), + } + } + + pub(crate) fn database_hole( + missing_block: impl Display, + source: Option>, + ) -> Self { + Self { + kind: ChainIndexErrorKind::InternalServerError, + message: format!( + "InternalServerError: hole in validator database, missing block {missing_block}" + ), + source, + } + } + + pub(crate) fn 
validator_data_error_block_coinbase_height_missing() -> Self { + Self { + kind: ChainIndexErrorKind::InternalServerError, + message: "validator error: data error: block.coinbase_height() returned None" + .to_string(), + source: None, + } + } + + pub(crate) fn child_process_status_error(process: &str, status_err: StatusError) -> Self { + use crate::status::StatusType; + + let message = match status_err.server_status { + StatusType::Spawning => format!("{process} status: Spawning (not ready yet)"), + StatusType::Syncing => format!("{process} status: Syncing (not ready yet)"), + StatusType::Ready => format!("{process} status: Ready (unexpected error path)"), + StatusType::Busy => format!("{process} status: Busy (temporarily unavailable)"), + StatusType::Closing => format!("{process} status: Closing (shutting down)"), + StatusType::Offline => format!("{process} status: Offline (not available)"), + StatusType::RecoverableError => { + format!("{process} status: RecoverableError (retry may succeed)") + } + StatusType::CriticalError => { + format!("{process} status: CriticalError (requires operator action)") + } + }; + + ChainIndexError { + kind: ChainIndexErrorKind::InternalServerError, + message, + source: Some(Box::new(status_err)), + } + } +} + +impl From for ChainIndexError { + fn from(value: FinalisedStateError) -> Self { + let message = match &value { + FinalisedStateError::DataUnavailable(err) => format!("unhandled missing data: {err}"), + FinalisedStateError::FeatureUnavailable(err) => { + format!("unhandled missing feature: {err}") + } + FinalisedStateError::InvalidBlock { + height, + hash: _, + reason, + } => format!("invalid block at height {height}: {reason}"), + FinalisedStateError::Custom(err) | FinalisedStateError::Critical(err) => err.clone(), + FinalisedStateError::LmdbError(error) => error.to_string(), + FinalisedStateError::SerdeJsonError(error) => error.to_string(), + FinalisedStateError::StatusError(status_error) => status_error.to_string(), + 
FinalisedStateError::JsonRpcConnectorError(transport_error) => { + transport_error.to_string() + } + FinalisedStateError::IoError(error) => error.to_string(), + FinalisedStateError::BlockchainSourceError(blockchain_source_error) => { + blockchain_source_error.to_string() + } + }; + ChainIndexError { + kind: ChainIndexErrorKind::InternalServerError, + message, + source: Some(Box::new(value)), + } + } +} + +impl From for ChainIndexError { + fn from(value: MempoolError) -> Self { + // Construct a user-facing message depending on the variant + let message = match &value { + MempoolError::Critical(msg) => format!("critical mempool error: {msg}"), + MempoolError::IncorrectChainTip { + expected_chain_tip, + current_chain_tip, + } => { + format!( + "incorrect chain tip (expected {expected_chain_tip:?}, current {current_chain_tip:?})" + ) + } + MempoolError::JsonRpcConnectorError(err) => { + format!("mempool json-rpc connector error: {err}") + } + MempoolError::BlockchainSourceError(err) => { + format!("mempool blockchain source error: {err}") + } + MempoolError::WatchRecvError(err) => format!("mempool watch receiver error: {err}"), + MempoolError::StatusError(status_err) => { + format!("mempool status error: {status_err:?}") + } + }; + + ChainIndexError { + kind: ChainIndexErrorKind::InternalServerError, + message, + source: Some(Box::new(value)), + } + } +} diff --git a/zaino-state/src/indexer.rs b/zaino-state/src/indexer.rs new file mode 100644 index 000000000..164f5d3ff --- /dev/null +++ b/zaino-state/src/indexer.rs @@ -0,0 +1,887 @@ +//! Holds the Indexer trait containing the zcash RPC definitions served by zaino +//! and generic wrapper structs for the various backend options available. 
+ +use async_trait::async_trait; +use tokio::{sync::mpsc, time::timeout}; +use tracing::warn; +use zaino_fetch::jsonrpsee::response::{ + address_deltas::{GetAddressDeltasParams, GetAddressDeltasResponse}, + block_deltas::BlockDeltas, + block_header::GetBlockHeader, + block_subsidy::GetBlockSubsidy, + mining_info::GetMiningInfoWire, + peer_info::GetPeerInfo, + GetMempoolInfoResponse, GetNetworkSolPsResponse, +}; +use zaino_proto::proto::{ + compact_formats::CompactBlock, + service::{ + AddressList, Balance, BlockId, BlockRange, Duration, GetAddressUtxosArg, + GetAddressUtxosReplyList, GetMempoolTxRequest, GetSubtreeRootsArg, LightdInfo, + PingResponse, RawTransaction, SendResponse, ShieldedProtocol, SubtreeRoot, + TransparentAddressBlockFilter, TreeState, TxFilter, + }, +}; +use zebra_chain::{ + block::Height, serialization::BytesInDisplayOrder as _, subtree::NoteCommitmentSubtreeIndex, +}; +use zebra_rpc::{ + client::{GetSubtreesByIndexResponse, GetTreestateResponse, ValidateAddressResponse}, + methods::{ + AddressBalance, GetAddressBalanceRequest, GetAddressTxIdsRequest, GetAddressUtxos, + GetBlock, GetBlockHash, GetBlockchainInfoResponse, GetInfo, GetRawTransaction, + SentTransactionHash, + }, +}; + +use crate::{ + status::Status, + stream::{ + AddressStream, CompactBlockStream, CompactTransactionStream, RawTransactionStream, + SubtreeRootReplyStream, UtxoReplyStream, + }, + BackendType, +}; + +/// Wrapper Struct for a ZainoState chain-fetch service (StateService, FetchService) +/// +/// The future plan is to also add a TonicService and DarksideService to this to enable +/// wallets to use a single unified chain fetch service. +#[derive(Clone)] +pub struct IndexerService { + /// Underlying Service. + service: Service, +} + +impl IndexerService +where + Service: ZcashService, +{ + /// Creates a new `IndexerService` using the provided `config`. 
+ pub async fn spawn( + config: Service::Config, + ) -> Result::Error> { + Ok(IndexerService { + service: Service::spawn(config) + .await + .map_err(Into::::into)?, + }) + } + + /// Returns a reference to the inner service. + pub fn inner_ref(&self) -> &Service { + &self.service + } + + /// Consumes the `IndexerService` and returns the inner service. + pub fn inner(self) -> Service { + self.service + } +} + +/// Zcash Service functionality. +/// +/// Implementors automatically gain [`Liveness`](zaino_common::probing::Liveness) and +/// [`Readiness`](zaino_common::probing::Readiness) via the [`Status`] supertrait. +#[async_trait] +pub trait ZcashService: Sized + Status { + /// Backend type. Read state or fetch service. + const BACKEND_TYPE: BackendType; + + /// A subscriber to the service, used to fetch chain data. + type Subscriber: Clone + ZcashIndexer + LightWalletIndexer + Status; + + /// Service Config. + type Config: Clone; + + /// Spawns a [`ZcashIndexer`]. + async fn spawn(config: Self::Config) + -> Result::Error>; + + /// Returns a [`IndexerSubscriber`]. + fn get_subscriber(&self) -> IndexerSubscriber; + + /// Shuts down the StateService. + fn close(&mut self); +} + +/// Wrapper Struct for a ZainoState chain-fetch service subscriber (StateServiceSubscriber, FetchServiceSubscriber) +/// +/// The future plan is to also add a TonicServiceSubscriber and DarksideServiceSubscriber to this to enable wallets to use a single unified chain fetch service. +#[derive(Clone)] +pub struct IndexerSubscriber { + /// Underlying Service Subscriber. + subscriber: Subscriber, +} + +impl IndexerSubscriber +where + Subscriber: Clone + ZcashIndexer + LightWalletIndexer, +{ + /// Creates a new [`IndexerSubscriber`]. + pub fn new(subscriber: Subscriber) -> Self { + IndexerSubscriber { subscriber } + } + + /// Returns a reference to the inner service. + pub fn inner_ref(&self) -> &Subscriber { + &self.subscriber + } + + /// Returns a clone of the inner service. 
+ pub fn inner_clone(&self) -> Subscriber { + self.subscriber.clone() + } + + /// Consumes the `IndexerService` and returns the inner service. + pub fn inner(self) -> Subscriber { + self.subscriber + } +} + +/// Zcash RPC method signatures. +/// +/// Doc comments taken from Zebra for consistency. +#[async_trait] +pub trait ZcashIndexer: Send + Sync + 'static { + /// Uses underlying error type of implementer. + type Error: std::error::Error + + From + + Into + + Send + + Sync + + 'static; + + /// Returns software information from the RPC server, as a [`GetInfo`] JSON struct. + /// + /// zcashd reference: [`getinfo`](https://zcash.github.io/rpc/getinfo.html) + /// method: post + /// tags: control + /// + /// # Notes + /// + /// [The zcashd reference](https://zcash.github.io/rpc/getinfo.html) might not show some fields + /// in Zebra's [`GetInfo`]. Zebra uses the field names and formats from the + /// [zcashd code](https://github.com/zcash/zcash/blob/v4.6.0-1/src/rpc/misc.cpp#L86-L87). + /// + /// Some fields from the zcashd reference are missing from Zebra's [`GetInfo`]. It only contains the fields + /// [required for lightwalletd support.](https://github.com/zcash/lightwalletd/blob/v0.4.9/common/common.go#L91-L95) + async fn get_info(&self) -> Result; + + /// Returns all changes for an address. + /// + /// Returns information about all changes to the given transparent addresses within the given (inclusive) + /// + /// block height range, default is the full blockchain. + /// If start or end are not specified, they default to zero. + /// If start is greater than the latest block height, it's interpreted as that height. + /// + /// If end is zero, it's interpreted as the latest block height. 
+ /// + /// [Original zcashd implementation](https://github.com/zcash/zcash/blob/18238d90cd0b810f5b07d5aaa1338126aa128c06/src/rpc/misc.cpp#L881) + /// + /// zcashd reference: [`getaddressdeltas`](https://zcash.github.io/rpc/getaddressdeltas.html) + /// method: post + /// tags: address + async fn get_address_deltas( + &self, + params: GetAddressDeltasParams, + ) -> Result; + + /// Returns blockchain state information, as a [`GetBlockchainInfoResponse`] JSON struct. + /// + /// zcashd reference: [`getblockchaininfo`](https://zcash.github.io/rpc/getblockchaininfo.html) + /// method: post + /// tags: blockchain + /// + /// # Notes + /// + /// Some fields from the zcashd reference are missing from Zebra's [`GetBlockchainInfoResponse`]. It only contains the fields + /// [required for lightwalletd support.](https://github.com/zcash/lightwalletd/blob/v0.4.9/common/common.go#L72-L89) + async fn get_blockchain_info(&self) -> Result; + + /// Returns the proof-of-work difficulty as a multiple of the minimum difficulty. + /// + /// zcashd reference: [`getdifficulty`](https://zcash.github.io/rpc/getdifficulty.html) + /// method: post + /// tags: blockchain + async fn get_difficulty(&self) -> Result; + + /// Returns block subsidy reward, taking into account the mining slow start and the founders reward, of block at index provided. + /// + /// zcashd reference: [`getblocksubsidy`](https://zcash.github.io/rpc/getblocksubsidy.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `height`: (number, optional) The block height. If not provided, defaults to the current height of the chain. + async fn get_block_subsidy(&self, height: u32) -> Result; + + /// Returns details on the active state of the TX memory pool. 
+ /// + /// zcashd reference: [`getmempoolinfo`](https://zcash.github.io/rpc/getmempoolinfo.html) + /// method: post + /// tags: mempool + /// + /// Original implementation: [`getmempoolinfo`](https://github.com/zcash/zcash/blob/18238d90cd0b810f5b07d5aaa1338126aa128c06/src/rpc/blockchain.cpp#L1555) + async fn get_mempool_info(&self) -> Result; + + /// Returns data about each connected network node as a json array of objects. + /// + /// zcashd reference: [`getpeerinfo`](https://zcash.github.io/rpc/getpeerinfo.html) + /// tags: network + /// + /// Current `zebrad` does not include the same fields as `zcashd`. + async fn get_peer_info(&self) -> Result; + + /// Returns the total balance of a provided `addresses` in an [`AddressBalance`] instance. + /// + /// zcashd reference: [`getaddressbalance`](https://zcash.github.io/rpc/getaddressbalance.html) + /// method: post + /// tags: address + /// + /// # Parameters + /// + /// - `address_strings`: (object, example={"addresses": ["tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ"]}) A JSON map with a single entry + /// - `addresses`: (array of strings) A list of base-58 encoded addresses. + /// + /// # Notes + /// + /// zcashd also accepts a single string parameter instead of an array of strings, but Zebra + /// doesn't because lightwalletd always calls this RPC with an array of addresses. + /// + /// zcashd also returns the total amount of Zatoshis received by the addresses, but Zebra + /// doesn't because lightwalletd doesn't use that information. + /// + /// The RPC documentation says that the returned object has a string `balance` field, but + /// zcashd actually [returns an + /// integer](https://github.com/zcash/lightwalletd/blob/bdaac63f3ee0dbef62bde04f6817a9f90d483b00/common/common.go#L128-L130). + async fn z_get_address_balance( + &self, + address_strings: GetAddressBalanceRequest, + ) -> Result; + + /// Sends the raw bytes of a signed transaction to the local node's mempool, if the transaction is valid. 
+ /// Returns the [`SentTransactionHash`] for the transaction, as a JSON string. + /// + /// zcashd reference: [`sendrawtransaction`](https://zcash.github.io/rpc/sendrawtransaction.html) + /// method: post + /// tags: transaction + /// + /// # Parameters + /// + /// - `raw_transaction_hex`: (string, required, example="signedhex") The hex-encoded raw transaction bytes. + /// + /// # Notes + /// + /// zcashd accepts an optional `allowhighfees` parameter. Zebra doesn't support this parameter, + /// because lightwalletd doesn't use it. + async fn send_raw_transaction( + &self, + raw_transaction_hex: String, + ) -> Result; + + /// If verbose is false, returns a string that is serialized, hex-encoded data for blockheader `hash`. + /// If verbose is true, returns an Object with information about blockheader `hash`. + /// + /// # Parameters + /// + /// - hash: (string, required) The block hash + /// - verbose: (boolean, optional, default=true) true for a json object, false for the hex encoded data + /// + /// zcashd reference: [`getblockheader`](https://zcash.github.io/rpc/getblockheader.html) + /// method: post + /// tags: blockchain + async fn get_block_header( + &self, + hash: String, + verbose: bool, + ) -> Result; + + /// Returns the requested block by hash or height, as a [`GetBlock`] JSON string. + /// If the block is not in Zebra's state, returns + /// [error code `-8`.](https://github.com/zcash/zcash/issues/5758) if a height was + /// passed or -5 if a hash was passed. + /// + /// zcashd reference: [`getblock`](https://zcash.github.io/rpc/getblock.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `hash_or_height`: (string, required, example="1") The hash or height for the block to be returned. + /// - `verbosity`: (number, optional, default=1, example=1) 0 for hex encoded data, 1 for a json object, and 2 for json object with transaction data. 
+ /// + /// # Notes + /// + /// Zebra previously partially supported verbosity=1 by returning only the + /// fields required by lightwalletd ([`lightwalletd` only reads the `tx` + /// field of the result](https://github.com/zcash/lightwalletd/blob/dfac02093d85fb31fb9a8475b884dd6abca966c7/common/common.go#L152)). + /// That verbosity level was migrated to "3"; so while lightwalletd will + /// still work by using verbosity=1, it will sync faster if it is changed to + /// use verbosity=3. + /// + /// The undocumented `chainwork` field is not returned. + async fn z_get_block( + &self, + hash_or_height: String, + verbosity: Option, + ) -> Result; + + /// Returns information about the given block and its transactions. + /// + /// zcashd reference: [`getblockdeltas`](https://zcash.github.io/rpc/getblockdeltas.html) + /// method: post + /// tags: blockchain + async fn get_block_deltas(&self, hash: String) -> Result; + + /// Returns the current block count in the best valid block chain. + /// + /// zcashd reference: [`getblockcount`](https://zcash.github.io/rpc/getblockcount.html) + /// method: post + /// tags: blockchain + async fn get_block_count(&self) -> Result; + + /// Return information about the given Zcash address. + /// + /// # Parameters + /// - `address`: (string, required, example="tmHMBeeYRuc2eVicLNfP15YLxbQsooCA6jb") The Zcash transparent address to validate. + /// + /// zcashd reference: [`validateaddress`](https://zcash.github.io/rpc/validateaddress.html) + /// method: post + /// tags: blockchain + async fn validate_address( + &self, + address: String, + ) -> Result; + + /// Returns the hash of the best block (tip) of the longest chain. + /// online zcashd reference: [`getbestblockhash`](https://zcash.github.io/rpc/getbestblockhash.html) + /// The zcashd doc reference above says there are no parameters and the result is a "hex" (string) of the block hash hex encoded. 
+ /// method: post + /// tags: blockchain + /// The Zcash source code is considered canonical: + /// [In the rpc definition](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/common.h#L48) there are no required params, or optional params. + /// [The function in rpc/blockchain.cpp](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/blockchain.cpp#L325) + /// where `return chainActive.Tip()->GetBlockHash().GetHex();` is the [return expression](https://github.com/zcash/zcash/blob/654a8be2274aa98144c80c1ac459400eaf0eacbe/src/rpc/blockchain.cpp#L339) returning a `std::string` + async fn get_best_blockhash(&self) -> Result; + + /// Returns all transaction ids in the memory pool, as a JSON array. + /// + /// zcashd reference: [`getrawmempool`](https://zcash.github.io/rpc/getrawmempool.html) + /// method: post + /// tags: blockchain + async fn get_raw_mempool(&self) -> Result, Self::Error>; + + /// Returns information about the given block's Sapling & Orchard tree state. + /// + /// zcashd reference: [`z_gettreestate`](https://zcash.github.io/rpc/z_gettreestate.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `hash | height`: (string, required, example="00000000febc373a1da2bd9f887b105ad79ddc26ac26c2b28652d64e5207c5b5") The block hash or height. + /// + /// # Notes + /// + /// The zcashd doc reference above says that the parameter "`height` can be + /// negative where -1 is the last known valid block". On the other hand, + /// `lightwalletd` only uses positive heights, so Zebra does not support + /// negative heights. + async fn z_get_treestate( + &self, + hash_or_height: String, + ) -> Result; + + /// Returns information about a range of Sapling or Orchard subtrees. 
+ /// + /// zcashd reference: [`z_getsubtreesbyindex`](https://zcash.github.io/rpc/z_getsubtreesbyindex.html) - TODO: fix link + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `pool`: (string, required) The pool from which subtrees should be returned. Either "sapling" or "orchard". + /// - `start_index`: (number, required) The index of the first 2^16-leaf subtree to return. + /// - `limit`: (number, optional) The maximum number of subtree values to return. + /// + /// # Notes + /// + /// While Zebra is doing its initial subtree index rebuild, subtrees will become available + /// starting at the chain tip. This RPC will return an empty list if the `start_index` subtree + /// exists, but has not been rebuilt yet. This matches `zcashd`'s behaviour when subtrees aren't + /// available yet. (But `zcashd` does its rebuild before syncing any blocks.) + async fn z_get_subtrees_by_index( + &self, + pool: String, + start_index: NoteCommitmentSubtreeIndex, + limit: Option, + ) -> Result; + + /// Returns the raw transaction data, as a [`GetRawTransaction`] JSON string or structure. + /// + /// zcashd reference: [`getrawtransaction`](https://zcash.github.io/rpc/getrawtransaction.html) + /// method: post + /// tags: transaction + /// + /// # Parameters + /// + /// - `txid`: (string, required, example="mytxid") The transaction ID of the transaction to be returned. + /// - `verbose`: (number, optional, default=0, example=1) If 0, return a string of hex-encoded data, otherwise return a JSON object. + /// + /// # Notes + /// + /// We don't currently support the `blockhash` parameter since lightwalletd does not + /// use it. + /// + /// In verbose mode, we only expose the `hex` and `height` fields since + /// lightwalletd uses only those: + /// + async fn get_raw_transaction( + &self, + txid_hex: String, + verbose: Option, + ) -> Result; + + /// Returns the transaction ids made by the provided transparent addresses. 
+ /// + /// zcashd reference: [`getaddresstxids`](https://zcash.github.io/rpc/getaddresstxids.html) + /// method: post + /// tags: address + /// + /// # Parameters + /// + /// - `request`: (object, required, example={\"addresses\": [\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"], \"start\": 1000, \"end\": 2000}) A struct with the following named fields: + /// - `addresses`: (json array of string, required) The addresses to get transactions from. + /// - `start`: (numeric, required) The lower height to start looking for transactions (inclusive). + /// - `end`: (numeric, required) The top height to stop looking for transactions (inclusive). + /// + /// # Notes + /// + /// Only the multi-argument format is used by lightwalletd and this is what we currently support: + /// + async fn get_address_tx_ids( + &self, + request: GetAddressTxIdsRequest, + ) -> Result, Self::Error>; + + /// Returns all unspent outputs for a list of addresses. + /// + /// zcashd reference: [`getaddressutxos`](https://zcash.github.io/rpc/getaddressutxos.html) + /// method: post + /// tags: address + /// + /// # Parameters + /// + /// - `addresses`: (array, required, example={\"addresses\": [\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"]}) The addresses to get outputs from. + /// + /// # Notes + /// + /// lightwalletd always uses the multi-address request, without chaininfo: + /// + async fn z_get_address_utxos( + &self, + address_strings: GetAddressBalanceRequest, + ) -> Result, Self::Error>; + + /// Returns a json object containing mining-related information. + /// + /// `zcashd` reference (may be outdated): [`getmininginfo`](https://zcash.github.io/rpc/getmininginfo.html) + async fn get_mining_info(&self) -> Result; + + /// Returns the estimated network solutions per second based on the last n blocks. 
+ /// + /// zcashd reference: [`getnetworksolps`](https://zcash.github.io/rpc/getnetworksolps.html) + /// method: post + /// tags: blockchain + /// + /// This RPC is implemented in the [mining.cpp](https://github.com/zcash/zcash/blob/d00fc6f4365048339c83f463874e4d6c240b63af/src/rpc/mining.cpp#L104) + /// file of the Zcash repository. The Zebra implementation can be found [here](https://github.com/ZcashFoundation/zebra/blob/19bca3f1159f9cb9344c9944f7e1cb8d6a82a07f/zebra-rpc/src/methods.rs#L2687). + /// + /// # Parameters + /// + /// - `blocks`: (number, optional, default=120) Number of blocks, or -1 for blocks over difficulty averaging window. + /// - `height`: (number, optional, default=-1) To estimate network speed at the time of a specific block height. + async fn get_network_sol_ps( + &self, + blocks: Option, + height: Option, + ) -> Result; + + /// Helper function to get the chain height + async fn chain_height(&self) -> Result; + + /// Helper function, to get the list of taddresses that have sends or reciepts + /// within a given block range + async fn get_taddress_txids_helper( + &self, + request: TransparentAddressBlockFilter, + ) -> Result, Self::Error> { + let chain_height = self.chain_height().await?; + let (start, end) = match request.range { + Some(range) => { + let start = if let Some(start) = range.start { + match u32::try_from(start.height) { + Ok(height) => Some(height.min(chain_height.0)), + Err(_) => { + return Err(Self::Error::from(tonic::Status::invalid_argument( + "Error: Start height out of range. Failed to convert to u32.", + ))) + } + } + } else { + None + }; + let end = if let Some(end) = range.end { + match u32::try_from(end.height) { + Ok(height) => Some(height.min(chain_height.0)), + Err(_) => { + return Err(Self::Error::from(tonic::Status::invalid_argument( + "Error: End height out of range. 
Failed to convert to u32.", + ))) + } + } + } else { + None + }; + match (start, end) { + (Some(start), Some(end)) => { + if start > end { + (Some(end), Some(start)) + } else { + (Some(start), Some(end)) + } + } + _ => (start, end), + } + } + None => { + return Err(Self::Error::from(tonic::Status::invalid_argument( + "Error: No block range given.", + ))) + } + }; + self.get_address_tx_ids(GetAddressTxIdsRequest::new( + vec![request.address], + start, + end, + )) + .await + } +} + +/// Light Client Protocol gRPC method signatures. +/// For more information, see [the lightwallet protocol](https://github.com/zcash/lightwallet-protocol/blob/180717dfa21f3cbf063b8a1ad7697ccba7f5b054/walletrpc/service.proto#L181). +/// +/// Doc comments taken from Zaino-Proto for consistency. +#[async_trait] +pub trait LightWalletIndexer: Send + Sync + Clone + ZcashIndexer + 'static { + /// Return the height of the tip of the best chain + async fn get_latest_block(&self) -> Result; + + /// Return the compact block corresponding to the given block identifier + async fn get_block(&self, request: BlockId) -> Result; + + /// Same as GetBlock except actions contain only nullifiers + async fn get_block_nullifiers(&self, request: BlockId) -> Result; + + /// Return a list of consecutive compact blocks + async fn get_block_range(&self, request: BlockRange) + -> Result; + + /// Same as GetBlockRange except actions contain only nullifiers + async fn get_block_range_nullifiers( + &self, + request: BlockRange, + ) -> Result; + + /// Return the requested full (not compact) transaction (as from zcashd) + async fn get_transaction(&self, request: TxFilter) -> Result; + + /// Submit the given transaction to the Zcash network + async fn send_transaction(&self, request: RawTransaction) -> Result; + + /// Return the transactions corresponding to the given t-address within the given block range + async fn get_taddress_transactions( + &self, + request: TransparentAddressBlockFilter, + ) -> Result; + + /// 
Return the txids corresponding to the given t-address within the given block range + /// Note: This function is misnamed, it returns complete `RawTransaction` values, not TxIds. + /// Note: this method is deprecated, please use GetTaddressTransactions instead. + async fn get_taddress_txids( + &self, + request: TransparentAddressBlockFilter, + ) -> Result; + + /// Returns the total balance for a list of taddrs + async fn get_taddress_balance(&self, request: AddressList) -> Result; + + /// Returns the total balance for a list of taddrs + /// + /// TODO: Update input type. + async fn get_taddress_balance_stream( + &self, + request: AddressStream, + ) -> Result; + + /// Returns a stream of the compact transaction representation for transactions + /// currently in the mempool. The results of this operation may be a few + /// seconds out of date. If the `exclude_txid_suffixes` list is empty, + /// return all transactions; otherwise return all *except* those in the + /// `exclude_txid_suffixes` list (if any); this allows the client to avoid + /// receiving transactions that it already has (from an earlier call to this + /// RPC). The transaction IDs in the `exclude_txid_suffixes` list can be + /// shortened to any number of bytes to make the request more + /// bandwidth-efficient; if two or more transactions in the mempool match a + /// txid suffix, none of the matching transactions are excluded. Txid + /// suffixes in the exclude list that don't match any transactions in the + /// mempool are ignored. + async fn get_mempool_tx( + &self, + request: GetMempoolTxRequest, + ) -> Result; + + /// Return a stream of current Mempool transactions. This will keep the output stream open while + /// there are mempool transactions. It will close the returned stream when a new block is mined. + async fn get_mempool_stream(&self) -> Result; + + /// GetTreeState returns the note commitment tree state corresponding to the given block. 
+ /// See section 3.7 of the Zcash protocol specification. It returns several other useful + /// values also (even though they can be obtained using GetBlock). + /// The block can be specified by either height or hash. + async fn get_tree_state(&self, request: BlockId) -> Result; + + /// GetLatestTreeState returns the note commitment tree state corresponding to the chain tip. + async fn get_latest_tree_state(&self) -> Result; + + /// Helper function to get timeout and channel size from config + fn timeout_channel_size(&self) -> (u32, u32); + + /// Returns a stream of information about roots of subtrees of the Sapling and Orchard + /// note commitment trees. + async fn get_subtree_roots( + &self, + request: GetSubtreeRootsArg, + ) -> Result::Error> { + let pool = match ShieldedProtocol::try_from(request.shielded_protocol) { + Ok(protocol) => protocol.as_str_name(), + Err(_) => { + return Err(::Error::from( + tonic::Status::invalid_argument("Error: Invalid shielded protocol value."), + )) + } + }; + let start_index = match u16::try_from(request.start_index) { + Ok(value) => value, + Err(_) => { + return Err(::Error::from( + tonic::Status::invalid_argument("Error: start_index value exceeds u16 range."), + )) + } + }; + let limit = if request.max_entries == 0 { + None + } else { + match u16::try_from(request.max_entries) { + Ok(value) => Some(value), + Err(_) => { + return Err(::Error::from( + tonic::Status::invalid_argument( + "Error: max_entries value exceeds u16 range.", + ), + )) + } + } + }; + let service_clone = self.clone(); + let subtrees = service_clone + .z_get_subtrees_by_index( + pool.to_string(), + NoteCommitmentSubtreeIndex(start_index), + limit.map(NoteCommitmentSubtreeIndex), + ) + .await?; + let (service_timeout, service_channel_size) = self.timeout_channel_size(); + let (channel_tx, channel_rx) = mpsc::channel(service_channel_size as usize); + tokio::spawn(async move { + let timeout = timeout( + std::time::Duration::from_secs((service_timeout * 4) as 
u64), + async { + for subtree in subtrees.subtrees() { + match service_clone + .z_get_block(subtree.end_height.0.to_string(), Some(1)) + .await + { + Ok(GetBlock::Object (block_object)) => { + let checked_height = match block_object.height() { + Some(h) => h.0 as u64, + None => { + match channel_tx + .send(Err(tonic::Status::unknown( + "Error: No block height returned by node.", + ))) + .await + { + Ok(_) => break, + Err(e) => { + warn!( + "GetSubtreeRoots channel closed unexpectedly: {}", + e + ); + break; + } + } + } + }; + let checked_root_hash = match hex::decode(&subtree.root) { + Ok(hash) => hash, + Err(e) => { + match channel_tx + .send(Err(tonic::Status::unknown(format!( + "Error: Failed to hex decode root hash: {e}." + )))) + .await + { + Ok(_) => break, + Err(e) => { + warn!( + "GetSubtreeRoots channel closed unexpectedly: {}", + e + ); + break; + } + } + } + }; + if channel_tx + .send(Ok(SubtreeRoot { + root_hash: checked_root_hash, + completing_block_hash: block_object.hash() + .bytes_in_display_order() + .to_vec(), + completing_block_height: checked_height, + })) + .await + .is_err() + { + break; + } + } + Ok(GetBlock::Raw(_)) => { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + if channel_tx + .send(Err(tonic::Status::unknown( + "Error: Received raw block type, this should not be possible.", + ))) + .await + .is_err() + { + break; + } + } + Err(e) => { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. 
+ if channel_tx + .send(Err(tonic::Status::unknown(format!( + "Error: Could not fetch block at height [{}] from node: {}", + subtree.end_height.0, e + )))) + .await + .is_err() + { + break; + } + } + } + } + }, + ) + .await; + match timeout { + Ok(_) => {} + Err(_) => { + channel_tx + .send(Err(tonic::Status::deadline_exceeded( + "Error: get_mempool_stream gRPC request timed out", + ))) + .await + .ok(); + } + } + }); + Ok(SubtreeRootReplyStream::new(channel_rx)) + } + + /// Returns all unspent outputs for a list of addresses. + /// + /// Ignores all utxos below block height [GetAddressUtxosArg.start_height]. + /// Returns max [GetAddressUtxosArg.max_entries] utxos, or unrestricted if [GetAddressUtxosArg.max_entries] = 0. + /// Utxos are collected and returned as a single Vec. + async fn get_address_utxos( + &self, + request: GetAddressUtxosArg, + ) -> Result; + + /// Returns all unspent outputs for a list of addresses. + /// + /// Ignores all utxos below block height [GetAddressUtxosArg.start_height]. + /// Returns max [GetAddressUtxosArg.max_entries] utxos, or unrestricted if [GetAddressUtxosArg.max_entries] = 0. + /// Utxos are returned in a stream. + async fn get_address_utxos_stream( + &self, + request: GetAddressUtxosArg, + ) -> Result; + + /// Return information about this lightwalletd instance and the blockchain + async fn get_lightd_info(&self) -> Result; + + /// Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) + /// + /// NOTE: Currently unimplemented in Zaino. + async fn ping(&self, request: Duration) -> Result; +} + +/// Zcash Service functionality. 
+#[async_trait] +pub trait LightWalletService: Sized + ZcashService {} + +impl LightWalletService for T where T: ZcashService {} + +pub(crate) async fn handle_raw_transaction( + chain_height: u64, + transaction: Result, + transmitter: mpsc::Sender>, +) -> Result<(), mpsc::error::SendError>> { + match transaction { + Ok(GetRawTransaction::Object(transaction_obj)) => { + let height: u64 = match transaction_obj.height() { + Some(h) => h as u64, + // Zebra returns None for mempool transactions, convert to `Mempool Height`. + None => chain_height, + }; + transmitter + .send(Ok(RawTransaction { + data: transaction_obj.hex().as_ref().to_vec(), + height, + })) + .await + } + Ok(GetRawTransaction::Raw(_)) => { + transmitter + .send(Err(tonic::Status::unknown( + "Received raw transaction type, this should not be impossible.", + ))) + .await + } + Err(e) => { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + transmitter + .send(Err(tonic::Status::unknown(e.to_string()))) + .await + } + } +} diff --git a/zaino-state/src/lib.rs b/zaino-state/src/lib.rs index 65a00ab32..a4c7138ea 100644 --- a/zaino-state/src/lib.rs +++ b/zaino-state/src/lib.rs @@ -1,4 +1,91 @@ -//! A mempool and chain-fetching service built on top of zebra's ReadStateService and TrustedChainSync. +//! Zaino's core mempool and chain-fetching Library. +//! +//! Built to use a configurable backend: +//! - FetchService +//! - Built using the Zcash Json RPC Services for backwards compatibility with Zcashd and other JsonRPC based validators. +//! - StateService +//! - Built using Zebra's ReadStateService for efficient chain access. #![warn(missing_docs)] #![forbid(unsafe_code)] + +include!(concat!(env!("OUT_DIR"), "/zebraversion.rs")); + +// Zaino's Indexer library frontend. 
+pub(crate) mod indexer; + +pub use indexer::{ + IndexerService, IndexerSubscriber, LightWalletIndexer, LightWalletService, ZcashIndexer, + ZcashService, +}; + +pub(crate) mod backends; + +#[allow(deprecated)] +pub use backends::{ + fetch::{FetchService, FetchServiceSubscriber}, + state::{StateService, StateServiceSubscriber}, +}; + +pub mod chain_index; + +// Core ChainIndex trait and implementations +pub use chain_index::{ChainIndex, NodeBackedChainIndex, NodeBackedChainIndexSubscriber}; +// Source types for ChainIndex backends +pub use chain_index::source::{BlockchainSource, State, ValidatorConnector}; +// Supporting types +pub use chain_index::encoding::*; +pub use chain_index::mempool::Mempool; +pub use chain_index::non_finalised_state::{ + InitError, NodeConnectionError, NonFinalizedState, NonfinalizedBlockCacheSnapshot, SyncError, + UpdateError, +}; +// NOTE: Should these be pub at all? +pub use chain_index::types::{ + AddrHistRecord, AddrScript, BlockData, BlockHash, BlockHeaderData, BlockIndex, BlockMetadata, + BlockWithMetadata, ChainWork, CommitmentTreeData, CommitmentTreeRoots, CommitmentTreeSizes, + CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactTxData, Height, + IndexedBlock, OrchardCompactTx, OrchardTxList, Outpoint, SaplingCompactTx, SaplingTxList, + ScriptType, ShardIndex, ShardRoot, TransactionHash, TransparentCompactTx, TransparentTxList, + TreeRootData, TxInCompact, TxLocation, TxOutCompact, TxidList, +}; + +pub use chain_index::mempool::{MempoolKey, MempoolValue}; + +#[cfg(feature = "test_dependencies")] +/// allow public access to additional APIs, for testing +pub mod test_dependencies { + /// Testing export of chain_index + pub mod chain_index { + pub use crate::chain_index::*; + } + + pub use crate::BlockCacheConfig; +} + +pub(crate) mod config; + +#[allow(deprecated)] +pub use config::{ + BackendConfig, BackendType, BlockCacheConfig, FetchServiceConfig, StateServiceConfig, +}; + +pub(crate) mod error; + 
+#[allow(deprecated)] +pub use error::{FetchServiceError, StateServiceError}; + +pub(crate) mod status; + +pub use status::{AtomicStatus, Status, StatusType}; + +pub(crate) mod stream; + +pub use stream::{ + AddressStream, CompactBlockStream, CompactTransactionStream, RawTransactionStream, + SubtreeRootReplyStream, UtxoReplyStream, +}; + +pub(crate) mod broadcast; + +pub(crate) mod utils; diff --git a/zaino-state/src/status.rs b/zaino-state/src/status.rs new file mode 100644 index 000000000..c92b67d59 --- /dev/null +++ b/zaino-state/src/status.rs @@ -0,0 +1,41 @@ +//! Thread-safe status wrapper. +//! +//! This module provides [`AtomicStatus`], a thread-safe wrapper for [`StatusType`]. + +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; + +pub use zaino_common::status::{Status, StatusType}; + +/// Holds a thread-safe representation of a [`StatusType`]. +#[derive(Debug, Clone)] +pub struct AtomicStatus { + inner: Arc, +} + +impl AtomicStatus { + /// Creates a new AtomicStatus. + pub fn new(status: StatusType) -> Self { + Self { + inner: Arc::new(AtomicUsize::new(status.into())), + } + } + + /// Loads the value held in the AtomicStatus. + pub fn load(&self) -> StatusType { + StatusType::from(self.inner.load(Ordering::SeqCst)) + } + + /// Sets the value held in the AtomicStatus. + pub fn store(&self, status: StatusType) { + self.inner.store(status.into(), Ordering::SeqCst); + } +} + +impl Status for AtomicStatus { + fn status(&self) -> StatusType { + self.load() + } +} diff --git a/zaino-state/src/stream.rs b/zaino-state/src/stream.rs new file mode 100644 index 000000000..7816f47fd --- /dev/null +++ b/zaino-state/src/stream.rs @@ -0,0 +1,196 @@ +//! Holds streaming response types. + +use tokio_stream::wrappers::ReceiverStream; +use zaino_proto::proto::{ + compact_formats::{CompactBlock, CompactTx}, + service::{Address, GetAddressUtxosReply, RawTransaction, SubtreeRoot}, +}; + +/// Stream of RawTransactions, output type of get_taddress_txids. 
+#[derive(Debug)] +pub struct RawTransactionStream { + inner: ReceiverStream>, +} + +impl RawTransactionStream { + /// Returns new instance of RawTransactionStream. + pub fn new(rx: tokio::sync::mpsc::Receiver>) -> Self { + RawTransactionStream { + inner: ReceiverStream::new(rx), + } + } +} + +impl futures::Stream for RawTransactionStream { + type Item = Result; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + let poll = std::pin::Pin::new(&mut self.inner).poll_next(cx); + match poll { + std::task::Poll::Ready(Some(Ok(raw_tx))) => std::task::Poll::Ready(Some(Ok(raw_tx))), + std::task::Poll::Ready(Some(Err(e))) => std::task::Poll::Ready(Some(Err(e))), + std::task::Poll::Ready(None) => std::task::Poll::Ready(None), + std::task::Poll::Pending => std::task::Poll::Pending, + } + } +} + +/// Stream of RawTransactions, output type of get_taddress_txids. +pub struct CompactTransactionStream { + inner: ReceiverStream>, +} + +impl CompactTransactionStream { + /// Returns new instance of RawTransactionStream. + pub fn new(rx: tokio::sync::mpsc::Receiver>) -> Self { + CompactTransactionStream { + inner: ReceiverStream::new(rx), + } + } +} + +impl futures::Stream for CompactTransactionStream { + type Item = Result; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + let poll = std::pin::Pin::new(&mut self.inner).poll_next(cx); + match poll { + std::task::Poll::Ready(Some(Ok(raw_tx))) => std::task::Poll::Ready(Some(Ok(raw_tx))), + std::task::Poll::Ready(Some(Err(e))) => std::task::Poll::Ready(Some(Err(e))), + std::task::Poll::Ready(None) => std::task::Poll::Ready(None), + std::task::Poll::Pending => std::task::Poll::Pending, + } + } +} + +/// Stream of CompactBlocks, output type of get_block_range. +pub struct CompactBlockStream { + inner: ReceiverStream>, +} + +impl CompactBlockStream { + /// Returns new instance of CompactBlockStream. 
+ pub fn new(rx: tokio::sync::mpsc::Receiver>) -> Self { + CompactBlockStream { + inner: ReceiverStream::new(rx), + } + } +} + +impl futures::Stream for CompactBlockStream { + type Item = Result; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + let poll = std::pin::Pin::new(&mut self.inner).poll_next(cx); + match poll { + std::task::Poll::Ready(Some(Ok(raw_tx))) => std::task::Poll::Ready(Some(Ok(raw_tx))), + std::task::Poll::Ready(Some(Err(e))) => std::task::Poll::Ready(Some(Err(e))), + std::task::Poll::Ready(None) => std::task::Poll::Ready(None), + std::task::Poll::Pending => std::task::Poll::Pending, + } + } +} + +/// Stream of CompactBlocks, output type of get_block_range. +pub struct UtxoReplyStream { + inner: ReceiverStream>, +} + +impl UtxoReplyStream { + /// Returns new instance of CompactBlockStream. + pub fn new( + rx: tokio::sync::mpsc::Receiver>, + ) -> Self { + UtxoReplyStream { + inner: ReceiverStream::new(rx), + } + } +} + +impl futures::Stream for UtxoReplyStream { + type Item = Result; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + let poll = std::pin::Pin::new(&mut self.inner).poll_next(cx); + match poll { + std::task::Poll::Ready(Some(Ok(raw_tx))) => std::task::Poll::Ready(Some(Ok(raw_tx))), + std::task::Poll::Ready(Some(Err(e))) => std::task::Poll::Ready(Some(Err(e))), + std::task::Poll::Ready(None) => std::task::Poll::Ready(None), + std::task::Poll::Pending => std::task::Poll::Pending, + } + } +} + +/// Stream of CompactBlocks, output type of get_block_range. +pub struct SubtreeRootReplyStream { + inner: ReceiverStream>, +} + +impl SubtreeRootReplyStream { + /// Returns new instance of CompactBlockStream. 
+ pub fn new(rx: tokio::sync::mpsc::Receiver>) -> Self { + SubtreeRootReplyStream { + inner: ReceiverStream::new(rx), + } + } +} + +impl futures::Stream for SubtreeRootReplyStream { + type Item = Result; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + let poll = std::pin::Pin::new(&mut self.inner).poll_next(cx); + match poll { + std::task::Poll::Ready(Some(Ok(raw_tx))) => std::task::Poll::Ready(Some(Ok(raw_tx))), + std::task::Poll::Ready(Some(Err(e))) => std::task::Poll::Ready(Some(Err(e))), + std::task::Poll::Ready(None) => std::task::Poll::Ready(None), + std::task::Poll::Pending => std::task::Poll::Pending, + } + } +} + +/// Stream of `Address`, input type for `get_taddress_balance_stream`. +pub struct AddressStream { + inner: ReceiverStream>, +} + +impl AddressStream { + /// Creates a new `AddressStream` instance. + pub fn new(rx: tokio::sync::mpsc::Receiver>) -> Self { + AddressStream { + inner: ReceiverStream::new(rx), + } + } +} + +impl futures::Stream for AddressStream { + type Item = Result; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + let poll = std::pin::Pin::new(&mut self.inner).poll_next(cx); + match poll { + std::task::Poll::Ready(Some(Ok(address))) => std::task::Poll::Ready(Some(Ok(address))), + std::task::Poll::Ready(Some(Err(e))) => std::task::Poll::Ready(Some(Err(e))), + std::task::Poll::Ready(None) => std::task::Poll::Ready(None), + std::task::Poll::Pending => std::task::Poll::Pending, + } + } +} diff --git a/zaino-state/src/utils.rs b/zaino-state/src/utils.rs new file mode 100644 index 000000000..70c935f48 --- /dev/null +++ b/zaino-state/src/utils.rs @@ -0,0 +1,115 @@ +//! Contains utility funcitonality for Zaino-State. +use std::fmt; +use zebra_chain::parameters::Network; + +// *** Metadata structs *** + +/// Zaino build info. 
+#[derive(Debug, Clone)] +pub(crate) struct BuildInfo { + /// Git commit hash. + commit_hash: String, + /// Git Branch. + branch: String, + /// Build date. + build_date: String, + /// Build user. + build_user: String, + /// Zingo-Indexer version. + version: String, +} + +#[allow(dead_code)] +impl BuildInfo { + pub(crate) fn commit_hash(&self) -> String { + self.commit_hash.clone() + } + + pub(crate) fn branch(&self) -> String { + self.branch.clone() + } + + pub(crate) fn build_user(&self) -> String { + self.build_user.clone() + } + + pub(crate) fn build_date(&self) -> String { + self.build_date.clone() + } + + pub(crate) fn version(&self) -> String { + self.version.clone() + } +} + +impl fmt::Display for BuildInfo { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "Version: {}", self.version)?; + writeln!(f, "Commit Hash: {}", self.commit_hash)?; + writeln!(f, "Branch: {}", self.branch)?; + writeln!(f, "Build Date: {}", self.build_date)?; + writeln!(f, "Build User: {}", self.build_user) + } +} + +/// Returns build info for Zingo-Indexer. 
+pub(crate) fn get_build_info() -> BuildInfo { + BuildInfo { + commit_hash: env!("GIT_COMMIT").to_string(), + branch: env!("BRANCH").to_string(), + build_date: env!("BUILD_DATE").to_string(), + build_user: env!("BUILD_USER").to_string(), + version: env!("VERSION").to_string(), + } +} + +#[derive(Debug, Clone)] +pub struct ServiceMetadata { + build_info: BuildInfo, + network: Network, + zebra_build: String, + zebra_subversion: String, +} + +impl ServiceMetadata { + pub(crate) fn new( + build_info: BuildInfo, + network: Network, + zebra_build: String, + zebra_subversion: String, + ) -> Self { + Self { + build_info, + network, + zebra_build, + zebra_subversion, + } + } + + pub(crate) fn build_info(&self) -> BuildInfo { + self.build_info.clone() + } + + pub(crate) fn network(&self) -> Network { + self.network.clone() + } + + pub(crate) fn zebra_build(&self) -> String { + self.zebra_build.clone() + } + + pub(crate) fn zebra_subversion(&self) -> String { + self.zebra_subversion.clone() + } +} + +impl fmt::Display for ServiceMetadata { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "Zaino Service Metadata")?; + writeln!(f, "-----------------------")?; + writeln!(f, "Build Info:\n{}", self.build_info)?; + writeln!(f, "Network: {}", self.network)?; + writeln!(f, "Zebra Build: {}", self.zebra_build)?; + writeln!(f, "Zebra Subversion: {}", self.zebra_subversion) + } +} diff --git a/zaino-testutils/Cargo.toml b/zaino-testutils/Cargo.toml index b49b72fe9..8606b9d76 100644 --- a/zaino-testutils/Cargo.toml +++ b/zaino-testutils/Cargo.toml @@ -1,28 +1,81 @@ [package] name = "zaino-testutils" description = "Crate containing Zaino test specific functionality." 
-edition = { workspace = true } authors = { workspace = true } -license = { workspace = true } repository = { workspace = true } +homepage = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +version = { workspace = true } +publish = false [features] -# Used by zcash-local-net: -test_fixtures = [] +# **Experimental and alpha features** +# Exposes the **complete** set of experimental / alpha features currently implemented in Zaino. +experimental_features = ["transparent_address_history_experimental"] + +# Activates transparent address history capability in zaino +# +# NOTE: currently this is only implemented in the finalised state. +transparent_address_history_experimental = [ + "zaino-state/transparent_address_history_experimental", + "zaino-serve/transparent_address_history_experimental", + "zainod/transparent_address_history_experimental" +] [dependencies] -zaino-fetch = { path = "../zaino-fetch" } -zainod = { path = "../zainod" } +# Zaino +zaino-proto = { workspace = true } +zaino-state = { workspace = true, features = ["test_dependencies"] } +zaino-serve.workspace = true +zaino-testvectors.workspace = true +zaino-common.workspace = true +zainod = { workspace = true } + +# Librustzcash +zcash_protocol = { version = "0.7.0", features = [ + "local-consensus", +] } +zcash_client_backend = { workspace = true, features = ["lightwalletd-tonic"] } + +# Zebra +zebra-chain = { workspace = true } +zebra-state.workspace = true + +# Zingo-infra +zcash_local_net = { workspace = true } +zingo_test_vectors = { workspace = true } + +# Zingo-common +zingo_common_components = { workspace = true } +zingo-netutils.workspace = true # ZingoLib -zingolib = { workspace = true } +zingolib = { workspace = true, features = [ "testutils"] } +zingolib_testutils.workspace = true -# Miscellaneous Workspace -tokio = { workspace = true } -tonic = { workspace = true } +# Miscellaneous http = { workspace = true } - -# Miscellaneous Crate -ctrlc = { workspace = 
true } -tempfile = { workspace = true } +once_cell = { workspace = true } portpicker = { workspace = true } +tokio = { workspace = true } +tonic.workspace = true +tempfile = { workspace = true } +tracing.workspace = true +tracing-subscriber = { workspace = true, features = [ + "fmt", + "env-filter", + "time", +] } + +# We don't need this as a direct dependency. +# Rather, we need to pin this version of proptest. +# if we don't, cargo update will update to the semver-compatible +# version 1.7, which depends on rand-0.9.1. +# This causes compiler errors in incrementalmerkletree, due a conflict +# with its rand version of 0.8.5. +# TODO: Investigate whether proptest has violated semver guarentees with +# its rand version update. +proptest = { workspace = true } +lazy_static = { workspace = true } +zip32 = {workspace = true} diff --git a/zaino-testutils/src/lib.rs b/zaino-testutils/src/lib.rs index 4e9f16619..7c35eb1c5 100644 --- a/zaino-testutils/src/lib.rs +++ b/zaino-testutils/src/lib.rs @@ -1,274 +1,1413 @@ -//! Utility functions for Zingo-Indexer Testing. +//! Zaino Testing Utilities. #![warn(missing_docs)] #![forbid(unsafe_code)] -use std::io::Write; - -static CTRL_C_ONCE: std::sync::Once = std::sync::Once::new(); - -/// Configuration data for Zingo-Indexer Tests. -pub struct TestManager { - /// Temporary Directory for zcashd and lightwalletd configuration and regtest data. - pub temp_conf_dir: tempfile::TempDir, - /// Zingolib regtest manager. - pub regtest_manager: zingolib::testutils::regtest::RegtestManager, - /// Zingolib regtest network. - pub regtest_network: zingolib::config::RegtestNetwork, - /// Zingo-Indexer gRPC listen port. - pub indexer_port: u16, - /// Zebrad/Zcashd JsonRpc listen port. - pub zebrad_port: u16, - /// Online status of Zingo-Indexer. 
- pub online: std::sync::Arc, +/// Convenience reexport of zaino_testvectors +pub mod test_vectors { + pub use zaino_testvectors::*; } -impl TestManager { - /// Launches a zingo regtest manager and zingo-indexer, created TempDir for configuration and log files. - pub async fn launch( - online: std::sync::Arc, - ) -> ( - Self, - zingolib::testutils::regtest::ChildProcessHandler, - tokio::task::JoinHandle>, - ) { - let lwd_port = portpicker::pick_unused_port().expect("No ports free"); - let zebrad_port = portpicker::pick_unused_port().expect("No ports free"); - let indexer_port = portpicker::pick_unused_port().expect("No ports free"); - - let temp_conf_dir = create_temp_conf_files(lwd_port, zebrad_port).unwrap(); - let temp_conf_path = temp_conf_dir.path().to_path_buf(); - - set_custom_drops(online.clone(), Some(temp_conf_path.clone())); - - let regtest_network = zingolib::config::RegtestNetwork::new(1, 1, 1, 1, 1, 1); - - let regtest_manager = - zingolib::testutils::regtest::RegtestManager::new(temp_conf_path.clone()); - let regtest_handler = regtest_manager - .launch(true) - .expect("Failed to start regtest services"); - - // NOTE: queue and workerpool sizes may need to be changed here. 
- let indexer_config = zainodlib::config::IndexerConfig { - tcp_active: true, - listen_port: Some(indexer_port), - lightwalletd_port: lwd_port, - zebrad_port, - node_user: Some("xxxxxx".to_string()), - node_password: Some("xxxxxx".to_string()), - max_queue_size: 512, - max_worker_pool_size: 96, - idle_worker_pool_size: 48, +use once_cell::sync::Lazy; +use tonic::transport::Channel; +// use zingo_common_components::protocol::{ActivationHeights, ActivationHeightsBuilder}; +use std::{ + future::Future, + net::{IpAddr, Ipv4Addr, SocketAddr}, + path::PathBuf, +}; +use tracing::info; +use tracing_subscriber::EnvFilter; +use zaino_common::{ + network::{ActivationHeights, ZEBRAD_DEFAULT_ACTIVATION_HEIGHTS}, + probing::{Liveness, Readiness}, + status::Status, + validator::ValidatorConfig, + CacheConfig, DatabaseConfig, Network, ServiceConfig, StorageConfig, +}; +use zaino_serve::server::config::{GrpcServerConfig, JsonRpcServerConfig}; +use zaino_state::{ + BackendType, ChainIndex, LightWalletIndexer, LightWalletService, + NodeBackedChainIndexSubscriber, ZcashIndexer, ZcashService, +}; +use zainodlib::{config::ZainodConfig, error::IndexerError, indexer::Indexer}; +pub use zcash_local_net as services; +use zcash_local_net::validator::zebrad::{Zebrad, ZebradConfig}; +pub use zcash_local_net::validator::Validator; +use zcash_local_net::validator::ValidatorConfig as _; +use zcash_local_net::{ + error::LaunchError, + validator::zcashd::{Zcashd, ZcashdConfig}, +}; +use zcash_local_net::{logs::LogsToStdoutAndStderr, process::Process}; +use zcash_protocol::PoolType; +use zebra_chain::parameters::NetworkKind; +use zingo_netutils::{GetClientError, get_client}; +use zingo_test_vectors::seeds; +pub use zingolib::get_base_address_macro; +pub use zingolib::lightclient::LightClient; +pub use zingolib::testutils::lightclient::from_inputs; +use zingolib_testutils::scenarios::ClientBuilder; + +use zcash_client_backend::proto::service::{ + compact_tx_streamer_client::CompactTxStreamerClient, 
ChainSpec, +}; + +/// Helper to get the test binary path from the TEST_BINARIES_DIR env var. +fn binary_path(binary_name: &str) -> Option { + std::env::var("TEST_BINARIES_DIR") + .ok() + .map(|dir| PathBuf::from(dir).join(binary_name)) +} + +/// Create local URI from port. +pub fn make_uri(indexer_port: portpicker::Port) -> http::Uri { + format!("http://127.0.0.1:{indexer_port}") + .try_into() + .unwrap() +} + +/// Polls until the given component reports ready. +/// +/// Returns `true` if the component became ready within the timeout, +/// `false` if the timeout was reached. +pub async fn poll_until_ready( + component: &impl Readiness, + poll_interval: std::time::Duration, + timeout: std::time::Duration, +) -> bool { + tokio::time::timeout(timeout, async { + let mut interval = tokio::time::interval(poll_interval); + loop { + interval.tick().await; + if component.is_ready() { + return; + } + } + }) + .await + .is_ok() +} + +// temporary until activation heights are unified to zebra-chain type. +// from/into impls not added in zaino-common to avoid unecessary addition of zcash-protocol dep to non-test code +/// Convert zaino activation heights into zcash protocol type. 
+pub fn local_network_from_activation_heights( + activation_heights: ActivationHeights, +) -> zcash_protocol::local_consensus::LocalNetwork { + zcash_protocol::local_consensus::LocalNetwork { + overwinter: activation_heights + .overwinter + .map(zcash_protocol::consensus::BlockHeight::from), + sapling: activation_heights + .sapling + .map(zcash_protocol::consensus::BlockHeight::from), + blossom: activation_heights + .blossom + .map(zcash_protocol::consensus::BlockHeight::from), + heartwood: activation_heights + .heartwood + .map(zcash_protocol::consensus::BlockHeight::from), + canopy: activation_heights + .canopy + .map(zcash_protocol::consensus::BlockHeight::from), + nu5: activation_heights + .nu5 + .map(zcash_protocol::consensus::BlockHeight::from), + nu6: activation_heights + .nu6 + .map(zcash_protocol::consensus::BlockHeight::from), + nu6_1: activation_heights + .nu6_1 + .map(zcash_protocol::consensus::BlockHeight::from), + } +} + +/// Path for zcashd binary. +pub static ZCASHD_BIN: Lazy> = Lazy::new(|| binary_path("zcashd")); + +/// Path for zcash-cli binary. +pub static ZCASH_CLI_BIN: Lazy> = Lazy::new(|| binary_path("zcash-cli")); + +/// Path for zebrad binary. +pub static ZEBRAD_BIN: Lazy> = Lazy::new(|| binary_path("zebrad")); + +/// Path for lightwalletd binary. +pub static LIGHTWALLETD_BIN: Lazy> = Lazy::new(|| binary_path("lightwalletd")); + +/// Path for zainod binary. +pub static ZAINOD_BIN: Lazy> = Lazy::new(|| binary_path("zainod")); + +/// Path for zcashd chain cache. +pub static ZCASHD_CHAIN_CACHE_DIR: Lazy> = Lazy::new(|| { + let mut workspace_root_path = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()); + workspace_root_path.pop(); + Some(workspace_root_path.join("integration-tests/chain_cache/client_rpc_tests")) +}); + +/// Path for zebrad chain cache. 
+pub static ZEBRAD_CHAIN_CACHE_DIR: Lazy> = Lazy::new(|| { + let mut workspace_root_path = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()); + workspace_root_path.pop(); + Some(workspace_root_path.join("integration-tests/chain_cache/client_rpc_tests_large")) +}); + +/// Path for the Zebra chain cache in the user's home directory. +pub static ZEBRAD_TESTNET_CACHE_DIR: Lazy> = Lazy::new(|| { + let home_path = PathBuf::from(std::env::var("HOME").unwrap()); + Some(home_path.join(".cache/zebra")) +}); + +#[derive(PartialEq, Clone, Copy)] +/// Represents the type of validator to launch. +pub enum ValidatorKind { + /// Zcashd. + Zcashd, + /// Zebrad. + Zebrad, +} + +/// Config for validators. +pub enum ValidatorTestConfig { + /// Zcashd Config. + ZcashdConfig(ZcashdConfig), + /// Zebrad Config. + ZebradConfig(zcash_local_net::validator::zebrad::ZebradConfig), +} + +/// Holds zingo lightclients along with the lightclient builder for wallet-2-validator tests. +pub struct Clients { + /// Lightclient builder. + pub client_builder: ClientBuilder, + /// Faucet (zingolib lightclient). + /// + /// Mining rewards are received by this client for use in tests. + pub faucet: zingolib::lightclient::LightClient, + /// Recipient (zingolib lightclient). + pub recipient: zingolib::lightclient::LightClient, +} + +impl Clients { + /// Returns the zcash address of the faucet. + pub async fn get_faucet_address(&self, pool: &str) -> String { + zingolib::get_base_address_macro!(self.faucet, pool) + } + + /// Returns the zcash address of the recipient. + pub async fn get_recipient_address(&self, pool: &str) -> String { + zingolib::get_base_address_macro!(self.recipient, pool) + } +} + +/// Configuration data for Zaino Tests. +pub struct TestManager { + /// Control plane for a validator + pub local_net: C, + /// Data directory for the validator. + pub data_dir: PathBuf, + /// Network (chain) type: + pub network: NetworkKind, + /// Zebrad/Zcashd JsonRpc listen address. 
+ pub full_node_rpc_listen_address: SocketAddr, + /// Zebrad/Zcashd gRpc listen address. + pub full_node_grpc_listen_address: SocketAddr, + /// Zaino Indexer JoinHandle. + pub zaino_handle: Option>>, + /// Zaino JsonRPC listen address. + pub zaino_json_rpc_listen_address: Option, + /// Zaino gRPC listen address. + pub zaino_grpc_listen_address: Option, + /// Service subscriber. + pub service_subscriber: Option, + /// JsonRPC server cookie dir. + pub json_server_cookie_dir: Option, + /// Zingolib lightclients. + pub clients: Option, +} + +/// Needed validator functionality that is not implemented in infrastructure +/// +/// TODO: Either move to Validator zcash_client_backend trait or document +/// why it should not be moved. +pub trait ValidatorExt: Validator + LogsToStdoutAndStderr { + /// Launch the validator, and return a validator config containing the + /// ports used by the validator, etc + fn launch_validator_and_return_config( + config: Self::Config, + ) -> impl Future> + Send + Sync; +} + +impl ValidatorExt for Zebrad { + async fn launch_validator_and_return_config( + config: ZebradConfig, + ) -> Result<(Self, ValidatorConfig), LaunchError> { + let zebrad = Zebrad::launch(config).await?; + let validator_config = ValidatorConfig { + validator_jsonrpc_listen_address: format!( + "{}:{}", + Ipv4Addr::LOCALHOST, + zebrad.rpc_listen_port() + ), + validator_grpc_listen_address: Some(format!( + "{}:{}", + Ipv4Addr::LOCALHOST, + zebrad.indexer_listen_port() + )), + validator_cookie_path: None, + validator_user: Some("xxxxxx".to_string()), + validator_password: Some("xxxxxx".to_string()), }; - let indexer_handler = - zainodlib::indexer::Indexer::start_indexer_service(indexer_config, online.clone()) - .await - .unwrap(); - // NOTE: This is required to give the server time to launch, this is not used in production code but could be rewritten to improve testing efficiency. 
- tokio::time::sleep(tokio::time::Duration::from_secs(3)).await; - ( - TestManager { - temp_conf_dir, - regtest_manager, - regtest_network, - indexer_port, - zebrad_port, - online, - }, - regtest_handler, - indexer_handler, - ) + Ok((zebrad, validator_config)) + } +} + +impl ValidatorExt for Zcashd { + async fn launch_validator_and_return_config( + config: Self::Config, + ) -> Result<(Self, ValidatorConfig), LaunchError> { + let zcashd = Zcashd::launch(config).await?; + let validator_config = ValidatorConfig { + validator_jsonrpc_listen_address: format!("{}:{}", Ipv4Addr::LOCALHOST, zcashd.port()), + validator_grpc_listen_address: None, + validator_cookie_path: None, + validator_user: Some("xxxxxx".to_string()), + validator_password: Some("xxxxxx".to_string()), + }; + Ok((zcashd, validator_config)) } +} - /// Returns zingo-indexer listen address. - pub fn get_indexer_uri(&self) -> http::Uri { +impl TestManager +where + C: ValidatorExt, + Service: LightWalletService + Send + Sync + 'static, + Service::Config: TryFrom, + IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, +{ + + pub(crate)fn grpc_socket_to_uri(&self) -> http::Uri { http::Uri::builder() .scheme("http") - .authority(format!("127.0.0.1:{0}", self.indexer_port)) - .path_and_query("") + .authority(self.zaino_grpc_listen_address + .expect("grpc_listen_address should be set") + .to_string() + ) + .path_and_query("/") .build() .unwrap() } - /// Returns zebrad listen address. - pub async fn test_and_return_zebrad_uri(&self) -> http::Uri { - zaino_fetch::jsonrpc::connector::test_node_and_return_uri( - &self.zebrad_port, - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await - .unwrap() - } + /// Launches zcash-local-net. + /// + /// Possible validators: Zcashd, Zebrad. + /// + /// If chain_cache is given a path the chain will be loaded. + /// + /// If clients is set to active zingolib lightclients will be created for test use. 
+ /// + /// TODO: Add TestManagerConfig struct and constructor methods of common test setups. + /// + /// TODO: Remove validator argument in favour of adding C::VALIDATOR associated const + pub async fn launch( + validator: &ValidatorKind, + network: Option, + activation_heights: Option, + chain_cache: Option, + enable_zaino: bool, + enable_zaino_jsonrpc_server: bool, + enable_clients: bool, + ) -> Result { + if (validator == &ValidatorKind::Zcashd) && (Service::BACKEND_TYPE == BackendType::State) { + return Err(std::io::Error::other( + "Cannot use state backend with zcashd.", + )); + } + let _ = tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")), + ) + .with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339()) + .with_target(true) + .try_init(); - /// Builds aand returns Zingolib lightclient. - pub async fn build_lightclient(&self) -> zingolib::lightclient::LightClient { - let mut client_builder = zingolib::testutils::scenarios::setup::ClientBuilder::new( - self.get_indexer_uri(), - self.temp_conf_dir.path().to_path_buf(), + let activation_heights = activation_heights.unwrap_or_else(|| match validator { + ValidatorKind::Zcashd => ActivationHeights::default(), + ValidatorKind::Zebrad => ZEBRAD_DEFAULT_ACTIVATION_HEIGHTS, + }); + let network_kind = network.unwrap_or(NetworkKind::Regtest); + let zaino_network_kind = + Network::from_network_kind_and_activation_heights(&network_kind, &activation_heights); + + if enable_clients && !enable_zaino { + return Err(std::io::Error::other( + "Cannot enable clients when zaino is not enabled.", + )); + } + + // Launch LocalNet: + + let mut config = C::Config::default(); + config.set_test_parameters( + if validator == &ValidatorKind::Zebrad { + PoolType::Transparent + } else { + PoolType::ORCHARD + }, + activation_heights.into(), + chain_cache.clone(), ); - client_builder - .build_faucet(false, self.regtest_network) + + let (local_net, 
validator_settings) = C::launch_validator_and_return_config(config) .await - } -} + .expect("to launch a default validator"); + let rpc_listen_port = local_net.get_port(); + let full_node_rpc_listen_address = + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), rpc_listen_port); -/// Closes test manager child processes, optionally cleans configuration and log files for test. -pub async fn drop_test_manager( - temp_conf_path: Option, - child_process_handler: zingolib::testutils::regtest::ChildProcessHandler, - online: std::sync::Arc, -) { - online.store(false, std::sync::atomic::Ordering::SeqCst); - drop(child_process_handler); - - let mut temp_wallet_path = temp_conf_path.clone().unwrap(); - if let Some(dir_name) = temp_wallet_path.file_name().and_then(|n| n.to_str()) { - let new_dir_name = format!("{}_client_1", dir_name); - temp_wallet_path.set_file_name(new_dir_name); // Update the directory name - } + let data_dir = local_net.data_dir().path().to_path_buf(); + let zaino_db_path = data_dir.join("zaino"); - if let Some(ref path) = temp_conf_path { - if let Err(e) = std::fs::remove_dir_all(path) { - eprintln!( - "Failed to delete temporary regtest configuration directory: {:?}.", - e + let zebra_db_path = match chain_cache { + Some(cache) => cache, + None => data_dir.clone(), + }; + + // Launch Zaino: + let ( + zaino_handle, + zaino_service_subscriber, + zaino_grpc_listen_address, + zaino_json_listen_address, + zaino_json_server_cookie_dir, + ) = if enable_zaino { + let zaino_grpc_listen_port = portpicker::pick_unused_port().expect("No ports free"); + let zaino_grpc_listen_address = + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), zaino_grpc_listen_port); + let zaino_json_listen_port = portpicker::pick_unused_port().expect("No ports free"); + let zaino_json_listen_address = + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), zaino_json_listen_port); + info!("{:?}", validator_settings.validator_grpc_listen_address); + let indexer_config = 
zainodlib::config::ZainodConfig { + // TODO: Make configurable. + backend: Service::BACKEND_TYPE, + json_server_settings: if enable_zaino_jsonrpc_server { + Some(JsonRpcServerConfig { + json_rpc_listen_address: zaino_json_listen_address, + cookie_dir: None, + }) + } else { + None + }, + grpc_settings: GrpcServerConfig { + listen_address: zaino_grpc_listen_address, + tls: None, + }, + validator_settings: dbg!(validator_settings.clone()), + service: ServiceConfig::default(), + storage: StorageConfig { + cache: CacheConfig::default(), + database: DatabaseConfig { + path: zaino_db_path, + ..Default::default() + }, + }, + zebra_db_path, + network: zaino_network_kind, + }; + + let (handle, service_subscriber) = Indexer::::launch_inner( + Service::Config::try_from(indexer_config.clone()) + .expect("Failed to convert ZainodConfig to service config"), + indexer_config, + ) + .await + .unwrap(); + + ( + Some(handle), + Some(service_subscriber), + Some(zaino_grpc_listen_address), + Some(zaino_json_listen_address), + None, + ) + } else { + (None, None, None, None, None) + }; + // Launch Zingolib Lightclients: + let clients = if enable_clients { + let mut client_builder = ClientBuilder::new( + make_uri( + zaino_grpc_listen_address + .expect("Error launching zingo lightclients. 
`enable_zaino` is None.") + .port(), + ), + tempfile::tempdir().unwrap(), + ); + + + let faucet = client_builder.build_faucet(true, activation_heights.into()); + let recipient = client_builder.build_client( + seeds::HOSPITAL_MUSEUM_SEED.to_string(), + 1, + true, + activation_heights.into(), ); + Some(Clients { + client_builder, + faucet, + recipient, + }) + } else { + None + }; + let test_manager = Self { + local_net, + data_dir, + network: network_kind, + full_node_rpc_listen_address, + full_node_grpc_listen_address: validator_settings + .validator_grpc_listen_address + .as_ref() + .and_then(|addr| addr.parse().ok()) + .unwrap_or(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0)), + zaino_handle, + zaino_json_rpc_listen_address: zaino_json_listen_address, + zaino_grpc_listen_address, + service_subscriber: zaino_service_subscriber, + json_server_cookie_dir: zaino_json_server_cookie_dir, + clients, + }; + + // Generate an extra block to turn on NU5 and NU6, + // as they currently must be turned on at block height = 2. 
+ // NOTE: if this is removed when zebra fixes this issue we must replace with a generate_block_and_poll(0) when + // zaino is enabled to ensure its ready and not still syncing + if enable_zaino { + test_manager.generate_blocks_and_poll(1).await; + } else { + test_manager.local_net.generate_blocks(1).await.unwrap(); } - } - if let Some(ref path) = Some(temp_wallet_path) { - if let Err(e) = std::fs::remove_dir_all(path) { - eprintln!("Failed to delete temporary directory: {:?}.", e); + + // Wait for zaino to be ready to serve requests + if let Some(ref subscriber) = test_manager.service_subscriber { + poll_until_ready( + subscriber, + std::time::Duration::from_millis(100), + std::time::Duration::from_secs(30), + ) + .await; } + + Ok(test_manager) } -} -fn set_custom_drops( - online: std::sync::Arc, - temp_conf_path: Option, -) { - let online_panic = online.clone(); - let online_ctrlc = online.clone(); - let temp_conf_path_panic = temp_conf_path.clone(); - let temp_conf_path_ctrlc = temp_conf_path.clone(); - - let mut temp_wallet_path = temp_conf_path.unwrap(); - if let Some(dir_name) = temp_wallet_path.file_name().and_then(|n| n.to_str()) { - let new_dir_name = format!("{}_client_1", dir_name); - temp_wallet_path.set_file_name(new_dir_name); // Update the directory name + /// Generate `n` blocks for the local network and poll zaino via gRPC until the chain index is synced to the target height. 
+ pub async fn generate_blocks_and_poll(&self, n: u32) { + let mut grpc_client = build_client(self.grpc_socket_to_uri()) + .await + .unwrap(); + let chain_height = self.local_net.get_chain_height().await; + let mut next_block_height = u64::from(chain_height) + 1; + let mut interval = tokio::time::interval(std::time::Duration::from_millis(200)); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + interval.tick().await; + while grpc_client + .get_latest_block(tonic::Request::new(ChainSpec {})) + .await + .unwrap() + .into_inner() + .height + < u64::from(chain_height) + n as u64 + { + if n == 0 { + interval.tick().await; + } else { + self.local_net.generate_blocks(1).await.unwrap(); + while grpc_client + .get_latest_block(tonic::Request::new(ChainSpec {})) + .await + .unwrap() + .into_inner() + .height + != next_block_height + { + interval.tick().await; + } + next_block_height += 1; + } + } } - let temp_wallet_path_panic = Some(temp_wallet_path.clone()); - let temp_wallet_path_ctrlc = Some(temp_wallet_path.clone()); - - let default_panic_hook = std::panic::take_hook(); - - std::panic::set_hook(Box::new(move |panic_info| { - default_panic_hook(panic_info); - online_panic.store(false, std::sync::atomic::Ordering::SeqCst); - if let Some(ref path) = temp_conf_path_panic { - if let Err(e) = std::fs::remove_dir_all(path) { - eprintln!( - "Failed to delete temporary regtest config directory: {:?}.", - e + + /// Generate `n` blocks for the local network and poll zaino's fetch/state subscriber until the chain index is synced to the target height. + /// + /// # Panics + /// + /// Panics if the indexer is not live (Offline or CriticalError), indicating the + /// backing validator has crashed or become unreachable. 
+ pub async fn generate_blocks_and_poll_indexer(&self, n: u32, indexer: &I) + where + I: LightWalletIndexer + Liveness + Status, + { + let chain_height = self.local_net.get_chain_height().await; + let target_height = u64::from(chain_height) + n as u64; + let mut next_block_height = u64::from(chain_height) + 1; + let mut interval = tokio::time::interval(std::time::Duration::from_millis(200)); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + interval.tick().await; + + // NOTE: readstate service seems to not be functioning correctly when generate multiple blocks at once and polling the latest block. + // commented out a fall back to `get_block` to query the cache directly if needed in the future. + // while indexer.get_block(zaino_proto::proto::service::BlockId { + // height: u64::from(chain_height) + n as u64, + // hash: vec![], + // }).await.is_err() + while indexer.get_latest_block().await.unwrap().height < target_height { + // Check liveness - fail fast if the indexer is dead + if !indexer.is_live() { + let status = indexer.status(); + panic!( + "Indexer is not live (status: {status:?}). \ + The backing validator may have crashed or become unreachable." ); } + + if n == 0 { + interval.tick().await; + } else { + self.local_net.generate_blocks(1).await.unwrap(); + while indexer.get_latest_block().await.unwrap().height != next_block_height { + if !indexer.is_live() { + let status = indexer.status(); + panic!( + "Indexer is not live while waiting for block {next_block_height} (status: {status:?})." 
+ ); + } + interval.tick().await; + } + next_block_height += 1; + } } - if let Some(ref path) = temp_wallet_path_panic { - if let Err(e) = std::fs::remove_dir_all(path) { - eprintln!("Failed to delete temporary wallet directory: {:?}.", e); + + // After height is reached, wait for readiness and measure if it adds time + if !indexer.is_ready() { + let start = std::time::Instant::now(); + poll_until_ready( + indexer, + std::time::Duration::from_millis(50), + std::time::Duration::from_secs(30), + ) + .await; + let elapsed = start.elapsed(); + if elapsed.as_millis() > 0 { + info!( + "Readiness wait after height poll took {:?} (height polling alone was insufficient)", + elapsed + ); } } - // Ensures tests fail on secondary thread panics. - #[allow(clippy::assertions_on_constants)] + } + + /// Generate `n` blocks for the local network and poll zaino's chain index until the chain index is synced to the target height. + /// + /// # Panics + /// + /// Panics if the chain index is not live (Offline or CriticalError), indicating the + /// backing validator has crashed or become unreachable. 
+ pub async fn generate_blocks_and_poll_chain_index( + &self, + n: u32, + chain_index: &NodeBackedChainIndexSubscriber, + ) { + let chain_height = self.local_net.get_chain_height().await; + let mut next_block_height = chain_height + 1; + let mut interval = tokio::time::interval(std::time::Duration::from_millis(200)); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + interval.tick().await; + while u32::from(chain_index.snapshot_nonfinalized_state().best_tip.height) + < chain_height + n { - assert!(false); - } - std::process::exit(0); - })); - - CTRL_C_ONCE.call_once(|| { - ctrlc::set_handler(move || { - println!("Received Ctrl+C, exiting."); - online_ctrlc.store(false, std::sync::atomic::Ordering::SeqCst); - if let Some(ref path) = temp_conf_path_ctrlc { - if let Err(e) = std::fs::remove_dir_all(path) { - eprintln!( - "Failed to delete temporary regtest config directory: {:?}.", - e - ); - } + // Check liveness - fail fast if the chain index is dead + if !chain_index.is_live() { + let status = chain_index.combined_status(); + panic!( + "Chain index is not live (status: {status:?}). \ + The backing validator may have crashed or become unreachable." + ); } - if let Some(ref path) = temp_wallet_path_ctrlc { - if let Err(e) = std::fs::remove_dir_all(path) { - eprintln!("Failed to delete temporary wallet directory: {:?}.", e); + + if n == 0 { + interval.tick().await; + } else { + self.local_net.generate_blocks(1).await.unwrap(); + while u32::from(chain_index.snapshot_nonfinalized_state().best_tip.height) + != next_block_height + { + if !chain_index.is_live() { + let status = chain_index.combined_status(); + panic!( + "Chain index is not live while waiting for block {next_block_height} (status: {status:?})." + ); + } + interval.tick().await; } + next_block_height += 1; } - // Ensures tests fail on ctrlc exit. 
- #[allow(clippy::assertions_on_constants)] - { - assert!(false); - } - std::process::exit(0); - }) - .expect("Error setting Ctrl-C handler"); - }) -} + } -fn write_lightwalletd_yml(dir: &std::path::Path, bind_addr_port: u16) -> std::io::Result<()> { - let file_path = dir.join("lightwalletd.yml"); - let mut file = std::fs::File::create(file_path)?; - writeln!(file, "grpc-bind-addr: 127.0.0.1:{}", bind_addr_port)?; - writeln!(file, "cache-size: 10")?; - writeln!(file, "log-level: 10")?; - writeln!(file, "zcash-conf-path: ../conf/zcash.conf")?; + // After height is reached, wait for readiness and measure if it adds time + if !chain_index.is_ready() { + let start = std::time::Instant::now(); + poll_until_ready( + chain_index, + std::time::Duration::from_millis(50), + std::time::Duration::from_secs(30), + ) + .await; + let elapsed = start.elapsed(); + if elapsed.as_millis() > 0 { + info!( + "Readiness wait after height poll took {:?} (height polling alone was insufficient)", + elapsed + ); + } + } + } - Ok(()) + /// Closes the TestManager. 
+ pub async fn close(&mut self) { + if let Some(handle) = self.zaino_handle.take() { + handle.abort(); + } + } } -fn write_zcash_conf(dir: &std::path::Path, rpcport: u16) -> std::io::Result<()> { - let file_path = dir.join("zcash.conf"); - let mut file = std::fs::File::create(file_path)?; - writeln!(file, "regtest=1")?; - writeln!(file, "nuparams=5ba81b19:1 # Overwinter")?; - writeln!(file, "nuparams=76b809bb:1 # Sapling")?; - writeln!(file, "nuparams=2bb40e60:1 # Blossom")?; - writeln!(file, "nuparams=f5b9230b:1 # Heartwood")?; - writeln!(file, "nuparams=e9ff75a6:1 # Canopy")?; - writeln!(file, "nuparams=c2d6d0b4:1 # NU5")?; - writeln!(file, "txindex=1")?; - writeln!(file, "insightexplorer=1")?; - writeln!(file, "experimentalfeatures=1")?; - writeln!(file, "rpcuser=xxxxxx")?; - writeln!(file, "rpcpassword=xxxxxx")?; - writeln!(file, "rpcport={}", rpcport)?; - writeln!(file, "rpcallowip=127.0.0.1")?; - writeln!(file, "listen=0")?; - writeln!(file, "minetolocalwallet=0")?; - // writeln!(file, "mineraddress=zregtestsapling1fmq2ufux3gm0v8qf7x585wj56le4wjfsqsj27zprjghntrerntggg507hxh2ydcdkn7sx8kya7p")?; // USE FOR SAPLING. 
- writeln!(file, "mineraddress=uregtest1zkuzfv5m3yhv2j4fmvq5rjurkxenxyq8r7h4daun2zkznrjaa8ra8asgdm8wwgwjvlwwrxx7347r8w0ee6dqyw4rufw4wg9djwcr6frzkezmdw6dud3wsm99eany5r8wgsctlxquu009nzd6hsme2tcsk0v3sgjvxa70er7h27z5epr67p5q767s2z5gt88paru56mxpm6pwz0cu35m")?; - - Ok(()) +impl Drop + for TestManager +{ + fn drop(&mut self) { + if let Some(handle) = &self.zaino_handle { + handle.abort(); + }; + } } -fn create_temp_conf_files(lwd_port: u16, rpcport: u16) -> std::io::Result { - let temp_dir = tempfile::Builder::new() - .prefix("zingoindexertest") - .tempdir()?; - let conf_dir = temp_dir.path().join("conf"); - std::fs::create_dir(&conf_dir)?; - write_lightwalletd_yml(&conf_dir, lwd_port)?; - write_zcash_conf(&conf_dir, rpcport)?; - Ok(temp_dir) +/// Builds a client for creating RPC requests to the indexer/light-node +async fn build_client( + uri: http::Uri, +) -> Result, GetClientError> { + get_client(uri).await } -/// Contains zingolib::lightclient functionality used for zaino testing. -pub mod zingo_lightclient { - /// Returns the zcash address of the Zingolib::lightclient. 
- pub async fn get_address( - zingo_client: &zingolib::lightclient::LightClient, - pool: &str, - ) -> String { - zingolib::get_base_address_macro!(zingo_client, pool) +#[cfg(test)] +mod launch_testmanager { + use super::*; + #[allow(deprecated)] + use zaino_state::FetchService; + + mod zcashd { + use zcash_local_net::validator::zcashd::Zcashd; + + use super::*; + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn basic() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zcashd, + None, + None, + None, + false, + false, + false, + ) + .await + .unwrap(); + assert_eq!(2, (test_manager.local_net.get_chain_height().await)); + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn generate_blocks() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zcashd, + None, + None, + None, + false, + false, + false, + ) + .await + .unwrap(); + assert_eq!(2, (test_manager.local_net.get_chain_height().await)); + test_manager.local_net.generate_blocks(1).await.unwrap(); + assert_eq!(3, (test_manager.local_net.get_chain_height().await)); + test_manager.close().await; + } + + #[ignore = "chain cache needs development"] + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn with_chain() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zcashd, + None, + None, + ZCASHD_CHAIN_CACHE_DIR.clone(), + false, + false, + false, + ) + .await + .unwrap(); + assert_eq!(10, (test_manager.local_net.get_chain_height().await)); + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn zaino() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zcashd, + None, + None, + None, + true, + false, + false, + ) + .await + .unwrap(); + + + + let _grpc_client = build_client(test_manager.grpc_socket_to_uri()) + .await + .unwrap(); + 
test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn zaino_clients() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zcashd, + None, + None, + None, + true, + false, + true, + ) + .await + .unwrap(); + let clients = test_manager + .clients + .as_ref() + .expect("Clients are not initialized"); + dbg!(clients.faucet.do_info().await); + dbg!(clients.recipient.do_info().await); + test_manager.close().await; + } + + /// This test shows nothing about zebrad. + /// This is not the case with Zcashd and should not be the case here. + /// Even if rewards need 100 confirmations these blocks should not have to be mined at the same time. + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn zaino_clients_receive_mining_reward() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zcashd, + None, + None, + None, + true, + false, + true, + ) + .await + .unwrap(); + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + dbg!(clients + .faucet + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap()); + + assert!( + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().total_orchard_balance.unwrap().into_u64() > 0 + || clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().confirmed_transparent_balance.unwrap().into_u64() > 0, + "No mining reward received from Zcashd. Faucet Orchard Balance: {:}. 
Faucet Transparent Balance: {:}.", + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().total_orchard_balance.unwrap().into_u64(), + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().confirmed_transparent_balance.unwrap().into_u64() + ); + + test_manager.close().await; + } + } + + mod zebrad { + use super::*; + + mod fetch_service { + + use zcash_local_net::validator::zebrad::Zebrad; + use zip32::AccountId; + + use super::*; + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn basic() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + None, + false, + false, + false, + ) + .await + .unwrap(); + assert_eq!(2, (test_manager.local_net.get_chain_height().await)); + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn generate_blocks() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + None, + false, + false, + false, + ) + .await + .unwrap(); + assert_eq!(2, (test_manager.local_net.get_chain_height().await)); + test_manager.local_net.generate_blocks(1).await.unwrap(); + assert_eq!(3, (test_manager.local_net.get_chain_height().await)); + test_manager.close().await; + } + + #[ignore = "chain cache needs development"] + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn with_chain() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + ZEBRAD_CHAIN_CACHE_DIR.clone(), + false, + false, + false, + ) + .await + .unwrap(); + assert_eq!(52, (test_manager.local_net.get_chain_height().await)); + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn zaino() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + None, + true, + false, + false, + ) + .await + .unwrap(); + let 
_grpc_client = build_client(test_manager.grpc_socket_to_uri()) + .await + .unwrap(); + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn zaino_clients() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + None, + true, + false, + true, + ) + .await + .unwrap(); + let clients = test_manager + .clients + .as_ref() + .expect("Clients are not initialized"); + dbg!(clients.faucet.do_info().await); + dbg!(clients.recipient.do_info().await); + test_manager.close().await; + } + + /// This test shows currently we do not receive mining rewards from Zebra unless we mine 100 blocks at a time. + /// This is not the case with Zcashd and should not be the case here. + /// Even if rewards need 100 confirmations these blocks should not have to be mined at the same time. + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn zaino_clients_receive_mining_reward() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + None, + true, + false, + true, + ) + .await + .unwrap(); + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + dbg!(clients + .faucet + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap()); + + test_manager.generate_blocks_and_poll(100).await; + clients.faucet.sync_and_await().await.unwrap(); + dbg!(clients + .faucet + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap()); + + assert!( + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().total_orchard_balance.unwrap().into_u64() > 0 + || clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().confirmed_transparent_balance.unwrap().into_u64() > 0, + "No mining reward received from Zebrad. Faucet Orchard Balance: {:}. 
Faucet Transparent Balance: {:}.", + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().total_orchard_balance.unwrap().into_u64(), + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().confirmed_transparent_balance.unwrap().into_u64() + ); + + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn zaino_clients_receive_mining_reward_and_send() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + None, + true, + false, + true, + ) + .await + .unwrap(); + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + test_manager.generate_blocks_and_poll(100).await; + clients.faucet.sync_and_await().await.unwrap(); + dbg!(clients + .faucet + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap()); + + assert!( + clients + .faucet + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .confirmed_transparent_balance + .unwrap() + .into_u64() + > 0, + "No mining reward received from Zebrad. Faucet Transparent Balance: {:}.", + clients + .faucet + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .confirmed_transparent_balance + .unwrap() + .into_u64() + ); + + // *Send all transparent funds to own orchard address. + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager.generate_blocks_and_poll(1).await; + clients.faucet.sync_and_await().await.unwrap(); + dbg!(clients + .faucet + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap()); + + assert!( + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().total_orchard_balance.unwrap().into_u64() > 0, + "No funds received from shield. Faucet Orchard Balance: {:}. 
Faucet Transparent Balance: {:}.", + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().total_orchard_balance.unwrap().into_u64(), + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().confirmed_transparent_balance.unwrap().into_u64() + ); + + let recipient_zaddr = clients.get_recipient_address("sapling").await.to_string(); + zingolib::testutils::lightclient::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_zaddr, 250_000, None)], + ) + .await + .unwrap(); + + test_manager.generate_blocks_and_poll(1).await; + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + clients.recipient.sync_and_await().await.unwrap(); + dbg!(clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap()); + + assert_eq!( + clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .confirmed_sapling_balance + .unwrap() + .into_u64(), + 250_000 + ); + + test_manager.close().await; + } + + #[ignore = "requires fully synced testnet."] + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn zaino_testnet() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + Some(NetworkKind::Testnet), + None, + ZEBRAD_TESTNET_CACHE_DIR.clone(), + true, + false, + true, + ) + .await + .unwrap(); + let clients = test_manager + .clients + .as_ref() + .expect("Clients are not initialized"); + dbg!(clients.faucet.do_info().await); + dbg!(clients.recipient.do_info().await); + test_manager.close().await; + } + } + + mod state_service { + use super::*; + #[allow(deprecated)] + use zaino_state::StateService; + use zip32::AccountId; + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn basic() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + None, + false, + false, + false, + ) + .await + .unwrap(); + assert_eq!(2, (test_manager.local_net.get_chain_height().await)); + 
test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn generate_blocks() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + None, + false, + false, + false, + ) + .await + .unwrap(); + assert_eq!(2, (test_manager.local_net.get_chain_height().await)); + test_manager.local_net.generate_blocks(1).await.unwrap(); + assert_eq!(3, (test_manager.local_net.get_chain_height().await)); + test_manager.close().await; + } + + #[ignore = "chain cache needs development"] + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn with_chain() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + ZEBRAD_CHAIN_CACHE_DIR.clone(), + false, + false, + false, + ) + .await + .unwrap(); + assert_eq!(52, (test_manager.local_net.get_chain_height().await)); + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn zaino() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + None, + true, + false, + false, + ) + .await + .unwrap(); + let _grpc_client = build_client(test_manager.grpc_socket_to_uri()) + .await + .unwrap(); + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn zaino_clients() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + None, + true, + false, + true, + ) + .await + .unwrap(); + let clients = test_manager + .clients + .as_ref() + .expect("Clients are not initialized"); + dbg!(clients.faucet.do_info().await); + dbg!(clients.recipient.do_info().await); + test_manager.close().await; + } + + /// This test shows currently we do not receive mining rewards from Zebra unless we mine 100 blocks at a time. + /// This is not the case with Zcashd and should not be the case here. 
+ /// Even if rewards need 100 confirmations these blocks should not have to be mined at the same time. + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn zaino_clients_receive_mining_reward() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + None, + true, + false, + true, + ) + .await + .unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + dbg!(clients + .faucet + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap()); + + test_manager.generate_blocks_and_poll(100).await; + clients.faucet.sync_and_await().await.unwrap(); + dbg!(clients + .faucet + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap()); + + assert!( + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().total_orchard_balance.unwrap().into_u64() > 0 + || clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().confirmed_transparent_balance.unwrap().into_u64() > 0, + "No mining reward received from Zebrad. Faucet Orchard Balance: {:}. 
Faucet Transparent Balance: {:}.", + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().total_orchard_balance.unwrap().into_u64(), + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().confirmed_transparent_balance.unwrap().into_u64() + ); + + test_manager.close().await; + } + + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn zaino_clients_receive_mining_reward_and_send() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + None, + None, + None, + true, + false, + true, + ) + .await + .unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + test_manager.generate_blocks_and_poll(100).await; + clients.faucet.sync_and_await().await.unwrap(); + dbg!(clients + .faucet + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap()); + + assert!( + clients + .faucet + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .confirmed_transparent_balance + .unwrap() + .into_u64() + > 0, + "No mining reward received from Zebrad. Faucet Transparent Balance: {:}.", + clients + .faucet + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .confirmed_transparent_balance + .unwrap() + .into_u64() + ); + + // *Send all transparent funds to own orchard address. + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager.generate_blocks_and_poll(1).await; + clients.faucet.sync_and_await().await.unwrap(); + dbg!(clients + .faucet + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap()); + + assert!( + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().total_orchard_balance.unwrap().into_u64() > 0, + "No funds received from shield. Faucet Orchard Balance: {:}. 
Faucet Transparent Balance: {:}.", + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().total_orchard_balance.unwrap().into_u64(), + clients.faucet.account_balance(zip32::AccountId::ZERO).await.unwrap().confirmed_transparent_balance.unwrap().into_u64() + ); + + let recipient_zaddr = clients.get_recipient_address("sapling").await.to_string(); + zingolib::testutils::lightclient::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_zaddr, 250_000, None)], + ) + .await + .unwrap(); + + test_manager.generate_blocks_and_poll(1).await; + clients.recipient.sync_and_await().await.unwrap(); + dbg!(clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap()); + + assert_eq!( + clients + .recipient + .account_balance(zip32::AccountId::ZERO) + .await + .unwrap() + .confirmed_sapling_balance + .unwrap() + .into_u64(), + 250_000 + ); + + test_manager.close().await; + } + + #[ignore = "requires fully synced testnet."] + #[tokio::test(flavor = "multi_thread")] + #[allow(deprecated)] + pub(crate) async fn zaino_testnet() { + let mut test_manager = TestManager::::launch( + &ValidatorKind::Zebrad, + Some(NetworkKind::Testnet), + None, + ZEBRAD_TESTNET_CACHE_DIR.clone(), + true, + false, + true, + ) + .await + .unwrap(); + let clients = test_manager + .clients + .as_ref() + .expect("Clients are not initialized"); + dbg!(clients.faucet.do_info().await); + dbg!(clients.recipient.do_info().await); + test_manager.close().await; + } + } } } diff --git a/zaino-testvectors/Cargo.toml b/zaino-testvectors/Cargo.toml new file mode 100644 index 000000000..d17095878 --- /dev/null +++ b/zaino-testvectors/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "zaino-testvectors" +description = "Crate containing Zaino test vectors." 
+authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +version = { workspace = true } + +[dependencies] +lazy_static = { workspace = true } + diff --git a/zaino-testvectors/src/lib.rs b/zaino-testvectors/src/lib.rs new file mode 100644 index 000000000..a4fd8d72c --- /dev/null +++ b/zaino-testvectors/src/lib.rs @@ -0,0 +1,6 @@ +//! Test vectors module for zaino testing. +//! +//! Contains organized test vector collections for different types of testing. + +/// Transaction test vectors from Zcash test data +pub mod transactions; diff --git a/zaino-testvectors/src/transactions.rs b/zaino-testvectors/src/transactions.rs new file mode 100644 index 000000000..5b432ba99 --- /dev/null +++ b/zaino-testvectors/src/transactions.rs @@ -0,0 +1,3667 @@ +use lazy_static::lazy_static; + +/// Test vector data for transaction parsing tests +#[derive(Debug, Clone)] +#[allow(missing_docs)] +pub struct TestVector { + pub description: &'static str, + pub version: u32, + pub lock_time: u32, + pub expiry_height: u32, + pub txid: [u8; 32], + pub is_coinbase: u8, + pub has_sapling: u8, + pub has_orchard: u8, + pub transparent_inputs: usize, + pub transparent_outputs: usize, + pub tx: Vec, +} + +// From https://github.com/zingolabs/zcash-test-vectors/plain_transactions/plain_transactions.py +lazy_static! 
{ + #[allow(missing_docs)] + pub static ref TEST_VECTORS: Vec = vec![ + TestVector { + description: "Sprout transaction v1 #1", + version: 1, + lock_time: 2262637640, + expiry_height: 0, + txid: [ + 0x5f, 0x0a, 0x67, 0xc4, 0x44, 0x39, 0x90, 0x61, 0x7d, 0x7d, 0x86, 0x1a, 0x39, 0x56, + 0x24, 0xfa, 0x06, 0x05, 0x5d, 0xba, 0xa1, 0x49, 0xe5, 0x72, 0x7c, 0xc1, 0x5e, 0xef, + 0x54, 0x0f, 0x37, 0xb4 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 0, + transparent_outputs: 2, + tx: vec![ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0xe7, 0x71, 0x98, 0x11, 0x89, 0x3e, 0x00, 0x00, + 0x09, 0x52, 0x00, 0xac, 0x65, 0x51, 0xac, 0x63, 0x65, 0x65, 0xb2, 0x83, 0x5a, 0x08, + 0x05, 0x75, 0x02, 0x00, 0x02, 0x51, 0x51, 0x48, 0x1c, 0xdd, 0x86 + ], + }, + TestVector { + description: "Sprout transaction v1 #2", + version: 1, + lock_time: 534781804, + expiry_height: 0, + txid: [ + 0x18, 0x0c, 0x66, 0x33, 0x7b, 0xcb, 0x4b, 0xa1, 0xd1, 0x76, 0x71, 0x2e, 0x12, 0xe9, + 0x6a, 0xc1, 0x94, 0xbc, 0x22, 0xfc, 0xaf, 0xcf, 0xcf, 0xef, 0x14, 0x0b, 0xda, 0xe6, + 0x63, 0xd9, 0xcf, 0x4c + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 1, + transparent_outputs: 0, + tx: vec![ + 0x01, 0x00, 0x00, 0x00, 0x01, 0xcc, 0x43, 0x18, 0xd9, 0x61, 0x4f, 0xc8, 0x20, 0x90, + 0x5d, 0x04, 0x2b, 0xb1, 0xef, 0x9c, 0xa3, 0xf2, 0x49, 0x88, 0xc7, 0xb3, 0x53, 0x42, + 0x01, 0xcf, 0xb1, 0xcd, 0x8d, 0xbf, 0x69, 0xb8, 0x25, 0x0c, 0x18, 0xef, 0x41, 0x01, + 0xac, 0xa9, 0x79, 0x93, 0xdb, 0x00, 0x6c, 0x1f, 0xe0, 0x1f + ], + }, + TestVector { + description: "Sprout transaction v1 #3", + version: 1, + lock_time: 635528207, + expiry_height: 0, + txid: [ + 0x48, 0x05, 0x10, 0x0c, 0xe1, 0x81, 0x9b, 0x70, 0x52, 0x70, 0xcf, 0x9d, 0x0c, 0x40, + 0x2b, 0x4d, 0x89, 0x87, 0x39, 0x66, 0xc7, 0xcd, 0x90, 0xed, 0xf7, 0x8b, 0xe8, 0xda, + 0x16, 0x85, 0x21, 0x6e + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 0, + transparent_outputs: 2, + tx: vec![ + 0x01, 0x00, 
0x00, 0x00, 0x00, 0x02, 0x15, 0xf0, 0xe8, 0xec, 0x2b, 0x3d, 0x04, 0x00, + 0x01, 0x63, 0x27, 0x78, 0x7a, 0x10, 0x76, 0x52, 0x02, 0x00, 0x02, 0x51, 0xac, 0x0f, + 0x64, 0xe1, 0x25 + ], + }, + TestVector { + description: "Sprout transaction v1 #4", + version: 1, + lock_time: 2080031330, + expiry_height: 0, + txid: [ + 0x5d, 0xfe, 0x78, 0x83, 0x56, 0x5f, 0x4d, 0x65, 0xeb, 0x63, 0xc8, 0x97, 0x3f, 0x6a, + 0x49, 0x25, 0xb6, 0x2e, 0x7d, 0x0f, 0x29, 0x7f, 0x78, 0x8f, 0x1e, 0x5b, 0xe5, 0x28, + 0xa0, 0x69, 0x3c, 0x1a + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 0, + transparent_outputs: 0, + tx: vec![0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0xc2, 0xfa, 0x7b], + }, + TestVector { + description: "Sprout transaction v1 #5", + version: 1, + lock_time: 919800382, + expiry_height: 0, + txid: [ + 0x1e, 0xf5, 0x23, 0x0b, 0x8d, 0x32, 0x7f, 0xd5, 0x07, 0x7b, 0xe9, 0xf5, 0xcf, 0x40, + 0x59, 0x96, 0x7e, 0x03, 0xb6, 0x09, 0xc1, 0xfd, 0xaf, 0x3d, 0xce, 0xd3, 0x5f, 0xf9, + 0x36, 0x7e, 0x67, 0x73 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 2, + transparent_outputs: 1, + tx: vec![ + 0x01, 0x00, 0x00, 0x00, 0x02, 0xec, 0xbc, 0xb6, 0x4b, 0x69, 0x68, 0x91, 0x2a, 0x63, + 0x81, 0xce, 0x3d, 0xc1, 0x66, 0xd5, 0x6a, 0x1d, 0x62, 0xf5, 0xa8, 0xd7, 0x55, 0x1d, + 0xb5, 0xfd, 0x93, 0x13, 0xe8, 0xc7, 0x20, 0x3d, 0x99, 0x6a, 0xf7, 0xd4, 0x77, 0x08, + 0x6a, 0x65, 0x63, 0x52, 0x00, 0x63, 0x65, 0x6a, 0x45, 0xf4, 0x4a, 0xb0, 0x23, 0x75, + 0x2c, 0xb5, 0xb4, 0x06, 0xed, 0x89, 0x85, 0xe1, 0x81, 0x30, 0xab, 0x33, 0x36, 0x26, + 0x97, 0xb0, 0xe4, 0xe4, 0xc7, 0x63, 0xcc, 0xb8, 0xf6, 0x76, 0x49, 0x5c, 0x22, 0x2f, + 0x7f, 0xba, 0x1e, 0x31, 0xde, 0xfa, 0x01, 0x52, 0x57, 0xef, 0xc2, 0xe1, 0x01, 0x13, + 0xd7, 0xd0, 0x08, 0x12, 0xb0, 0x06, 0x00, 0x06, 0x00, 0x63, 0xac, 0x00, 0x63, 0xac, + 0x3e, 0x0a, 0xd3, 0x36 + ], + }, + TestVector { + description: "Sprout transaction v2 #1", + version: 2, + lock_time: 244954781, + expiry_height: 0, + txid: [ + 0xbe, 
0x39, 0x06, 0x8c, 0x10, 0x2c, 0x74, 0x5b, 0xab, 0x04, 0xff, 0x2d, 0x80, 0xb8, + 0xff, 0x79, 0x98, 0x51, 0x1c, 0x43, 0x94, 0x4e, 0xe4, 0x63, 0x49, 0x52, 0x25, 0x96, + 0x87, 0xd2, 0x4e, 0x00 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 0, + transparent_outputs: 2, + tx: vec![ + 0x02, 0x00, 0x00, 0x00, 0x00, 0x02, 0x9e, 0xc9, 0xd6, 0xd8, 0x49, 0xf9, 0x04, 0x00, + 0x06, 0x6a, 0x63, 0x51, 0x51, 0x00, 0xac, 0x24, 0x3c, 0x74, 0xcb, 0x95, 0xf3, 0x01, + 0x00, 0x04, 0x00, 0x51, 0x6a, 0x6a, 0x9d, 0xb6, 0x99, 0x0e, 0x02, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, + 0xd6, 0x4a, 0xf3, 0x59, 0x7c, 0x04, 0x32, 0x3e, 0xa5, 0x1b, 0x00, 0x52, 0xad, 0x80, + 0x84, 0xa8, 0xb9, 0xda, 0x94, 0x8d, 0x32, 0x0d, 0xad, 0xd6, 0x4f, 0x54, 0x31, 0xe6, + 0x1d, 0xdf, 0x65, 0x8d, 0x24, 0xae, 0x67, 0xc2, 0x2c, 0x8d, 0x13, 0x09, 0x13, 0x1f, + 0xc0, 0x0f, 0xe7, 0xf2, 0x35, 0x73, 0x42, 0x76, 0xd3, 0x8d, 0x47, 0xf1, 0xe1, 0x91, + 0xe0, 0x0c, 0x7a, 0x1d, 0x48, 0xaf, 0x04, 0x68, 0x27, 0x59, 0x1e, 0x97, 0x33, 0xa9, + 0x7f, 0xa6, 0xb6, 0x79, 0xf3, 0xdc, 0x60, 0x1d, 0x00, 0x82, 0x85, 0xed, 0xcb, 0xda, + 0xe6, 0x9c, 0xe8, 0xfc, 0x1b, 0xe4, 0xaa, 0xc0, 0x0f, 0xf2, 0x71, 0x1e, 0xbd, 0x93, + 0x1d, 0xe5, 0x18, 0x85, 0x68, 0x78, 0xf7, 0x34, 0x76, 0xf2, 0x1a, 0x48, 0x2e, 0xc9, + 0x37, 0x83, 0x65, 0xc8, 0xf7, 0x39, 0x3c, 0x94, 0xe2, 0x88, 0x53, 0x15, 0xeb, 0x46, + 0x71, 0x09, 0x8b, 0x79, 0x53, 0x5e, 0x79, 0x0f, 0xe5, 0x3e, 0x29, 0xfe, 0xf2, 0xb3, + 0x76, 0x66, 0x97, 0xac, 0x32, 0xb4, 0xf4, 0x73, 0xf4, 0x68, 0xa0, 0x08, 0xe7, 0x23, + 0x89, 0xfc, 0x03, 0x88, 0x0d, 0x78, 0x0c, 0xb0, 0x7f, 0xcf, 0xaa, 0xbe, 0x3f, 0x1a, + 0x84, 0xb2, 0x7d, 0xb5, 0x9a, 0x4a, 0x15, 0x3d, 0x88, 0x2d, 0x2b, 0x21, 0x03, 0x59, + 0x65, 0x55, 0xed, 0x94, 0x94, 0xc6, 0xac, 0x89, 0x3c, 0x49, 0x72, 0x38, 0x33, 0xec, + 0x89, 0x26, 0xc1, 0x03, 0x95, 0x86, 0xa7, 0xaf, 0xcf, 0x4a, 0x0d, 0x9c, 0x73, 0x1e, + 0x98, 0x5d, 0x99, 0x58, 0x9c, 0x8b, 0xb8, 0x38, 
0xe8, 0xaa, 0xf7, 0x45, 0x53, 0x3e, + 0xd9, 0xe8, 0xae, 0x3a, 0x1c, 0xd0, 0x74, 0xa5, 0x1a, 0x20, 0xda, 0x8a, 0xba, 0x18, + 0xd1, 0xdb, 0xeb, 0xbc, 0x86, 0x2d, 0xed, 0x42, 0x43, 0x5e, 0x92, 0x47, 0x69, 0x30, + 0xd0, 0x69, 0x89, 0x6c, 0xff, 0x30, 0xeb, 0x41, 0x4f, 0x72, 0x7b, 0x89, 0xe0, 0x01, + 0xaf, 0xa2, 0xfb, 0x8d, 0xc3, 0x43, 0x6d, 0x75, 0xa4, 0xa6, 0xf2, 0x65, 0x72, 0x50, + 0x4b, 0x19, 0x22, 0x32, 0xec, 0xb9, 0xf0, 0x02, 0x24, 0x11, 0xe5, 0x25, 0x96, 0xbc, + 0x5e, 0x90, 0x45, 0x7e, 0x74, 0x59, 0x39, 0xff, 0xed, 0xbd, 0x12, 0x86, 0x3c, 0xe7, + 0x1a, 0x02, 0xaf, 0x11, 0x7d, 0x41, 0x7a, 0xdb, 0x3d, 0x15, 0xcc, 0x54, 0x02, 0xb1, + 0xfc, 0xe4, 0x67, 0x50, 0x0c, 0x6b, 0x8f, 0xb8, 0x6b, 0x12, 0xb5, 0x6d, 0xa9, 0xc3, + 0x82, 0x85, 0x7d, 0xee, 0xcc, 0x40, 0xa9, 0x8d, 0x5f, 0x29, 0x35, 0x39, 0x5e, 0xe4, + 0x76, 0x2d, 0xd2, 0x0a, 0xfd, 0xbb, 0x5d, 0x47, 0xfa, 0x9a, 0x6d, 0xd9, 0x84, 0xd5, + 0x67, 0xdb, 0x28, 0x57, 0xb9, 0x27, 0xb7, 0xfa, 0xe2, 0xdb, 0x58, 0x71, 0x05, 0x41, + 0x5d, 0x46, 0x42, 0x78, 0x9d, 0x38, 0xf5, 0x0b, 0x8d, 0xbc, 0xc1, 0x29, 0xca, 0xb3, + 0xd1, 0x7d, 0x19, 0xf3, 0x35, 0x5b, 0xcf, 0x73, 0xce, 0xcb, 0x8c, 0xb8, 0xa5, 0xda, + 0x01, 0x30, 0x71, 0x52, 0xf1, 0x39, 0x36, 0xa2, 0x70, 0x57, 0x26, 0x70, 0x02, 0x82, + 0xd3, 0x90, 0x26, 0xc6, 0xcb, 0x4c, 0xd4, 0xb0, 0xf7, 0xf5, 0xaa, 0x2a, 0x4f, 0x5a, + 0x53, 0x41, 0xec, 0x5d, 0xd7, 0x15, 0x40, 0x6f, 0x2f, 0xdd, 0x2a, 0xfa, 0x73, 0x3f, + 0x5f, 0x64, 0x1c, 0x02, 0x21, 0x86, 0x2a, 0x1b, 0xaf, 0xce, 0x26, 0x09, 0xd9, 0xee, + 0xcf, 0xa1, 0x58, 0xcf, 0xb5, 0xcd, 0x79, 0xf8, 0x80, 0x08, 0xe3, 0x15, 0xdc, 0x7d, + 0x83, 0x88, 0xe7, 0x6c, 0x17, 0x82, 0xfd, 0x27, 0x03, 0xd1, 0x8a, 0x76, 0x36, 0x24, + 0xc2, 0x5f, 0xa9, 0x59, 0xcc, 0x97, 0x48, 0x9c, 0xe7, 0x57, 0x45, 0x82, 0x4b, 0x77, + 0x86, 0x8c, 0x53, 0x23, 0x9c, 0xfb, 0xdf, 0x73, 0xca, 0xec, 0x65, 0x60, 0x40, 0x03, + 0x31, 0x4f, 0xaa, 0xce, 0xb5, 0x62, 0x18, 0xc6, 0xbd, 0x30, 0xf8, 0x37, 0x4a, 0xc1, + 0x33, 0x86, 0x79, 0x3f, 0x21, 0xa9, 0xfb, 0x80, 0xad, 0x03, 0xbc, 
0x0c, 0xda, 0x4a, + 0x44, 0x94, 0x6c, 0x00, 0x03, 0xb1, 0xa1, 0xdf, 0x0e, 0x5b, 0x87, 0xb5, 0xbe, 0xce, + 0x47, 0x7a, 0x70, 0x96, 0x49, 0xe9, 0x50, 0x06, 0x05, 0x91, 0x39, 0x48, 0x12, 0x95, + 0x1e, 0x1f, 0xe3, 0x89, 0x5b, 0x8c, 0xc3, 0xd1, 0x4d, 0x2c, 0xf6, 0x55, 0x6d, 0xf6, + 0xed, 0x4b, 0x4d, 0xdd, 0x3d, 0x9a, 0x69, 0xf5, 0x33, 0x57, 0xd7, 0x76, 0x7f, 0x4f, + 0x5c, 0xcb, 0xdb, 0xc5, 0x96, 0x63, 0x12, 0x77, 0xf8, 0xfe, 0xcd, 0x08, 0xcb, 0x05, + 0x6b, 0x95, 0xe3, 0x02, 0x5b, 0x97, 0x92, 0xff, 0xf7, 0xf2, 0x44, 0xfc, 0x71, 0x62, + 0x69, 0xb9, 0x26, 0xd6, 0x2e, 0x95, 0x96, 0xfa, 0x82, 0x5c, 0x6b, 0xf2, 0x1a, 0xff, + 0x9e, 0x68, 0x62, 0x5a, 0x19, 0x24, 0x40, 0xea, 0x06, 0x82, 0x81, 0x23, 0xd9, 0x78, + 0x84, 0x80, 0x6f, 0x15, 0xfa, 0x08, 0xda, 0x52, 0x75, 0x4a, 0x10, 0x95, 0xe3, 0xff, + 0x1a, 0xbd, 0x5c, 0xe4, 0xfd, 0xdf, 0xcc, 0xfc, 0x3a, 0x61, 0x28, 0xae, 0xf7, 0x84, + 0xa6, 0x46, 0x10, 0xa8, 0x9d, 0x1a, 0x70, 0x99, 0x21, 0x6d, 0x08, 0x14, 0xd3, 0xa2, + 0xd4, 0x52, 0x43, 0x1c, 0x32, 0xd4, 0x11, 0xac, 0x1c, 0xce, 0x82, 0xad, 0x02, 0x29, + 0x40, 0x7b, 0xbc, 0x48, 0x98, 0x56, 0x75, 0xe3, 0xf8, 0x74, 0xa4, 0x53, 0x3f, 0x1d, + 0x63, 0xa8, 0x4d, 0xfa, 0x3e, 0x0f, 0x46, 0x0f, 0xe2, 0xf5, 0x7e, 0x34, 0xfb, 0xc7, + 0x54, 0x23, 0xc3, 0x73, 0x7f, 0x5b, 0x2a, 0x06, 0x15, 0xf5, 0x72, 0x2d, 0xb0, 0x41, + 0xa3, 0xef, 0x66, 0xfa, 0x48, 0x3a, 0xfd, 0x3c, 0x2e, 0x19, 0xe5, 0x94, 0x44, 0xa6, + 0x4a, 0xdd, 0x6d, 0xf1, 0xd9, 0x63, 0xf5, 0xdd, 0x5b, 0x50, 0x10, 0xd3, 0xd0, 0x25, + 0xf0, 0x28, 0x7c, 0x4c, 0xf1, 0x9c, 0x75, 0xf3, 0x3d, 0x51, 0xdd, 0xdd, 0xba, 0x5d, + 0x65, 0x7b, 0x43, 0xee, 0x8d, 0xa6, 0x45, 0x44, 0x38, 0x14, 0xcc, 0x73, 0x29, 0xf3, + 0xe9, 0xb4, 0xe5, 0x4c, 0x23, 0x6c, 0x29, 0xaf, 0x39, 0x23, 0x10, 0x17, 0x56, 0xd9, + 0xfa, 0x4b, 0xd0, 0xf7, 0xd2, 0xdd, 0xaa, 0xcb, 0x6b, 0x0f, 0x86, 0xa2, 0x65, 0x8e, + 0x0a, 0x07, 0xa0, 0x5a, 0xc5, 0xb9, 0x50, 0x05, 0x1c, 0xd2, 0x4c, 0x47, 0xa8, 0x8d, + 0x13, 0xd6, 0x59, 0xba, 0x2a, 0x46, 0xca, 0x18, 0x30, 0x81, 0x6d, 0x09, 0xcd, 0x76, + 
0x46, 0xf7, 0x6f, 0x71, 0x6a, 0xbe, 0xc5, 0xde, 0x07, 0xfe, 0x9b, 0x52, 0x34, 0x10, + 0x80, 0x6e, 0xa6, 0xf2, 0x88, 0xf8, 0x73, 0x6c, 0x23, 0x35, 0x7c, 0x85, 0xf4, 0x57, + 0x91, 0xe1, 0x70, 0x80, 0x29, 0xd9, 0x82, 0x4d, 0x90, 0x70, 0x46, 0x07, 0xf3, 0x87, + 0xa0, 0x3e, 0x49, 0xbf, 0x98, 0x36, 0x57, 0x44, 0x31, 0x34, 0x5a, 0x78, 0x77, 0xef, + 0xaa, 0x8a, 0x08, 0xe7, 0x30, 0x81, 0xef, 0x8d, 0x62, 0xcb, 0x78, 0x0a, 0xb6, 0x88, + 0x3a, 0x50, 0xa0, 0xd4, 0x70, 0x19, 0x0d, 0xfb, 0xa1, 0x0a, 0x85, 0x7f, 0x82, 0x84, + 0x2d, 0x38, 0x25, 0xb3, 0xd6, 0xda, 0x05, 0x73, 0xd3, 0x16, 0xeb, 0x16, 0x0d, 0xc0, + 0xb7, 0x16, 0xc4, 0x8f, 0xbd, 0x46, 0x7f, 0x75, 0xb7, 0x80, 0x14, 0x9a, 0xe8, 0x80, + 0x8f, 0x4e, 0x68, 0xf5, 0x0c, 0x05, 0x36, 0xac, 0xdd, 0xf6, 0xf1, 0xae, 0xab, 0x01, + 0x6b, 0x6b, 0xc1, 0xec, 0x14, 0x4b, 0x4e, 0x55, 0x3a, 0xcf, 0xd6, 0x70, 0xf7, 0x7e, + 0x75, 0x5f, 0xc8, 0x8e, 0x06, 0x77, 0xe3, 0x1b, 0xa4, 0x59, 0xb4, 0x4e, 0x30, 0x77, + 0x68, 0x95, 0x8f, 0xe3, 0x78, 0x9d, 0x41, 0xc2, 0xb1, 0xff, 0x43, 0x4c, 0xb3, 0x0e, + 0x15, 0x91, 0x4f, 0x01, 0xbc, 0x6b, 0xc2, 0x30, 0x7b, 0x48, 0x8d, 0x25, 0x56, 0xd7, + 0xb7, 0x38, 0x0e, 0xa4, 0xff, 0xd7, 0x12, 0xf6, 0xb0, 0x2f, 0xe8, 0x06, 0xb9, 0x45, + 0x69, 0xcd, 0x40, 0x59, 0xf3, 0x96, 0xbf, 0x29, 0xb9, 0x9d, 0x0a, 0x40, 0xe5, 0xe1, + 0x71, 0x1c, 0xa9, 0x44, 0xf7, 0x2d, 0x43, 0x6a, 0x10, 0x2f, 0xca, 0x4b, 0x97, 0x69, + 0x3d, 0xa0, 0xb0, 0x86, 0xfe, 0x9d, 0x2e, 0x71, 0x62, 0x47, 0x0d, 0x02, 0xe0, 0xf0, + 0x5d, 0x4b, 0xec, 0x95, 0x12, 0xbf, 0xb3, 0xf3, 0x83, 0x27, 0x29, 0x6e, 0xfa, 0xa7, + 0x43, 0x28, 0xb1, 0x18, 0xc2, 0x74, 0x02, 0xc7, 0x0c, 0x3a, 0x90, 0xb4, 0x9a, 0xd4, + 0xbb, 0xc6, 0x8e, 0x37, 0xc0, 0xaa, 0x7d, 0x9b, 0x3f, 0xe1, 0x77, 0x99, 0xd7, 0x3b, + 0x84, 0x1e, 0x75, 0x17, 0x13, 0xa0, 0x29, 0x43, 0x90, 0x5a, 0xae, 0x08, 0x03, 0xfd, + 0x69, 0x44, 0x2e, 0xb7, 0x68, 0x1e, 0xc2, 0xa0, 0x56, 0x00, 0x05, 0x4e, 0x92, 0xee, + 0xd5, 0x55, 0x02, 0x8f, 0x21, 0xb6, 0xa1, 0x55, 0x26, 0x8a, 0x2d, 0xd6, 0x64, 0x0a, + 0x69, 0x30, 0x1a, 
0x52, 0xa3, 0x8d, 0x4d, 0x9f, 0x9f, 0x95, 0x7a, 0xe3, 0x5a, 0xf7, + 0x16, 0x71, 0x18, 0x14, 0x1c, 0xe4, 0xc9, 0xbe, 0x0a, 0x6a, 0x49, 0x2f, 0xe7, 0x9f, + 0x15, 0x81, 0xa1, 0x55, 0xfa, 0x3a, 0x2b, 0x9d, 0xaf, 0xd8, 0x2e, 0x65, 0x0b, 0x38, + 0x6a, 0xd3, 0xa0, 0x8c, 0xb6, 0xb8, 0x31, 0x31, 0xac, 0x30, 0x0b, 0x08, 0x46, 0x35, + 0x4a, 0x7e, 0xef, 0x9c, 0x41, 0x0e, 0x4b, 0x62, 0xc4, 0x7c, 0x54, 0x26, 0x90, 0x7d, + 0xfc, 0x66, 0x85, 0xc5, 0xc9, 0x9b, 0x71, 0x41, 0xac, 0x62, 0x6a, 0xb4, 0x76, 0x1f, + 0xd3, 0xf4, 0x1e, 0x72, 0x8e, 0x1a, 0x28, 0xf8, 0x9d, 0xb8, 0x9f, 0xfd, 0xec, 0xa3, + 0x64, 0xdd, 0x2f, 0x0f, 0x07, 0x39, 0xf0, 0x53, 0x45, 0x56, 0x48, 0x31, 0x99, 0xc7, + 0x1f, 0x18, 0x93, 0x41, 0xac, 0x9b, 0x78, 0xa2, 0x69, 0x16, 0x42, 0x06, 0xa0, 0xea, + 0x1c, 0xe7, 0x3b, 0xfb, 0x2a, 0x94, 0x2e, 0x73, 0x70, 0xb2, 0x47, 0xc0, 0x46, 0xf8, + 0xe7, 0x5e, 0xf8, 0xe3, 0xf8, 0xbd, 0x82, 0x1c, 0xf5, 0x77, 0x49, 0x18, 0x64, 0xe2, + 0x0e, 0x6d, 0x08, 0xfd, 0x2e, 0x32, 0xb5, 0x55, 0xc9, 0x2c, 0x66, 0x1f, 0x19, 0x58, + 0x8b, 0x72, 0xa8, 0x95, 0x99, 0x71, 0x0a, 0x88, 0x06, 0x12, 0x53, 0xca, 0x28, 0x5b, + 0x63, 0x04, 0xb3, 0x7d, 0xa2, 0xb5, 0x29, 0x4f, 0x5c, 0xb3, 0x54, 0xa8, 0x94, 0x32, + 0x28, 0x48, 0xcc, 0xbd, 0xc7, 0xc2, 0x54, 0x5b, 0x7d, 0xa5, 0x68, 0xaf, 0xac, 0x87, + 0xff, 0xa0, 0x05, 0xc3, 0x12, 0x24, 0x1c, 0x2d, 0x57, 0xf4, 0xb4, 0x5d, 0x64, 0x19, + 0xf0, 0xd2, 0xe2, 0xc5, 0xaf, 0x33, 0xae, 0x24, 0x37, 0x85, 0xb3, 0x25, 0xcd, 0xab, + 0x95, 0x40, 0x4f, 0xc7, 0xae, 0xd7, 0x05, 0x25, 0xcd, 0xdb, 0x41, 0x87, 0x2c, 0xfc, + 0xc2, 0x14, 0xb1, 0x32, 0x32, 0xed, 0xc7, 0x86, 0x09, 0x75, 0x3d, 0xbf, 0xf9, 0x30, + 0xeb, 0x0d, 0xc1, 0x56, 0x61, 0x2b, 0x9c, 0xb4, 0x34, 0xbc, 0x4b, 0x69, 0x33, 0x92, + 0xde, 0xb8, 0x7c, 0x53, 0x04, 0x35, 0x31, 0x2e, 0xdc, 0xed, 0xc6, 0xa9, 0x61, 0x13, + 0x33, 0x38, 0xd7, 0x86, 0xc4, 0xa3, 0xe1, 0x03, 0xf6, 0x01, 0x10, 0xa1, 0x6b, 0x13, + 0x37, 0x12, 0x97, 0x04, 0xbf, 0x47, 0x54, 0xff, 0x6b, 0xa9, 0xfb, 0xe6, 0x59, 0x51, + 0xe6, 0x10, 0x62, 0x0f, 0x71, 0xcd, 
0xa8, 0xfc, 0x87, 0x76, 0x25, 0xf2, 0xc5, 0xbb, + 0x04, 0xcb, 0xe1, 0x22, 0x8b, 0x1e, 0x88, 0x6f, 0x40, 0x50, 0xaf, 0xd8, 0xfe, 0x94, + 0xe9, 0x7d, 0x2e, 0x9e, 0x85, 0xc6, 0xbb, 0x74, 0x8c, 0x00, 0x42, 0xd3, 0x24, 0x9a, + 0xbb, 0x13, 0x42, 0xbb, 0x0e, 0xeb, 0xf6, 0x20, 0x58, 0xbf, 0x3d, 0xe0, 0x80, 0xd9, + 0x46, 0x11, 0xa3, 0x75, 0x09, 0x15, 0xb5, 0xdc, 0x6c, 0x0b, 0x38, 0x99, 0xd4, 0x12, + 0x22, 0xba, 0xce, 0x76, 0x0e, 0xe9, 0xc8, 0x81, 0x8d, 0xed, 0x59, 0x9e, 0x34, 0xc5, + 0x6d, 0x73, 0x72, 0xaf, 0x1e, 0xb8, 0x68, 0x52, 0xf2, 0xa7, 0x32, 0x10, 0x4b, 0xdb, + 0x75, 0x07, 0x39, 0xde, 0x6c, 0x2c, 0x6e, 0x0f, 0x9e, 0xb7, 0xcb, 0x17, 0xf1, 0x94, + 0x2b, 0xfc, 0x9f, 0x4f, 0xd6, 0xeb, 0xb6, 0xb4, 0xcd, 0xd4, 0xda, 0x2b, 0xca, 0x26, + 0xfa, 0xc4, 0x57, 0x8e, 0x9f, 0x54, 0x34, 0x05, 0xac, 0xc7, 0xd8, 0x6f, 0xf5, 0x91, + 0x58, 0xbd, 0x0c, 0xba, 0x3a, 0xef, 0x6f, 0x4a, 0x84, 0x72, 0xd1, 0x44, 0xd9, 0x9f, + 0x8b, 0x8d, 0x1d, 0xed, 0xaa, 0x90, 0x77, 0xd4, 0xf0, 0x1d, 0x4b, 0xb2, 0x7b, 0xbe, + 0x31, 0xd8, 0x8f, 0xbe, 0xfa, 0xc3, 0xdc, 0xd4, 0x79, 0x75, 0x63, 0xa2, 0x6b, 0x1d, + 0x61, 0xfc, 0xd9, 0xa4, 0x64, 0xab, 0x21, 0xed, 0x55, 0x0f, 0xe6, 0xfa, 0x09, 0x69, + 0x5b, 0xa0, 0xb2, 0xf1, 0x0e, 0xea, 0x64, 0x68, 0xcc, 0x6e, 0x20, 0xa6, 0x6f, 0x82, + 0x6e, 0x3d, 0x14, 0xc5, 0x00, 0x6f, 0x05, 0x63, 0x88, 0x7f, 0x5e, 0x12, 0x89, 0xbe, + 0x1b, 0x20, 0x04, 0xca, 0xca, 0x8d, 0x3f, 0x34, 0xd6, 0xe8, 0x4b, 0xf5, 0x9c, 0x1e, + 0x04, 0x61, 0x9a, 0x7c, 0x23, 0xa9, 0x96, 0x94, 0x1d, 0x88, 0x9e, 0x46, 0x22, 0xa9, + 0xb9, 0xb1, 0xd5, 0x9d, 0x5e, 0x31, 0x90, 0x94, 0x31, 0x8c, 0xd4, 0x05, 0xba, 0x27, + 0xb7, 0xe2, 0xc0, 0x84, 0x76, 0x2d, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x3e, 0xc4, 0x54, 0x9a, + 0x4d, 0x97, 0x72, 0x9d, 0x03, 0x34, 0x60, 0xfc, 0xf8, 0x9d, 0x64, 0x94, 0xf2, 0xff, + 0xd7, 0x89, 0xe9, 0x80, 0x82, 0xea, 0x5c, 0xe9, 0x53, 0x4b, 0x3a, 0xcd, 0x60, 0xfe, + 0x49, 0xe3, 0x7e, 0x4f, 0x66, 0x69, 0x31, 0x67, 0x73, 
0x19, 0xed, 0x89, 0xf8, 0x55, + 0x88, 0x74, 0x1b, 0x31, 0x28, 0x90, 0x1a, 0x93, 0xbd, 0x78, 0xe4, 0xbe, 0x02, 0x25, + 0xa9, 0xe2, 0x69, 0x2c, 0x77, 0xc9, 0x69, 0xed, 0x01, 0x76, 0xbd, 0xf9, 0x55, 0x59, + 0x48, 0xcb, 0xd5, 0xa3, 0x32, 0xd0, 0x45, 0xde, 0x6b, 0xa6, 0xbf, 0x44, 0x90, 0xad, + 0xfe, 0x74, 0x44, 0xcd, 0x46, 0x7a, 0x09, 0x07, 0x54, 0x17, 0xfc, 0xc0, 0x06, 0x2e, + 0x49, 0xf0, 0x08, 0xc5, 0x1a, 0xd4, 0x22, 0x74, 0x39, 0xc1, 0xb4, 0x47, 0x6c, 0xcd, + 0x8e, 0x97, 0x86, 0x2d, 0xab, 0x7b, 0xe1, 0xe8, 0xd3, 0x99, 0xc0, 0x5e, 0xf2, 0x7c, + 0x6e, 0x22, 0xee, 0x27, 0x3e, 0x15, 0x78, 0x6e, 0x39, 0x4c, 0x8f, 0x1b, 0xe3, 0x16, + 0x82, 0xa3, 0x01, 0x47, 0x96, 0x3a, 0xc8, 0xda, 0x8d, 0x41, 0xd8, 0x04, 0x25, 0x84, + 0x26, 0xa3, 0xf7, 0x02, 0x89, 0xb8, 0xad, 0x19, 0xd8, 0xde, 0x13, 0xbe, 0x4e, 0xeb, + 0xe3, 0xbd, 0x4c, 0x8a, 0x6f, 0x55, 0xd6, 0xe0, 0xc3, 0x73, 0xd4, 0x56, 0x85, 0x18, + 0x79, 0xf5, 0xfb, 0xc2, 0x82, 0xdb, 0x9e, 0x13, 0x48, 0x06, 0xbf, 0xf7, 0x1e, 0x11, + 0xbc, 0x33, 0xab, 0x75, 0xdd, 0x6c, 0xa0, 0x67, 0xfb, 0x73, 0xa0, 0x43, 0xb6, 0x46, + 0xa7, 0xcf, 0x39, 0xca, 0xb4, 0x92, 0x83, 0x86, 0x78, 0x6d, 0x2f, 0x24, 0x14, 0x1e, + 0xe1, 0x20, 0xfd, 0xc3, 0x4d, 0x67, 0x64, 0xea, 0xfc, 0x66, 0x88, 0x0e, 0xe0, 0x20, + 0x4f, 0x53, 0xcc, 0x11, 0x67, 0xed, 0x20, 0xb4, 0x3a, 0x52, 0xde, 0xa3, 0xca, 0x7c, + 0xff, 0x8e, 0xf3, 0x5c, 0xd8, 0xe6, 0xd7, 0xc1, 0x11, 0xa6, 0x8e, 0xf4, 0x4b, 0xcd, + 0x0c, 0x15, 0x13, 0xad, 0x47, 0xca, 0x61, 0xc6, 0x59, 0xcc, 0x5d, 0x32, 0x5b, 0x44, + 0x0f, 0x6b, 0x9f, 0x03, 0xaf, 0xf6, 0x68, 0x79, 0xbb, 0x66, 0x88, 0xfd, 0x28, 0x59, + 0x36, 0x2b, 0x18, 0x2f, 0x20, 0x7b, 0x31, 0x75, 0x96, 0x1f, 0x64, 0x11, 0xa4, 0x93, + 0xbf, 0xfd, 0x04, 0x8e, 0x7d, 0x0d, 0x87, 0xd8, 0x03, 0xe6, 0xf9, 0x90, 0xa2, 0xb0, + 0xa2, 0x5f, 0x5a, 0xa0, 0x11, 0x1a, 0x6e, 0x68, 0xf3, 0x7b, 0xf6, 0xf3, 0xac, 0x2d, + 0x26, 0xb8, 0x46, 0x86, 0xe5, 0x69, 0xd5, 0x8d, 0x99, 0xc1, 0x38, 0x35, 0x97, 0x0a, + 0xd8, 0x11, 0x93, 0xc4, 0xc1, 0xb1, 0x6e, 0x6a, 0x90, 0xe2, 0xd5, 0x07, 
0xcd, 0xfe, + 0x6f, 0xbd, 0xaa, 0x86, 0x16, 0x3e, 0x9c, 0xf5, 0xde, 0x31, 0x00, 0xfb, 0xca, 0x7e, + 0x8d, 0xa0, 0x47, 0xb0, 0x90, 0xdb, 0x9f, 0x37, 0x95, 0x2f, 0xbf, 0xee, 0x76, 0xaf, + 0x61, 0x66, 0x81, 0x90, 0xbd, 0x52, 0xed, 0x49, 0x0e, 0x67, 0x7b, 0x51, 0x5d, 0x01, + 0x43, 0x84, 0xaf, 0x07, 0x21, 0x9c, 0x7c, 0x0e, 0x03, 0xfc, 0x7b, 0xfc, 0x79, 0xf3, + 0x25, 0x64, 0x4e, 0x4d, 0xf4, 0xc0, 0xd7, 0xdb, 0x08, 0xe9, 0xf0, 0xbd, 0x02, 0x49, + 0x43, 0xc7, 0x05, 0xab, 0xff, 0x89, 0x94, 0xbf, 0xa6, 0x05, 0xcf, 0xbc, 0x7e, 0x03, + 0x46, 0xa7, 0xd3, 0xf7, 0xc3, 0x7d, 0x9e, 0x8b, 0xdc, 0x43, 0x3b, 0x7d, 0x79, 0xe0, + 0x8a, 0x12, 0xf7, 0x38, 0xa8, 0xf0, 0xdb, 0xdd, 0xfe, 0xf2, 0xf2, 0x65, 0x7e, 0xf3, + 0xe4, 0x7d, 0x1b, 0x0f, 0x03, 0x1e, 0x6a, 0x13, 0x31, 0x1f, 0xb7, 0x99, 0xc7, 0x9c, + 0x64, 0x1d, 0x9d, 0xa4, 0x3b, 0x33, 0xe7, 0xad, 0x01, 0x2e, 0x28, 0x25, 0x53, 0x98, + 0x78, 0x92, 0x62, 0x27, 0x5f, 0x11, 0x75, 0xbe, 0x84, 0x02, 0xc0, 0x14, 0x91, 0xc4, + 0xd8, 0x42, 0x40, 0x6d, 0x0e, 0xc4, 0x28, 0x2c, 0x95, 0x26, 0x17, 0x4a, 0x09, 0x87, + 0x8f, 0xe8, 0xfd, 0xde, 0x33, 0xa2, 0x96, 0x04, 0xe5, 0xe5, 0xe7, 0xb2, 0xa0, 0x25, + 0x02, 0x65, 0x0b, 0x97, 0xdb, 0xb5, 0x2b, 0xef, 0xb5, 0x9b, 0x1d, 0x30, 0xa5, 0x74, + 0x33, 0xb0, 0xa3, 0x51, 0x47, 0x44, 0x44, 0x09, 0x9d, 0xaa, 0x37, 0x10, 0x46, 0x61, + 0x32, 0x60, 0xcf, 0x33, 0x54, 0xcf, 0xcd, 0xad, 0xa6, 0x63, 0xec, 0xe8, 0x24, 0xff, + 0xd7, 0xe4, 0x43, 0x93, 0x88, 0x6a, 0x86, 0x16, 0x5d, 0xdd, 0xdf, 0x2b, 0x4c, 0x41, + 0x77, 0x35, 0x54, 0xc8, 0x69, 0x95, 0x26, 0x94, 0x08, 0xb1, 0x1e, 0x67, 0x37, 0xa4, + 0xc4, 0x47, 0x58, 0x6f, 0x69, 0x17, 0x34, 0x46, 0xd8, 0xe4, 0x8b, 0xf8, 0x4c, 0xbc, + 0x00, 0x0a, 0x80, 0x78, 0x99, 0x97, 0x3e, 0xb9, 0x3c, 0x5e, 0x81, 0x9a, 0xad, 0x66, + 0x94, 0x13, 0xf8, 0x38, 0x79, 0x33, 0xad, 0x15, 0x84, 0xaa, 0x35, 0xe4, 0x3f, 0x4e, + 0xcd, 0x1e, 0x2d, 0x04, 0x07, 0xc0, 0xb1, 0xb8, 0x99, 0x20, 0xff, 0xdf, 0xdb, 0x9b, + 0xea, 0x51, 0xac, 0x95, 0xb5, 0x57, 0xaf, 0x71, 0xb8, 0x9f, 0x90, 0x3f, 0x5d, 0x98, + 0x48, 
0xf1, 0x4f, 0xcb, 0xeb, 0x18, 0x37, 0x57, 0x0f, 0x54, 0x4d, 0x63, 0x59, 0xeb, + 0x23, 0xfa, 0xf3, 0x8a, 0x08, 0x22, 0xda, 0x36, 0xce, 0x42, 0x6c, 0x4a, 0x2f, 0xbe, + 0xff, 0xeb, 0x0a, 0x8a, 0x2e, 0x29, 0x7a, 0x9d, 0x19, 0xba, 0x15, 0x02, 0x45, 0x90, + 0xe3, 0x32, 0x9d, 0x9f, 0xa9, 0x26, 0x1f, 0x99, 0x38, 0xa4, 0x03, 0x2d, 0xd3, 0x46, + 0x06, 0xc9, 0xcf, 0x9f, 0x3d, 0xd3, 0x3e, 0x57, 0x6f, 0x05, 0xcd, 0x1d, 0xd6, 0x81, + 0x1c, 0x62, 0x98, 0x75, 0x7d, 0x77, 0xd9, 0xe8, 0x10, 0xab, 0xdb, 0x22, 0x6a, 0xfc, + 0xaa, 0x43, 0x46, 0xa6, 0x56, 0x0f, 0x89, 0x32, 0xb3, 0x18, 0x1f, 0xd3, 0x55, 0xd5, + 0xd3, 0x91, 0x97, 0x61, 0x83, 0xf8, 0xd9, 0x93, 0x88, 0x83, 0x96, 0x32, 0xd6, 0x35, + 0x4f, 0x66, 0x6d, 0x09, 0xd3, 0xe5, 0x62, 0x9e, 0xa1, 0x97, 0x37, 0x38, 0x86, 0x13, + 0xd3, 0x8a, 0x34, 0xfd, 0x0f, 0x6e, 0x50, 0xee, 0x5a, 0x0c, 0xc9, 0x67, 0x71, 0x77, + 0xf5, 0x00, 0x28, 0xc1, 0x41, 0x37, 0x81, 0x87, 0xbd, 0x28, 0x19, 0x40, 0x3f, 0xc5, + 0x34, 0xf8, 0x00, 0x76, 0xe9, 0x38, 0x0c, 0xb4, 0x96, 0x4d, 0x3b, 0x6b, 0x45, 0x81, + 0x9d, 0x3b, 0x8e, 0x9c, 0xaf, 0x54, 0xf0, 0x51, 0x85, 0x2d, 0x67, 0x1b, 0xf8, 0xc1, + 0xff, 0xde, 0x2d, 0x15, 0x10, 0x75, 0x64, 0x18, 0xcb, 0x48, 0x10, 0x93, 0x6a, 0xa5, + 0x7e, 0x69, 0x65, 0xd6, 0xfb, 0x65, 0x6a, 0x76, 0x0b, 0x7f, 0x19, 0xad, 0xf9, 0x6c, + 0x17, 0x34, 0x88, 0x55, 0x21, 0x93, 0xb1, 0x47, 0xee, 0x58, 0x85, 0x80, 0x33, 0xda, + 0xc7, 0xcd, 0x0e, 0xb2, 0x04, 0xc0, 0x64, 0x90, 0xbb, 0xde, 0xdf, 0x5f, 0x75, 0x71, + 0xac, 0xb2, 0xeb, 0xe7, 0x6a, 0xce, 0xf3, 0xf2, 0xa0, 0x1e, 0xe9, 0x87, 0x48, 0x6d, + 0xfe, 0x6c, 0x3f, 0x0a, 0x5e, 0x23, 0x4c, 0x12, 0x72, 0x58, 0xf9, 0x7a, 0x28, 0xfb, + 0x5d, 0x16, 0x4a, 0x81, 0x76, 0xbe, 0x94, 0x6b, 0x80, 0x97, 0xd0, 0xe3, 0x17, 0x28, + 0x7f, 0x33, 0xbf, 0x9c, 0x16, 0xf9, 0xa5, 0x45, 0x40, 0x9c, 0xe2, 0x9b, 0x1f, 0x42, + 0x73, 0x72, 0x5f, 0xc0, 0xdf, 0x02, 0xa0, 0x4e, 0xba, 0xe1, 0x78, 0xb3, 0x41, 0x4f, + 0xb0, 0xa8, 0x2d, 0x50, 0xde, 0xb0, 0x9f, 0xcf, 0x4e, 0x6e, 0xe9, 0xd1, 0x80, 0xff, + 0x4f, 0x56, 0xff, 0x3b, 
0xc1, 0xd3, 0x60, 0x1f, 0xc2, 0xdc, 0x90, 0xd8, 0x14, 0xc3, + 0x25, 0x6f, 0x49, 0x67, 0xd3, 0xa8, 0xd6, 0x4c, 0x83, 0xfe, 0xa3, 0x39, 0xc5, 0x1f, + 0x5a, 0x8e, 0x58, 0x01, 0xfb, 0xb9, 0x78, 0x35, 0x58, 0x1b, 0x60, 0x24, 0x65, 0xde, + 0xe0, 0x4b, 0x59, 0x22, 0xc2, 0x76, 0x1b, 0x54, 0x24, 0x5b, 0xec, 0x0c, 0x9e, 0xef, + 0x2d, 0xb9, 0x7d, 0x22, 0xb2, 0xb3, 0x55, 0x6c, 0xc9, 0x69, 0xfb, 0xb1, 0x3d, 0x06, + 0x50, 0x97, 0x65, 0xa5, 0x2b, 0x3f, 0xac, 0x54, 0xb9, 0x3f, 0x42, 0x1b, 0xf0, 0x8e, + 0x18, 0xd5, 0x2d, 0xdd, 0x52, 0xcc, 0x1c, 0x8c, 0xa8, 0xad, 0xfa, 0xcc, 0xab, 0x7e, + 0x5c, 0xc2, 0xf4, 0x57, 0x3f, 0xbb, 0xf8, 0x23, 0x9b, 0xb0, 0xb8, 0xae, 0xdb, 0xf8, + 0xda, 0xd1, 0x62, 0x82, 0xda, 0x5c, 0x91, 0x25, 0xdb, 0xa1, 0xc0, 0x59, 0xd0, 0xdf, + 0x8a, 0xbf, 0x62, 0x10, 0x78, 0xf0, 0x2d, 0x6c, 0x4b, 0xc8, 0x6d, 0x40, 0x84, 0x5a, + 0xc1, 0xd5, 0x97, 0x10, 0xc4, 0x5f, 0x07, 0xd5, 0x85, 0xeb, 0x48, 0xb3, 0x2f, 0xc0, + 0x16, 0x7b, 0xa2, 0x56, 0xe7, 0x3c, 0xa3, 0xb9, 0x31, 0x1c, 0x62, 0xd1, 0x09, 0x49, + 0x79, 0x57, 0xd8, 0xdb, 0xe1, 0x0a, 0xa3, 0xe8, 0x66, 0xb4, 0x0c, 0x0b, 0xaa, 0x2b, + 0xc4, 0x92, 0xc1, 0x9a, 0xd1, 0xe6, 0x37, 0x2d, 0x96, 0x22, 0xbf, 0x16, 0x3f, 0xbf, + 0xfe, 0xae, 0xee, 0x79, 0x6a, 0x3c, 0xd9, 0xb6, 0xfb, 0xbf, 0xa4, 0xd7, 0x92, 0xf3, + 0x4d, 0x7f, 0xd6, 0xe7, 0x63, 0xcd, 0x58, 0x59, 0xdd, 0x26, 0x83, 0x3d, 0x21, 0xd9, + 0xbc, 0x54, 0x52, 0xbd, 0x19, 0x51, 0x5d, 0xff, 0x9f, 0x49, 0x95, 0xb3, 0x5b, 0xc0, + 0xc1, 0xf8, 0x76, 0xe6, 0xad, 0x11, 0xf2, 0x45, 0x2d, 0xc9, 0xae, 0x85, 0xae, 0xc0, + 0x1f, 0xc5, 0x6f, 0x8c, 0xbf, 0xda, 0x75, 0xa7, 0x72, 0x7b, 0x75, 0xeb, 0xbd, 0x6b, + 0xbf, 0xfb, 0x43, 0xb6, 0x3a, 0x3b, 0x1b, 0x67, 0x1e, 0x40, 0xfe, 0xb0, 0xdb, 0x00, + 0x29, 0x74, 0xa3, 0xc3, 0xb1, 0xa7, 0x88, 0x56, 0x72, 0x31, 0xbf, 0x63, 0x99, 0xff, + 0x89, 0x23, 0x69, 0x81, 0x14, 0x9d, 0x42, 0x38, 0x02, 0xd2, 0x34, 0x1a, 0x3b, 0xed, + 0xb9, 0xdd, 0xcb, 0xac, 0x1f, 0xe7, 0xb6, 0x43, 0x5e, 0x14, 0x79, 0xc7, 0x2e, 0x70, + 0x89, 0xd0, 0x29, 0xe7, 0xfb, 0xba, 0xf3, 
0xcf, 0x37, 0xe9, 0xb9, 0xa6, 0xb7, 0x76, + 0x79, 0x1e, 0x4c, 0x5e, 0x6f, 0xda, 0x57, 0xe8, 0xd5, 0xf1, 0x4c, 0x8c, 0x35, 0xa2, + 0xd2, 0x70, 0x84, 0x6b, 0x9d, 0xbe, 0x00, 0x5c, 0xda, 0x16, 0xaf, 0x44, 0x08, 0xf3, + 0xab, 0x06, 0xa9, 0x16, 0xee, 0xeb, 0x9c, 0x95, 0x94, 0xb7, 0x04, 0x24, 0xa4, 0xc1, + 0xd1, 0x71, 0x29, 0x5b, 0x67, 0x63, 0xb2, 0x2f, 0x47, 0xf8, 0x0b, 0x53, 0xcc, 0xbb, + 0x90, 0x4b, 0xd6, 0x8f, 0xd6, 0x5f, 0xbd, 0x3f, 0xbd, 0xea, 0x10, 0x35, 0xe9, 0x8c, + 0x21, 0xa7, 0xdb, 0xc9, 0x1a, 0x9b, 0x5b, 0xc7, 0x69, 0x0f, 0x05, 0xec, 0x31, 0x7c, + 0x97, 0xf8, 0x76, 0x4e, 0xb4, 0x8e, 0x91, 0x1d, 0x42, 0x8e, 0xc8, 0xd8, 0x61, 0xb7, + 0x08, 0xe8, 0x29, 0x8a, 0xcb, 0x62, 0x15, 0x51, 0x45, 0x15, 0x5a, 0xe9, 0x5f, 0x0a, + 0x1d, 0x15, 0x01, 0x03, 0x47, 0x53, 0x14, 0x6e, 0x22, 0xd0, 0x5f, 0x58, 0x6d, 0x7f, + 0x6b, 0x4f, 0xe1, 0x2d, 0xad, 0x9a, 0x17, 0xf5, 0xdb, 0x70, 0xb1, 0xdb, 0x96, 0xb8, + 0xd9, 0xa8, 0x3e, 0xda, 0xdc, 0x96, 0x6c, 0x8a, 0x54, 0x66, 0xb6, 0x1f, 0xc9, 0x98, + 0xc3, 0x1f, 0x10, 0x70, 0xd9, 0xa5, 0xc9, 0xa6, 0xd2, 0x68, 0xd3, 0x04, 0xfe, 0x6b, + 0x8f, 0xd3, 0xb4, 0x01, 0x03, 0x48, 0x61, 0x1a, 0xbd, 0xcb, 0xd4, 0x9f, 0xe4, 0xf8, + 0x5b, 0x62, 0x3c, 0x78, 0x28, 0xc7, 0x13, 0x82, 0xe1, 0x03, 0x4e, 0xa6, 0x7b, 0xc8, + 0xae, 0x97, 0x40, 0x4b, 0x0c, 0x50, 0xb2, 0xa0, 0x4f, 0x55, 0x9e, 0x49, 0x95, 0x0a, + 0xfc, 0xb0, 0xef, 0x46, 0x2a, 0x2a, 0xe0, 0x24, 0xb0, 0xf0, 0x22, 0x4d, 0xfd, 0x73, + 0x68, 0x4b, 0x88, 0xc7, 0xfb, 0xe9, 0x2d, 0x02, 0xb6, 0x8f, 0x75, 0x9c, 0x47, 0x52, + 0x66, 0x3c, 0xd7, 0xb9, 0x7a, 0x14, 0x94, 0x36, 0x49, 0x30, 0x55, 0x21, 0x32, 0x6b, + 0xde, 0x08, 0x56, 0x30, 0x86, 0x46, 0x29, 0x29, 0x1b, 0xae, 0x25, 0xff, 0x88, 0x22, + 0xa1, 0x4c, 0x4b, 0x66, 0x6a, 0x92, 0x59, 0xad, 0x0d, 0xc4, 0x2a, 0x82, 0x90, 0xac, + 0x7b, 0xc7, 0xf5, 0x3a, 0x16, 0xf3, 0x79, 0xf7, 0x58, 0xe5, 0xde, 0x75, 0x0f, 0x04, + 0xfd, 0x7c, 0xad, 0x47, 0x70, 0x1c, 0x85, 0x97, 0xf9, 0x78, 0x88, 0xbe, 0xa6, 0xfa, + 0x0b, 0xf2, 0x99, 0x99, 0x56, 0xfb, 0xfd, 0x0e, 0xe6, 0x8e, 
0xc3, 0x6e, 0x46, 0x88, + 0x80, 0x9a, 0xe2, 0x31, 0xeb, 0x8b, 0xc4, 0x36, 0x9f, 0x5f, 0xe1, 0x57, 0x3f, 0x57, + 0xe0, 0x99, 0xd9, 0xc0, 0x99, 0x01, 0xbf, 0x39, 0xca, 0xac, 0x48, 0xdc, 0x11, 0x95, + 0x6a, 0x8a, 0xe9, 0x05, 0xea, 0xd8, 0x69, 0x54, 0x54, 0x7c, 0x44, 0x8a, 0xe4, 0x3d, + 0x31, 0x5e, 0x66, 0x9c, 0x42, 0x42, 0xda, 0x56, 0x59, 0x38, 0xf4, 0x17, 0xbf, 0x43, + 0xce, 0x7b, 0x2b, 0x30, 0xb1, 0xcd, 0x40, 0x18, 0x38, 0x8e, 0x1a, 0x91, 0x0f, 0x0f, + 0xc4, 0x1f, 0xb0, 0x87, 0x7a, 0x59, 0x25, 0xe4, 0x66, 0x81, 0x9d, 0x37, 0x5b, 0x0a, + 0x91, 0x2d, 0x4f, 0xe8, 0x43, 0xb7, 0x6e, 0xf6, 0xf2, 0x23, 0xf0, 0xf7, 0xc8, 0x94, + 0xf3, 0x8f, 0x7a, 0xb7, 0x80, 0xdf, 0xd7, 0x5f, 0x66, 0x9c, 0x8c, 0x06, 0xcf, 0xfa, + 0x43, 0xeb, 0x47, 0x56, 0x5a, 0x50, 0xe3, 0xb1, 0xfa, 0x45, 0xad, 0x61, 0xce, 0x9a, + 0x1c, 0x47, 0x27, 0xb7, 0xaa, 0xa5, 0x35, 0x62, 0xf5, 0x23, 0xe7, 0x39, 0x52, 0xbb, + 0xf3, 0x3d, 0x8a, 0x41, 0x04, 0x07, 0x8a, 0xde, 0x3e, 0xaa, 0xa4, 0x96, 0x99, 0xa6, + 0x9f, 0xdf, 0x1c, 0x5a, 0xc7, 0x73, 0x21, 0x46, 0xee, 0x5e, 0x1d, 0x6b, 0x6c, 0xa9, + 0xb9, 0x18, 0x0f, 0x96, 0x4c, 0xc9, 0xd0, 0x87, 0x8a, 0xe1, 0x37, 0x35, 0x24, 0xd7, + 0xd5, 0x10, 0xe5, 0x82, 0x27, 0xdf, 0x6d, 0xe9, 0xd3, 0x0d, 0x27, 0x18, 0x67, 0x64, + 0x01, 0x77, 0xb0, 0xf1, 0x85, 0x6e, 0x28, 0xd5, 0xc8, 0xaf, 0xb0, 0x95, 0xef, 0x61, + 0x84 + ], + }, + TestVector { + description: "Sprout transaction v2 #2", + version: 2, + lock_time: 1450554372, + expiry_height: 0, + txid: [ + 0x57, 0x46, 0x4b, 0x6d, 0xe5, 0x06, 0x68, 0xcd, 0x6c, 0x58, 0xe7, 0xec, 0x23, 0x57, + 0xc4, 0x31, 0xfd, 0x0e, 0x31, 0x16, 0x2c, 0xa2, 0x0b, 0x07, 0x0c, 0xbd, 0x96, 0x7a, + 0xdb, 0x61, 0x8b, 0x45 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 1, + transparent_outputs: 1, + tx: vec![ + 0x02, 0x00, 0x00, 0x00, 0x01, 0xd6, 0x51, 0x58, 0x90, 0x22, 0xee, 0xae, 0xa4, 0xc0, + 0xce, 0x1f, 0xa6, 0xf0, 0x85, 0x09, 0x2b, 0x04, 0x97, 0x94, 0x89, 0x17, 0x2b, 0x3e, + 0xf8, 0x19, 0x4a, 0x79, 0x8d, 0xf5, 0x72, 0x4d, 
0x6b, 0x05, 0xf1, 0xae, 0x00, 0x00, + 0x13, 0xa0, 0x8d, 0x61, 0x01, 0xd0, 0x03, 0xf8, 0x1a, 0x70, 0x6d, 0x03, 0x00, 0x09, + 0x6a, 0x51, 0x65, 0xac, 0x63, 0x00, 0x53, 0xac, 0x51, 0x04, 0xb4, 0x75, 0x56, 0x00 + ], + }, + TestVector { + description: "Sprout transaction v2 #3", + version: 2, + lock_time: 4256720683, + expiry_height: 0, + txid: [ + 0xef, 0xd1, 0x5e, 0x95, 0x4e, 0x01, 0xdb, 0x99, 0x24, 0x71, 0xe8, 0xf6, 0x47, 0x80, + 0x4a, 0x91, 0xbf, 0x36, 0x16, 0x36, 0xc9, 0x50, 0xb6, 0x97, 0xe8, 0x8a, 0x41, 0x94, + 0xce, 0x5d, 0x24, 0x04 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 1, + transparent_outputs: 0, + tx: vec![ + 0x02, 0x00, 0x00, 0x00, 0x01, 0x51, 0x44, 0x58, 0xe2, 0x32, 0x1d, 0x14, 0x60, 0x71, + 0x78, 0x9d, 0x23, 0x35, 0x93, 0x4a, 0x68, 0x06, 0x14, 0xe8, 0x35, 0x62, 0xf8, 0x2d, + 0xfd, 0x40, 0x5b, 0x54, 0xa4, 0x5e, 0xb3, 0x2c, 0x16, 0x54, 0x48, 0xd4, 0xd5, 0x08, + 0xac, 0x52, 0x63, 0x63, 0x63, 0x65, 0x6a, 0x53, 0xf1, 0xa1, 0x37, 0xe9, 0x00, 0x2b, + 0x67, 0xb8, 0xfd, 0x00 + ], + }, + TestVector { + description: "Sprout transaction v2 #4", + version: 2, + lock_time: 4048828293, + expiry_height: 0, + txid: [ + 0xf9, 0x01, 0x16, 0x8b, 0x67, 0xe9, 0xdc, 0x6d, 0xd5, 0x2d, 0x9c, 0x79, 0x75, 0x62, + 0xb5, 0xae, 0x4d, 0xd8, 0x59, 0xcd, 0x51, 0xf3, 0x8e, 0xe1, 0xe9, 0x5e, 0x47, 0xae, + 0xb7, 0x3b, 0xf2, 0xfc + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 1, + transparent_outputs: 2, + tx: vec![ + 0x02, 0x00, 0x00, 0x00, 0x01, 0xbd, 0xa5, 0x4a, 0x31, 0x73, 0x11, 0x89, 0x6a, 0xe1, + 0x02, 0x80, 0xa0, 0x32, 0x44, 0x0c, 0x42, 0x0a, 0x42, 0x1e, 0x94, 0x4d, 0x1e, 0x95, + 0x2b, 0x70, 0xd5, 0x82, 0x6c, 0xd3, 0xb0, 0x8b, 0x7d, 0xb9, 0x63, 0x0f, 0xe4, 0x07, + 0x6a, 0x52, 0x52, 0x63, 0x00, 0x00, 0xac, 0xc4, 0x0b, 0x98, 0x03, 0x02, 0xaf, 0x89, + 0x76, 0xf7, 0x25, 0x14, 0x02, 0x00, 0x04, 0x65, 0x53, 0x65, 0x51, 0xca, 0x57, 0x91, + 0x90, 0xdc, 0x97, 0x00, 0x00, 0x08, 0x6a, 0x63, 0x51, 0x51, 0xac, 0x52, 0x52, 
0x63, + 0x85, 0x37, 0x54, 0xf1, 0x00 + ], + }, + TestVector { + description: "Sprout transaction v2 #5", + version: 2, + lock_time: 968133619, + expiry_height: 0, + txid: [ + 0xf9, 0x02, 0xae, 0x85, 0x73, 0xfc, 0x56, 0x55, 0x0e, 0xaf, 0xcb, 0x74, 0xce, 0xe5, + 0xa6, 0x4b, 0xe3, 0x14, 0xc0, 0x70, 0x1f, 0xc9, 0x8d, 0xca, 0xfa, 0x01, 0x41, 0x65, + 0xec, 0x14, 0xad, 0xf2 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 2, + transparent_outputs: 2, + tx: vec![ + 0x02, 0x00, 0x00, 0x00, 0x02, 0x71, 0xcf, 0xb7, 0x50, 0x30, 0x72, 0x65, 0x57, 0x53, + 0xfa, 0x3f, 0x54, 0xec, 0xc5, 0x87, 0xe9, 0xf8, 0x3b, 0x58, 0x19, 0x16, 0x09, 0x2d, + 0xf2, 0x6e, 0x63, 0xe1, 0x89, 0x94, 0xcb, 0x0d, 0xb9, 0x1a, 0x0b, 0xbd, 0xc7, 0x06, + 0x51, 0x53, 0x52, 0x52, 0x52, 0x6a, 0x5e, 0x61, 0xd8, 0xd8, 0xae, 0x89, 0xda, 0xe4, + 0x95, 0x4b, 0x54, 0x81, 0x3b, 0xb3, 0x3f, 0x08, 0xd5, 0x62, 0xba, 0x51, 0x3f, 0xee, + 0x1b, 0x09, 0xc0, 0xfc, 0xd5, 0x16, 0x05, 0x54, 0x19, 0x47, 0x4d, 0xd7, 0xfd, 0xa0, + 0x38, 0xa8, 0x9c, 0x84, 0x08, 0x53, 0xac, 0x00, 0x00, 0x6a, 0x65, 0x00, 0x51, 0x0c, + 0x4b, 0x13, 0x25, 0x02, 0xd3, 0xca, 0xc1, 0xe0, 0xe6, 0x30, 0x06, 0x00, 0x02, 0xac, + 0x63, 0x27, 0x73, 0x41, 0xc6, 0xad, 0xc1, 0x05, 0x00, 0x03, 0x00, 0x51, 0x51, 0xf3, + 0x8b, 0xb4, 0x39, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x53, 0xbd, 0xea, 0x35, 0x96, 0xd1, 0x5e, + 0x71, 0x3e, 0x1e, 0x2e, 0x7d, 0x3f, 0x1c, 0x38, 0x31, 0x35, 0xb4, 0x7f, 0xa7, 0xf8, + 0x1f, 0x46, 0xdf, 0x7a, 0x90, 0x2a, 0x40, 0x46, 0x99, 0xec, 0x91, 0x2f, 0x56, 0x56, + 0xc3, 0x5b, 0x85, 0x76, 0x3e, 0x4d, 0xe5, 0x83, 0xae, 0xca, 0xa1, 0xdf, 0xd5, 0xd2, + 0x67, 0x7d, 0x9c, 0x8f, 0xfe, 0xe8, 0x77, 0xf6, 0x3f, 0x40, 0xa5, 0xca, 0x0d, 0x67, + 0xf6, 0xe5, 0x54, 0x12, 0x47, 0x39, 0xf8, 0x05, 0xaf, 0x87, 0x6a, 0xee, 0xde, 0x53, + 0xaa, 0x8b, 0x0f, 0x8e, 0x56, 0x04, 0xa7, 0x3c, 0x30, 0xcb, 0xd0, 0x9d, 0xad, 0x96, + 0x3d, 0x6f, 0x8a, 0x5d, 0xcc, 0x40, 0xde, 
0xf4, 0x07, 0x97, 0x34, 0x21, 0x13, 0xba, + 0x20, 0x6f, 0xae, 0x8e, 0xbe, 0x4f, 0x3b, 0xc3, 0xca, 0xf6, 0x92, 0x59, 0xe4, 0x62, + 0xef, 0xf9, 0xba, 0x8b, 0x3f, 0x4b, 0xfa, 0xa1, 0x30, 0x0c, 0x26, 0x92, 0x5a, 0x87, + 0x29, 0xcd, 0x32, 0x91, 0x5b, 0xfc, 0x96, 0x60, 0x86, 0xf0, 0xd5, 0x56, 0x0b, 0xbe, + 0x32, 0xa5, 0x98, 0xc2, 0x2a, 0xdf, 0xb4, 0x8c, 0xef, 0x72, 0xba, 0x5d, 0x42, 0x87, + 0xc0, 0xce, 0xfb, 0xac, 0xfd, 0x8c, 0xe1, 0x95, 0xb4, 0x96, 0x3c, 0x34, 0xa9, 0x4b, + 0xba, 0x7a, 0x17, 0x5d, 0xae, 0x4b, 0xbe, 0x3e, 0xf4, 0x86, 0x3d, 0x53, 0x70, 0x89, + 0x15, 0x09, 0x0f, 0x47, 0xa0, 0x68, 0xe2, 0x27, 0x43, 0x3f, 0x9e, 0x49, 0xd3, 0xaa, + 0x09, 0xe3, 0x56, 0xd8, 0xd6, 0x6d, 0x0c, 0x01, 0x21, 0xe9, 0x1a, 0x3c, 0x4a, 0xa3, + 0xf2, 0x7f, 0xa1, 0xb6, 0x33, 0x96, 0xe2, 0xb4, 0x1d, 0xb9, 0x08, 0xfd, 0xab, 0x8b, + 0x18, 0xcc, 0x73, 0x04, 0xe9, 0x4e, 0x97, 0x05, 0x68, 0xf9, 0x42, 0x1c, 0x0d, 0xbb, + 0xba, 0xf8, 0x45, 0x98, 0xd9, 0x72, 0xb0, 0x53, 0x4f, 0x48, 0xa5, 0xe5, 0x26, 0x70, + 0x43, 0x6a, 0xaa, 0x77, 0x6e, 0xd2, 0x48, 0x2a, 0xd7, 0x03, 0x43, 0x02, 0x01, 0xe5, + 0x34, 0x43, 0xc3, 0x6d, 0xcf, 0xd3, 0x4a, 0x0c, 0xb6, 0x63, 0x78, 0x76, 0x10, 0x5e, + 0x03, 0xbf, 0x3b, 0xd5, 0x8e, 0xc1, 0x48, 0xcb, 0x64, 0x97, 0x0e, 0x32, 0x23, 0xa9, + 0x1f, 0x71, 0xdf, 0xcf, 0xd5, 0xa0, 0x4b, 0x66, 0x7f, 0xba, 0xf3, 0xd4, 0xb3, 0xb9, + 0x08, 0xb9, 0x82, 0x88, 0x20, 0x03, 0xec, 0xdd, 0x75, 0x37, 0x50, 0xb5, 0xf9, 0xd2, + 0x21, 0x6e, 0x56, 0xc6, 0x15, 0x27, 0x2f, 0x85, 0x44, 0x64, 0xc0, 0xca, 0x4b, 0x1e, + 0x85, 0xae, 0xdd, 0x03, 0x82, 0x92, 0xc4, 0xe1, 0xa5, 0x77, 0x0a, 0xeb, 0xba, 0x01, + 0x0b, 0x9e, 0xbf, 0xbb, 0x01, 0x1b, 0xd6, 0xf0, 0xb7, 0x88, 0x05, 0x02, 0x5d, 0x27, + 0xf3, 0xc1, 0x77, 0x46, 0xba, 0xe1, 0x16, 0xc1, 0x5d, 0x9f, 0x47, 0x1f, 0x0f, 0x62, + 0x88, 0xa1, 0x50, 0x64, 0x7b, 0x2a, 0xfe, 0x9d, 0xf7, 0xcc, 0xcf, 0x01, 0xf5, 0xcd, + 0xe5, 0xf0, 0x46, 0x80, 0xbb, 0xfe, 0xd8, 0x7f, 0x6c, 0xf4, 0x29, 0xfb, 0x27, 0xad, + 0x6b, 0xab, 0xe7, 0x91, 0x76, 0x02, 0x11, 0xcf, 0x5b, 0xc2, 
0x0e, 0x48, 0xbe, 0xf1, + 0x19, 0x25, 0x9b, 0x9b, 0x8a, 0x0e, 0x39, 0xc3, 0xdf, 0x28, 0xcb, 0x95, 0x82, 0xea, + 0x33, 0x86, 0x01, 0xcd, 0xc4, 0x81, 0xb3, 0x2f, 0xb8, 0x2a, 0x02, 0xeb, 0xb3, 0xda, + 0xde, 0x25, 0xd1, 0xa3, 0xdf, 0x20, 0xc3, 0x7e, 0x71, 0x25, 0x06, 0xb5, 0xd9, 0x96, + 0xc4, 0x9a, 0x9f, 0x0f, 0x30, 0xdd, 0xcb, 0x91, 0xfe, 0x90, 0x04, 0xe1, 0xe8, 0x32, + 0x94, 0x02, 0xc9, 0x20, 0x3d, 0x94, 0xe8, 0xdc, 0x2c, 0xbb, 0x44, 0x9d, 0xe4, 0x15, + 0x50, 0x32, 0x60, 0x4e, 0x47, 0x99, 0x70, 0x16, 0xb3, 0x04, 0xfd, 0x43, 0x7d, 0x82, + 0x35, 0x04, 0x5e, 0x25, 0x5a, 0x19, 0x03, 0x43, 0xa0, 0xa9, 0xf2, 0xe3, 0x36, 0xb4, + 0x4c, 0xae, 0x30, 0x7b, 0xb3, 0x98, 0x7b, 0xd3, 0xe4, 0xe7, 0x77, 0xfb, 0xb3, 0x4c, + 0x0a, 0xb8, 0xcc, 0x3d, 0x67, 0x46, 0x6c, 0x0a, 0x88, 0xdd, 0x4c, 0x02, 0xd1, 0x8a, + 0x07, 0xa8, 0xd1, 0x06, 0x8d, 0xf5, 0xb6, 0x29, 0xe5, 0x71, 0x8d, 0x0f, 0x6d, 0xf5, + 0xc9, 0x57, 0xcf, 0x71, 0xbb, 0x00, 0xa5, 0x17, 0x8f, 0x17, 0x5c, 0xac, 0xa9, 0x44, + 0xe6, 0x35, 0xc5, 0x15, 0x9f, 0x73, 0x8e, 0x24, 0x02, 0xa2, 0xd2, 0x1a, 0xa0, 0x81, + 0xe1, 0x0e, 0x45, 0x6a, 0xfb, 0x00, 0xb9, 0xf6, 0x24, 0x16, 0xc8, 0xb9, 0xc0, 0xf7, + 0x22, 0x8f, 0x51, 0x07, 0x29, 0xe0, 0xbe, 0x3f, 0x30, 0x53, 0x13, 0xd7, 0x7f, 0x73, + 0x79, 0xdc, 0x2a, 0xf2, 0x48, 0x69, 0xc6, 0xc7, 0x4e, 0xe4, 0x47, 0x14, 0x98, 0x86, + 0x1d, 0x19, 0x2f, 0x0f, 0xf0, 0xf5, 0x08, 0x28, 0x5d, 0xab, 0x6b, 0x6a, 0x36, 0xcc, + 0xf7, 0xd1, 0x22, 0x56, 0xcc, 0x76, 0xb9, 0x55, 0x03, 0x72, 0x0a, 0xc6, 0x72, 0xd0, + 0x82, 0x68, 0xd2, 0xcf, 0x77, 0x73, 0xb6, 0xba, 0x2a, 0x5f, 0x66, 0x48, 0x47, 0xbf, + 0x70, 0x7f, 0x2f, 0xc1, 0x0c, 0x98, 0xf2, 0xf0, 0x06, 0xec, 0x22, 0xcc, 0xb5, 0xa8, + 0xc8, 0xb7, 0xc4, 0x0c, 0x7c, 0x2d, 0x49, 0xa6, 0x63, 0x9b, 0x9f, 0x2c, 0xe3, 0x3c, + 0x25, 0xc0, 0x4b, 0xc4, 0x61, 0xe7, 0x44, 0xdf, 0xa5, 0x36, 0xb0, 0x0d, 0x94, 0xba, + 0xdd, 0xf4, 0xf4, 0xd1, 0x40, 0x44, 0xc6, 0x95, 0xa3, 0x38, 0x81, 0x47, 0x7d, 0xf1, + 0x24, 0xf0, 0xfc, 0xf2, 0x06, 0xa9, 0xfb, 0x2e, 0x65, 0xe3, 0x04, 0xcd, 0xbf, 
0x0c, + 0x4d, 0x23, 0x90, 0x17, 0x0c, 0x13, 0x0a, 0xb8, 0x49, 0xc2, 0xf2, 0x2b, 0x5c, 0xdd, + 0x39, 0x21, 0x64, 0x0c, 0x8c, 0xf1, 0x97, 0x6a, 0xe1, 0x01, 0x0b, 0x0d, 0xfd, 0x9c, + 0xb2, 0x54, 0x3e, 0x45, 0xf9, 0x97, 0x49, 0xcc, 0x4d, 0x61, 0xf2, 0xe8, 0xaa, 0xbf, + 0xe9, 0x8b, 0xd9, 0x05, 0xfa, 0x39, 0x95, 0x1b, 0x33, 0xea, 0x76, 0x9c, 0x45, 0xab, + 0x95, 0x31, 0xc5, 0x72, 0x09, 0x86, 0x2a, 0xd1, 0x2f, 0xd7, 0x6b, 0xa4, 0x80, 0x7e, + 0x65, 0x41, 0x7b, 0x6c, 0xd1, 0x2f, 0xa8, 0xec, 0x91, 0x6f, 0x01, 0x3e, 0xbb, 0x87, + 0x06, 0xa9, 0x6e, 0xff, 0xed, 0xa0, 0x6c, 0x4b, 0xe2, 0x4b, 0x04, 0x84, 0x63, 0x92, + 0xe9, 0xd1, 0xe6, 0x93, 0x0e, 0xae, 0x01, 0xfa, 0x21, 0xfb, 0xd7, 0x00, 0x58, 0x3f, + 0xb5, 0x98, 0xb9, 0x2c, 0x8f, 0x4e, 0xb8, 0xa6, 0x1a, 0xa6, 0x23, 0x5d, 0xb6, 0x0f, + 0x28, 0x41, 0xcf, 0x3a, 0x1c, 0x6a, 0xb5, 0x4c, 0x67, 0x06, 0x68, 0x44, 0x71, 0x1d, + 0x09, 0x1e, 0xb9, 0x31, 0xa1, 0xbd, 0x62, 0x81, 0xae, 0xdf, 0x2a, 0x0e, 0x8f, 0xab, + 0x18, 0x81, 0x72, 0x02, 0xa9, 0xbe, 0x06, 0x40, 0x2e, 0xd9, 0xcc, 0x72, 0x0c, 0x16, + 0xbf, 0xe8, 0x81, 0xe4, 0xdf, 0x42, 0x55, 0xe8, 0x7a, 0xfb, 0x7f, 0xc6, 0x2f, 0x38, + 0x11, 0x6b, 0xbe, 0x03, 0xcd, 0x8a, 0x3c, 0xb1, 0x1a, 0x27, 0xd5, 0x68, 0x41, 0x47, + 0x82, 0xf4, 0x7b, 0x1a, 0x44, 0xc9, 0x7c, 0x68, 0x04, 0x67, 0x69, 0x4b, 0xc9, 0x70, + 0x9d, 0x32, 0x91, 0x6c, 0x97, 0xe8, 0x00, 0x6c, 0xbb, 0x07, 0xba, 0x0e, 0x41, 0x80, + 0xa3, 0x73, 0x80, 0x38, 0xc3, 0x74, 0xc4, 0xcc, 0xe8, 0xf3, 0x29, 0x59, 0xaf, 0xb2, + 0x5f, 0x30, 0x3f, 0x58, 0x15, 0xc4, 0x53, 0x31, 0x24, 0xac, 0xf9, 0xd1, 0x89, 0x40, + 0xe7, 0x75, 0x22, 0xac, 0x5d, 0xc4, 0xb9, 0x57, 0x0a, 0xae, 0x8f, 0x47, 0xb7, 0xf5, + 0x7f, 0xd8, 0x76, 0x7b, 0xea, 0x1a, 0x24, 0xae, 0x7b, 0xed, 0x65, 0xb4, 0xaf, 0xdc, + 0x8f, 0x12, 0x78, 0xc3, 0x0e, 0x2d, 0xb9, 0x8f, 0xd1, 0x72, 0x73, 0x0a, 0xc6, 0xbb, + 0xed, 0x4f, 0x11, 0x27, 0xcd, 0x32, 0xb0, 0x4a, 0x95, 0xb2, 0x05, 0x52, 0x6c, 0xfc, + 0xb4, 0xc4, 0xe1, 0xcc, 0x95, 0x51, 0x75, 0xb3, 0xe8, 0xde, 0x1f, 0x5d, 0x81, 0xb1, + 0x86, 0x69, 
0x69, 0x23, 0x50, 0xaa, 0xa1, 0xa1, 0xd7, 0x97, 0x61, 0x75, 0x82, 0xe5, + 0x4d, 0x7a, 0x5b, 0x57, 0xa6, 0x83, 0xb3, 0x2f, 0xb1, 0x09, 0x80, 0x62, 0xda, 0xd7, + 0xb0, 0xc2, 0xeb, 0x51, 0x8f, 0x68, 0x62, 0xe8, 0x3d, 0xb2, 0x5e, 0x3d, 0xba, 0xf7, + 0xae, 0xd5, 0x04, 0xde, 0x93, 0x2a, 0xcb, 0x99, 0xd7, 0x35, 0x99, 0x2c, 0xe6, 0x2b, + 0xae, 0x9e, 0xf8, 0x93, 0xff, 0x6a, 0xcc, 0x0f, 0xfc, 0xf8, 0xe3, 0x48, 0x3e, 0x14, + 0x6b, 0x9d, 0x49, 0xdd, 0x8c, 0x78, 0x35, 0xf4, 0x3a, 0x37, 0xdc, 0xa0, 0x78, 0x7e, + 0x3e, 0xc9, 0xf6, 0x60, 0x52, 0x23, 0xd5, 0xba, 0x7a, 0xe0, 0xab, 0x90, 0x25, 0xb7, + 0x3b, 0xc0, 0x3f, 0x7f, 0xac, 0x36, 0xc0, 0x09, 0xa5, 0x6d, 0x4d, 0x95, 0xd1, 0xe8, + 0x1d, 0x3b, 0x3e, 0xbc, 0xa7, 0xe5, 0x4c, 0xc1, 0xa1, 0x2d, 0x12, 0x7b, 0x57, 0xc8, + 0x13, 0x89, 0x76, 0xe7, 0x91, 0x01, 0x3b, 0x01, 0x5f, 0x06, 0xa6, 0x24, 0xf5, 0x21, + 0xb6, 0xee, 0x04, 0xec, 0x98, 0x08, 0x93, 0xc7, 0xe5, 0xe0, 0x1a, 0x33, 0x62, 0x03, + 0x59, 0x40, 0x94, 0xf8, 0x28, 0x33, 0xd7, 0x44, 0x5f, 0xe2, 0xd0, 0x91, 0x30, 0xf6, + 0x35, 0x11, 0xda, 0x54, 0x83, 0x2d, 0xe9, 0x13, 0x6b, 0x39, 0xf4, 0x59, 0x9f, 0x5a, + 0xa5, 0xdf, 0xbb, 0x45, 0xda, 0x60, 0xcd, 0xce, 0xab, 0x7e, 0xef, 0xde, 0x89, 0xbe, + 0x63, 0xf3, 0xf7, 0xc0, 0xd2, 0x32, 0x48, 0x47, 0xcc, 0xe1, 0x40, 0x5d, 0xef, 0x7c, + 0x46, 0x9b, 0x0e, 0x27, 0x24, 0x94, 0xe5, 0xdf, 0x54, 0xf5, 0x68, 0x65, 0x6c, 0xb9, + 0xc8, 0x81, 0x8d, 0x92, 0xb7, 0x2b, 0x8b, 0xc3, 0x4d, 0xb7, 0xbb, 0x31, 0x12, 0x48, + 0x7e, 0x74, 0x6e, 0xef, 0xe4, 0xe8, 0x08, 0xbb, 0xb2, 0x87, 0xd9, 0x9b, 0xf0, 0x7d, + 0x00, 0xda, 0xbe, 0xde, 0xdc, 0x5e, 0x5f, 0x07, 0x4f, 0xfe, 0xae, 0x0c, 0xba, 0x7d, + 0xa3, 0xa5, 0x16, 0xc1, 0x73, 0xbe, 0x1c, 0x51, 0x33, 0x23, 0xe1, 0x19, 0xf6, 0x35, + 0xe8, 0x20, 0x9a, 0x07, 0x4b, 0x21, 0x6b, 0x70, 0x23, 0xfa, 0xdc, 0x2d, 0x25, 0x94, + 0x9c, 0x90, 0x03, 0x7e, 0x71, 0xe3, 0xe5, 0x50, 0x72, 0x6d, 0x21, 0x0a, 0x2c, 0x68, + 0x83, 0x42, 0xe5, 0x24, 0x40, 0x63, 0x5e, 0x9c, 0xc1, 0x4a, 0xfe, 0x10, 0x10, 0x26, + 0x21, 0xa9, 0xc9, 0xac, 0xcb, 
0x78, 0x2e, 0x9e, 0x4a, 0x5f, 0xa8, 0x7f, 0x0a, 0x95, + 0x6f, 0x5b, 0x85, 0x50, 0x99, 0x60, 0x28, 0x5c, 0x22, 0x62, 0x7c, 0x59, 0x48, 0x3a, + 0x5a, 0x4c, 0x28, 0xcc, 0xe4, 0xb1, 0x56, 0xe5, 0x51, 0x40, 0x6a, 0x7e, 0xe8, 0x35, + 0x56, 0x56, 0xa2, 0x1e, 0x43, 0xe3, 0x8c, 0xe1, 0x29, 0xfd, 0xad, 0xb7, 0x59, 0xed, + 0xdf, 0xa0, 0x8f, 0x00, 0xfc, 0x8e, 0x56, 0x7c, 0xef, 0x93, 0xc6, 0x79, 0x2d, 0x01, + 0xdf, 0x05, 0xe6, 0xd5, 0x80, 0xf4, 0xd5, 0xd4, 0x8d, 0xf0, 0x42, 0x45, 0x1a, 0x33, + 0x59, 0x0d, 0x3e, 0x8c, 0xf4, 0x9b, 0x26, 0x27, 0x21, 0x8f, 0x0c, 0x29, 0x2f, 0xa6, + 0x6a, 0xda, 0x94, 0x5f, 0xa5, 0x5b, 0xb2, 0x35, 0x48, 0xe3, 0x3a, 0x83, 0xa5, 0x62, + 0x95, 0x7a, 0x31, 0x49, 0xa9, 0x93, 0xcc, 0x47, 0x23, 0x62, 0x29, 0x87, 0x36, 0xa8, + 0xb7, 0x78, 0xd9, 0x7c, 0xe4, 0x23, 0x01, 0x3d, 0x64, 0xb3, 0x2c, 0xd1, 0x72, 0xef, + 0xa5, 0x51, 0xbf, 0x7f, 0x36, 0x8f, 0x04, 0xbd, 0xae, 0xc6, 0x09, 0x1a, 0x30, 0x04, + 0xa7, 0x57, 0x59, 0x8b, 0x80, 0x1d, 0xcf, 0x67, 0x5c, 0xb8, 0x3e, 0x43, 0xa5, 0x3a, + 0xe8, 0xb2, 0x54, 0xd3, 0x33, 0xbc, 0xda, 0x20, 0xd4, 0x81, 0x7d, 0x34, 0x77, 0xab, + 0xfb, 0xa2, 0x5b, 0xb8, 0x3d, 0xf5, 0x94, 0x9c, 0x12, 0x6f, 0x14, 0x9b, 0x1d, 0x99, + 0x34, 0x1e, 0x4e, 0x6f, 0x91, 0x20, 0xf4, 0xd4, 0x1e, 0x62, 0x91, 0x85, 0x00, 0x2c, + 0x72, 0xc0, 0x12, 0xc4, 0x14, 0xd2, 0x38, 0x2a, 0x6d, 0x47, 0xc7, 0xb3, 0xde, 0xab, + 0xa7, 0x70, 0xc4, 0x00, 0xca, 0x96, 0xb2, 0x81, 0x4f, 0x6b, 0x26, 0xc3, 0xef, 0x17, + 0x42, 0x9f, 0x1a, 0x98, 0xc8, 0x5d, 0x83, 0xdb, 0x20, 0xef, 0xad, 0x48, 0xbe, 0x89, + 0x96, 0xfb, 0x1b, 0xff, 0x59, 0x1e, 0xff, 0xf3, 0x60, 0xfe, 0x11, 0x99, 0x05, 0x6c, + 0x56, 0xe5, 0xfe, 0xec, 0x61, 0xa7, 0xb8, 0xb9, 0xf6, 0x99, 0xd6, 0x01, 0x2c, 0x28, + 0x49, 0x23, 0x2f, 0x32, 0x9f, 0xef, 0x95, 0xc7, 0xaf, 0x37, 0x00, 0x98, 0xff, 0xe4, + 0x91, 0x8e, 0x0c, 0xa1, 0xdf, 0x47, 0xf2, 0x75, 0x86, 0x7b, 0x73, 0x9e, 0x0a, 0x51, + 0x4d, 0x32, 0x09, 0x32, 0x5e, 0x21, 0x70, 0x45, 0x92, 0x7b, 0x47, 0x9c, 0x1c, 0xe2, + 0xe5, 0xd5, 0x4f, 0x25, 0x48, 0x8c, 0xad, 0x15, 
0x13, 0xe3, 0xf4, 0x4a, 0x21, 0x26, + 0x6c, 0xfd, 0x84, 0x16, 0x33, 0x32, 0x7d, 0xee, 0x6c, 0xf8, 0x10, 0xfb, 0xf7, 0x39, + 0x3e, 0x31, 0x7d, 0x9e, 0x53, 0xd1, 0xbe, 0x1d, 0x5a, 0xe7, 0x83, 0x9b, 0x66, 0xb9, + 0x43, 0xb9, 0xed, 0x18, 0xf2, 0xc5, 0x30, 0xe9, 0x75, 0x42, 0x23, 0x32, 0xc3, 0x43, + 0x9c, 0xce, 0x49, 0xa2, 0x9f, 0x2a, 0x33, 0x6a, 0x48, 0x51, 0x26, 0x3c, 0x5e, 0x9b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd1, 0x3d, 0x73, 0x11, 0x09, 0xe8, 0x44, 0xb7, 0xf8, 0xc3, 0x92, 0xa5, + 0xc1, 0xdc, 0xaa, 0x2a, 0xe5, 0xf5, 0x0f, 0xf6, 0x3f, 0xab, 0x97, 0x65, 0xe0, 0x16, + 0x70, 0x2c, 0x35, 0xa6, 0x7c, 0xd7, 0x36, 0x4d, 0x3f, 0xab, 0x55, 0x2f, 0xb3, 0x49, + 0xe3, 0x5c, 0x15, 0xc5, 0x02, 0x50, 0x45, 0x3f, 0xd1, 0x8f, 0x7b, 0x85, 0x59, 0x92, + 0x63, 0x2e, 0x2c, 0x76, 0xc0, 0xfb, 0xf1, 0xef, 0x96, 0x3e, 0xa8, 0x0e, 0x32, 0x23, + 0xde, 0x32, 0x77, 0xbc, 0x55, 0x92, 0x51, 0x72, 0x58, 0x29, 0xec, 0x03, 0xf2, 0x13, + 0xba, 0x89, 0x55, 0xca, 0xb2, 0x82, 0x2f, 0xf2, 0x1a, 0x9b, 0x0a, 0x49, 0x04, 0xd6, + 0x68, 0xfc, 0xd7, 0x72, 0x24, 0xbd, 0xe3, 0xdd, 0x01, 0xf6, 0xff, 0xc4, 0x82, 0x8f, + 0x6b, 0x64, 0x23, 0x0b, 0x35, 0xc6, 0xa0, 0x49, 0x87, 0x34, 0x94, 0x27, 0x6e, 0xa1, + 0xd7, 0xed, 0x5e, 0x92, 0xcb, 0x4f, 0x90, 0xba, 0x83, 0xa9, 0xe4, 0x96, 0x01, 0xb1, + 0x94, 0x04, 0x2f, 0x29, 0x00, 0xd9, 0x9d, 0x31, 0x2d, 0x7b, 0x70, 0x50, 0x8c, 0xf1, + 0x76, 0x06, 0x6d, 0x15, 0x4d, 0xbe, 0x96, 0xef, 0x9d, 0x43, 0x67, 0xe4, 0xc8, 0x40, + 0xe4, 0xa1, 0x7b, 0x5e, 0x51, 0x22, 0xe8, 0xeb, 0xe2, 0x15, 0x8a, 0x3c, 0x5f, 0x4c, + 0xba, 0xe2, 0x1e, 0xa3, 0xfa, 0x1a, 0xe6, 0xc2, 0x5a, 0x94, 0x62, 0xeb, 0xcb, 0xb0, + 0xfd, 0x5f, 0x14, 0x55, 0x4b, 0xc9, 0x77, 0x47, 0xc3, 0x3e, 0x34, 0xda, 0x90, 0xc8, + 0x16, 0xd8, 0xd0, 0xd5, 0x0b, 0xfe, 0x37, 0x61, 0x8c, 0x58, 0x12, 0x89, 0x14, 0x84, + 0xfa, 0x25, 0x93, 0x22, 0xc1, 0x50, 0x92, 0xd4, 0x15, 0x5d, 0x86, 0x96, 0xd6, 0xf1, + 0x2f, 0x24, 0xfd, 0x36, 0x44, 0x96, 0xb3, 0xbe, 0x08, 0x71, 0xca, 
0x3d, 0xd9, 0x62, + 0x53, 0x48, 0xa6, 0x14, 0xb5, 0x9b, 0xde, 0x45, 0x88, 0x56, 0x49, 0xba, 0xe3, 0x6d, + 0xe3, 0x4d, 0xef, 0x8f, 0xce, 0xc8, 0x53, 0x43, 0x47, 0x5d, 0x97, 0x6a, 0xe1, 0xe9, + 0xb2, 0x78, 0x29, 0xce, 0x2a, 0xc5, 0xef, 0xd0, 0xb3, 0x99, 0x02, 0xb4, 0x48, 0xbe, + 0x65, 0x04, 0x29, 0x4e, 0xe6, 0xb3, 0xc1, 0xc6, 0xa5, 0x34, 0x2d, 0x7c, 0x01, 0xae, + 0x9d, 0x8a, 0xd3, 0x07, 0x0c, 0x2b, 0x1a, 0x91, 0x57, 0x3a, 0xf5, 0xe0, 0xc5, 0xe4, + 0xcb, 0x03, 0x4a, 0xcd, 0xc6, 0xb5, 0x4c, 0x92, 0x72, 0x20, 0x0d, 0x99, 0x70, 0x25, + 0x0c, 0x17, 0xc1, 0x03, 0x6f, 0x06, 0x08, 0x5c, 0x41, 0x85, 0x8e, 0xd3, 0xa0, 0xc4, + 0x81, 0x50, 0xbc, 0x69, 0x7e, 0x4a, 0x0b, 0x5f, 0xef, 0x33, 0x5f, 0x7a, 0xd0, 0x7e, + 0x1a, 0x46, 0xdc, 0x76, 0x7f, 0xf8, 0x22, 0xdb, 0x70, 0xe6, 0x66, 0x90, 0x80, 0xb9, + 0x81, 0x6b, 0x22, 0x32, 0xc8, 0x1a, 0x4c, 0x66, 0xcc, 0x58, 0x6a, 0xbf, 0xe1, 0xea, + 0xa8, 0xca, 0x6c, 0xf4, 0x1f, 0xc3, 0xc3, 0xe6, 0xc7, 0xb8, 0x86, 0xfb, 0x6d, 0xac, + 0x9f, 0x48, 0x22, 0xb4, 0xfc, 0x6f, 0xff, 0x9d, 0x05, 0x13, 0xd6, 0x1a, 0x21, 0xc8, + 0x0a, 0x03, 0x76, 0x71, 0xd1, 0x35, 0xa6, 0x68, 0xa0, 0xae, 0x2b, 0xb9, 0x34, 0xc8, + 0x2c, 0x41, 0x42, 0xda, 0x69, 0xd1, 0x2c, 0xa7, 0xde, 0x9a, 0x7d, 0xf7, 0x06, 0x40, + 0x0e, 0xc7, 0x98, 0x78, 0xd8, 0x68, 0x03, 0x7e, 0x8f, 0x71, 0xea, 0x31, 0x49, 0x5a, + 0xf8, 0x19, 0xa0, 0x16, 0xcc, 0x41, 0x9e, 0x07, 0xc5, 0x01, 0xaa, 0x83, 0x09, 0xb2, + 0xe6, 0xc8, 0x5b, 0x79, 0xb2, 0x76, 0x37, 0x33, 0xa3, 0x7b, 0xbc, 0x02, 0x20, 0xd4, + 0x25, 0x37, 0xb8, 0x71, 0xb4, 0x29, 0x4a, 0x65, 0xd3, 0xe0, 0x55, 0xff, 0x71, 0x8d, + 0xd9, 0xdc, 0x8c, 0x75, 0xe7, 0xe5, 0xb2, 0xef, 0xe4, 0x42, 0x63, 0x73, 0x71, 0xb7, + 0xc4, 0x8f, 0x02, 0xe9, 0x9e, 0x3e, 0xa3, 0x8a, 0x4b, 0x0f, 0x2f, 0x67, 0xfc, 0x2b, + 0x90, 0x8c, 0xda, 0x65, 0x7e, 0xae, 0x75, 0x4e, 0x03, 0x7e, 0x26, 0x2e, 0x9a, 0x9f, + 0x9b, 0xd7, 0xec, 0x42, 0x67, 0xed, 0x8e, 0x02, 0x93, 0x0e, 0x10, 0x84, 0x78, 0x3c, + 0x37, 0xd6, 0xf9, 0xdd, 0x15, 0xfd, 0x29, 0xf4, 0xcc, 0x47, 0x7e, 0x66, 0xf1, 0x30, + 
0xd6, 0x30, 0x43, 0x0d, 0xcc, 0x01, 0x04, 0x89, 0x9b, 0x4f, 0x9f, 0x46, 0xeb, 0x09, + 0x0e, 0xf7, 0xfc, 0x90, 0xb4, 0x79, 0xab, 0xf6, 0x1f, 0x93, 0x95, 0x5e, 0xe0, 0x0e, + 0x6a, 0x18, 0x48, 0xf1, 0xab, 0x14, 0xad, 0x33, 0x4f, 0x2b, 0x68, 0x03, 0x58, 0x08, + 0xcd, 0xf1, 0xbb, 0x9e, 0x9d, 0x9a, 0x81, 0x6b, 0xaf, 0x72, 0x8a, 0x95, 0x5b, 0x96, + 0x0b, 0x77, 0x01, 0xfa, 0x62, 0x66, 0x87, 0xdc, 0x3c, 0x9c, 0xba, 0x64, 0x63, 0x37, + 0xb5, 0x3e, 0x29, 0x81, 0x6e, 0x94, 0x82, 0xdd, 0xf5, 0x57, 0x8a, 0x87, 0x68, 0xaa, + 0xe4, 0x77, 0xfc, 0xe4, 0x10, 0xac, 0x2d, 0x5d, 0xe6, 0x09, 0x58, 0x61, 0xc1, 0x11, + 0xd7, 0xfe, 0xb3, 0xe6, 0xbb, 0x4f, 0xbb, 0x5a, 0x54, 0x95, 0x54, 0x95, 0x97, 0x27, + 0x98, 0x35, 0x0a, 0x25, 0x3f, 0x05, 0xf6, 0x6c, 0x2e, 0xcf, 0xcb, 0xc0, 0xed, 0x43, + 0xf5, 0xec, 0x2e, 0x6d, 0x8d, 0xba, 0x15, 0xa5, 0x12, 0x54, 0xd9, 0x7b, 0x18, 0x21, + 0x10, 0x7c, 0x07, 0xdd, 0x9a, 0x16, 0xef, 0x84, 0x06, 0xf9, 0x43, 0xe2, 0x82, 0xb9, + 0x5d, 0x4b, 0x36, 0x25, 0x30, 0xc9, 0x13, 0xd6, 0xba, 0x42, 0x1d, 0xf6, 0x02, 0x7d, + 0xe5, 0xaf, 0x1e, 0x47, 0x45, 0xd5, 0x86, 0x81, 0x06, 0x95, 0x4b, 0xe6, 0xc1, 0x96, + 0x27, 0x80, 0xa2, 0x94, 0x10, 0x72, 0xe9, 0x51, 0x31, 0xb1, 0x67, 0x9d, 0xf0, 0x63, + 0x76, 0x25, 0x04, 0x2c, 0x37, 0xd4, 0x8f, 0xfb, 0x15, 0x2e, 0x5e, 0xbc, 0x18, 0x5c, + 0x8a, 0x2b, 0x7d, 0x43, 0x85, 0xf1, 0xc9, 0x5a, 0xf9, 0x37, 0xdf, 0x78, 0xdf, 0xd8, + 0x75, 0x7f, 0xab, 0x43, 0x49, 0x68, 0xb0, 0xb5, 0x7c, 0x66, 0x57, 0x44, 0x68, 0xf1, + 0x60, 0xb4, 0x47, 0xac, 0x82, 0x21, 0xe5, 0x06, 0x06, 0x76, 0xa8, 0x42, 0xa1, 0xc6, + 0xb7, 0x17, 0x2d, 0xd3, 0x34, 0x0f, 0x76, 0x40, 0x70, 0xab, 0x1f, 0xe0, 0x91, 0xc5, + 0xc7, 0x4c, 0x95, 0xa5, 0xdc, 0x04, 0x33, 0x90, 0x72, 0x3a, 0x4c, 0x12, 0x7d, 0xa1, + 0x4c, 0xdd, 0xe1, 0xdc, 0x26, 0x75, 0xa6, 0x23, 0x40, 0xb3, 0xe6, 0xaf, 0xd0, 0x52, + 0x2a, 0x31, 0xde, 0x26, 0xe7, 0xd1, 0xec, 0x3a, 0x9c, 0x8a, 0x09, 0x1f, 0xfd, 0xc7, + 0x5b, 0x7e, 0xcf, 0xdc, 0x7c, 0x12, 0x99, 0x5a, 0x5e, 0x37, 0xce, 0x34, 0x88, 0xbd, + 0x29, 0xf8, 0x62, 
0x9d, 0x68, 0xf6, 0x96, 0x49, 0x24, 0x48, 0xdd, 0x52, 0x66, 0x97, + 0x47, 0x6d, 0xc0, 0x61, 0x34, 0x6e, 0xbe, 0x3f, 0x67, 0x72, 0x17, 0xff, 0x9c, 0x60, + 0xef, 0xce, 0x94, 0x3a, 0xf2, 0x8d, 0xfd, 0x3f, 0x9e, 0x59, 0x69, 0x25, 0x98, 0xa6, + 0x04, 0x7c, 0x23, 0xc4, 0xc0, 0x14, 0x00, 0xf1, 0xab, 0x57, 0x30, 0xea, 0xc0, 0xae, + 0x8d, 0x58, 0x43, 0xd5, 0x05, 0x1c, 0x37, 0x62, 0x40, 0x17, 0x2a, 0xf2, 0x18, 0xd7, + 0xa1, 0xec, 0xfe, 0x65, 0xb4, 0xf7, 0x51, 0x00, 0x63, 0x89, 0x83, 0xc1, 0x4d, 0xe4, + 0x97, 0x47, 0x55, 0xda, 0xde, 0x80, 0x18, 0xc9, 0xb8, 0xf4, 0x54, 0x3f, 0xb0, 0x95, + 0x96, 0x15, 0x13, 0xe6, 0x7c, 0x61, 0xdb, 0xc5, 0x9c, 0x60, 0x7f, 0x9b, 0x51, 0xf8, + 0xd0, 0x9b, 0xdc, 0xad, 0x28, 0xbc, 0xfb, 0x9e, 0x5d, 0x27, 0x44, 0xea, 0x88, 0x48, + 0xb2, 0x62, 0x3a, 0xc0, 0x7f, 0x8e, 0xf6, 0x1a, 0x81, 0xa3, 0x59, 0x10, 0xb8, 0xa1, + 0xba, 0xf3, 0x9a, 0x91, 0x9a, 0x7b, 0x60, 0xbc, 0x60, 0x4d, 0x63, 0x18, 0x5f, 0x75, + 0x92, 0x21, 0xd8, 0x47, 0xcc, 0x54, 0xa2, 0x27, 0x65, 0xa4, 0xc3, 0x34, 0x75, 0xb5, + 0x79, 0x1e, 0x9a, 0xf3, 0x27, 0x1f, 0xc8, 0xd9, 0x35, 0x06, 0x67, 0x09, 0x0d, 0x81, + 0x84, 0xec, 0x50, 0x52, 0x2d, 0x80, 0x4f, 0x23, 0xc4, 0xfb, 0x44, 0xff, 0xa4, 0x81, + 0xbc, 0x92, 0xae, 0x40, 0x8d, 0x1b, 0x9f, 0x2b, 0x13, 0x19, 0x04, 0xf9, 0x70, 0x5c, + 0x59, 0xe2, 0xf4, 0xbd, 0xe7, 0xa3, 0xb2, 0xc0, 0x85, 0xd9, 0x3f, 0xd2, 0xab, 0xc5, + 0xe1, 0x4d, 0x16, 0x30, 0x01, 0xa1, 0x2f, 0x51, 0x93, 0x8d, 0x02, 0x1a, 0xfa, 0x92, + 0x23, 0x9b, 0x87, 0x3d, 0xc6, 0xc3, 0x57, 0xea, 0xa8, 0xaf, 0x4e, 0xe6, 0xd0, 0x05, + 0x40, 0x65, 0x7f, 0xe3, 0x29, 0x14, 0x10, 0x3b, 0x5d, 0x98, 0xf6, 0x8b, 0xd3, 0xe2, + 0xb5, 0x35, 0x9f, 0x08, 0xcc, 0xd8, 0x8d, 0x0c, 0x81, 0x1e, 0x4c, 0x31, 0xfb, 0xb4, + 0x9f, 0x3a, 0x90, 0xbb, 0xd0, 0x5d, 0xce, 0x62, 0xf3, 0x44, 0xe7, 0x07, 0x75, 0x93, + 0x15, 0x9a, 0xe3, 0x50, 0x50, 0xb0, 0x4c, 0x9e, 0x6b, 0x86, 0xbc, 0x43, 0x2d, 0xc8, + 0xb0, 0x48, 0xc7, 0x3c, 0x00, 0x18, 0xca, 0x5b, 0x69, 0x41, 0x12, 0x97, 0x73, 0x2a, + 0x4e, 0x1a, 0xa9, 0x9a, 0x92, 0x8c, 
0x71, 0xe7, 0xa2, 0x4f, 0xd2, 0x77, 0x85, 0x6a, + 0xa4, 0x25, 0x01, 0xe5, 0x1b, 0x01, 0x2a, 0xea, 0x94, 0x46, 0xa2, 0x10, 0x4e, 0x93, + 0xf8, 0x15, 0xa0, 0xb3, 0xa2, 0x9b, 0x45, 0x83, 0x14, 0xf3, 0xd8, 0xbe, 0x2b, 0x98, + 0x23, 0xd3, 0x42, 0xf4, 0x62, 0x13, 0xe9, 0x42, 0xa7, 0xe1, 0x9a, 0x46, 0xe9, 0x70, + 0xb5, 0xc5, 0x06, 0x70, 0x84, 0x30, 0x31, 0x7b, 0x1b, 0xb3, 0xb3, 0x5d, 0xf6, 0x8a, + 0xe3, 0x3a, 0x49, 0x26, 0xa0, 0x3e, 0x6b, 0xfe, 0xb5, 0x51, 0x04, 0x16, 0xfc, 0xbb, + 0x05, 0x24, 0xc9, 0xca, 0x50, 0x74, 0x15, 0x6c, 0xc5, 0xa5, 0xd6, 0xfe, 0x1c, 0x99, + 0x5e, 0xdc, 0x60, 0xa2, 0xf5, 0x50, 0x41, 0x1a, 0xa4, 0x1e, 0x3d, 0xa3, 0xbd, 0xcf, + 0x64, 0xbc, 0xf0, 0x4a, 0x05, 0x10, 0x57, 0x1b, 0x93, 0x6d, 0x47, 0xe5, 0x5c, 0xec, + 0x03, 0x30, 0xee, 0x8d, 0xfe, 0x73, 0x56, 0x34, 0x04, 0xf0, 0x47, 0xd7, 0xf3, 0xa8, + 0xa3, 0xd7, 0x74, 0x3b, 0xc5, 0x54, 0x95, 0x52, 0x10, 0xf1, 0xeb, 0x0d, 0x08, 0x59, + 0x9e, 0xa7, 0x7d, 0x5f, 0x97, 0x4d, 0x87, 0x17, 0x6d, 0x37, 0xd9, 0x8b, 0x9c, 0x0a, + 0xd4, 0x40, 0x40, 0x72, 0x09, 0xed, 0x6a, 0x9f, 0x08, 0x46, 0x4d, 0x56, 0x55, 0x93, + 0xe1, 0xa6, 0x3b, 0x93, 0x85, 0x36, 0xb4, 0x92, 0x44, 0xe9, 0x7d, 0x88, 0x01, 0x73, + 0xb6, 0x40, 0xf2, 0xdd, 0xb7, 0x4d, 0x06, 0x8e, 0xcb, 0x46, 0xcf, 0x28, 0x9b, 0x7d, + 0x89, 0x13, 0x07, 0xbb, 0xa3, 0x70, 0x54, 0xcf, 0x91, 0xb3, 0x1f, 0xc8, 0x2f, 0x74, + 0xd5, 0xfc, 0xc0, 0x00, 0x94, 0x2e, 0xde, 0x91, 0x18, 0x25, 0xf5, 0x3f, 0xe6, 0x09, + 0x68, 0x6f, 0x46, 0x32, 0x23, 0xb1, 0xe9, 0xbc, 0x03, 0xbd, 0xe8, 0x95, 0xd1, 0x23, + 0x8f, 0xad, 0x04, 0xa3, 0xbf, 0xce, 0x68, 0xa0, 0x75, 0xe8, 0xa3, 0x7c, 0x0e, 0x87, + 0xbf, 0x46, 0xdd, 0x01, 0x55, 0x45, 0xf9, 0xb4, 0xfb, 0x0e, 0xec, 0x64, 0x5f, 0xfc, + 0xbb, 0xe0, 0xca, 0x5f, 0x8c, 0x56, 0x1b, 0x25, 0x7d, 0x52, 0xd6, 0x02, 0xd8, 0xc9, + 0x4c, 0x50, 0x28, 0x73, 0xa0, 0x1d, 0x92, 0x51, 0xd8, 0xc8, 0x60, 0xc0, 0x41, 0x52, + 0x5b, 0x3b, 0xf4, 0xe3, 0xa2, 0xeb, 0x92, 0x72, 0x81, 0x5c, 0x75, 0x86, 0x76, 0x84, + 0x28, 0xb4, 0xc2, 0xb2, 0x5e, 0x37, 0x45, 0xf0, 0x09, 
0xc5, 0xdc, 0xe2, 0x0b, 0x69, + 0xd5, 0xd7, 0xc4, 0x3c, 0xeb, 0x73, 0x6b, 0x68, 0x31, 0xe8, 0xc1, 0x10, 0xf1, 0x6c, + 0xfd, 0xb3, 0xa4, 0x67, 0xe9, 0x41, 0x4c, 0x00, 0xec, 0xf1, 0x37, 0x31, 0x50, 0x08, + 0x94, 0x55, 0x56, 0x78, 0xc4, 0x97, 0xfa, 0xba, 0x9a, 0x95, 0xd0, 0x1c, 0xc4, 0x64, + 0x39, 0x0f, 0xc4, 0xa7, 0x6b, 0xfa, 0x8b, 0x0e, 0x1c, 0x68, 0xa5, 0x25, 0xd7, 0x06, + 0xd6, 0x60, 0x4b, 0x23, 0x30, 0xb6, 0xb3, 0x48, 0x52, 0x15, 0xf6, 0x06, 0xf1, 0x88, + 0x3a, 0x75, 0x15, 0x88, 0xc7, 0xef, 0xa5, 0x06, 0xc3, 0xe8, 0xd0, 0xc6, 0x01, 0x92, + 0xe8, 0x47, 0x6b, 0xd1, 0x17, 0x5d, 0x95, 0x62, 0x08, 0x7b, 0xdb, 0x81, 0x8e, 0x66, + 0x21, 0x62, 0x86, 0xba, 0xfe, 0x47, 0xff, 0x4d, 0xbc, 0xce, 0xd5, 0x14, 0x44, 0x48, + 0x0a, 0x9a, 0x56, 0x73, 0xec, 0xe7, 0xfa, 0xc7, 0x3a, 0x0e, 0xd4, 0x1a, 0xb0, 0x05, + 0x17, 0x53, 0xa7, 0xca, 0xa8, 0x9b, 0xe3, 0x13, 0x9a, 0xfd, 0x97, 0x93, 0xb3, 0xe0, + 0x2f, 0x27, 0xf0, 0x40, 0x04, 0x65, 0x95, 0xac, 0xd4, 0x7b, 0xf1, 0x3f, 0xd0, 0xda, + 0x27, 0xf0, 0x9e, 0xda, 0x48, 0x03, 0x6d, 0x3e, 0xe4, 0x37, 0xf2, 0xee, 0x8f, 0x86, + 0x06, 0xea, 0x97, 0x34, 0x3c, 0x33, 0x58, 0x46, 0x57, 0xf4, 0x6d, 0xba, 0x99, 0xdb, + 0x5c, 0xfe, 0x6c, 0xa1, 0x76, 0xfa, 0xb7, 0xb0, 0xf3, 0xbf, 0xa0, 0xab, 0x61, 0xe3, + 0x40, 0xc3, 0x4e, 0xb9, 0xf1, 0x7c, 0x7e, 0xc2, 0xbe, 0x03, 0xb1, 0x80, 0xf0, 0xbb, + 0x6f, 0x43, 0x4c, 0x2a, 0x65, 0x42, 0xe0, 0x0e, 0x84, 0x37, 0x3f, 0x4f, 0x46, 0x49, + 0xcd, 0xa3, 0x2b, 0xf6, 0x86, 0x66, 0x61, 0x43, 0xf6, 0x22, 0xaa, 0x48, 0x04, 0x60, + 0xb5, 0xaf, 0xac, 0x51, 0x86, 0x07, 0xcd, 0x9a, 0xf8, 0xbc, 0xd6, 0xb5, 0x8c, 0x30, + 0x12, 0x73, 0x16, 0xb2, 0x5d, 0x5e, 0xa7, 0xbf, 0x6b, 0x0c, 0xab, 0x85, 0x42, 0xff, + 0x69, 0xd9, 0xb2, 0xf1, 0x80, 0xbe, 0x12, 0xed, 0x75, 0x34, 0x4a, 0x39, 0x5a, 0xa1, + 0x0f, 0x85, 0x2f, 0x08, 0x3a, 0xd6, 0x4e, 0xf4, 0x0e, 0x9c, 0x03, 0x09, 0xe9, 0xbb, + 0xa5, 0x4b, 0x8c, 0xb3, 0x3c, 0x95, 0x49, 0x8a, 0x69, 0x53, 0x8d, 0x3a, 0xe5, 0xb2, + 0x5e, 0x24, 0x70, 0x98, 0x30, 0x6f, 0xa8, 0xc7, 0x4a, 0x8e, 0xe5, 0xbc, 
0xa9, 0x41, + 0x53, 0x1d, 0x61, 0xaa, 0xc2, 0x7a, 0xab, 0x3d + ], + }, + TestVector { + description: "Overwinter transaction #1", + version: 3, + lock_time: 3220387318, + expiry_height: 206841142, + txid: [ + 0xd3, 0x81, 0x57, 0x69, 0x97, 0x6a, 0x20, 0x43, 0xc4, 0xb0, 0xad, 0xa2, 0xfb, 0x12, + 0xdd, 0xa2, 0x55, 0xcb, 0x4d, 0x31, 0xfa, 0x10, 0xc4, 0xa7, 0x11, 0x0f, 0xa6, 0x35, + 0xde, 0x43, 0xcf, 0x39 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 1, + transparent_outputs: 1, + tx: vec![ + 0x03, 0x00, 0x00, 0x80, 0x70, 0x82, 0xc4, 0x03, 0x01, 0x61, 0x7d, 0x56, 0x06, 0xc9, + 0x57, 0x7a, 0x2a, 0x83, 0x46, 0xe8, 0xd8, 0x5b, 0x32, 0xb8, 0x50, 0x57, 0x75, 0x10, + 0x8d, 0xc8, 0x5e, 0x2a, 0xde, 0x2e, 0xac, 0x1e, 0x63, 0x6e, 0x1a, 0xf4, 0x05, 0x4c, + 0x8b, 0x6f, 0x57, 0x09, 0x63, 0x52, 0x51, 0x53, 0x52, 0x53, 0x52, 0x00, 0x52, 0xe4, + 0xc5, 0x7b, 0x21, 0x01, 0x94, 0xbc, 0x31, 0xcb, 0x19, 0x54, 0x06, 0x00, 0x03, 0x65, + 0x6a, 0x63, 0xf6, 0x35, 0xf3, 0xbf, 0x36, 0x25, 0x54, 0x0c, 0x02, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, + 0xe1, 0x44, 0x7a, 0xf5, 0x08, 0x78, 0x72, 0xd6, 0x36, 0xe2, 0x75, 0x18, 0xa9, 0x87, + 0x6e, 0x15, 0xeb, 0x01, 0xf5, 0xe8, 0xde, 0xd8, 0x18, 0x92, 0x51, 0x1c, 0xc2, 0x85, + 0x1b, 0x00, 0xb8, 0x32, 0x71, 0x2a, 0x6d, 0x3b, 0xa5, 0x66, 0x65, 0x17, 0xbc, 0xd3, + 0x56, 0x76, 0x21, 0xa7, 0xcf, 0x84, 0x45, 0x58, 0x96, 0x53, 0x26, 0x20, 0x20, 0xc3, + 0x3b, 0xf7, 0x80, 0x31, 0xb8, 0xee, 0x07, 0x07, 0xde, 0x07, 0x20, 0x68, 0xc1, 0x70, + 0x57, 0x03, 0x27, 0xe6, 0xd9, 0xf5, 0xc6, 0xdd, 0xc3, 0x35, 0x40, 0x2e, 0xfc, 0x54, + 0x88, 0x62, 0xf5, 0xa0, 0x70, 0x94, 0xfd, 0x42, 0x8a, 0x7b, 0xbc, 0x15, 0xd7, 0xb3, + 0x8d, 0x05, 0x36, 0x2c, 0x9c, 0xa9, 0x85, 0xf5, 0x8a, 0x76, 0x64, 0x7d, 0x2b, 0xe4, + 0xc2, 0xcd, 0x6b, 0x3d, 0x17, 0xd6, 0x87, 0x09, 0x71, 0xd7, 0xa0, 0x98, 0xba, 0xf7, + 0x2c, 0x6f, 0x6f, 0x12, 0x14, 0xcf, 0x1f, 0xaa, 0xe4, 0x88, 0xbd, 0x7d, 0xe2, 0x59, + 0xd3, 
0x41, 0x5c, 0x2f, 0x0d, 0xde, 0xc7, 0x45, 0x70, 0x04, 0xf3, 0x57, 0x08, 0xd1, + 0xec, 0xcc, 0xcc, 0x0d, 0xf6, 0x5a, 0x04, 0x94, 0x3a, 0xd5, 0xcb, 0xc1, 0x3f, 0x29, + 0x5f, 0x00, 0x0f, 0xe0, 0x56, 0xc4, 0x0b, 0x2d, 0x88, 0xf2, 0x7d, 0xc3, 0x4c, 0xfe, + 0xb8, 0x03, 0xbe, 0x34, 0x83, 0xa9, 0xeb, 0xf9, 0xb5, 0xa9, 0x02, 0x60, 0x57, 0x72, + 0x5d, 0x63, 0xea, 0xd2, 0xc0, 0xc0, 0xff, 0x1f, 0xe2, 0x6a, 0xc1, 0xe7, 0xbd, 0xfc, + 0xd6, 0xfa, 0xd8, 0x75, 0x84, 0x2d, 0x19, 0x4f, 0x33, 0x17, 0x50, 0x46, 0x2c, 0x06, + 0xb8, 0xd7, 0x98, 0x2d, 0x67, 0x99, 0x5e, 0xd5, 0xd3, 0xae, 0x96, 0xa0, 0x5a, 0xe0, + 0x06, 0x7f, 0x4e, 0xb1, 0xc7, 0xc9, 0x32, 0x31, 0xbd, 0x39, 0x77, 0x3c, 0xbe, 0x0a, + 0x9d, 0x66, 0xb0, 0xc9, 0xaa, 0x8c, 0xff, 0x6a, 0x37, 0x6e, 0x1f, 0x37, 0x2e, 0xac, + 0x6a, 0xc4, 0xe4, 0x6c, 0xc0, 0x94, 0x22, 0x45, 0xd4, 0xc2, 0xdc, 0xf0, 0x2d, 0x76, + 0x40, 0xff, 0xcc, 0x5a, 0x6a, 0xc3, 0xa8, 0x03, 0x5c, 0x41, 0x15, 0x51, 0xbc, 0xc2, + 0xf2, 0x6c, 0xb9, 0x49, 0x61, 0xd5, 0x3f, 0x95, 0xdd, 0xb1, 0x9a, 0xe9, 0x30, 0xc8, + 0xd7, 0x0f, 0x03, 0x1b, 0x29, 0xa5, 0xdf, 0x99, 0xff, 0x36, 0x69, 0x5e, 0x02, 0x2c, + 0xbc, 0xb6, 0xb5, 0x8c, 0x1b, 0xa7, 0xed, 0x5e, 0xac, 0xfa, 0x76, 0x41, 0x4a, 0x41, + 0xad, 0x4a, 0x44, 0xf7, 0x1f, 0x1b, 0x58, 0x0d, 0x34, 0xc3, 0xa9, 0x52, 0x92, 0x0b, + 0x25, 0x4a, 0x14, 0x0b, 0xea, 0x51, 0x7f, 0x5b, 0x42, 0xb2, 0xf6, 0x5e, 0xcd, 0x0f, + 0x82, 0x59, 0x54, 0x78, 0xd8, 0x0a, 0xe5, 0xc8, 0xce, 0xea, 0x12, 0xa1, 0x61, 0xcc, + 0xbb, 0x5e, 0xac, 0x09, 0x99, 0x0f, 0xc6, 0x19, 0xa4, 0x60, 0x80, 0x43, 0x6d, 0xbd, + 0x08, 0xd7, 0x47, 0x84, 0xaf, 0x00, 0x2d, 0x58, 0xe0, 0x6f, 0xaf, 0x7f, 0x3c, 0xea, + 0xe7, 0xd3, 0x41, 0x9b, 0x1f, 0xca, 0x26, 0x5a, 0x55, 0x59, 0xcf, 0x9e, 0x03, 0x3b, + 0x60, 0x97, 0x8d, 0x81, 0xa6, 0x78, 0xb9, 0xed, 0x8e, 0x44, 0x86, 0xb4, 0xd1, 0x46, + 0x09, 0xd6, 0xc1, 0x27, 0xc0, 0xc2, 0xfb, 0xff, 0xe3, 0x0a, 0x60, 0xf7, 0xbf, 0xf1, + 0xd9, 0xfb, 0x83, 0x02, 0xed, 0x00, 0x92, 0x53, 0xba, 0x9b, 0x99, 0x6f, 0xa0, 0x52, + 0x41, 0xb1, 0x0f, 0x5a, 
0xc9, 0xa8, 0x40, 0x8e, 0x92, 0x5b, 0x62, 0x6b, 0xb2, 0x1a, + 0x47, 0x1f, 0xe3, 0xbe, 0xde, 0x52, 0xbb, 0xa0, 0x03, 0xb2, 0xa9, 0x9a, 0x9b, 0xa5, + 0xa8, 0x66, 0x58, 0xc3, 0xfd, 0x9e, 0xc5, 0x5b, 0xfa, 0x9b, 0x32, 0x85, 0x67, 0x25, + 0x4a, 0xb3, 0x6d, 0x2c, 0x7f, 0x44, 0xd2, 0xc7, 0xe1, 0x3e, 0xb5, 0x4b, 0xeb, 0x02, + 0xea, 0x8f, 0xa9, 0x4b, 0x6c, 0x6e, 0x01, 0x2d, 0x79, 0xe3, 0xf5, 0x36, 0x89, 0xc2, + 0xb1, 0xa1, 0x8e, 0xaf, 0x2d, 0x47, 0x1d, 0x13, 0xc1, 0xab, 0x39, 0xd9, 0x19, 0x4a, + 0xe8, 0x43, 0xab, 0x1d, 0x02, 0xff, 0xa8, 0xf6, 0x9d, 0xc7, 0xe1, 0x5c, 0xc3, 0x8b, + 0x12, 0xe8, 0xfc, 0xd7, 0x92, 0x55, 0xb7, 0x21, 0x60, 0x56, 0xd9, 0xed, 0xb7, 0x48, + 0x2f, 0xb9, 0x8a, 0xa0, 0x33, 0xb6, 0x5e, 0x51, 0xc1, 0xa0, 0x8b, 0x8a, 0x11, 0xd8, + 0x4d, 0x04, 0x09, 0xb7, 0x34, 0xf4, 0x52, 0xaa, 0xf0, 0xd6, 0xb1, 0x8f, 0x50, 0x25, + 0x86, 0x83, 0xd3, 0xf9, 0xa7, 0x6d, 0x39, 0x9f, 0xd0, 0x47, 0xee, 0xe2, 0x88, 0xbb, + 0x45, 0x85, 0x85, 0x1d, 0xc9, 0x3e, 0xcc, 0xc6, 0x23, 0x22, 0x92, 0x4c, 0xd1, 0x3b, + 0x5d, 0xd4, 0xee, 0xd6, 0x6e, 0xd8, 0xd9, 0x97, 0x2d, 0x77, 0x26, 0x29, 0xea, 0x64, + 0x74, 0x2e, 0x54, 0x73, 0x39, 0x81, 0xb0, 0x06, 0xc0, 0x62, 0x46, 0x8e, 0x4b, 0xd8, + 0xf7, 0xdd, 0x9a, 0xf6, 0x98, 0xf5, 0x2a, 0xe8, 0x14, 0x63, 0x4e, 0x81, 0xd7, 0xf3, + 0xe0, 0xc4, 0x20, 0x31, 0x7c, 0xac, 0xa9, 0xae, 0x48, 0x11, 0xc6, 0xaf, 0x06, 0xfe, + 0x80, 0xa8, 0xc0, 0x2a, 0xb7, 0xa0, 0x0e, 0x18, 0xe4, 0xa6, 0xaa, 0x1e, 0xa1, 0xb7, + 0x69, 0x45, 0xd2, 0x61, 0x5d, 0x43, 0xac, 0x11, 0x8b, 0x56, 0xc2, 0xf2, 0x96, 0x0f, + 0xe9, 0x3a, 0x02, 0x5f, 0x13, 0xec, 0x91, 0xff, 0xc6, 0xd2, 0xc3, 0x53, 0x69, 0x9a, + 0xbb, 0x09, 0x2d, 0xed, 0xc0, 0x65, 0xdb, 0x8f, 0xa2, 0x14, 0xdb, 0xc4, 0x64, 0x66, + 0xf8, 0x97, 0xb8, 0x8c, 0x58, 0xb3, 0x01, 0x52, 0x13, 0x3a, 0xa3, 0x83, 0x1a, 0xf3, + 0x7c, 0x74, 0xd9, 0x9e, 0x9e, 0x36, 0xff, 0x70, 0x11, 0xd3, 0x23, 0x83, 0x05, 0x69, + 0x15, 0x08, 0xa2, 0xc3, 0xa4, 0x3e, 0x75, 0x5d, 0xc0, 0x81, 0xb5, 0x11, 0xd6, 0x48, + 0x2a, 0x7d, 0xb6, 0x5f, 0xa9, 0x69, 0x9e, 
0xa8, 0x7f, 0xf4, 0x70, 0x99, 0xed, 0x36, + 0x37, 0xdb, 0xb0, 0xa3, 0xd0, 0xef, 0x79, 0x79, 0x6a, 0x8e, 0xf1, 0xe4, 0xd9, 0x4d, + 0x42, 0xb4, 0xbc, 0x2b, 0x4a, 0x03, 0x8a, 0xe6, 0xe4, 0x6b, 0x24, 0xcf, 0xc8, 0x41, + 0x53, 0xd3, 0x1e, 0xaf, 0x89, 0x50, 0x63, 0xa5, 0xca, 0x95, 0x9b, 0xe6, 0x3f, 0x37, + 0xf2, 0xba, 0x0d, 0x43, 0x23, 0x66, 0x73, 0x6d, 0x86, 0x32, 0xfc, 0xe0, 0x72, 0xb6, + 0xae, 0x5b, 0x6f, 0x3f, 0xd5, 0x9d, 0x3f, 0xaf, 0xf6, 0x38, 0x27, 0x5a, 0x99, 0x2f, + 0xef, 0xc8, 0x7e, 0x60, 0xd4, 0x4c, 0x2c, 0xad, 0xc2, 0xb5, 0xc4, 0x94, 0xe3, 0xe7, + 0x2e, 0xb4, 0x59, 0x7c, 0x96, 0xb4, 0x01, 0x67, 0x79, 0x9a, 0x90, 0x01, 0xa2, 0xed, + 0x36, 0x76, 0xa8, 0xb4, 0x03, 0xae, 0x25, 0xff, 0xd7, 0x72, 0xf7, 0x08, 0x1e, 0x9a, + 0x32, 0xbc, 0xc1, 0xc5, 0xe2, 0xed, 0xd4, 0xe2, 0xa6, 0x57, 0x6b, 0x78, 0x3c, 0xce, + 0x3a, 0xae, 0x11, 0xfa, 0x43, 0x22, 0x62, 0x54, 0x88, 0x56, 0x18, 0x3e, 0xe6, 0x82, + 0xd5, 0xdc, 0x31, 0xbe, 0xb3, 0x8f, 0x06, 0x1c, 0xbd, 0xec, 0xa7, 0x02, 0x1a, 0x44, + 0x4e, 0x2d, 0xd4, 0x17, 0xdf, 0x26, 0xdc, 0xd2, 0x20, 0xf2, 0xb7, 0x31, 0x77, 0x2b, + 0x43, 0x9e, 0x96, 0xd6, 0x14, 0xe1, 0xfa, 0xcb, 0x48, 0x6c, 0x7a, 0x7d, 0x51, 0x71, + 0xb1, 0xde, 0x35, 0x9f, 0x6a, 0xd3, 0xa9, 0x6f, 0x64, 0x9c, 0x96, 0x91, 0x02, 0xa1, + 0x96, 0x4f, 0xb4, 0xb4, 0xa1, 0xa4, 0x27, 0x9c, 0x68, 0xe6, 0xc3, 0x72, 0xe4, 0x21, + 0x87, 0xd7, 0x54, 0xe8, 0x04, 0xa6, 0x16, 0x53, 0x09, 0x20, 0x69, 0xfb, 0x9b, 0x6d, + 0x25, 0x26, 0x68, 0x90, 0x80, 0x8b, 0x01, 0x5d, 0xf2, 0x8c, 0x80, 0x10, 0x65, 0xda, + 0x6f, 0xeb, 0xdc, 0x1a, 0x56, 0xbf, 0xd0, 0x02, 0x62, 0x5a, 0xcf, 0xaa, 0x53, 0x73, + 0xfd, 0xe1, 0x49, 0xc1, 0xcf, 0xc3, 0x64, 0x9b, 0x48, 0x69, 0x69, 0x6d, 0x44, 0xec, + 0xb1, 0x24, 0x79, 0xc5, 0xeb, 0xef, 0x99, 0x5f, 0x10, 0x02, 0x9f, 0x8b, 0x53, 0x0e, + 0xeb, 0x3f, 0xdc, 0x2e, 0x50, 0xe8, 0x75, 0x7f, 0xc0, 0xbb, 0x9e, 0x26, 0x30, 0x23, + 0xdb, 0x82, 0xf8, 0x78, 0xd9, 0xac, 0x7f, 0xfb, 0x0b, 0xd4, 0x39, 0x1d, 0xf1, 0xd8, + 0x79, 0x89, 0x9a, 0x3e, 0xf5, 0x7b, 0xfd, 0x0d, 0x1f, 0x77, 
0x55, 0x64, 0x8e, 0xdd, + 0x85, 0xbb, 0x05, 0x2a, 0x6e, 0xdf, 0x71, 0xcd, 0x26, 0x28, 0xc9, 0x87, 0x42, 0x9f, + 0x36, 0xdc, 0x50, 0x5c, 0xcc, 0x43, 0xf3, 0x0e, 0x7a, 0x86, 0x9c, 0x9e, 0x25, 0x5e, + 0x2a, 0xf9, 0xfc, 0xf3, 0x0c, 0x12, 0x17, 0x96, 0xd1, 0x90, 0x00, 0x09, 0x60, 0xcb, + 0x6f, 0xe2, 0xf1, 0xbf, 0x24, 0x61, 0x18, 0xb4, 0x98, 0xf3, 0x24, 0x7f, 0x9d, 0x48, + 0x4c, 0x73, 0xcf, 0x09, 0x39, 0x30, 0x39, 0xe4, 0x53, 0x26, 0xb8, 0xff, 0xff, 0xb3, + 0xe7, 0xe6, 0x15, 0x9c, 0x46, 0x69, 0x9f, 0x10, 0x07, 0x92, 0xd4, 0x67, 0x29, 0x50, + 0x34, 0x8a, 0x90, 0x55, 0x2e, 0x45, 0x94, 0x3b, 0xee, 0xac, 0xf0, 0x3f, 0x32, 0x16, + 0xf9, 0x4e, 0x27, 0x4d, 0x63, 0xd6, 0x37, 0xd9, 0xf1, 0x90, 0xe8, 0xa2, 0x66, 0xcd, + 0xee, 0xf1, 0x53, 0x53, 0x0b, 0xee, 0x5c, 0xb8, 0x35, 0x52, 0x60, 0x50, 0x5c, 0x2c, + 0x2e, 0x5d, 0x99, 0x0f, 0xff, 0xdc, 0x34, 0xec, 0x0f, 0xf7, 0xf1, 0xaf, 0x81, 0xb2, + 0x4c, 0xed, 0x0e, 0xfa, 0x62, 0x13, 0xda, 0x6c, 0x7c, 0x60, 0xc4, 0x87, 0xf5, 0xf7, + 0xb0, 0x3f, 0x81, 0x60, 0xa0, 0x57, 0xf4, 0x6d, 0x05, 0xbf, 0x82, 0x18, 0xb3, 0xad, + 0xd9, 0xc0, 0x68, 0x93, 0xbd, 0x02, 0xdb, 0x9b, 0x61, 0x19, 0x1d, 0xfb, 0x13, 0x3b, + 0xfa, 0xbe, 0x48, 0x58, 0xe4, 0x7a, 0x4c, 0xc3, 0x2e, 0x41, 0x6e, 0xc0, 0x8b, 0x8a, + 0xc7, 0x91, 0x5a, 0x43, 0x73, 0x3f, 0x44, 0x06, 0xe9, 0xd9, 0x67, 0xc5, 0x60, 0xf3, + 0x44, 0xd7, 0xe9, 0x04, 0xa2, 0x80, 0x45, 0xd9, 0x9f, 0x3a, 0xf8, 0xc8, 0x2e, 0x97, + 0xe1, 0xb9, 0xc1, 0xb2, 0x05, 0xe5, 0x85, 0xfb, 0xeb, 0xb4, 0x8f, 0xaf, 0x58, 0xf1, + 0xb6, 0x5d, 0xca, 0x24, 0x97, 0xe0, 0x9a, 0x70, 0xaa, 0xd4, 0x86, 0x5f, 0x85, 0x71, + 0x5a, 0x28, 0x0e, 0x18, 0x6f, 0x3f, 0xc1, 0x74, 0x0d, 0x81, 0x84, 0xd3, 0x3e, 0x83, + 0x22, 0x16, 0x95, 0x21, 0xcd, 0xc1, 0x32, 0x21, 0x29, 0x39, 0xc8, 0x4a, 0x10, 0x89, + 0x64, 0xe2, 0xde, 0x74, 0xb6, 0xea, 0x55, 0xb4, 0xcb, 0x8f, 0x6f, 0x9b, 0xee, 0x98, + 0xb1, 0x0d, 0x41, 0x51, 0x09, 0x45, 0x5f, 0x48, 0xb7, 0x76, 0x08, 0x2d, 0xc3, 0x0b, + 0x4b, 0xc7, 0x34, 0x77, 0x07, 0x55, 0x11, 0x70, 0x03, 0x08, 0x15, 0x8c, 0xe2, 
0xf2, + 0xf9, 0xbf, 0x0f, 0x69, 0x1b, 0x2c, 0xe5, 0x3e, 0x61, 0x14, 0x2c, 0xb7, 0x40, 0xc1, + 0x5b, 0x7b, 0x62, 0x3c, 0xf4, 0x8b, 0x3f, 0x7b, 0xfe, 0xfa, 0x31, 0xbc, 0xdc, 0x66, + 0x5c, 0x6d, 0x71, 0x23, 0xe9, 0x53, 0x50, 0x81, 0x13, 0x75, 0x94, 0x7b, 0x05, 0x5a, + 0x43, 0xdb, 0x07, 0xe0, 0x3f, 0x33, 0x62, 0x7d, 0xf5, 0xc6, 0x38, 0xbf, 0xad, 0x95, + 0x6d, 0xdc, 0x1e, 0xa7, 0xd7, 0x62, 0x0a, 0x20, 0xf2, 0x79, 0x2f, 0x63, 0x81, 0x7a, + 0x1c, 0xf3, 0x25, 0x80, 0xd0, 0x42, 0x74, 0x23, 0x4a, 0xf2, 0xa5, 0x1b, 0x56, 0xbb, + 0x68, 0xa2, 0x9e, 0x43, 0xa9, 0x54, 0x14, 0x2b, 0xa4, 0xca, 0x68, 0x23, 0xbd, 0xe9, + 0x05, 0x3d, 0x72, 0xfd, 0xad, 0xbc, 0x61, 0xad, 0x59, 0x36, 0xc5, 0x3f, 0xdd, 0x75, + 0x79, 0x44, 0x6d, 0x11, 0xc4, 0x46, 0x07, 0xf4, 0x16, 0x30, 0xe4, 0xc0, 0x89, 0x15, + 0xe6, 0x31, 0x77, 0x15, 0x50, 0xe9, 0xce, 0x1f, 0xca, 0x2c, 0x63, 0xfe, 0x06, 0xb7, + 0x98, 0x9d, 0x58, 0x4f, 0xa7, 0xd7, 0x82, 0xa8, 0x8c, 0x1e, 0x7d, 0x64, 0xb6, 0xfb, + 0xf5, 0x5e, 0x35, 0x96, 0xaf, 0x9b, 0xcb, 0x75, 0x85, 0xf8, 0xc7, 0xd3, 0xaa, 0x5c, + 0x20, 0x82, 0xb2, 0x65, 0x24, 0x9d, 0xf0, 0x57, 0x01, 0xda, 0xb0, 0x31, 0xc4, 0xba, + 0xc1, 0xea, 0x26, 0x7a, 0x29, 0x96, 0xa2, 0x02, 0x8d, 0x1e, 0x6a, 0x0f, 0x80, 0xa3, + 0x84, 0x7c, 0x53, 0x1d, 0xba, 0x96, 0xee, 0x65, 0xa2, 0x41, 0x89, 0xbd, 0x27, 0x12, + 0xe4, 0x0e, 0x95, 0x96, 0x64, 0x98, 0x1e, 0x58, 0xb2, 0xa4, 0xf9, 0x51, 0xef, 0x8f, + 0x49, 0x7d, 0xff, 0xf2, 0xf2, 0xf2, 0x71, 0xea, 0xb8, 0x9c, 0x62, 0x8e, 0x18, 0xb5, + 0xfc, 0xb4, 0x38, 0x82, 0x53, 0x7e, 0xaf, 0x6a, 0xd2, 0xa6, 0xb1, 0x75, 0x46, 0x33, + 0xca, 0xa8, 0x6b, 0xf2, 0xc7, 0x6f, 0x39, 0x93, 0x15, 0x4f, 0xc7, 0x3e, 0x6f, 0xbb, + 0xa2, 0x21, 0x0c, 0x27, 0x43, 0xf5, 0x30, 0xa4, 0x27, 0x84, 0x9a, 0x30, 0x1e, 0x00, + 0xe0, 0x11, 0x29, 0xf0, 0x3a, 0x46, 0x07, 0xf8, 0x7c, 0xbe, 0x07, 0x62, 0xc0, 0xb1, + 0xc6, 0x58, 0x55, 0xde, 0xba, 0x84, 0x22, 0xca, 0x4b, 0x88, 0xab, 0xee, 0xa6, 0xa4, + 0x38, 0x2c, 0xf1, 0x6c, 0xcd, 0x6d, 0xc7, 0xc3, 0x7c, 0x44, 0xe5, 0x49, 0xc4, 0x53, + 0x48, 0x19, 
0xac, 0xd8, 0xbb, 0x0a, 0x02, 0xa5, 0xfa, 0x7a, 0x1c, 0x1d, 0x38, 0x06, + 0xfb, 0xc3, 0x40, 0x7f, 0xd7, 0xda, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x0d, 0xe6, 0x40, 0x0d, + 0x3a, 0xb8, 0x97, 0x74, 0x85, 0xcd, 0xdf, 0xbe, 0xd5, 0x93, 0x2f, 0x50, 0x7b, 0x79, + 0x94, 0x7a, 0xdb, 0x2f, 0xad, 0x37, 0x61, 0x5a, 0xa7, 0x17, 0xdb, 0x5f, 0x29, 0x80, + 0x99, 0xf2, 0x0f, 0x26, 0x3b, 0x35, 0x9a, 0x11, 0x51, 0xa6, 0xb7, 0x5c, 0x01, 0x36, + 0x5e, 0xb1, 0x54, 0xae, 0x42, 0x14, 0x0d, 0x6e, 0x10, 0x34, 0x2f, 0x14, 0xf3, 0x4d, + 0xc3, 0x3e, 0x07, 0xff, 0x0e, 0x4d, 0x1a, 0x6b, 0xe3, 0x75, 0xb3, 0x2f, 0x84, 0xb9, + 0x2e, 0x5d, 0x81, 0xeb, 0xb6, 0x39, 0xc4, 0xf2, 0x7e, 0x71, 0x5a, 0xa4, 0x2c, 0xc7, + 0x57, 0x07, 0xd4, 0xeb, 0xd1, 0xbb, 0xfb, 0xe8, 0xf9, 0x0f, 0xc7, 0xc9, 0x53, 0xe7, + 0xa9, 0x71, 0x5e, 0x65, 0xaf, 0x82, 0x67, 0x37, 0x3d, 0x34, 0x51, 0x67, 0x4f, 0xf0, + 0x84, 0xef, 0xd9, 0x2c, 0xcf, 0x3b, 0xcc, 0x7a, 0xca, 0x14, 0x67, 0xb6, 0x32, 0x7e, + 0x4f, 0x95, 0x22, 0xb2, 0xcc, 0x57, 0x9a, 0x7a, 0x8f, 0xff, 0x7c, 0xa7, 0xcf, 0x14, + 0x5d, 0xfc, 0x13, 0xea, 0xfc, 0x34, 0x15, 0x3b, 0x2c, 0x3e, 0x8a, 0xfb, 0xe5, 0x34, + 0x44, 0xd0, 0xc7, 0x3b, 0x3b, 0xd5, 0xbc, 0x87, 0x0b, 0x01, 0xcd, 0x45, 0x79, 0x11, + 0xe3, 0x56, 0x31, 0x3f, 0xd1, 0xda, 0xfb, 0x4c, 0x81, 0x51, 0x63, 0x4a, 0x01, 0xaf, + 0xf7, 0xcf, 0x11, 0x6d, 0x43, 0x3c, 0x3d, 0x2b, 0x3a, 0xdd, 0xa9, 0xce, 0xbe, 0x18, + 0xf7, 0xd1, 0x72, 0x44, 0x3e, 0x5e, 0x7b, 0x5a, 0xc9, 0xab, 0xe8, 0xdb, 0x22, 0x56, + 0xd7, 0xeb, 0xe2, 0xff, 0x28, 0x02, 0x09, 0x39, 0x50, 0x38, 0x70, 0x59, 0x7b, 0x9a, + 0x95, 0x58, 0x92, 0xc7, 0x38, 0x96, 0x50, 0xa2, 0xd4, 0x2e, 0xc9, 0x2b, 0xe7, 0x23, + 0xfe, 0xdf, 0x2f, 0x2e, 0xde, 0x5a, 0x47, 0x2a, 0xa1, 0xe7, 0x4f, 0x33, 0xad, 0x41, + 0x90, 0x15, 0x44, 0xed, 0xbb, 0xe3, 0xac, 0x46, 0x4c, 0xf4, 0x39, 0x19, 0x60, 0x15, + 0xf4, 0xf2, 0x2a, 0xc2, 0xb8, 0xfc, 0x01, 0x49, 0x6b, 0xea, 0xb4, 0xd4, 0x59, 0x07, + 0xf4, 0x79, 0x81, 0x02, 0x25, 
0x94, 0x31, 0xa2, 0xcb, 0xc9, 0x3d, 0x4f, 0x3b, 0x84, + 0xe4, 0xdd, 0x36, 0x60, 0x20, 0x27, 0x3a, 0x67, 0x52, 0xe5, 0x01, 0xaf, 0x6f, 0xf1, + 0xb7, 0x8d, 0xdc, 0x81, 0x7e, 0x6e, 0xa3, 0x51, 0x02, 0x00, 0x6b, 0xec, 0xf8, 0xd2, + 0xff, 0xb0, 0x39, 0x90, 0xf6, 0x77, 0x74, 0xa8, 0x1e, 0x05, 0xb7, 0xf4, 0xbb, 0xad, + 0x85, 0x77, 0xfa, 0x27, 0xc9, 0xde, 0x64, 0xe1, 0xb1, 0x1d, 0xcf, 0x38, 0x4f, 0x0b, + 0x56, 0x44, 0x37, 0x48, 0x75, 0x5a, 0x9f, 0xc6, 0xf2, 0xa0, 0x0b, 0x10, 0xc3, 0x65, + 0x7e, 0xba, 0xc0, 0x3b, 0xfc, 0x0b, 0x58, 0x7b, 0xef, 0x2f, 0x45, 0xec, 0x8a, 0xcd, + 0xaa, 0x51, 0xc1, 0x43, 0xb0, 0xcb, 0x25, 0xb9, 0x14, 0x2c, 0x61, 0xbd, 0x79, 0x0a, + 0x80, 0xd7, 0xc2, 0x3f, 0x90, 0xcc, 0x03, 0x49, 0x5b, 0x51, 0xe4, 0xd2, 0x84, 0x3e, + 0x55, 0x7f, 0x9e, 0x25, 0x45, 0x10, 0x8c, 0x6c, 0x03, 0xae, 0x35, 0x9f, 0x64, 0x5c, + 0x27, 0x68, 0x91, 0xc0, 0xdc, 0xab, 0x3f, 0xaf, 0x18, 0x77, 0x00, 0xc0, 0x82, 0xdc, + 0x47, 0x77, 0x40, 0xfb, 0x3f, 0x2c, 0xd7, 0xbb, 0x59, 0xfb, 0x35, 0x85, 0x54, 0x03, + 0x4c, 0x7e, 0x67, 0x8c, 0xe0, 0x1a, 0xeb, 0xf9, 0x4e, 0x51, 0x5e, 0x49, 0x72, 0x29, + 0x67, 0x99, 0x5a, 0xea, 0x85, 0x8d, 0x64, 0xe7, 0x78, 0x9f, 0xf3, 0x06, 0x36, 0x95, + 0x77, 0x22, 0x81, 0x80, 0x02, 0x6a, 0x5b, 0x0a, 0xf4, 0x75, 0xe2, 0x7a, 0x54, 0xb2, + 0x07, 0xb4, 0x1f, 0x92, 0xe3, 0x76, 0x17, 0x0e, 0x3f, 0xb0, 0x05, 0x02, 0x82, 0x61, + 0xc9, 0x9c, 0x2d, 0xbd, 0x0e, 0xed, 0xee, 0x87, 0x1c, 0x02, 0x0f, 0x48, 0xb8, 0xe9, + 0xb8, 0xe4, 0xbe, 0x77, 0xd1, 0xb7, 0x37, 0xfe, 0x21, 0xf0, 0xfa, 0x5a, 0x18, 0xeb, + 0xb5, 0x27, 0x55, 0xb5, 0xa6, 0xcf, 0x61, 0x30, 0xfb, 0x56, 0x94, 0x4c, 0xfa, 0xb8, + 0x03, 0x27, 0xc2, 0x50, 0xd1, 0x13, 0xb2, 0x9b, 0xca, 0xc9, 0xaa, 0xa1, 0x0c, 0x2e, + 0x7d, 0xe4, 0x15, 0xed, 0xb0, 0x80, 0x6c, 0x6d, 0xa0, 0x30, 0x20, 0xa1, 0x34, 0xca, + 0x7e, 0xcd, 0xc8, 0xda, 0x1b, 0xd5, 0x7a, 0x37, 0xf5, 0x5a, 0x46, 0x94, 0x0b, 0x45, + 0xb2, 0x41, 0xb1, 0xc1, 0x6e, 0xe1, 0x00, 0x92, 0x7d, 0x1b, 0xd8, 0x60, 0xd4, 0x45, + 0xa9, 0xde, 0x50, 0xd4, 0xc3, 0x84, 0xd6, 0xe1, 
0xd0, 0x01, 0x08, 0x02, 0x6c, 0x0e, + 0xa5, 0xeb, 0xbf, 0x0b, 0x72, 0xfb, 0xf5, 0xc3, 0x70, 0xbc, 0xe1, 0x8d, 0x3a, 0xcb, + 0xc4, 0x65, 0x99, 0x09, 0x9b, 0xaa, 0xe1, 0xd8, 0x02, 0xf7, 0x73, 0x33, 0x49, 0x4a, + 0x7a, 0xe1, 0x30, 0xfe, 0x86, 0xe8, 0xf8, 0x18, 0xf9, 0x26, 0x1a, 0x2d, 0xad, 0xb4, + 0x12, 0x52, 0x29, 0xba, 0x0f, 0xfc, 0x0e, 0x70, 0x90, 0x32, 0x44, 0x30, 0xb5, 0x21, + 0xa9, 0x0d, 0x22, 0x4a, 0xb7, 0xa1, 0x02, 0x4e, 0x1d, 0x89, 0x3e, 0x74, 0x04, 0xfe, + 0xdb, 0x34, 0x8e, 0x4d, 0x5e, 0x22, 0x35, 0xc5, 0x9a, 0x78, 0x76, 0xa0, 0xfc, 0x60, + 0x14, 0x5c, 0x6a, 0x00, 0x96, 0x87, 0x68, 0x44, 0x60, 0x27, 0x1e, 0xe1, 0x33, 0xa4, + 0x37, 0xfe, 0x52, 0xfb, 0x6c, 0xfb, 0xa9, 0x7f, 0xce, 0xc1, 0x61, 0xdf, 0x51, 0x5d, + 0xde, 0x90, 0x5a, 0x24, 0xda, 0x6d, 0x37, 0xbd, 0xc3, 0x40, 0x44, 0xa9, 0x55, 0xe6, + 0x82, 0xb4, 0x74, 0x71, 0xca, 0x1e, 0x8c, 0x78, 0xc5, 0x1e, 0xd3, 0x77, 0xcd, 0x4a, + 0xfa, 0x89, 0x4b, 0xd9, 0xbd, 0x12, 0xe7, 0x07, 0x15, 0x6d, 0xa0, 0x72, 0x6f, 0x7c, + 0xf5, 0x72, 0x9f, 0xab, 0xe3, 0x72, 0x16, 0x04, 0x63, 0xfe, 0x04, 0x29, 0x24, 0x4d, + 0x06, 0x74, 0x89, 0xba, 0x5d, 0x09, 0x47, 0x2e, 0xcd, 0x9b, 0xcd, 0xc4, 0xd5, 0xe4, + 0xdf, 0x10, 0x1e, 0x18, 0x9d, 0xb8, 0x46, 0x3e, 0xb5, 0x38, 0x30, 0x7b, 0x58, 0x7d, + 0xef, 0xf7, 0x8d, 0xe9, 0xc7, 0x3a, 0xf2, 0x80, 0x80, 0xb2, 0xfd, 0x05, 0x00, 0x3e, + 0x11, 0xd3, 0xe1, 0xb3, 0x29, 0x9d, 0xc9, 0x52, 0x1f, 0x8b, 0x51, 0x3b, 0xad, 0xb0, + 0x10, 0xe9, 0x1b, 0xfe, 0xb9, 0x1b, 0x0b, 0x2a, 0x6c, 0xb1, 0x29, 0xc2, 0xe8, 0x25, + 0xa5, 0x97, 0xb8, 0xfb, 0x75, 0xbc, 0x56, 0x2d, 0x65, 0x4d, 0x62, 0x10, 0x46, 0x40, + 0xdd, 0x74, 0xe5, 0x6c, 0xd1, 0x4b, 0xaa, 0xba, 0x56, 0x5b, 0x84, 0xb8, 0x45, 0xe1, + 0x63, 0xd1, 0xca, 0xef, 0x25, 0x33, 0xc3, 0x98, 0x16, 0x37, 0x20, 0x4f, 0x96, 0xa5, + 0x9c, 0x8e, 0x80, 0x24, 0xd9, 0x04, 0x1b, 0x20, 0x29, 0xe9, 0x4c, 0x15, 0x24, 0x5f, + 0x1a, 0x95, 0x88, 0x40, 0xba, 0x3f, 0x38, 0x0a, 0x4d, 0x20, 0xf1, 0x18, 0x4e, 0x77, + 0x82, 0x7d, 0xe3, 0xff, 0x8f, 0x3d, 0x73, 0x45, 0x9a, 0xfe, 0x24, 
0x1f, 0x72, 0x3c, + 0x08, 0x48, 0x23, 0x23, 0x0e, 0x00, 0x3d, 0x3d, 0x21, 0xe5, 0x35, 0x01, 0xec, 0x04, + 0x99, 0xb0, 0x83, 0xa7, 0xda, 0xd6, 0x85, 0xc5, 0x71, 0x27, 0xf4, 0xde, 0x64, 0x73, + 0x3a, 0x88, 0x0c, 0x2d, 0xb2, 0x8f, 0xda, 0xab, 0xf1, 0xb5, 0x42, 0xd2, 0x05, 0xf6, + 0x64, 0xa3, 0x51, 0x35, 0x71, 0x27, 0x11, 0xdc, 0xcc, 0xd9, 0x31, 0xa5, 0x0b, 0x9c, + 0x56, 0x61, 0x88, 0x23, 0x60, 0xd4, 0xca, 0xc0, 0x04, 0x76, 0x81, 0xbc, 0x2e, 0x2b, + 0x3b, 0xf6, 0xc9, 0x97, 0x60, 0xd7, 0xcf, 0xb4, 0xfa, 0x21, 0x39, 0x43, 0x77, 0xa4, + 0x55, 0x1c, 0x76, 0xd1, 0xf7, 0x5a, 0xc0, 0x3c, 0x26, 0x20, 0x54, 0xdf, 0xfd, 0x79, + 0xa9, 0xde, 0xd0, 0x5e, 0x88, 0x89, 0x58, 0x19, 0x9e, 0xea, 0x45, 0x01, 0xe2, 0x99, + 0x0a, 0x53, 0xa5, 0xcd, 0x2a, 0x46, 0xa4, 0x01, 0x57, 0x65, 0x88, 0xfd, 0x7d, 0x05, + 0x8a, 0x26, 0xf2, 0x84, 0x38, 0xe5, 0x78, 0x2f, 0x45, 0xac, 0x1d, 0x07, 0xf6, 0xf6, + 0xf5, 0xed, 0x73, 0x74, 0x1d, 0x57, 0x85, 0x83, 0x7a, 0x6b, 0x84, 0x4b, 0x47, 0x47, + 0x75, 0x71, 0x8c, 0x29, 0xdd, 0x99, 0x08, 0x4e, 0x9f, 0x88, 0xef, 0x15, 0x3a, 0x83, + 0x29, 0xf5, 0x32, 0xa6, 0x90, 0x17, 0xdc, 0x3a, 0x97, 0xed, 0x75, 0x43, 0x67, 0x72, + 0x30, 0x98, 0xe5, 0x76, 0x58, 0x40, 0xb0, 0x22, 0x89, 0x72, 0x44, 0x74, 0x5f, 0xbb, + 0xbb, 0x30, 0xa7, 0xcb, 0x54, 0xfa, 0x05, 0x11, 0x16, 0x6e, 0x95, 0x44, 0x12, 0x20, + 0x00, 0x61, 0x0b, 0xd2, 0xaa, 0xcb, 0xd8, 0x23, 0x25, 0xa5, 0x9b, 0x95, 0x15, 0x4e, + 0xcd, 0x82, 0xc8, 0x8d, 0x23, 0xab, 0xd1, 0xe2, 0x07, 0x70, 0xff, 0xb8, 0xaa, 0xbf, + 0x83, 0xfc, 0x07, 0x34, 0x96, 0x4c, 0xcd, 0x41, 0x1d, 0x1c, 0x93, 0x57, 0x14, 0xe2, + 0x4a, 0xab, 0x56, 0x6f, 0x4f, 0x08, 0x42, 0x40, 0x14, 0xc4, 0xec, 0xa9, 0x1b, 0x59, + 0x0f, 0x08, 0x2b, 0x47, 0x3f, 0x36, 0x1c, 0x87, 0x41, 0x5d, 0x37, 0xbd, 0x20, 0xd7, + 0x0f, 0xd0, 0xb5, 0x2b, 0x6d, 0xdf, 0x18, 0x65, 0xf7, 0x66, 0x70, 0x2e, 0x32, 0xb0, + 0x5b, 0x3c, 0xf1, 0x63, 0x0e, 0xe8, 0x59, 0x7a, 0xae, 0x19, 0x63, 0x3f, 0x35, 0x16, + 0xa8, 0x55, 0x5a, 0xc5, 0xbe, 0x32, 0xc6, 0x75, 0xbe, 0x18, 0x17, 0xef, 0xbf, 0xfd, + 
0x93, 0x69, 0x04, 0x1a, 0x08, 0x9c, 0x28, 0x3f, 0x19, 0x64, 0x99, 0x68, 0xc2, 0x49, + 0x8c, 0xde, 0x56, 0xf5, 0x00, 0x43, 0x4f, 0x28, 0x0d, 0x77, 0xa9, 0xc6, 0x2e, 0x43, + 0xcb, 0xd3, 0xf1, 0x36, 0xa4, 0xc6, 0xa0, 0x0a, 0x43, 0xe6, 0xed, 0x53, 0x0c, 0xb2, + 0xe8, 0xae, 0x83, 0x88, 0x60, 0xad, 0xc8, 0x8a, 0xac, 0xc7, 0xbd, 0x6a, 0x00, 0xae, + 0x0c, 0x19, 0xff, 0x45, 0x33, 0xa4, 0x85, 0xef, 0xde, 0x08, 0x2b, 0x5f, 0x4d, 0x1f, + 0x7a, 0x8e, 0xbe, 0x7e, 0xd8, 0x2b, 0x7b, 0x05, 0xa8, 0xcf, 0xe1, 0xe3, 0x73, 0x45, + 0x9f, 0x1b, 0xdc, 0xbf, 0x95, 0x25, 0x74, 0x7e, 0x8c, 0x95, 0x08, 0xa5, 0x55, 0xfa, + 0xcb, 0x79, 0x87, 0x40, 0xe0, 0xbd, 0xf9, 0x94, 0xd9, 0x73, 0x9b, 0xbe, 0x55, 0x38, + 0xa0, 0xae, 0x0f, 0x07, 0x6c, 0x58, 0x2c, 0x0f, 0x5b, 0xa8, 0x78, 0xb9, 0x9b, 0x82, + 0x49, 0xdb, 0x1d, 0x7e, 0x95, 0x05, 0x6c, 0x98, 0xaf, 0x08, 0x3d, 0x98, 0xcb, 0x0e, + 0xd9, 0xe3, 0xf7, 0x43, 0x6e, 0x1c, 0x76, 0x43, 0x76, 0x6f, 0x96, 0x6b, 0x83, 0xe9, + 0x99, 0x20, 0x6e, 0xbd, 0x13, 0x93, 0xb9, 0xb2, 0xa7, 0xf4, 0x14, 0x48, 0x0f, 0xa0, + 0x17, 0x48, 0x00, 0x69, 0xf8, 0x5c, 0x77, 0x49, 0xc4, 0x35, 0xae, 0x2f, 0xba, 0x2d, + 0xdc, 0x10, 0x38, 0xd5, 0x47, 0xd8, 0x48, 0x54, 0x81, 0x7e, 0xf3, 0x96, 0x35, 0xc2, + 0x98, 0x27, 0xaa, 0xd8, 0x67, 0x26, 0xc9, 0xad, 0xe3, 0xb2, 0x65, 0xb9, 0x08, 0x6c, + 0x8b, 0x5b, 0x75, 0xef, 0x56, 0xfe, 0x4b, 0xd8, 0xb4, 0xd6, 0x28, 0x93, 0x89, 0x5b, + 0x3f, 0xd2, 0x73, 0x4f, 0xda, 0xc4, 0x64, 0x15, 0x6d, 0x7e, 0x5e, 0xbc, 0x7e, 0xcf, + 0x1d, 0x83, 0xb8, 0x6f, 0x65, 0x96, 0x37, 0xe3, 0xb1, 0x42, 0xc1, 0x64, 0x96, 0x3b, + 0x8c, 0xdc, 0xf4, 0xba, 0x4f, 0x40, 0x35, 0xdf, 0xfc, 0x5a, 0x78, 0x94, 0x58, 0x84, + 0x77, 0x81, 0x91, 0x8a, 0xc7, 0x2f, 0xc1, 0x8b, 0xbb, 0xf5, 0x11, 0x00, 0x32, 0xe6, + 0x6d, 0x75, 0xb3, 0x17, 0x1e, 0xf4, 0xb5, 0x13, 0x29, 0x01, 0x64, 0xa7, 0x7b, 0x42, + 0xb0, 0xa4, 0xcf, 0xb8, 0x96, 0x39, 0xab, 0x23, 0x84, 0x5e, 0x1a, 0xa2, 0xa4, 0x52, + 0xf3, 0x73, 0x1c, 0x8c, 0xb6, 0x50, 0x82, 0xa6, 0x22, 0xa7, 0xc2, 0xe0, 0x01, 0x3e, + 0xa4, 0x7d, 0x0b, 
0xdd, 0x42, 0xd6, 0x99, 0x04, 0x66, 0x64, 0x9a, 0x90, 0x5c, 0x68, + 0x4c, 0x32, 0x51, 0x71, 0x6d, 0x61, 0xf7, 0x60, 0xd5, 0x3d, 0xe6, 0xe3, 0xf7, 0x90, + 0xfb, 0xa7, 0xf5, 0xf1, 0xf4, 0xde, 0x26, 0x71, 0x13, 0xbd, 0xfc, 0xd7, 0x42, 0x28, + 0x22, 0x33, 0x0b, 0x32, 0xd5, 0x8e, 0x67, 0x77, 0x76, 0x5f, 0x22, 0xa4, 0x11, 0x63, + 0x44, 0xee, 0xb6, 0x5b, 0x2e, 0xc5, 0x16, 0x39, 0x3a, 0xb3, 0x75, 0x1b, 0x53, 0x56, + 0xd2, 0xb0, 0xc9, 0x50, 0x0c, 0x0f, 0x3e, 0x46, 0x91, 0x81, 0x03, 0x5b, 0xc3, 0x66, + 0x0f, 0x0b, 0x8f, 0x9f, 0xbe, 0x6e, 0x40, 0xb5, 0xe8, 0x9c, 0xb7, 0x9b, 0x06, 0x37, + 0x14, 0xca, 0x75, 0xe7, 0x2e, 0x2e, 0x10, 0x0a, 0x10, 0xd6, 0x3b, 0xf7, 0x84, 0xdf, + 0x08, 0x20, 0xef, 0x25, 0xf8, 0xef, 0x40, 0xfe, 0x5f, 0x05, 0xfb, 0x95, 0x68, 0x3f, + 0x91, 0x05, 0xff, 0x3c, 0xb2, 0xd2, 0x19, 0xab, 0x76, 0x60, 0x5a, 0x06, 0x4f, 0x69, + 0x21, 0x9f, 0x1d, 0xc0, 0xd0, 0x0b, 0x3b, 0x48, 0x64, 0x2f, 0x97, 0x0d, 0xc0, 0x0c, + 0xca, 0x4b, 0x8b, 0x43, 0x30, 0x8b, 0xe1, 0x82, 0x86, 0xec, 0x5a, 0x42, 0x88, 0xd6, + 0x00, 0xa3, 0x78, 0x5c, 0xb6, 0x22, 0xd4, 0x68, 0xa4, 0xc6, 0x96, 0x9b, 0x37, 0x92, + 0xf2, 0x48, 0x50, 0x27, 0xd0, 0xad, 0x9a, 0xa4, 0xa9, 0xc2, 0xcc, 0x97, 0x2f, 0x9e, + 0xe5, 0x19, 0x0a, 0x95, 0xb1, 0xeb, 0x05, 0x8d, 0xdd, 0xd8, 0xc0, 0x8e, 0x7d, 0x75, + 0x3f, 0x5e, 0x01, 0x1b, 0x2b, 0xcf, 0xee, 0x1d, 0x52, 0xc1, 0xc4, 0xf2, 0xca, 0xcd, + 0xa3, 0x0b, 0xdb, 0x69, 0x30, 0x65, 0x3c, 0x0c, 0xc4, 0x48, 0x6e, 0x60, 0xe8, 0x9f, + 0xa8, 0x49, 0xb3, 0x20, 0x83, 0xba, 0x9d, 0xb4, 0x53, 0xfb, 0x8d, 0xf6, 0x83, 0xcd, + 0x68, 0x75, 0x4c, 0x87, 0xda, 0xa7, 0x31, 0xf5, 0x70, 0xa7, 0xa4, 0x06, 0x0a, 0xf0, + 0xce, 0x70, 0x0d, 0x31, 0xbc, 0xa7, 0xe7, 0x4b, 0x3e, 0x3b, 0xa3, 0xd0, 0xe8, 0xa6, + 0x39, 0x2a, 0x06, 0x2b, 0x8e, 0x86, 0xd9, 0xd7, 0xd0, 0x0b, 0x21, 0x70, 0x1e, 0x7b, + 0x06 + ], + }, + TestVector { + description: "Overwinter transaction #2", + version: 3, + lock_time: 522993161, + expiry_height: 132825146, + txid: [ + 0x1b, 0xbe, 0x7f, 0xfc, 0x60, 0xaf, 0xb1, 0xf5, 0x11, 0x58, 
0xa3, 0xf5, 0x3b, 0xde, + 0x26, 0x79, 0x68, 0x75, 0x49, 0xe0, 0xd6, 0x48, 0xda, 0x1f, 0xd1, 0x92, 0xef, 0x05, + 0x88, 0x78, 0x71, 0x93 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 1, + transparent_outputs: 0, + tx: vec![ + 0x03, 0x00, 0x00, 0x80, 0x70, 0x82, 0xc4, 0x03, 0x01, 0x06, 0xb1, 0xbc, 0xd8, 0x2a, + 0x01, 0xd3, 0x75, 0x62, 0x6f, 0xbf, 0x87, 0x2d, 0x27, 0xfa, 0x45, 0x11, 0xf5, 0xf8, + 0xcf, 0x8c, 0x9a, 0xbc, 0xef, 0x2a, 0x99, 0x01, 0x76, 0xae, 0x33, 0x93, 0x25, 0xd5, + 0xa5, 0x88, 0xda, 0x07, 0x65, 0x52, 0x65, 0x53, 0x53, 0xac, 0x52, 0x97, 0x7c, 0x0f, + 0xf7, 0x00, 0x09, 0x3e, 0x2c, 0x1f, 0x3a, 0xc0, 0xea, 0x07, 0x00 + ], + }, + TestVector { + description: "Overwinter transaction #3", + version: 3, + lock_time: 1447640168, + expiry_height: 261273787, + txid: [ + 0xa8, 0xb5, 0x7a, 0xca, 0x1d, 0xb8, 0x9d, 0xda, 0xb3, 0xf9, 0xad, 0xc3, 0xdc, 0x87, + 0x5d, 0xe4, 0xe2, 0xee, 0x59, 0x3a, 0xc9, 0x91, 0xf5, 0xcd, 0x93, 0x3b, 0x48, 0x0e, + 0x09, 0x6b, 0x6e, 0xc9 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 2, + transparent_outputs: 0, + tx: vec![ + 0x03, 0x00, 0x00, 0x80, 0x70, 0x82, 0xc4, 0x03, 0x02, 0x91, 0xe1, 0x31, 0x2f, 0xc6, + 0xb8, 0xa4, 0x35, 0x1a, 0x2e, 0xc0, 0x3e, 0x02, 0xe5, 0xd0, 0x2f, 0x53, 0x35, 0x4b, + 0x05, 0x2f, 0xd3, 0xda, 0x0d, 0xff, 0x82, 0xcd, 0x1f, 0x55, 0xeb, 0xca, 0x57, 0xb6, + 0x33, 0x7c, 0x85, 0x01, 0x52, 0x79, 0x81, 0x3d, 0x20, 0x21, 0xd6, 0x09, 0x4c, 0x68, + 0xb3, 0x75, 0xe9, 0x84, 0xf6, 0x83, 0x93, 0x30, 0x08, 0x71, 0xe3, 0x48, 0xfc, 0x52, + 0x36, 0xcc, 0xa6, 0x33, 0x05, 0x44, 0xe5, 0x46, 0x39, 0xb5, 0x41, 0x87, 0x01, 0xff, + 0x4c, 0xc4, 0x5a, 0x09, 0x65, 0x65, 0x63, 0xac, 0x63, 0x53, 0xac, 0x52, 0x6a, 0x27, + 0xab, 0x79, 0xb4, 0x00, 0x68, 0x3c, 0x49, 0x56, 0xbb, 0xb8, 0x92, 0x0f, 0x00 + ], + }, + TestVector { + description: "Overwinter transaction #4", + version: 3, + lock_time: 3343046044, + expiry_height: 360723307, + txid: [ + 0xe5, 0xdb, 0x07, 0x86, 0x48, 0xd5, 
0x45, 0x56, 0xd5, 0xe1, 0xae, 0x85, 0xb7, 0x72, + 0x50, 0x87, 0x7b, 0xf1, 0x4e, 0x95, 0x0e, 0x52, 0x00, 0x6c, 0xad, 0x89, 0x63, 0xea, + 0xe8, 0xb6, 0xe0, 0x3e + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 0, + transparent_outputs: 0, + tx: vec![ + 0x03, 0x00, 0x00, 0x80, 0x70, 0x82, 0xc4, 0x03, 0x00, 0x00, 0x9c, 0xd5, 0x42, 0xc7, + 0x6b, 0x33, 0x80, 0x15, 0x00 + ], + }, + TestVector { + description: "Overwinter transaction #5", + version: 3, + lock_time: 2894292630, + expiry_height: 47546885, + txid: [ + 0x29, 0x63, 0x74, 0xcb, 0xdd, 0x6d, 0x9f, 0x1b, 0x9c, 0x0f, 0x52, 0x0e, 0xba, 0x1c, + 0xdb, 0x23, 0xec, 0x72, 0xab, 0x0c, 0xa0, 0x63, 0x05, 0x14, 0x23, 0x23, 0x9b, 0x65, + 0x06, 0xfb, 0x8e, 0xdd + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 1, + transparent_outputs: 1, + tx: vec![ + 0x03, 0x00, 0x00, 0x80, 0x70, 0x82, 0xc4, 0x03, 0x01, 0x90, 0x78, 0x99, 0x42, 0xf5, + 0x5c, 0x20, 0x0b, 0x77, 0x3e, 0xcd, 0xd7, 0x99, 0x2c, 0xff, 0x3e, 0xca, 0x24, 0xde, + 0x3e, 0x09, 0x84, 0xe1, 0x0e, 0x68, 0xae, 0x38, 0x75, 0x34, 0xb9, 0x6c, 0xde, 0x37, + 0x92, 0xf1, 0x35, 0x05, 0x6a, 0x00, 0x00, 0x63, 0x6a, 0x0c, 0xa8, 0xc4, 0xc4, 0x01, + 0x01, 0xba, 0xef, 0xf7, 0x74, 0x39, 0x01, 0x00, 0x05, 0x53, 0x52, 0xac, 0x52, 0x63, + 0x96, 0x66, 0x83, 0xac, 0x05, 0x82, 0xd5, 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0xa7, 0xaa, + 0xb7, 0x40, 0x09, 0xe5, 0x7a, 0x85, 0xf7, 0xbf, 0x68, 0xa2, 0xe4, 0x82, 0x00, 0x0f, + 0x82, 0x9c, 0x54, 0x50, 0x73, 0xa1, 0x5d, 0x5c, 0xd0, 0xfc, 0xc5, 0x74, 0x39, 0xa4, + 0x35, 0x0e, 0xaf, 0x09, 0x8d, 0xfb, 0x82, 0xa0, 0x85, 0xea, 0x8a, 0x4a, 0xf6, 0xfa, + 0x83, 0x81, 0xf0, 0x65, 0x88, 0x19, 0xea, 0xb4, 0x83, 0xf6, 0x5b, 0x32, 0x5d, 0x5a, + 0xed, 0xa1, 0x52, 0x32, 0xcf, 0xad, 0xec, 0x75, 0xab, 0x18, 0x66, 0xe4, 0xc0, 0x15, + 0x5a, 0x9c, 0x74, 0xa7, 0xa5, 0x7c, 0xcf, 0x34, 0xc4, 0x83, 0xac, 0x7d, 0xa1, 0x58, + 0x8a, 0x1b, 0x6b, 
0x99, 0x41, 0xf1, 0x10, 0x40, 0xf9, 0x4c, 0xf7, 0x8f, 0xad, 0x89, + 0xbf, 0x11, 0xfe, 0xd6, 0x9a, 0xa0, 0xd8, 0x31, 0x05, 0xad, 0xac, 0xdd, 0x4e, 0x5f, + 0x04, 0xa6, 0x24, 0x24, 0x02, 0x3c, 0x9b, 0x9e, 0x33, 0xc4, 0xfb, 0x7f, 0x12, 0xbd, + 0xf2, 0x1f, 0x07, 0xf2, 0x65, 0xc5, 0x37, 0xd5, 0x1c, 0x65, 0x51, 0xf4, 0x61, 0x7b, + 0x91, 0x5d, 0x21, 0x99, 0x18, 0x39, 0xc3, 0xd0, 0xd3, 0x63, 0x93, 0xd6, 0x46, 0xe0, + 0xa8, 0xa4, 0x15, 0x09, 0x21, 0x7d, 0x0e, 0x7d, 0x2c, 0xa1, 0xa0, 0xa0, 0xd6, 0x77, + 0xa3, 0xea, 0xca, 0x23, 0xed, 0xeb, 0x07, 0xb7, 0x4e, 0x65, 0x2a, 0x0b, 0xc5, 0x0c, + 0x6c, 0x08, 0x3a, 0x55, 0xd6, 0xc7, 0x30, 0x6e, 0x74, 0x08, 0x6f, 0x47, 0x68, 0x93, + 0x3a, 0xa2, 0x48, 0x73, 0x68, 0x18, 0x67, 0xa7, 0x89, 0x3d, 0x77, 0xcb, 0x7f, 0x29, + 0xb8, 0xc8, 0x47, 0xc5, 0x83, 0xf2, 0xd0, 0x71, 0xa6, 0x86, 0x61, 0x6e, 0x20, 0x67, + 0x19, 0xf7, 0x61, 0xae, 0x39, 0xc1, 0x10, 0x44, 0x2e, 0x06, 0x16, 0x3d, 0x2b, 0x84, + 0x59, 0x03, 0x60, 0x69, 0x5d, 0x4e, 0x19, 0x84, 0x9e, 0x63, 0x4f, 0x24, 0xd9, 0xad, + 0x39, 0x6c, 0x19, 0xff, 0x83, 0xce, 0x74, 0xf4, 0x6e, 0x64, 0x5f, 0x93, 0x2e, 0x14, + 0x1a, 0x41, 0x19, 0x59, 0x36, 0xc8, 0x5d, 0x51, 0x44, 0x14, 0xf1, 0x12, 0xe6, 0x0b, + 0x1a, 0x25, 0x37, 0xc3, 0x8d, 0x03, 0xc6, 0xc4, 0x63, 0x83, 0x05, 0xc9, 0xbd, 0x6c, + 0x62, 0xe3, 0x66, 0xbc, 0x63, 0x12, 0x3e, 0x3e, 0x6d, 0xd3, 0x6e, 0xed, 0xd3, 0x13, + 0x6f, 0xce, 0x8d, 0xee, 0xca, 0x2a, 0xa0, 0x9a, 0x32, 0x98, 0x03, 0x9d, 0x83, 0x85, + 0x9e, 0xfc, 0x9b, 0x2b, 0x69, 0xcf, 0x9a, 0x7d, 0xee, 0x08, 0xa9, 0x8e, 0x4b, 0xe5, + 0x58, 0xac, 0x79, 0x12, 0xfd, 0xcb, 0x42, 0x20, 0x90, 0x75, 0x42, 0x02, 0x60, 0xf7, + 0xca, 0x0a, 0xf2, 0xc0, 0x1f, 0x2a, 0xfe, 0x33, 0x07, 0x3f, 0x26, 0x24, 0x9d, 0x94, + 0x4f, 0x7a, 0x50, 0xdd, 0x84, 0x83, 0x9b, 0xc3, 0xea, 0x7f, 0xde, 0xe4, 0xed, 0x71, + 0x44, 0x9c, 0xf0, 0x75, 0x33, 0xd2, 0x6e, 0x1e, 0x27, 0xa3, 0xef, 0xb0, 0x32, 0xc3, + 0xa3, 0xb3, 0x4b, 0xd3, 0x09, 0x26, 0x22, 0xd2, 0x06, 0x2a, 0xe5, 0x36, 0xef, 0x51, + 0x49, 0xc4, 0x9b, 0x5b, 0xc9, 0x47, 
0x5e, 0xaf, 0xab, 0x6e, 0x03, 0x57, 0x61, 0x00, + 0x8b, 0x0d, 0xad, 0xde, 0xec, 0xaa, 0x60, 0x44, 0x70, 0xbb, 0xe0, 0xfa, 0xda, 0x25, + 0x5d, 0x29, 0x0e, 0x92, 0xb1, 0x90, 0xc2, 0xc2, 0xd8, 0xc2, 0xde, 0xe5, 0x45, 0x5d, + 0x1f, 0x03, 0xa9, 0xf3, 0xdb, 0x77, 0x79, 0xb5, 0x84, 0x64, 0x34, 0x64, 0xaa, 0x80, + 0x14, 0xba, 0x66, 0x99, 0x4d, 0xe2, 0x55, 0x17, 0xf8, 0x39, 0x80, 0xe6, 0x6e, 0xe4, + 0xf6, 0x23, 0x14, 0xae, 0x6d, 0xbe, 0x02, 0x52, 0xd5, 0xd3, 0x8b, 0x0a, 0x16, 0xf3, + 0x99, 0x1f, 0x36, 0xd8, 0xa8, 0xb3, 0x9d, 0xdc, 0x0d, 0x55, 0x95, 0xee, 0xd9, 0x87, + 0x62, 0x87, 0x8c, 0xdf, 0x3f, 0x4a, 0x2e, 0xdc, 0x5c, 0xda, 0x77, 0x03, 0xfe, 0x4f, + 0xaf, 0x63, 0xa1, 0x5f, 0x56, 0x8a, 0x54, 0x0d, 0xa5, 0x7d, 0xd9, 0xbe, 0xb6, 0xfb, + 0x1a, 0x97, 0x7c, 0xcb, 0x91, 0xb4, 0xd7, 0x9c, 0xb3, 0x9b, 0x28, 0x91, 0x1a, 0x29, + 0xe7, 0xbf, 0x02, 0x8a, 0xc6, 0x10, 0x37, 0x96, 0xdf, 0xb6, 0xb2, 0x09, 0x67, 0x23, + 0x9a, 0xd3, 0x73, 0xc3, 0x8c, 0x53, 0xf6, 0xdf, 0x18, 0x23, 0xd4, 0x95, 0x0a, 0x02, + 0x83, 0xe9, 0x9b, 0x9c, 0x06, 0xab, 0x29, 0x66, 0x66, 0x7c, 0x9d, 0xf6, 0x77, 0x71, + 0x6b, 0x0c, 0xad, 0xed, 0x81, 0x8d, 0xf9, 0xe4, 0x49, 0xc0, 0x72, 0xe2, 0x2f, 0x9d, + 0x98, 0xbb, 0x0f, 0x9b, 0x03, 0xbd, 0x5f, 0xd0, 0x13, 0xfc, 0xef, 0x3e, 0xd6, 0xa4, + 0x9a, 0xeb, 0x98, 0x72, 0x02, 0x54, 0x08, 0x7e, 0xf7, 0x28, 0xe3, 0x19, 0x47, 0xff, + 0xe8, 0xf7, 0x66, 0xe6, 0x3e, 0xe4, 0x6f, 0xf2, 0x08, 0x16, 0xd5, 0xfa, 0x8f, 0xf5, + 0x5a, 0x26, 0x39, 0x89, 0x61, 0x49, 0x0a, 0xb9, 0xae, 0x36, 0x6f, 0xc5, 0xa2, 0xd1, + 0x99, 0x6e, 0xd6, 0x93, 0xcc, 0xca, 0x82, 0x35, 0x6f, 0x60, 0x0a, 0xb0, 0x99, 0xf6, + 0xec, 0xa8, 0xbf, 0xe6, 0x45, 0x27, 0x0d, 0x3f, 0x95, 0xed, 0xba, 0x5b, 0x0d, 0xe7, + 0xa3, 0x28, 0x19, 0x23, 0x3b, 0xcc, 0x75, 0x4a, 0x5c, 0xe2, 0xe5, 0xea, 0x07, 0x84, + 0x2e, 0x5f, 0xf2, 0xce, 0xbe, 0x62, 0xad, 0x76, 0xe8, 0xef, 0xf8, 0xd1, 0x5e, 0xa4, + 0xc2, 0x4a, 0x5f, 0x20, 0x78, 0x68, 0x31, 0x9a, 0x5a, 0xf6, 0xb0, 0x35, 0xbe, 0x3f, + 0x44, 0xf4, 0x34, 0x09, 0x4f, 0x6e, 0x52, 0x5b, 0xe6, 
0x14, 0xda, 0xc9, 0x20, 0xa3, + 0x30, 0xbd, 0xfb, 0x26, 0xd7, 0x5f, 0xe7, 0xb4, 0xb3, 0x65, 0xd0, 0x94, 0x45, 0x92, + 0x50, 0xaa, 0xa5, 0x54, 0x44, 0x89, 0xfb, 0x1d, 0x99, 0x25, 0x81, 0x80, 0x0a, 0x77, + 0xb8, 0x91, 0x21, 0x57, 0xfc, 0x97, 0x13, 0xaa, 0xac, 0x25, 0xb4, 0xc2, 0x6e, 0xb0, + 0x3f, 0x71, 0x66, 0x46, 0x61, 0x9a, 0xf0, 0x24, 0x56, 0xae, 0x69, 0x59, 0x62, 0xfe, + 0x5e, 0x93, 0x1a, 0x63, 0xb5, 0xc7, 0x90, 0x52, 0xec, 0xd3, 0x33, 0xe1, 0x84, 0x12, + 0xdb, 0x91, 0xe1, 0x5f, 0x7c, 0xbc, 0x70, 0xb4, 0xcd, 0x7e, 0x8e, 0x3c, 0x95, 0x1f, + 0x35, 0x85, 0x72, 0xe3, 0x77, 0x67, 0xe7, 0xd5, 0x27, 0x04, 0xa6, 0x72, 0x1b, 0x30, + 0xef, 0xc4, 0x10, 0x17, 0xae, 0x4d, 0x23, 0x15, 0x58, 0xc5, 0xc8, 0x2c, 0xc7, 0xdd, + 0x7e, 0x33, 0x56, 0xc0, 0x9d, 0xc2, 0x49, 0x06, 0xf0, 0x43, 0x8d, 0xfc, 0xc3, 0x00, + 0x85, 0x6a, 0xc2, 0xce, 0xd8, 0xf7, 0x7f, 0xa8, 0x01, 0x57, 0x36, 0xc6, 0x61, 0xe8, + 0x02, 0x48, 0xae, 0xeb, 0x77, 0x48, 0x74, 0xaa, 0x79, 0xd2, 0x90, 0xb8, 0xf5, 0x02, + 0x7a, 0x0a, 0x50, 0x95, 0x37, 0xfc, 0x7c, 0x68, 0x9b, 0x7a, 0xd8, 0x61, 0x16, 0xcf, + 0xec, 0x26, 0x47, 0xcc, 0xaa, 0xe1, 0xc7, 0x4b, 0x41, 0x6f, 0x3e, 0x6a, 0xe8, 0xf7, + 0xcc, 0x60, 0xea, 0xaf, 0x7b, 0x6a, 0x59, 0x0d, 0x51, 0x54, 0x41, 0x38, 0xe1, 0x73, + 0x29, 0x45, 0x60, 0x3a, 0x53, 0x46, 0x2c, 0x60, 0xe1, 0xf6, 0xcb, 0x0c, 0x9c, 0xa0, + 0x39, 0x0c, 0x48, 0x82, 0x24, 0xc3, 0x13, 0x26, 0x9f, 0xcd, 0x59, 0xfc, 0xb6, 0x11, + 0xfb, 0x2d, 0x9b, 0x4c, 0x8f, 0xa6, 0x01, 0xbb, 0x1c, 0xb8, 0xd0, 0x7d, 0x79, 0x7b, + 0xf5, 0xde, 0x52, 0xbc, 0xee, 0xb0, 0x23, 0x01, 0xc8, 0x96, 0x2a, 0xc1, 0xfc, 0x04, + 0x91, 0xdc, 0x81, 0xaf, 0xfd, 0x6c, 0x1e, 0xbf, 0x89, 0xa1, 0x3d, 0x6f, 0x29, 0x0e, + 0xda, 0x5d, 0x5c, 0xef, 0x38, 0x22, 0x15, 0xc5, 0xe9, 0x51, 0xd7, 0x13, 0x05, 0xef, + 0x33, 0xd9, 0x73, 0x71, 0x26, 0xd0, 0xe6, 0x62, 0x90, 0x5f, 0x12, 0x50, 0x92, 0x6f, + 0x6a, 0x22, 0x99, 0x90, 0xe3, 0x8f, 0x69, 0xad, 0x9a, 0x91, 0x92, 0xb3, 0x02, 0xf2, + 0x6b, 0xdd, 0xa4, 0x65, 0xd9, 0x0b, 0x94, 0xb1, 0x2c, 0x57, 0xfa, 0x3f, 
0xd6, 0x93, + 0x00, 0x83, 0xf1, 0x84, 0x43, 0x8d, 0x8a, 0x88, 0x9d, 0x3f, 0x5e, 0xce, 0xa2, 0xc6, + 0xd2, 0x3d, 0x67, 0x36, 0xf2, 0xa0, 0xf1, 0x8e, 0x26, 0xf4, 0xfa, 0x45, 0xd1, 0xbe, + 0x8f, 0x3d, 0xc4, 0xa7, 0x07, 0x13, 0x7e, 0x95, 0xd2, 0xad, 0x59, 0x4f, 0x6c, 0x03, + 0xd2, 0x49, 0x23, 0x06, 0x7a, 0xe4, 0x7f, 0xd6, 0x42, 0x5e, 0xfb, 0x9c, 0x1d, 0x50, + 0x4e, 0x6f, 0xd5, 0x57, 0x53, 0x40, 0x94, 0x56, 0x01, 0xfe, 0x80, 0x6f, 0x57, 0x56, + 0xac, 0xb5, 0x62, 0xf1, 0x3c, 0x0c, 0xa1, 0xd8, 0x03, 0xa1, 0x95, 0xc2, 0xeb, 0xb2, + 0xef, 0x02, 0xac, 0x33, 0xe6, 0xa8, 0x8d, 0xea, 0x07, 0x5b, 0xa9, 0x96, 0xd3, 0xc3, + 0x36, 0x64, 0x8e, 0x86, 0x94, 0xd3, 0xa1, 0x9d, 0x3d, 0xca, 0x53, 0x1b, 0xeb, 0x50, + 0xd4, 0x32, 0x7c, 0x5c, 0x0c, 0x23, 0xcb, 0x7c, 0xfd, 0xb0, 0x8c, 0xa7, 0xcf, 0x2c, + 0xac, 0x6b, 0xc1, 0x39, 0xd0, 0x74, 0x14, 0x73, 0xd3, 0x76, 0x02, 0x9c, 0xb4, 0xab, + 0x6b, 0xf0, 0x54, 0x55, 0x7c, 0xe2, 0x94, 0xc7, 0x28, 0xa4, 0x68, 0x7d, 0x57, 0xec, + 0x89, 0x09, 0xff, 0x51, 0xa4, 0xd0, 0x2f, 0x9d, 0xcd, 0x11, 0x19, 0x3d, 0x7d, 0x1c, + 0x9f, 0xda, 0xe6, 0xa1, 0x73, 0x96, 0xa1, 0xbf, 0x57, 0xa9, 0x94, 0x93, 0x4f, 0x5e, + 0x7a, 0x59, 0xf0, 0x45, 0xde, 0xbe, 0xaf, 0xf6, 0x2e, 0xf3, 0x26, 0xb9, 0x47, 0xf2, + 0xa8, 0xb4, 0x95, 0x55, 0xe4, 0xd9, 0x9b, 0x3b, 0xf5, 0xc8, 0x1f, 0xf9, 0xfe, 0x31, + 0x4e, 0x04, 0x7a, 0xf1, 0x52, 0x50, 0x8f, 0x57, 0x01, 0x5c, 0xa4, 0x02, 0xc6, 0x7d, + 0x92, 0x5c, 0x99, 0xac, 0xea, 0x3e, 0xe8, 0xcc, 0x4b, 0x00, 0x8c, 0x5c, 0xb4, 0x39, + 0x66, 0xe7, 0x14, 0xef, 0x48, 0x0f, 0xd0, 0x5e, 0x07, 0xc7, 0xb2, 0xdd, 0xa9, 0xaa, + 0x39, 0x66, 0x11, 0x3e, 0xaa, 0x29, 0x3d, 0x3f, 0x62, 0x2b, 0x30, 0x9d, 0x64, 0x80, + 0x3c, 0xe1, 0xe6, 0x37, 0x8b, 0x6a, 0xac, 0x4f, 0xab, 0x52, 0x7c, 0x43, 0xcd, 0x45, + 0xed, 0x0a, 0x3c, 0x1a, 0x4b, 0x9f, 0xb1, 0x8d, 0xcc, 0xcf, 0xcd, 0xb6, 0xac, 0x0c, + 0x24, 0x21, 0x63, 0x9c, 0xda, 0x00, 0x75, 0xa2, 0x0d, 0xc5, 0x11, 0x1b, 0x8d, 0x3d, + 0x31, 0x99, 0x49, 0x5b, 0xd9, 0x13, 0x3d, 0xba, 0xb9, 0x45, 0x41, 0x41, 0x0e, 0x4f, + 0xba, 
0x92, 0xc7, 0xb6, 0x06, 0xa5, 0xcb, 0x12, 0x2f, 0x14, 0x0c, 0xf1, 0xa3, 0x59, + 0x6f, 0x27, 0x88, 0xf3, 0xc8, 0xb9, 0x26, 0x60, 0xf1, 0x4c, 0xb6, 0x5a, 0xf5, 0xdd, + 0x23, 0xdf, 0xdb, 0xac, 0x13, 0x71, 0xec, 0xf4, 0xb3, 0x37, 0x12, 0xfe, 0xd2, 0x29, + 0x2c, 0x44, 0xf7, 0x08, 0x34, 0xcf, 0x96, 0xc0, 0x5d, 0x58, 0x82, 0x7e, 0x69, 0xbf, + 0xc2, 0xe6, 0x96, 0xfa, 0x08, 0x74, 0x86, 0x9c, 0x02, 0xf3, 0xdc, 0xa1, 0x1c, 0x3b, + 0x90, 0xcb, 0x21, 0x4e, 0x68, 0xbc, 0x1c, 0xae, 0x03, 0x9d, 0x7a, 0x14, 0x6c, 0xdc, + 0x1d, 0x60, 0x9d, 0x7a, 0x6b, 0x3f, 0xd5, 0xd4, 0x61, 0xb0, 0x95, 0x1c, 0x82, 0xcf, + 0xb3, 0xe7, 0x63, 0xfa, 0xd2, 0xd1, 0xbc, 0x76, 0x78, 0xcd, 0xf8, 0x27, 0x79, 0xf8, + 0xfd, 0x5a, 0x1c, 0xe2, 0x2a, 0x8d, 0x3c, 0x45, 0x47, 0xab, 0xd9, 0x59, 0x83, 0x8a, + 0x46, 0xfb, 0x80, 0xaf, 0xe0, 0x1f, 0x8e, 0xcc, 0x99, 0x31, 0x51, 0x3b, 0x19, 0x62, + 0xec, 0x54, 0x08, 0x56, 0xcb, 0x18, 0x93, 0x87, 0xcf, 0xbf, 0xcc, 0x0f, 0x7c, 0x68, + 0x22, 0x3c, 0xba, 0x47, 0xfb, 0x0c, 0x9b, 0x48, 0x6e, 0x4d, 0x99, 0x17, 0x19, 0x41, + 0xf7, 0x67, 0x5a, 0x8b, 0x46, 0x32, 0x8a, 0x3b, 0xc1, 0x09, 0xbf, 0x07, 0xc6, 0x6d, + 0x5e, 0xde, 0x77, 0x1c, 0xc4, 0xc7, 0x4c, 0xe8, 0x03, 0x33, 0x82, 0x91, 0x91, 0xee, + 0xdc, 0x49, 0x35, 0x08, 0xa6, 0x44, 0x53, 0x0a, 0x61, 0x44, 0xf2, 0x2d, 0xcf, 0x97, + 0x52, 0x5a, 0x4c, 0xdc, 0xa1, 0xad, 0x71, 0x07, 0x3b, 0x08, 0x0b, 0x73, 0xea, 0x45, + 0x49, 0xf5, 0x40, 0x1b, 0xff, 0x43, 0x18, 0x26, 0x8e, 0x6a, 0xd6, 0x37, 0x36, 0x31, + 0x57, 0xa1, 0x9a, 0x53, 0xf1, 0x23, 0xa0, 0xb0, 0xe1, 0x6d, 0x0b, 0x77, 0xf0, 0x20, + 0x28, 0xda, 0x46, 0x41, 0x00, 0xfd, 0xe7, 0x6d, 0x83, 0xdd, 0x0b, 0xb2, 0x24, 0xf7, + 0xb5, 0x7a, 0x00, 0xc0, 0x2f, 0x68, 0xae, 0x64, 0x8f, 0xdc, 0x52, 0x99, 0x57, 0xa1, + 0x04, 0x90, 0xdc, 0xe1, 0xfd, 0xdb, 0xb0, 0x90, 0x4f, 0x0d, 0x51, 0x8b, 0xb3, 0x87, + 0x54, 0x40, 0x19, 0x98, 0x3b, 0x61, 0x69, 0x75, 0xa7, 0x8e, 0x74, 0xd8, 0x54, 0xfd, + 0xdc, 0x49, 0xb2, 0x55, 0x16, 0x7b, 0x55, 0xef, 0x4b, 0xee, 0x46, 0x56, 0x68, 0xb2, + 0x0e, 0xa4, 0x11, 0x8c, 
0xa5, 0x69, 0xae, 0x48, 0x0e, 0x0f, 0x6e, 0x5e, 0x04, 0x3a, + 0x35, 0x7b, 0x36, 0xd3, 0xab, 0x36, 0xc8, 0x61, 0xf2, 0x27, 0x83, 0x01, 0xdc, 0xe5, + 0x76, 0x74, 0xd5, 0x07, 0x3b, 0x3a, 0x6f, 0x51, 0x03, 0xa0, 0x79, 0x3a, 0xf1, 0xb7, + 0xd4, 0x6f, 0x95, 0x7e, 0x22, 0xd8, 0xd2, 0x58, 0x3b, 0xf1, 0x81, 0x83, 0x6c, 0x3b, + 0xe9, 0x93, 0x0b, 0xac, 0x8f, 0xa4, 0x60, 0xe9, 0x68, 0xaa, 0x71, 0x09, 0x87, 0x0b, + 0xbe, 0xd1, 0x7d, 0xf5, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0xc8, 0xca, 0x14, 0x67, 0xae, 0x17, + 0xdb, 0xbc, 0xde, 0x31, 0xc1, 0x10, 0x5c, 0xb5, 0xbd, 0xa8, 0x8a, 0xc6, 0xc6, 0x27, + 0x00, 0x2c, 0xe2, 0x1c, 0x02, 0x14, 0x0f, 0xfe, 0x81, 0xec, 0x58, 0xbf, 0x1e, 0x6d, + 0x1b, 0xb7, 0xaa, 0xad, 0xa4, 0x1f, 0xba, 0x0b, 0xb5, 0x88, 0x77, 0x8a, 0x7f, 0x65, + 0x20, 0x2a, 0xd8, 0x11, 0xea, 0x73, 0xd2, 0x6c, 0x74, 0x55, 0x03, 0x95, 0xaf, 0xf7, + 0x53, 0x25, 0x10, 0x7c, 0x9b, 0x3f, 0x9a, 0xe9, 0xdc, 0xdc, 0xd8, 0x6e, 0xd0, 0x81, + 0xa2, 0xe7, 0x42, 0x47, 0x19, 0xa3, 0xd1, 0x85, 0xb7, 0xe0, 0xa4, 0x3a, 0x47, 0x2e, + 0x29, 0x8a, 0xc0, 0xaf, 0xdc, 0x52, 0x87, 0xd7, 0xad, 0x12, 0x4c, 0xd9, 0x40, 0x5a, + 0x62, 0xcd, 0x1c, 0xa0, 0x8b, 0x28, 0x2e, 0xfe, 0xf7, 0xf9, 0x28, 0xdf, 0x76, 0xe2, + 0x82, 0x1a, 0x41, 0x84, 0x13, 0xeb, 0x7c, 0xea, 0xa5, 0xff, 0x12, 0x90, 0xb0, 0x3e, + 0xc9, 0x1c, 0xe6, 0xdd, 0x28, 0x13, 0x0c, 0x3a, 0xb0, 0xb2, 0x3b, 0x60, 0x2b, 0xd5, + 0xbe, 0x5d, 0xc2, 0x60, 0x03, 0xaa, 0xe0, 0x4b, 0x33, 0xd7, 0xbd, 0x25, 0x90, 0xe9, + 0x0c, 0x8c, 0x38, 0x8e, 0xa7, 0x95, 0x51, 0x22, 0xdb, 0xac, 0xa6, 0x7b, 0x30, 0x39, + 0x5a, 0x92, 0x8b, 0x57, 0xb8, 0x57, 0x51, 0x23, 0x20, 0x5a, 0xe1, 0x91, 0x52, 0xe4, + 0x1e, 0x00, 0x29, 0x31, 0xb4, 0x57, 0x46, 0x19, 0x8e, 0x5d, 0xd9, 0x57, 0x1a, 0x56, + 0xa7, 0xe0, 0xd4, 0x23, 0xff, 0x27, 0x98, 0x9d, 0x3e, 0xb4, 0x17, 0xec, 0xd3, 0xc3, + 0x09, 0x3f, 0xb8, 0x2c, 0x56, 0x58, 0xe2, 0x96, 0x24, 0xc5, 0x32, 0x19, 0xa6, 0x0c, + 0xd0, 0xa8, 0xc4, 0xda, 0x36, 0x7e, 0x29, 
0xa7, 0x17, 0x79, 0xa7, 0x30, 0x32, 0x98, + 0x5a, 0x3d, 0x1f, 0xd0, 0x3d, 0xd4, 0xd0, 0x6e, 0x05, 0x56, 0x6f, 0x3b, 0x84, 0x36, + 0x7c, 0xf0, 0xfa, 0xee, 0x9b, 0xc3, 0xbd, 0x7a, 0x3a, 0x60, 0x6a, 0x9f, 0xdb, 0x84, + 0x9c, 0x5d, 0x82, 0xd0, 0xa6, 0x19, 0x23, 0xc2, 0xe5, 0xd8, 0xaa, 0x63, 0xa8, 0xa5, + 0x0c, 0x02, 0xbd, 0x03, 0x87, 0x72, 0xc4, 0x14, 0x3d, 0x8b, 0x7a, 0xcf, 0xd7, 0x4e, + 0x72, 0xc0, 0x4d, 0x89, 0x24, 0x8d, 0xff, 0x20, 0xfe, 0x8d, 0xc5, 0xec, 0x21, 0x49, + 0x05, 0x4e, 0xa2, 0x41, 0x64, 0xe8, 0x03, 0x67, 0x44, 0xad, 0x0c, 0xac, 0xf1, 0xa8, + 0xb7, 0x01, 0x26, 0xf4, 0x82, 0xc0, 0x92, 0xed, 0x9f, 0x61, 0x27, 0xd2, 0x05, 0x0d, + 0x12, 0xe8, 0x78, 0xa7, 0x96, 0x53, 0xa1, 0xe8, 0x4d, 0xae, 0xc3, 0x0b, 0xe6, 0x2d, + 0x5f, 0x6c, 0x4a, 0xbe, 0x5c, 0xe9, 0x0a, 0x7f, 0xe2, 0xe5, 0x2a, 0x8d, 0x78, 0x46, + 0xe8, 0xed, 0xf2, 0xf2, 0xbc, 0xe0, 0x5a, 0x03, 0x7c, 0x82, 0x6f, 0x22, 0xca, 0xad, + 0x12, 0x61, 0x46, 0x7d, 0xcf, 0xb7, 0xd6, 0xb6, 0x13, 0x3d, 0xc2, 0x1e, 0x80, 0x96, + 0xc7, 0xe9, 0xf8, 0xe9, 0xe1, 0x0c, 0x1e, 0x3f, 0xac, 0x40, 0x58, 0xb6, 0x82, 0xc6, + 0x8e, 0x54, 0xfa, 0xca, 0xe0, 0xf9, 0x02, 0xdd, 0x4d, 0x64, 0xd9, 0x04, 0x61, 0x52, + 0xb4, 0x76, 0x23, 0x32, 0x93, 0x9f, 0x17, 0xe6, 0xaa, 0xf7, 0xd8, 0xb9, 0xd3, 0x58, + 0xe2, 0x21, 0x8d, 0x4e, 0x0d, 0x69, 0xa4, 0xf1, 0x19, 0xe1, 0xc6, 0x02, 0xec, 0x4c, + 0x8b, 0x53, 0x28, 0x09, 0x70, 0x71, 0x31, 0xf0, 0x1f, 0x55, 0xc7, 0xad, 0x04, 0xcf, + 0xb6, 0x3f, 0x7c, 0x4a, 0x3d, 0x0a, 0x2b, 0x0f, 0xfb, 0x0b, 0x05, 0xa6, 0xbe, 0x05, + 0x5b, 0x8c, 0x02, 0xca, 0x80, 0xbb, 0x0a, 0x1d, 0x13, 0xcd, 0x4c, 0xd6, 0x9a, 0xb9, + 0x83, 0x04, 0xae, 0x25, 0x15, 0xd5, 0xf7, 0x69, 0x9d, 0x4a, 0xbe, 0xe5, 0xc2, 0x0b, + 0xe6, 0x09, 0xd8, 0x73, 0x51, 0x10, 0x12, 0x02, 0x34, 0xbd, 0x85, 0xa7, 0xef, 0xf5, + 0xfb, 0x63, 0x4c, 0xff, 0x26, 0x58, 0xba, 0x65, 0x16, 0x04, 0x85, 0x63, 0x09, 0x5e, + 0xce, 0xfb, 0x30, 0x15, 0xee, 0x3f, 0x03, 0xca, 0x52, 0xa1, 0x77, 0xf2, 0x03, 0xec, + 0xdc, 0x26, 0xbc, 0x08, 0x9d, 0x34, 0xc6, 0x40, 0x48, 0x46, 
0xe9, 0xc6, 0x47, 0xfc, + 0xfe, 0x98, 0xcc, 0x6a, 0xcd, 0xbb, 0x46, 0x4f, 0x64, 0x27, 0x8a, 0xd8, 0xce, 0x9d, + 0x1a, 0xe0, 0xd4, 0x15, 0xbc, 0x0c, 0x05, 0x24, 0x5f, 0xdd, 0xaf, 0x4e, 0xbc, 0x8d, + 0xc7, 0x03, 0xa8, 0x5c, 0xb2, 0x70, 0xf7, 0x96, 0xad, 0x2d, 0x93, 0x7e, 0x2a, 0xc0, + 0xd5, 0xe0, 0xa3, 0x48, 0x21, 0x75, 0x80, 0x00, 0xaa, 0x59, 0xc9, 0xd4, 0x65, 0x24, + 0x85, 0x29, 0x4e, 0xe0, 0xab, 0x29, 0x69, 0x6b, 0x21, 0x43, 0x0f, 0xa5, 0x4d, 0xcf, + 0xbf, 0x2b, 0x9c, 0x49, 0xd1, 0x42, 0x06, 0x42, 0x09, 0xee, 0xee, 0xd4, 0xd4, 0x71, + 0xff, 0xc0, 0x17, 0xd4, 0xe2, 0x0a, 0x79, 0x6b, 0x09, 0x27, 0x80, 0x4c, 0x06, 0x1b, + 0x9f, 0x4a, 0x70, 0x91, 0xfe, 0x01, 0x5a, 0xda, 0x68, 0xfd, 0x84, 0x42, 0xe0, 0x18, + 0x25, 0xc8, 0x8d, 0xfe, 0x55, 0xcf, 0x5d, 0xe3, 0x89, 0x36, 0xf7, 0xce, 0x25, 0x31, + 0x1b, 0x90, 0x2b, 0xa9, 0x7a, 0x3c, 0x12, 0xa9, 0x5c, 0xfa, 0x1c, 0x3a, 0x59, 0x1b, + 0x81, 0x8f, 0x60, 0x83, 0x27, 0x09, 0xd9, 0xe4, 0x83, 0x9e, 0x41, 0x0f, 0xb3, 0x6b, + 0x84, 0xf3, 0xac, 0x4f, 0x07, 0x0f, 0xc3, 0x5e, 0x16, 0x19, 0x78, 0x25, 0x9e, 0x5b, + 0x8e, 0xdc, 0x74, 0x4d, 0x90, 0x91, 0x9a, 0xa7, 0x70, 0xbb, 0x36, 0x21, 0x51, 0x28, + 0xe5, 0x82, 0xb5, 0x96, 0x41, 0xe2, 0x38, 0x52, 0xe9, 0x58, 0xeb, 0x8f, 0xc3, 0xc0, + 0xaa, 0x96, 0x15, 0x2b, 0xa4, 0xf7, 0x7f, 0x13, 0x8d, 0x6a, 0x67, 0x12, 0xa3, 0xae, + 0x32, 0x26, 0x01, 0x58, 0x83, 0xf8, 0x1d, 0xb2, 0x3e, 0x58, 0x3c, 0x86, 0x9c, 0x4c, + 0x71, 0x14, 0x3a, 0x6f, 0xff, 0xd6, 0x5e, 0x8d, 0xfd, 0xc5, 0x0c, 0x99, 0xa2, 0xf1, + 0xf3, 0x14, 0xcd, 0xcc, 0x71, 0x35, 0x9e, 0x23, 0x5f, 0x1d, 0x7d, 0xc2, 0xb5, 0xf3, + 0x8e, 0xf7, 0xb9, 0x70, 0x84, 0x31, 0x63, 0xc0, 0x3f, 0x9d, 0xd4, 0x0a, 0x80, 0x15, + 0xef, 0xdc, 0x87, 0x91, 0x95, 0x6a, 0x3f, 0x3c, 0xed, 0xd9, 0xea, 0x64, 0xf8, 0xef, + 0xa7, 0xa0, 0x81, 0x5a, 0x70, 0x38, 0x1d, 0x71, 0x46, 0x78, 0x17, 0xbd, 0x04, 0xca, + 0x52, 0x9a, 0xed, 0xe0, 0x7f, 0xf6, 0x0d, 0x17, 0x6a, 0xed, 0x0f, 0x85, 0x5a, 0x2e, + 0xae, 0xa8, 0x9e, 0xae, 0xac, 0xa8, 0x93, 0x58, 0xc0, 0x81, 0x82, 0x6a, 0x08, 
0x12, + 0xa5, 0xbc, 0xa2, 0x8b, 0xe1, 0x37, 0x3f, 0x08, 0x6d, 0xbd, 0xba, 0x7e, 0x43, 0xe2, + 0x03, 0x21, 0x2c, 0x9f, 0xed, 0x21, 0x47, 0x4b, 0xa1, 0x9a, 0x05, 0x5f, 0xfc, 0xc1, + 0x79, 0x41, 0x2e, 0x89, 0x3a, 0x74, 0x48, 0x32, 0x29, 0x8c, 0x5f, 0xe2, 0x4c, 0xc6, + 0xb1, 0x86, 0x67, 0xf4, 0x9b, 0x34, 0xdf, 0xb1, 0x23, 0x79, 0x26, 0x74, 0x19, 0xa9, + 0xcb, 0x94, 0x03, 0xd8, 0x16, 0x7d, 0x8d, 0x1e, 0x91, 0xd2, 0x81, 0x1a, 0x04, 0x3b, + 0x29, 0x24, 0x3b, 0x06, 0x9b, 0x37, 0x58, 0x78, 0x47, 0xdc, 0x6f, 0xcd, 0xdb, 0x18, + 0x31, 0xbd, 0x1c, 0xc2, 0x56, 0x7c, 0xa0, 0x33, 0xac, 0x40, 0xf7, 0x4a, 0xb6, 0x95, + 0x5f, 0x68, 0x3b, 0x12, 0xe4, 0xe8, 0x25, 0x4e, 0x4e, 0xa7, 0x60, 0xd3, 0x8b, 0x3f, + 0x46, 0x79, 0x1c, 0x5c, 0x4c, 0xb1, 0x2b, 0xc7, 0xcc, 0xb0, 0xed, 0x18, 0x65, 0xf2, + 0x5d, 0x60, 0x1c, 0x30, 0x3f, 0x81, 0xfb, 0x1f, 0xa1, 0xdb, 0x48, 0x53, 0x3d, 0x3d, + 0x6b, 0x28, 0x8e, 0x4d, 0x9a, 0x4d, 0xff, 0x8e, 0xc2, 0x1c, 0x96, 0xf5, 0x78, 0x39, + 0x97, 0x10, 0xc8, 0x25, 0xfe, 0x7e, 0x32, 0xf9, 0x3a, 0x8c, 0x07, 0x43, 0xf9, 0xeb, + 0xd5, 0x4c, 0xc1, 0x51, 0xc7, 0x61, 0x03, 0x37, 0xae, 0xbf, 0x7e, 0x9b, 0x91, 0x57, + 0x20, 0xa5, 0x43, 0x51, 0xd4, 0x9a, 0xb8, 0xc2, 0x2f, 0xa3, 0x49, 0x98, 0xdc, 0xf5, + 0x83, 0xd4, 0x38, 0x73, 0x61, 0xef, 0x3f, 0xf8, 0x6f, 0x50, 0xec, 0x53, 0xf4, 0x92, + 0x49, 0xe4, 0xad, 0x34, 0x96, 0x03, 0x06, 0x6f, 0xc9, 0xc6, 0x61, 0xd6, 0x9f, 0x91, + 0x1d, 0xfa, 0x72, 0x41, 0xc8, 0xd5, 0x79, 0x2d, 0x43, 0xc4, 0x57, 0xd5, 0xde, 0x96, + 0x52, 0x3a, 0x53, 0xd6, 0x67, 0xec, 0x5c, 0x4e, 0xf9, 0xd5, 0x02, 0xa1, 0x6f, 0x15, + 0x22, 0x47, 0x58, 0x96, 0xd7, 0x9b, 0xc5, 0x78, 0x33, 0xe9, 0x77, 0x17, 0x1c, 0x32, + 0x4d, 0xce, 0x2a, 0x1e, 0xa1, 0xe4, 0x30, 0x4f, 0x49, 0xe4, 0x3a, 0xe0, 0x65, 0xe3, + 0xfb, 0x19, 0x6f, 0x76, 0xd9, 0xb8, 0x79, 0xc7, 0x20, 0x08, 0x62, 0xea, 0xd1, 0x8d, + 0xea, 0x5f, 0xb6, 0xa1, 0x7a, 0xce, 0xa3, 0x33, 0x86, 0xeb, 0x4c, 0xa1, 0xb5, 0x14, + 0x86, 0xa9, 0x14, 0x8f, 0xbd, 0xf9, 0xa9, 0x53, 0x32, 0xaa, 0x60, 0x5c, 0x5d, 0x54, + 0x83, 0xce, 
0x4b, 0xa8, 0xec, 0xe0, 0x1a, 0x8f, 0xf2, 0xb7, 0xef, 0x82, 0xd0, 0x5c, + 0x0b, 0x6e, 0x86, 0x1b, 0x91, 0x5f, 0x13, 0xca, 0x0e, 0xb3, 0xea, 0x13, 0xd5, 0x07, + 0x08, 0x07, 0xa2, 0xcb, 0x66, 0x80, 0xa2, 0x49, 0xea, 0x9c, 0x72, 0x24, 0x39, 0x2c, + 0xbc, 0x8a, 0xb8, 0x25, 0x01, 0xb2, 0x6f, 0x11, 0x2a, 0xc7, 0x89, 0xa1, 0x2a, 0x31, + 0xad, 0x13, 0x14, 0xe2, 0xed, 0xe0, 0x8f, 0xad, 0x31, 0x43, 0xaf, 0x30, 0xc2, 0x7f, + 0x40, 0x3b, 0xc8, 0x66, 0xc7, 0x55, 0x17, 0x78, 0x52, 0xaf, 0xd0, 0xab, 0xb9, 0x0a, + 0xde, 0x1d, 0x68, 0x27, 0x26, 0xf4, 0x20, 0x08, 0xb4, 0x6a, 0xd7, 0xf8, 0xab, 0xdb, + 0x18, 0x11, 0x7f, 0x72, 0x64, 0x13, 0x90, 0xf0, 0x86, 0xb6, 0xe1, 0x49, 0x8b, 0xe6, + 0x95, 0x48, 0x52, 0x7e, 0x6a, 0xda, 0x2b, 0x38, 0xb9, 0xfe, 0x12, 0x1e, 0xf6, 0x70, + 0xaf, 0x74, 0x37, 0xd3, 0x25, 0x36, 0xd5, 0xcf, 0x5c, 0x4a, 0xb1, 0x9d, 0xd9, 0x97, + 0x71, 0x58, 0x2d, 0x03, 0x81, 0x04, 0xb7, 0xe0, 0x39, 0xa3, 0x76, 0xf7, 0xac, 0xbb, + 0xea, 0xdb, 0x34, 0xf9, 0x45, 0xbe, 0xb9, 0xd7, 0xca, 0x0e, 0x4e, 0x3d, 0x5c, 0x5e, + 0x4e, 0xb1, 0xd8, 0x52, 0x6e, 0xbd, 0x13, 0xda, 0xcb, 0x1b, 0xa3, 0x57, 0x35, 0xc6, + 0xd0, 0x4a, 0x45, 0x55, 0xac, 0xf4, 0xbf, 0x11, 0x76, 0x26, 0x50, 0x0d, 0x77, 0xb3, + 0x81, 0x89, 0xdd, 0x48, 0x88, 0x04, 0x12, 0x25, 0xac, 0xbe, 0x38, 0x74, 0xa4, 0xc0, + 0xf6, 0x07, 0xfe, 0x67, 0x45, 0xf9, 0x35, 0x5b, 0x3f, 0xa1, 0x88, 0xf1, 0xd6, 0x5c, + 0x09, 0xf3, 0x89, 0xaf, 0x1b, 0x9d, 0x62, 0x32, 0xaa, 0x79, 0x44, 0x79, 0x19, 0xc5, + 0x50, 0xf6, 0xf3, 0x1f, 0xec, 0x35, 0x48, 0x1c, 0xb9, 0x22, 0xde, 0x2d, 0xb5, 0xb4, + 0xda, 0x2f, 0x81, 0x94, 0x86, 0x17, 0x02, 0x8e, 0x32, 0x17, 0x06, 0xa3, 0xa7, 0x78, + 0xc1, 0x93, 0x8c, 0x44, 0x3b, 0xb0, 0x0e, 0x5b, 0x0f, 0xf0, 0x6a, 0xd8, 0xab, 0x9b, + 0x1a, 0xb0, 0xc1, 0x14, 0x77, 0x67, 0x3f, 0x85, 0xdf, 0x95, 0x61, 0xdb, 0xea, 0x45, + 0xd5, 0xf9, 0x78, 0x1e, 0xbe, 0x31, 0x7a, 0x07, 0x10, 0xae, 0x54, 0x61, 0xe3, 0x4f, + 0xe6, 0xf1, 0xb1, 0xaa, 0x9b, 0x4e, 0x67, 0xb1, 0x49, 0x10, 0x98, 0x48, 0x02, 0xc2, + 0xa7, 0xe3, 0x81, 0x93, 0xbc, 
0x7b, 0xdc, 0x8b, 0xa3, 0xe4, 0xe3, 0xd1, 0xd9, 0x33, + 0xbf, 0xb5, 0x80, 0xf5, 0xb3, 0xe8, 0x7a, 0x2a, 0x06, 0x51, 0x70, 0x51, 0x41, 0x0f, + 0xe1, 0xb4, 0xff, 0x1e, 0xa0, 0xad, 0xe8, 0x24, 0xf3, 0x38, 0x51, 0x54, 0x56, 0xa5, + 0x7c, 0x7a, 0x91, 0x6a, 0x74, 0x38, 0x8e, 0xe8, 0xf1, 0x28, 0x1f, 0x9a, 0xde, 0x0a, + 0xe2, 0xa2, 0x61, 0x3a, 0x06, 0x12, 0xc4, 0x69, 0xdf, 0x79, 0x2b, 0x8d, 0xf4, 0xca, + 0xe4, 0xfc, 0x25, 0xc1, 0xca, 0xdb, 0xa9, 0x5a, 0x80, 0x7c, 0xe6, 0x1e, 0x5a, 0x53, + 0x03, 0xfa, 0xaf, 0x9e, 0x14, 0x65, 0x39, 0x96, 0xb5, 0xa8, 0xad, 0xc3, 0x4f, 0xd4, + 0x75, 0xef, 0x14, 0x99, 0x09, 0x4b, 0xab, 0xaf, 0x1f, 0x3f, 0x07, 0xda, 0x9a, 0x39, + 0x0b, 0x1d, 0x9f, 0xc9, 0xa0, 0x83, 0x27, 0x98, 0x7a, 0xdf, 0xe9, 0x56, 0x48, 0x63, + 0xfb, 0xdf, 0xa8, 0xf6, 0xb4, 0x6a, 0x88, 0x41, 0x58, 0x30, 0x99, 0xaf, 0xb7, 0x87, + 0x01, 0x18, 0xfa, 0xce, 0x76, 0x34, 0x7e, 0x40, 0xb6, 0xfd, 0x8c, 0xd1, 0x55, 0x82, + 0xae, 0x8e, 0x23, 0xbe, 0x9a, 0x02, 0x19, 0xbc, 0x3e, 0x4e, 0x45, 0x46, 0xa3, 0x0d, + 0x3b, 0xbb, 0xbd, 0x16, 0x86, 0x08, 0x68, 0x76, 0xbe, 0x0e, 0x4c, 0x85, 0x9b, 0xe7, + 0x1f, 0xb5, 0x8f, 0x4f, 0xab, 0x3d, 0x28, 0xc0, 0xb4, 0xf7, 0xe7, 0x5a, 0xd1, 0xed, + 0xb7, 0xf8, 0x89, 0x46, 0xfb, 0x40, 0xcf, 0xa5, 0x78, 0x6a, 0x0f, 0xcb, 0xa1, 0x30, + 0x3c, 0x83, 0x47, 0xec, 0xee, 0x93, 0xd4, 0x6d, 0x14, 0x0b, 0xb5, 0xf6, 0x95, 0x31, + 0xd6, 0x66, 0x54, 0x8b, 0x10, 0x9c, 0xe7, 0x64, 0xbe, 0xad, 0x7c, 0x87, 0xbd, 0x4c, + 0x87, 0x64, 0x94, 0xde, 0x82, 0xdb, 0x6e, 0x50, 0x73, 0xa6, 0xc9, 0x4f, 0x7c, 0x09, + 0x9a, 0x40, 0xd7, 0xa3, 0x1c, 0x4a, 0x04, 0xb6, 0x9c, 0x9f, 0xcc, 0xf3, 0xc7, 0xdd, + 0x56, 0xf5, 0x54, 0x47, 0x76, 0xc5, 0x3b, 0x4d, 0xf7, 0x95, 0x39, 0x81, 0xd5, 0x5a, + 0x96, 0xa6, 0xdc, 0xff, 0x99, 0x04, 0xa9, 0x08, 0x42, 0xe5, 0xba, 0xfe, 0xc8, 0x84, + 0x0c, 0x2d, 0x25, 0x5b, 0xf5, 0xad, 0x61, 0xc4, 0x60, 0xf9, 0x8f, 0xeb, 0x82, 0xa1, + 0x0f, 0xa1, 0xc0, 0x99, 0xf6, 0x27, 0x76, 0x79, 0x82, 0x36, 0xc5, 0xca, 0x7f, 0x1e, + 0x46, 0xeb, 0xdb, 0x2b, 0x14, 0x4d, 0x87, 0x13, 
0xe5, 0x6c, 0x77, 0x2f, 0x2c, 0x3b, + 0x86, 0x0e, 0xa5, 0xb0, 0x3a, 0x88, 0x54, 0xbc, 0x6e, 0x65, 0x90, 0xd6, 0x3c + ], + }, + TestVector { + description: "Sapling transaction #1", + version: 4, + lock_time: 272867497, + expiry_height: 495961908, + txid: [ + 0x34, 0xc1, 0xc2, 0xed, 0x68, 0xed, 0xbd, 0xae, 0xd4, 0x00, 0x42, 0x59, 0xb5, 0xfd, + 0xec, 0x78, 0xa5, 0x3b, 0x2e, 0xd9, 0xc7, 0x18, 0xe7, 0xe0, 0xaf, 0x26, 0x93, 0xc6, + 0x21, 0x0e, 0x4e, 0xc5 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 2, + transparent_outputs: 2, + tx: vec![ + 0x04, 0x00, 0x00, 0x80, 0x85, 0x20, 0x2f, 0x89, 0x02, 0xea, 0x54, 0xf1, 0x0b, 0x73, + 0xba, 0x24, 0x1b, 0xf7, 0x4b, 0x63, 0x55, 0x51, 0xa2, 0xaa, 0xca, 0x96, 0x87, 0xac, + 0x52, 0x69, 0xfd, 0x36, 0x8b, 0x26, 0xd7, 0x0a, 0x73, 0x7f, 0x26, 0x76, 0x85, 0x99, + 0x8a, 0x3f, 0x7d, 0x08, 0x6a, 0x51, 0x51, 0x51, 0x6a, 0x65, 0x51, 0x63, 0x24, 0xc4, + 0x98, 0x63, 0x5e, 0xf9, 0x7a, 0xc6, 0x6a, 0x40, 0x08, 0x94, 0xc0, 0x9f, 0x73, 0x48, + 0x8e, 0xb7, 0xcf, 0x33, 0xf6, 0xda, 0xd1, 0x66, 0x6a, 0x05, 0xf9, 0x1a, 0xd7, 0x75, + 0x79, 0x65, 0xc2, 0x99, 0x36, 0xe7, 0xfa, 0x48, 0xd7, 0x7e, 0x01, 0x65, 0x09, 0x62, + 0xf5, 0x8c, 0x02, 0xff, 0x7a, 0x76, 0x29, 0x15, 0x69, 0x02, 0x00, 0x08, 0x65, 0x00, + 0x00, 0x52, 0x53, 0x65, 0x00, 0x00, 0x2a, 0xf0, 0x45, 0xe0, 0x89, 0xd2, 0x03, 0x00, + 0x09, 0x51, 0x52, 0x63, 0x51, 0xac, 0x63, 0x63, 0x65, 0x63, 0xa9, 0xa0, 0x43, 0x10, + 0x34, 0xc7, 0x8f, 0x1d, 0xc1, 0x29, 0xf0, 0xe6, 0x39, 0x3c, 0x06, 0x00, 0x00, 0x01, + 0xf6, 0xd5, 0x2c, 0xf5, 0x5f, 0x36, 0xe3, 0x73, 0x42, 0xd8, 0x3a, 0x91, 0x0f, 0x21, + 0xb4, 0x2e, 0x15, 0x3d, 0x92, 0x1a, 0x7a, 0x17, 0x00, 0x46, 0xcd, 0xae, 0x06, 0x50, + 0xbd, 0x02, 0x4b, 0xea, 0xa8, 0x38, 0x2c, 0x6b, 0x04, 0xe7, 0xd8, 0x08, 0x5f, 0x34, + 0x6e, 0xf8, 0x49, 0xfe, 0x03, 0xcd, 0x76, 0xaf, 0x00, 0xb3, 0xda, 0x30, 0xd0, 0x64, + 0x49, 0xe9, 0xef, 0x3f, 0x5d, 0x9d, 0xd0, 0x5a, 0xfc, 0x70, 0x82, 0xb1, 0xe8, 0x5b, + 0x5b, 0xa5, 0x46, 0x63, 0x11, 0x3e, 0x0c, 
0xf2, 0x33, 0xf4, 0xe0, 0xb2, 0xbf, 0xa9, + 0x25, 0x5c, 0xe7, 0xe5, 0x09, 0x05, 0xfb, 0x89, 0xf2, 0xf9, 0xb8, 0x3f, 0x87, 0x63, + 0xfb, 0x96, 0xd7, 0xca, 0x33, 0x3a, 0x12, 0xde, 0x3c, 0xef, 0xa9, 0x1c, 0x6c, 0x98, + 0xf9, 0x47, 0x3b, 0x8e, 0x10, 0x4a, 0x71, 0x29, 0x3e, 0x46, 0x37, 0x47, 0x05, 0xba, + 0xf6, 0x5f, 0xa4, 0x13, 0x84, 0xba, 0x5c, 0x8e, 0x0c, 0x88, 0xa3, 0xeb, 0x07, 0xe0, + 0xbe, 0x34, 0xda, 0xdd, 0xfa, 0xbb, 0x7b, 0x65, 0x54, 0x3b, 0x5f, 0x39, 0xcb, 0x20, + 0x23, 0xd4, 0x67, 0x89, 0xeb, 0x7d, 0x98, 0x9a, 0xf7, 0x79, 0xe5, 0xb8, 0xd2, 0x83, + 0x85, 0xa8, 0x5b, 0x0d, 0xa2, 0xab, 0xe0, 0x7f, 0x0c, 0x2b, 0xb4, 0x25, 0x5f, 0xce, + 0xa0, 0x31, 0x88, 0x52, 0x7a, 0x30, 0x7d, 0x40, 0x91, 0x59, 0xe9, 0x01, 0x66, 0xfa, + 0xc6, 0xa0, 0x70, 0xba, 0x05, 0xb3, 0xe4, 0xdb, 0xfd, 0x3a, 0x2b, 0xfc, 0xc9, 0xee, + 0x6e, 0xd0, 0x16, 0xc0, 0xf6, 0x65, 0xbe, 0x81, 0x33, 0xb7, 0xdc, 0x1d, 0x86, 0x04, + 0x4d, 0xb0, 0xf9, 0xdb, 0x40, 0xfb, 0x0e, 0x9f, 0x8b, 0xc2, 0xe4, 0xdb, 0x53, 0x82, + 0xa8, 0xb4, 0xf8, 0x15, 0xb4, 0xe8, 0x43, 0x4a, 0xd0, 0xdf, 0xbc, 0x51, 0xa5, 0xe9, + 0xb1, 0x45, 0xe1, 0x59, 0x6c, 0xbf, 0x46, 0x70, 0xb7, 0xe0, 0x5d, 0xfd, 0xaf, 0xbb, + 0x0c, 0xf3, 0xdd, 0xee, 0x28, 0xd7, 0x6a, 0x82, 0x42, 0x8e, 0x8a, 0xba, 0x43, 0x64, + 0xe8, 0x4b, 0xac, 0x37, 0x92, 0x98, 0xdf, 0x29, 0x32, 0xe6, 0x9b, 0xb5, 0xd0, 0x45, + 0x51, 0x6e, 0xfc, 0x33, 0xae, 0x6c, 0xc3, 0x94, 0x7c, 0xeb, 0x09, 0xed, 0x37, 0x16, + 0x67, 0x21, 0x2a, 0x83, 0x1b, 0x54, 0x85, 0xea, 0xfc, 0xe8, 0x48, 0x81, 0x88, 0xea, + 0x4e, 0x27, 0xd0, 0xcd, 0xf7, 0xdd, 0xd3, 0x48, 0xab, 0xff, 0x77, 0x7f, 0x4a, 0x13, + 0xbb, 0xc7, 0x16, 0xb6, 0xa5, 0x94, 0x4e, 0xe7, 0x27, 0x96, 0x56, 0x90, 0xe2, 0x09, + 0xb4, 0x9e, 0xb9, 0x62, 0xc0, 0x39, 0x97, 0x5f, 0x93, 0x9e, 0xd5, 0xc6, 0xe4, 0xc4, + 0x00, 0xd8, 0x87, 0x75, 0x94, 0x33, 0xd3, 0xad, 0x71, 0x6d, 0xa0, 0xcb, 0x44, 0x61, + 0x13, 0xc7, 0x72, 0x7a, 0x64, 0xb5, 0x8c, 0x3f, 0x8a, 0x0f, 0x81, 0x18, 0x9f, 0x98, + 0x00, 0x52, 0x33, 0xa8, 0x13, 0x66, 0xae, 0xe7, 0x3c, 0xec, 
0x85, 0x22, 0x8e, 0xbc, + 0xfd, 0x5e, 0xe3, 0xc3, 0xfb, 0x44, 0xdb, 0x76, 0xba, 0x24, 0x3f, 0x28, 0x42, 0xb7, + 0xb5, 0xfc, 0x74, 0x6a, 0xe5, 0x1b, 0x0b, 0xc4, 0xbd, 0x4f, 0xc9, 0xfd, 0x83, 0x35, + 0x65, 0xea, 0x85, 0x2b, 0x92, 0xb2, 0x24, 0xf6, 0x99, 0x03, 0x18, 0xad, 0x8c, 0x7d, + 0x94, 0x37, 0xe2, 0x0e, 0x2a, 0x1f, 0x20, 0xe8, 0x18, 0xf9, 0x05, 0x7c, 0x5a, 0xba, + 0xaa, 0x2e, 0x5c, 0x15, 0xb9, 0x49, 0x45, 0xcd, 0x42, 0x4c, 0x28, 0xa5, 0xfa, 0x38, + 0x5d, 0xad, 0xfe, 0x49, 0x07, 0xb2, 0x74, 0xd8, 0x42, 0x70, 0x7d, 0xb3, 0x69, 0x7a, + 0x5a, 0xe6, 0xc8, 0xf5, 0x42, 0xe5, 0xec, 0xc0, 0x7f, 0xe4, 0x73, 0x50, 0xd1, 0x01, + 0x46, 0x70, 0x21, 0x2e, 0xfe, 0x81, 0xfb, 0x7c, 0x73, 0xe8, 0x45, 0x0d, 0xf8, 0x14, + 0xef, 0x62, 0x32, 0xf7, 0x49, 0x0f, 0x63, 0xcc, 0xf0, 0x74, 0x80, 0xf8, 0x84, 0xa6, + 0x6e, 0xaf, 0xfc, 0x28, 0xfe, 0xa4, 0x48, 0xd7, 0xb4, 0x01, 0xcd, 0xae, 0x10, 0xe7, + 0xc0, 0xc7, 0xf9, 0xa7, 0xb1, 0x53, 0x31, 0x96, 0x9f, 0xc8, 0xcb, 0x36, 0x39, 0x67, + 0x73, 0xde, 0x19, 0x19, 0x31, 0xc7, 0x50, 0xf6, 0xce, 0x5c, 0xaa, 0xf2, 0x97, 0x68, + 0xeb, 0xb2, 0x7d, 0xac, 0xc7, 0x38, 0x05, 0x6a, 0x81, 0x25, 0xb4, 0x77, 0x2b, 0xf8, + 0x7a, 0xe1, 0x0a, 0x8a, 0x30, 0x9b, 0x9b, 0xd6, 0x55, 0x04, 0x3c, 0xfc, 0x31, 0x59, + 0x49, 0x43, 0x68, 0xc5, 0xab, 0x8c, 0xad, 0xb7, 0xf6, 0x71, 0xe9, 0x62, 0x6b, 0xd2, + 0x63, 0xe3, 0x11, 0x81, 0xa6, 0x04, 0xb5, 0x06, 0xa0, 0x3b, 0x43, 0x9a, 0x7f, 0xfe, + 0x43, 0x55, 0x89, 0x24, 0x77, 0xe2, 0xbd, 0xf3, 0x38, 0xc6, 0x2c, 0x39, 0x22, 0xf7, + 0xd3, 0xc9, 0xa5, 0x6c, 0x71, 0x03, 0xd9, 0x11, 0x94, 0x8a, 0x84, 0xb5, 0xae, 0x2d, + 0xbb, 0x16, 0xa3, 0x76, 0x1a, 0xdd, 0x05, 0x3a, 0x0f, 0x96, 0x7e, 0x6b, 0x5b, 0xc9, + 0x42, 0x11, 0xb6, 0x54, 0x71, 0x53, 0x26, 0x7c, 0x6e, 0xe1, 0xca, 0xd0, 0xd9, 0x74, + 0xa7, 0x10, 0x88, 0x58, 0x37, 0x35, 0xe4, 0xf6, 0x3d, 0x33, 0x15, 0x6d, 0xad, 0xd5, + 0x4c, 0x2f, 0xaf, 0x89, 0x11, 0x4a, 0x12, 0x7b, 0x97, 0xb9, 0x4c, 0xc2, 0xa2, 0x2e, + 0xf3, 0x03, 0xf4, 0x59, 0xd0, 0x4f, 0xc0, 0xb5, 0x3a, 0xce, 0x59, 0x18, 0xd4, 
0x7f, + 0xf3, 0x3a, 0x55, 0x8b, 0xd7, 0x1a, 0x75, 0xf3, 0x55, 0xfb, 0xd0, 0x6b, 0xbc, 0xcf, + 0x4e, 0x02, 0xc3, 0xc0, 0xa4, 0xb6, 0x3d, 0x0c, 0xc9, 0x49, 0x80, 0x1d, 0x63, 0xa6, + 0x4c, 0xb2, 0xd3, 0x23, 0x73, 0xb2, 0xc7, 0xb2, 0x74, 0xab, 0x2d, 0xb4, 0x68, 0x21, + 0x42, 0xc8, 0xb2, 0x1d, 0x84, 0xc4, 0x81, 0xf5, 0xef, 0x21, 0xe4, 0xb5, 0xe3, 0x60, + 0x34, 0x51, 0xbf, 0x94, 0x77, 0x4d, 0x0e, 0xf4, 0x7f, 0x63, 0xfa, 0x6a, 0xbb, 0x78, + 0xd2, 0x1c, 0x19, 0x3c, 0xbe, 0x65, 0xb6, 0x95, 0xfe, 0x67, 0x42, 0x3c, 0x1e, 0x2d, + 0x31, 0x2e, 0x27, 0x76, 0xfa, 0x24, 0xec, 0xe8, 0x46, 0x83, 0xe7, 0x48, 0x76, 0xc5, + 0x5e, 0xa0, 0x36, 0x9e, 0x4e, 0xa0, 0xe8, 0x64, 0x94, 0xe0, 0x0d, 0xde, 0x23, 0x6a, + 0x16, 0x89, 0x73, 0x1f, 0x0a, 0x5d, 0x82, 0x03, 0xaf, 0xde, 0x5c, 0x42, 0x36, 0x40, + 0xb8, 0x1e, 0x4f, 0x63, 0x1c, 0x98, 0x1c, 0x11, 0xa2, 0xe1, 0xd1, 0x84, 0xc6, 0x7c, + 0x52, 0x8d, 0xf9, 0x2d, 0x53, 0xae, 0xc4, 0x4a, 0x40, 0xa4, 0xea, 0x2a, 0x13, 0x1b, + 0x47, 0x33, 0xcf, 0xe4, 0x5c, 0x6b, 0x00, 0x12, 0xc3, 0xe9, 0xe2, 0x09, 0x75, 0xba, + 0xae, 0xcb, 0x02, 0x32, 0xdf, 0x88, 0x0b, 0xd7, 0xd1, 0xde, 0x13, 0xe1, 0x34, 0x94, + 0x62, 0xec, 0x8d, 0x5d, 0xf3, 0xe7, 0x80, 0xff, 0xa7, 0x2e, 0xba, 0x8a, 0x8d, 0xf7, + 0xfc, 0xf3, 0x98, 0xec, 0x23, 0x05, 0x13, 0xca, 0x9d, 0x61, 0x23, 0xf8, 0xb9, 0xd8, + 0x17, 0x85, 0x60, 0xda, 0xf9, 0x75, 0x11, 0x19, 0x55, 0xa2, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, + 0x42, 0x3e, 0xee, 0xfc, 0x52, 0x7b, 0xe3, 0xa8, 0x54, 0x3e, 0xb9, 0x0a, 0x5e, 0xc0, + 0x2f, 0x35, 0xa7, 0xc6, 0x4b, 0x7d, 0xd5, 0x9a, 0x72, 0xda, 0x00, 0x74, 0x63, 0x4e, + 0x01, 0xd2, 0xab, 0xf3, 0x63, 0x7a, 0xdd, 0x77, 0xc7, 0x35, 0x0f, 0x12, 0xb0, 0x11, + 0xb2, 0x94, 0x16, 0x8e, 0xc7, 0x55, 0x76, 0xe4, 0x7d, 0x16, 0x9e, 0x39, 0x38, 0xbf, + 0x6a, 0xe2, 0xaa, 0x8f, 0xf7, 0xcf, 0xba, 0x7c, 0xac, 0xb1, 0xf9, 0x2b, 0x6e, 0x4c, + 0x24, 0x97, 0xbf, 0xfa, 0x9f, 0x17, 0xca, 0xd2, 0x42, 0xfa, 0x9c, 0x31, 0x79, 0xc1, + 0xa3, 0xaa, 
0x81, 0xf7, 0x36, 0x16, 0x49, 0x57, 0x2c, 0x71, 0x5c, 0x25, 0xa1, 0xf6, + 0xcd, 0x5a, 0xce, 0x82, 0xc0, 0x0a, 0xb2, 0x34, 0x2b, 0x9c, 0x3c, 0xb4, 0xff, 0xfd, + 0xda, 0x16, 0x0c, 0xa5, 0xab, 0x9e, 0x9b, 0xaf, 0x21, 0x39, 0xef, 0x9a, 0xfb, 0xe1, + 0xb1, 0xf3, 0x09, 0x46, 0x2a, 0xfc, 0xe4, 0x62, 0xa7, 0x9b, 0xb9, 0x69, 0x8e, 0x22, + 0xc9, 0x57, 0xc5, 0x90, 0xa7, 0x53, 0xa7, 0x6b, 0x87, 0xe0, 0x09, 0x12, 0x1e, 0x06, + 0xf6, 0xa1, 0xbf, 0x62, 0xa0, 0x8b, 0xf4, 0x35, 0xd9, 0x2e, 0x2f, 0xff, 0xe8, 0x6e, + 0x2a, 0x9c, 0xbb, 0xa9, 0x13, 0x3a, 0x68, 0xe4, 0xae, 0xbf, 0x33, 0xc3, 0x84, 0x36, + 0xf2, 0x54, 0x5f, 0xc2, 0xd5, 0x28, 0x32, 0xd1, 0x65, 0xaf, 0x41, 0x5b, 0x24, 0x4a, + 0xdc, 0x5f, 0x57, 0x37, 0x7d, 0xee, 0xdf, 0x46, 0x0a, 0xa3, 0xbe, 0xb4, 0x34, 0x19, + 0xc6, 0xb0, 0x82, 0xe8, 0x35, 0xce, 0x84, 0xca, 0x13, 0xb6, 0x90, 0x8a, 0x88, 0x13, + 0xc0, 0x21, 0xde, 0x9f, 0xa9, 0xa4, 0x4e, 0x4c, 0x18, 0xdc, 0xb3, 0xd2, 0x1f, 0xaa, + 0xbd, 0xb4, 0x19, 0x31, 0xb2, 0xfd, 0x49, 0x76, 0x44, 0xdc, 0x3a, 0x15, 0x07, 0xfa, + 0x5a, 0xc7, 0xc7, 0x6b, 0xee, 0xbb, 0xdb, 0xd1, 0xd4, 0x92, 0x99, 0xa5, 0x5b, 0xd4, + 0x99, 0x27, 0xe9, 0xd7, 0xf4, 0x88, 0x4e, 0x6e, 0xd3, 0xfd, 0x5e, 0x4b, 0x7c, 0xb8, + 0x35, 0xb8, 0x33, 0x08, 0x96, 0x4e, 0x3c, 0x46, 0x87, 0x3f, 0xd6, 0x13, 0x31, 0x7b, + 0x91, 0xd2, 0x92, 0x36, 0xea, 0x90, 0xe3, 0x65, 0xd1, 0x62, 0xcc, 0x05, 0x1c, 0x84, + 0x6d, 0x24, 0x21, 0x76, 0xda, 0xf6, 0xd2, 0x86, 0x18, 0xae, 0x31, 0xfb, 0xaa, 0xe9, + 0x99, 0xa9, 0x3f, 0x17, 0x5c, 0x69, 0x38, 0xe6, 0x31, 0xa0, 0x81, 0xf2, 0xc1, 0xf3, + 0xfd, 0x78, 0x25, 0x49, 0xd3, 0xf3, 0x24, 0x57, 0x59, 0x60, 0x6d, 0x9f, 0x92, 0xd5, + 0x54, 0x8a, 0xcf, 0xea, 0xdb, 0xaf, 0x9c, 0xaa, 0x6b, 0x93, 0xdc, 0x08, 0x82, 0x8d, + 0x74, 0xf6, 0xd5, 0xfd, 0xd8, 0x33, 0x31, 0xf0, 0x96, 0x91, 0x45, 0x95, 0x52, 0x97, + 0xe6, 0x9f, 0x00, 0xfd, 0x29, 0x87, 0xf2, 0xda, 0x2b, 0x94, 0xb9, 0x95, 0xfe, 0xcb, + 0xe6, 0x22, 0xa7, 0x35, 0xef, 0x7f, 0x12, 0x07, 0xf6, 0x71, 0x62, 0x94, 0x89, 0x20, + 0x2b, 0xea, 0x0b, 0x47, 0x5e, 
0x51, 0x68, 0x1a, 0xa1, 0x67, 0x78, 0xb3, 0x9b, 0xd9, + 0x23, 0xc9, 0x8d, 0xc6, 0xff, 0x83, 0x73, 0xc7, 0x9b, 0xb1, 0x70, 0x30, 0x41, 0x7b, + 0xc2, 0x00, 0xc8, 0xf0, 0xb8, 0x55, 0xac, 0xfe, 0xc1, 0x79, 0xf7, 0x67, 0x4c, 0xec, + 0x27, 0x21, 0xa1, 0x0f, 0xca, 0x69, 0x3d, 0x83, 0xcf, 0xe5, 0xb8, 0xcd, 0xcc, 0x18, + 0xf8, 0x1a, 0xd6, 0x17, 0xfa, 0x26, 0xf0, 0xdf, 0xb8, 0x36, 0x55, 0xb8, 0xa2, 0x9a, + 0x7f, 0x83, 0x42, 0x32, 0x42, 0x5e, 0x8c, 0x47, 0x45, 0x88, 0xf1, 0x8d, 0xd3, 0x26, + 0xaa, 0x39, 0x6c, 0x3e, 0x47, 0x75, 0xe0, 0x02, 0x05, 0xfc, 0x9e, 0x45, 0xf7, 0xb7, + 0xd2, 0xe6, 0xd5, 0x5d, 0xcb, 0x90, 0xe2, 0x3f, 0xf6, 0xb5, 0x08, 0x45, 0x9a, 0xa6, + 0x99, 0xbf, 0xcb, 0xd5, 0x6f, 0x10, 0x99, 0x77, 0x64, 0xd0, 0x87, 0x40, 0x89, 0x86, + 0xe7, 0x3d, 0x6e, 0x28, 0x4f, 0xea, 0x9a, 0x23, 0xc3, 0x93, 0x11, 0x78, 0x2f, 0x86, + 0xca, 0xbf, 0xf9, 0x45, 0x5e, 0x4c, 0xf6, 0x99, 0xe5, 0xf5, 0xd4, 0xbc, 0x0b, 0x39, + 0x05, 0xa4, 0xe3, 0xbd, 0x01, 0xc5, 0x4d, 0xf8, 0x64, 0x34, 0x43, 0xbe, 0x0f, 0x88, + 0x90, 0x32, 0xea, 0x32, 0x5b, 0xf0, 0x71, 0x07, 0xfd, 0x41, 0xd6, 0x73, 0xee, 0xba, + 0xe6, 0xfa, 0x63, 0x7b, 0x70, 0xcc, 0x0e, 0xd3, 0xf0, 0x09, 0x58, 0xdf, 0xb8, 0xdc, + 0xf0, 0x0e, 0x85, 0xa1, 0xd0, 0xa6, 0xa8, 0x90, 0x81, 0x40, 0xc2, 0xf4, 0x34, 0xc2, + 0xe2, 0x60, 0xef, 0xb0, 0xbc, 0xa2, 0x00, 0x35, 0x04, 0xc9, 0x99, 0x93, 0xa9, 0xe1, + 0xc0, 0xff, 0x9c, 0xef, 0xe6, 0xa6, 0x65, 0xd7, 0x91, 0x42, 0x86, 0x90, 0xe4, 0x7e, + 0xf8, 0xc1, 0x31, 0xa8, 0xe9, 0xbf, 0xb4, 0xc3, 0x08, 0x02, 0x35, 0x03, 0x2d, 0x73, + 0x1b, 0x0d, 0x38, 0x41, 0x22, 0x5f, 0x1c, 0x11, 0xe2, 0xc2, 0x8e, 0xe8, 0x4d, 0x35, + 0xf9, 0x22, 0x61, 0x00, 0x56, 0x59, 0x72, 0xeb, 0x26, 0x9d, 0x27, 0x8e, 0xf6, 0x49, + 0x79, 0xbf, 0x65, 0x15, 0xed, 0x4a, 0x68, 0x40, 0xb0, 0x88, 0x3a, 0x9e, 0x6e, 0xf6, + 0x4a, 0x0e, 0xfc, 0xae, 0x1c, 0xf2, 0x1d, 0xfe, 0x74, 0x85, 0x4e, 0x84, 0xc2, 0x74, + 0x9f, 0xac, 0x03, 0x82, 0x52, 0x75, 0xc9, 0xb6, 0x30, 0x21, 0x84, 0xc7, 0x2d, 0xf4, + 0xc4, 0xbb, 0x28, 0x62, 0xe4, 0xe8, 0xa7, 0xd9, 
0xa4, 0xa2, 0x82, 0x86, 0x6f, 0x9a, + 0x7b, 0x2c, 0xfc, 0x9a, 0x56, 0x31, 0x3d, 0xa0, 0xc4, 0x7a, 0x34, 0xb7, 0xb9, 0xcd, + 0xa3, 0xac, 0xe8, 0x18, 0x5f, 0x07, 0xdf, 0x36, 0xe4, 0x48, 0xa7, 0x6a, 0xa4, 0x77, + 0xf2, 0x24, 0xd8, 0x7a, 0x07, 0x4f, 0x43, 0xaf, 0x5d, 0x5f, 0x79, 0xb3, 0xab, 0x11, + 0x28, 0xf0, 0x81, 0x91, 0x44, 0x7f, 0xa6, 0x46, 0xbf, 0xdd, 0xe5, 0xb5, 0x1e, 0x23, + 0x3c, 0xa6, 0x15, 0x5d, 0x10, 0x15, 0x85, 0xbc, 0x2c, 0x40, 0x15, 0x8a, 0xc2, 0x10, + 0x6e, 0x66, 0xa2, 0x6e, 0x46, 0x42, 0x33, 0x70, 0x63, 0x68, 0x76, 0xb4, 0x34, 0xa7, + 0x4f, 0x8c, 0xe8, 0x06, 0x00, 0x50, 0xb0, 0x82, 0xa7, 0x9b, 0x61, 0xbb, 0x5d, 0x34, + 0x4e, 0xb5, 0xa1, 0x15, 0x83, 0x26, 0xce, 0xd9, 0xa9, 0xd9, 0xf5, 0x4f, 0xb2, 0xfe, + 0x8f, 0x9f, 0x05, 0xcd, 0x11, 0x1e, 0xe4, 0x6c, 0x47, 0x10, 0xf6, 0xf6, 0x3a, 0x62, + 0x69, 0x45, 0x57, 0xef, 0x1b, 0x12, 0xc8, 0x80, 0x06, 0xb6, 0x78, 0x72, 0x50, 0x5f, + 0x4e, 0x88, 0x3b, 0x58, 0x59, 0x07, 0x92, 0x9a, 0x2f, 0x3f, 0xdb, 0x0d, 0x8f, 0x79, + 0x14, 0xc4, 0x2d, 0xde, 0x2d, 0x20, 0x00, 0xf5, 0xae, 0x02, 0xd4, 0x18, 0x21, 0xc8, + 0xe1, 0xee, 0x01, 0x38, 0xeb, 0xcb, 0x72, 0x8d, 0x7c, 0x6c, 0x3c, 0x80, 0x02, 0x7e, + 0x43, 0x75, 0x94, 0xc6, 0x70, 0xfd, 0x6f, 0x39, 0x08, 0x22, 0x2e, 0xe7, 0xa1, 0xb9, + 0x17, 0xf8, 0x27, 0x1a, 0xbe, 0x66, 0x0e, 0x39, 0xe0, 0x51, 0xaa, 0xa6, 0xfc, 0xa1, + 0x86, 0x22, 0x76, 0xe2, 0xba, 0xa0, 0xfe, 0x0b, 0x16, 0x2a, 0xeb, 0xcf, 0xe3, 0xd9, + 0x34, 0x9c, 0x8d, 0x15, 0x4b, 0xb7, 0xee, 0x28, 0x21, 0x2c, 0x1b, 0xaa, 0x70, 0x5d, + 0x82, 0x07, 0x0d, 0x70, 0x32, 0xf2, 0x69, 0x5d, 0x17, 0x96, 0x80, 0x9f, 0xab, 0x41, + 0x24, 0x69, 0x26, 0xaf, 0x99, 0x2b, 0x6e, 0xee, 0x95, 0xa9, 0xa0, 0x6b, 0xc4, 0x56, + 0x2c, 0x5f, 0x2f, 0x1b, 0x19, 0x54, 0x95, 0x00, 0x37, 0x2e, 0x7a, 0xd5, 0x79, 0xa6, + 0xd6, 0xd7, 0x8b, 0x33, 0x15, 0x31, 0x30, 0xfb, 0x44, 0x8f, 0xb7, 0x9e, 0x8a, 0x66, + 0x9d, 0xb8, 0xa0, 0xf3, 0x5c, 0xdf, 0x9a, 0xe5, 0xd3, 0x2d, 0x73, 0x2f, 0xc7, 0x94, + 0x18, 0xe2, 0x3b, 0x45, 0x1d, 0xdc, 0x95, 0xa2, 0x2a, 0xba, 0xbb, 
0x05, 0x6e, 0xc6, + 0xb5, 0xe8, 0xba, 0x4f, 0x52, 0x4d, 0xfa, 0xfe, 0x87, 0x52, 0x62, 0xdd, 0x7b, 0xe4, + 0x1c, 0xbb, 0xc6, 0x24, 0x20, 0xd4, 0xad, 0x6d, 0xf5, 0xc9, 0xb7, 0x13, 0x60, 0x4f, + 0x65, 0x60, 0x88, 0xa4, 0x48, 0x5e, 0x93, 0xbe, 0x19, 0x07, 0xd2, 0x7a, 0xc6, 0xec, + 0x3c, 0x57, 0x25, 0x9b, 0xd6, 0x98, 0x1d, 0x42, 0xc1, 0xb7, 0x8a, 0x29, 0xad, 0x96, + 0x85, 0xe6, 0x3c, 0x49, 0x4d, 0x41, 0x29, 0x62, 0x3e, 0xa1, 0xa7, 0xff, 0xec, 0x85, + 0xfa, 0x29, 0x41, 0x10, 0x73, 0xed, 0xb2, 0x97, 0x8e, 0xf4, 0xe4, 0x69, 0xdd, 0xd5, + 0xcd, 0xa9, 0x86, 0x18, 0x99, 0x95, 0xf8, 0x8d, 0x6a, 0xb3, 0x66, 0xdb, 0x01, 0x90, + 0x01, 0xf5, 0xb2, 0x52, 0x88, 0xcf, 0x86, 0x0f, 0xd9, 0x98, 0xee, 0x57, 0x3c, 0x8c, + 0xc4, 0x8a, 0xa9, 0xef, 0xcf, 0x9b, 0x61, 0x7e, 0x04, 0x3c, 0x32, 0x9c, 0xd1, 0xaa, + 0x1a, 0x0e, 0xd3, 0xa4, 0x02, 0xfb, 0x96, 0xe3, 0x36, 0xc7, 0x19, 0xe6, 0x25, 0x3c, + 0xb6, 0x91, 0xaa, 0x0d, 0xb5, 0x27, 0x36, 0x62, 0x6e, 0xd1, 0x97, 0x88, 0x75, 0x88, + 0x8e, 0xc7, 0x6c, 0x84, 0x6b, 0xc2, 0x27, 0x27, 0x2a, 0x58, 0x53, 0x17, 0xdf, 0xf0, + 0xb1, 0x14, 0x8d, 0x92, 0xd6, 0xf5, 0xfb, 0x7d, 0x95, 0x33, 0x67, 0x70, 0xa7, 0xd1, + 0x6f, 0xac, 0x1a, 0xdd, 0x86, 0x07, 0x76, 0xcb, 0x48, 0x02, 0x21, 0xf8, 0xfb, 0x33, + 0xd7, 0xe4, 0xe9, 0xb0, 0x79, 0x02, 0xd2, 0xff, 0x86, 0xfd, 0xac, 0x72, 0x09, 0x62, + 0x34, 0xae, 0xd4, 0x8d, 0xe8, 0x92, 0xff, 0x73, 0x55, 0x07, 0x3b, 0xbf, 0x06, 0x15, + 0xf6, 0x7b, 0x11, 0x00, 0xcc, 0x2e, 0xa3, 0xba, 0x3d, 0x6c, 0x1a, 0x1a, 0x90, 0x87, + 0xb1, 0x19, 0xba, 0xee, 0xbf, 0xa6, 0x2b, 0xc9, 0xf0, 0xec, 0x47, 0x9d, 0x99, 0xc1, + 0xa3, 0xb1, 0x58, 0xb5, 0x14, 0xd1, 0x62, 0x9d, 0xb3, 0x99, 0x3f, 0x11, 0x67, 0x2a, + 0x26, 0x70, 0x8e, 0x5a, 0xd8, 0x16, 0xb5, 0x47, 0xab, 0x7e, 0x82, 0x7d, 0x07, 0x1b, + 0xa7, 0x84, 0x2b, 0x3e, 0x90, 0x30, 0x53, 0x83, 0x89, 0x6e, 0xc4, 0x90, 0x5f, 0x70, + 0xc7, 0x8b, 0x69, 0x4e, 0x6a, 0x5a, 0x3e, 0x43, 0x12, 0xcd, 0x82, 0x08, 0x13, 0x2b, + 0x84, 0x0f, 0x05, 0xc7, 0x14, 0x52, 0x3c, 0xa8, 0x19, 0x72, 0x0a, 0xe2, 0x27, 0xfd, + 
0x1a, 0xcb, 0xa7, 0x14, 0xfa, 0x4f, 0xc4, 0x5f, 0xc5, 0x39, 0x88, 0x57, 0xb4, 0x0d, + 0xc1, 0x48, 0x79, 0x85, 0x6f, 0x35, 0x4b, 0xa4, 0xd2, 0x58, 0x1d, 0x0c, 0xda, 0x54, + 0xb6, 0x38, 0xba, 0x9d, 0x76, 0xf9, 0xb5, 0x2d, 0x17, 0xc8, 0xf8, 0x8e, 0xe6, 0x3f, + 0x58, 0x45, 0xb5, 0xdc, 0xef, 0xa4, 0xc3, 0x47, 0x9b, 0xce, 0x9a, 0xca, 0xd1, 0x8b, + 0x4a, 0xea, 0xe0, 0x3c, 0x0e, 0xae, 0x22, 0x5d, 0x42, 0x84, 0x8b, 0xde, 0xaa, 0x53, + 0x6d, 0x7d, 0x8d, 0xd3, 0xbc, 0x97, 0x9f, 0x06, 0x58, 0x66, 0x73, 0xbc, 0x6f, 0xf1, + 0xc5, 0xd3, 0xb3, 0x20, 0xf3, 0x49, 0xa5, 0xb3, 0xa8, 0xb3, 0x55, 0x59, 0x22, 0x96, + 0xaa, 0xf6, 0x1c, 0x5b, 0x72, 0x52, 0xf7, 0x3e, 0xc0, 0xa9, 0x46, 0x6a, 0x1b, 0x85, + 0x76, 0x4f, 0xb0, 0x83, 0x1b, 0x4a, 0x1a, 0x36, 0x89, 0x0e, 0x22, 0x4c, 0x01, 0xac, + 0xfc, 0xe4, 0x8e, 0xe3, 0xed, 0x93, 0x87, 0x73, 0x98, 0xe0, 0x72, 0x6d, 0x02, 0x93, + 0x6d, 0x0d, 0x03, 0x2e, 0x18, 0xe3, 0x28, 0x8b, 0x26, 0x70, 0xe1, 0x36, 0x2c, 0x32, + 0xd6, 0xe4, 0x73, 0x3b, 0x9d, 0xd2, 0xd5, 0xf2, 0x6e, 0x1f, 0xe3, 0x06, 0xf7, 0x3c, + 0x00, 0x7f, 0xdd, 0xca, 0xe9, 0xd9, 0xc0, 0xaa, 0xf1, 0x87, 0xd7, 0x42, 0x8b, 0x1e, + 0x9d, 0x47, 0x9c, 0x18, 0x23, 0x7b, 0x98, 0x28, 0xbc, 0xa8, 0xb9, 0x8c, 0x9d, 0x9b, + 0xec, 0x7d, 0x82, 0x70, 0xb5, 0xd8, 0xee, 0xc3, 0xcc, 0x4f, 0x43, 0xfa, 0x01, 0x88, + 0x52, 0x1b, 0xc6, 0x1b, 0x21, 0xdd, 0x04, 0xe3, 0x7a, 0x83, 0xec, 0xe6, 0x8c, 0xa7, + 0xa2, 0xfa, 0x6c, 0x8f, 0x9e, 0x34, 0xa6, 0x29, 0x03, 0x35, 0xaa, 0x1f, 0xbd, 0x83, + 0xd5, 0x4a, 0xaf, 0x44, 0x1e, 0x31, 0x9e, 0xa4, 0x7a, 0x86, 0x2a, 0xd0, 0x29, 0x3c, + 0xed, 0xf5, 0xdd, 0x9e, 0xda, 0xde, 0xee, 0x33, 0xcb, 0x52, 0x2c, 0xd0, 0x11, 0x8b, + 0xbd, 0x81, 0x1a, 0xce, 0x9a, 0x23, 0xbd, 0xa3, 0x9a, 0xba, 0x72, 0xf1, 0x56, 0x6f, + 0xc1, 0x68, 0x84, 0x97, 0xd2, 0xa7, 0x92, 0x8c, 0x36, 0x70, 0x15, 0x25, 0x67, 0x8b, + 0xc9, 0x72, 0x14, 0xb3, 0x1b, 0x37, 0xba, 0xb4, 0x6b, 0x88, 0xf2, 0x7f, 0x04, 0x48, + 0xde, 0xcb, 0x31, 0x62, 0x2d, 0x0f, 0x0f, 0x87, 0xa8, 0x55, 0xba, 0x54, 0x00, 0x03, + 0x32, 0x03, 0x1f, 
0x73, 0xab, 0xff, 0xd4, 0x65, 0x91, 0xda, 0x0b, 0x88, 0x72, 0x35, + 0x04, 0xed, 0xb2, 0x33, 0x72, 0x30, 0xda, 0xd2, 0xac, 0xc0, 0xd8, 0xbb, 0x68, 0xbc, + 0x83, 0x7a, 0x2f, 0xf9, 0x30, 0xbf, 0xf0, 0x6f, 0xde, 0x74, 0xeb, 0x90, 0xaa, 0xe4, + 0xf6, 0x0d, 0xbb, 0x6e, 0xb8, 0x27, 0xea, 0x99, 0x88, 0x4a, 0xcd, 0x62, 0x85, 0xa9, + 0x88, 0x92, 0x80, 0x2c, 0xf5, 0x9d, 0x5d, 0x60, 0xd0, 0x16, 0x63, 0x38, 0x7b, 0x3e, + 0xd2, 0x72, 0x3b, 0xd6, 0x48, 0x9e, 0x9c, 0x2c, 0x10, 0x6d, 0x4a, 0xa2, 0xde, 0x23, + 0xce, 0xd1, 0x6c, 0x72, 0x04, 0x29, 0xc7, 0x75, 0x3a, 0x77, 0x38, 0xec, 0x7d, 0x9d, + 0xb8, 0x62, 0x42, 0x29, 0xed, 0xd2, 0x17, 0xb8, 0x0d, 0x74, 0x87, 0x5a, 0x14, 0xca, + 0xe4, 0x86, 0x3f, 0x13, 0x9e, 0x9c, 0x0b, 0x13, 0x1b, 0x2a, 0x4c, 0x28, 0x07, 0x1a, + 0x38, 0xec, 0x61, 0xf6, 0x68, 0x01, 0xaa + ], + }, + TestVector { + description: "Sapling transaction #2", + version: 4, + lock_time: 2549647152, + expiry_height: 466360441, + txid: [ + 0x5f, 0xf8, 0x4f, 0x11, 0x29, 0xd6, 0x5f, 0x85, 0xd6, 0xc3, 0x88, 0x98, 0x86, 0xa2, + 0x4f, 0x23, 0xfd, 0x35, 0x7e, 0xd8, 0xae, 0x4f, 0x7c, 0x66, 0xd4, 0xf3, 0x2b, 0xe0, + 0x93, 0x70, 0xe9, 0x89 + ], + is_coinbase: 0, + has_sapling: 1, + has_orchard: 0, + transparent_inputs: 2, + transparent_outputs: 2, + tx: vec![ + 0x04, 0x00, 0x00, 0x80, 0x85, 0x20, 0x2f, 0x89, 0x02, 0x56, 0xfc, 0xb2, 0xa4, 0x6b, + 0x95, 0x87, 0x66, 0x5b, 0x75, 0x71, 0xaa, 0x03, 0x48, 0x1f, 0xd8, 0xd9, 0xd5, 0x69, + 0x8f, 0x83, 0x6f, 0xc8, 0x63, 0x5e, 0x69, 0xe3, 0xbd, 0xe4, 0x2f, 0x4a, 0xc0, 0x71, + 0x32, 0x8b, 0x54, 0x09, 0x65, 0xac, 0x63, 0x51, 0x52, 0x63, 0x6a, 0x53, 0x51, 0xa2, + 0x35, 0x47, 0x23, 0xb3, 0xb8, 0x19, 0xd0, 0x63, 0x7a, 0x6f, 0xa4, 0x66, 0x39, 0x46, + 0xa3, 0x0a, 0xc5, 0xaf, 0xdd, 0x30, 0xce, 0x83, 0x0f, 0x67, 0x91, 0xb4, 0x57, 0x52, + 0x70, 0xa1, 0x72, 0x0f, 0x91, 0x86, 0x6e, 0x2b, 0x86, 0xf4, 0x78, 0x00, 0x94, 0xc8, + 0xda, 0x62, 0x02, 0x67, 0x86, 0x1c, 0xc7, 0x46, 0xfb, 0x04, 0x00, 0x08, 0x65, 0x63, + 0x63, 0x65, 0x6a, 0x53, 0x6a, 0x65, 0x89, 0x7a, 
0x2a, 0xcd, 0x5a, 0x52, 0x01, 0x00, + 0x06, 0x65, 0x00, 0x00, 0x00, 0x52, 0x6a, 0x30, 0x87, 0xf8, 0x97, 0x79, 0x18, 0xcc, + 0x1b, 0x9a, 0xea, 0xdb, 0x78, 0xcd, 0xf1, 0x01, 0x00, 0x01, 0xae, 0x97, 0xb0, 0x82, + 0x60, 0xe6, 0x40, 0x3a, 0xd9, 0x4e, 0x9f, 0xb2, 0x81, 0x83, 0x32, 0x0f, 0x80, 0xd4, + 0xeb, 0x5a, 0x1b, 0xd2, 0x98, 0xf2, 0x01, 0x79, 0xcb, 0xa3, 0x33, 0x13, 0xfd, 0x2f, + 0xf0, 0xfc, 0xfc, 0x18, 0x1f, 0x31, 0x80, 0x1a, 0x79, 0x92, 0xd2, 0xf1, 0x6b, 0xe0, + 0x21, 0x1b, 0x4a, 0x22, 0xf6, 0x2a, 0xab, 0x64, 0x70, 0x1b, 0xf4, 0xa4, 0xe6, 0xd6, + 0x66, 0xfc, 0x30, 0x4a, 0x5c, 0x79, 0xc6, 0x09, 0xac, 0xc4, 0x3b, 0x00, 0xb4, 0x86, + 0x48, 0x93, 0xd3, 0x7d, 0x50, 0x07, 0xf0, 0xc3, 0x29, 0xa4, 0x75, 0x50, 0x52, 0x57, + 0x75, 0x70, 0xdd, 0x38, 0xfa, 0xc0, 0x43, 0xcd, 0x91, 0xc1, 0x2e, 0xe3, 0x4e, 0x9c, + 0xfa, 0xe3, 0x92, 0xa7, 0x8b, 0xda, 0xbd, 0x4e, 0xe3, 0x1d, 0xc0, 0xde, 0xb0, 0x2f, + 0xe7, 0xb1, 0xd8, 0xb0, 0x17, 0x8a, 0xc9, 0x51, 0x31, 0x05, 0xfc, 0xc7, 0xe3, 0x0b, + 0xa8, 0xe0, 0x16, 0xaa, 0x36, 0xa6, 0xb5, 0xdf, 0x5e, 0x5a, 0x19, 0x09, 0xf6, 0x3a, + 0xba, 0x09, 0x5d, 0x98, 0x77, 0xa8, 0xf2, 0xdc, 0x53, 0xf4, 0x6f, 0x6c, 0x9b, 0x07, + 0xad, 0xdf, 0x14, 0x6f, 0x4f, 0xfa, 0x50, 0x1f, 0x9d, 0xd3, 0xcf, 0xf9, 0x24, 0xe3, + 0x01, 0x0f, 0xaf, 0x50, 0x4e, 0x2b, 0x8a, 0xca, 0x73, 0x57, 0xac, 0xbf, 0xfe, 0xc7, + 0x3a, 0xc3, 0x4c, 0x1a, 0x73, 0x16, 0x0f, 0x2c, 0xea, 0x1e, 0x05, 0x10, 0xf8, 0x4d, + 0x2f, 0xe2, 0xf7, 0x3b, 0x6e, 0x92, 0x19, 0x07, 0xa1, 0xb7, 0xb3, 0x75, 0x12, 0x13, + 0x24, 0x1b, 0x2c, 0xfa, 0xa5, 0x5a, 0x5e, 0xa4, 0xdd, 0x51, 0x7e, 0x7b, 0x49, 0xd2, + 0xde, 0x8c, 0x09, 0x08, 0x43, 0x73, 0x0d, 0x24, 0x08, 0xa2, 0xa3, 0x04, 0xaa, 0x1e, + 0x2e, 0x13, 0x70, 0xa6, 0xbf, 0x6c, 0x2b, 0xc7, 0x3f, 0xf0, 0x0d, 0x89, 0x3b, 0xc1, + 0x28, 0x5e, 0xfc, 0xa8, 0x25, 0x99, 0xd1, 0x81, 0xf1, 0x23, 0x51, 0xf9, 0x39, 0xa9, + 0x4e, 0xa8, 0xb9, 0x75, 0xc0, 0x65, 0xa9, 0x1f, 0xf2, 0x57, 0xca, 0xc7, 0xa9, 0x23, + 0x85, 0xfc, 0x8f, 0xa9, 0x21, 0xb1, 0x06, 0xba, 0x86, 0x60, 0xc6, 
0x0a, 0xc8, 0xba, + 0x5e, 0xce, 0x45, 0x60, 0x6f, 0x04, 0xf3, 0x6a, 0x3a, 0x90, 0xbb, 0x38, 0x38, 0xc4, + 0x2a, 0xbf, 0x62, 0xdd, 0x2d, 0x84, 0xba, 0xbe, 0xf3, 0xe1, 0x88, 0xe9, 0x17, 0x1a, + 0xff, 0x9b, 0xc1, 0x16, 0x66, 0x90, 0x09, 0xd8, 0x87, 0x13, 0x0a, 0xc9, 0xf7, 0x39, + 0x6a, 0x62, 0x7a, 0x84, 0x74, 0xc1, 0x81, 0x1b, 0x69, 0x6f, 0x99, 0x55, 0x2b, 0x14, + 0xc4, 0x84, 0xdf, 0xe4, 0x2c, 0x24, 0xd5, 0x7c, 0x3a, 0x9c, 0x3f, 0xea, 0x13, 0x76, + 0xcd, 0xcb, 0x63, 0x42, 0x1c, 0x31, 0x4a, 0x62, 0x2a, 0x9a, 0xef, 0x0b, 0xc0, 0x57, + 0xcb, 0x11, 0x02, 0x64, 0x42, 0xb7, 0xeb, 0x8a, 0x0a, 0x7c, 0xa9, 0xfc, 0x01, 0x98, + 0x86, 0x7d, 0x7b, 0xf0, 0x35, 0xe7, 0xff, 0x2f, 0xba, 0x6d, 0x7a, 0x91, 0x3a, 0x03, + 0xef, 0x46, 0xf4, 0x95, 0x9d, 0x29, 0x0b, 0xb1, 0x51, 0x3a, 0xb8, 0x1d, 0x3f, 0x0a, + 0x3c, 0x7f, 0x7f, 0xcf, 0x2f, 0xbb, 0x4e, 0x26, 0x32, 0x19, 0x93, 0xa5, 0x13, 0xad, + 0x3d, 0x7f, 0x4a, 0xfe, 0x6c, 0x1b, 0xbd, 0xc6, 0x57, 0x58, 0x50, 0x4b, 0x31, 0xe0, + 0xda, 0x32, 0xfe, 0x32, 0x4c, 0x6c, 0x0f, 0x12, 0x4f, 0xa3, 0xc6, 0xc5, 0x03, 0x90, + 0xa6, 0x9c, 0xd6, 0xbb, 0x16, 0xb7, 0xf8, 0x15, 0x67, 0xfd, 0x4c, 0x1c, 0xe7, 0xbf, + 0x2c, 0x7c, 0x24, 0x2c, 0xfd, 0x36, 0xce, 0x68, 0x5a, 0x4b, 0x65, 0x69, 0x86, 0xc3, + 0x9f, 0xd7, 0xfc, 0xb2, 0x3c, 0x91, 0x91, 0x3e, 0x46, 0x11, 0x19, 0x1e, 0xdc, 0xc8, + 0x8b, 0x78, 0xf1, 0x45, 0xea, 0x29, 0xd2, 0x71, 0xb9, 0x40, 0xc6, 0x99, 0x41, 0xe4, + 0xc3, 0xfd, 0x2d, 0x71, 0xf3, 0xb1, 0x90, 0x69, 0x0e, 0xe1, 0x6f, 0x5d, 0x14, 0xac, + 0x22, 0x24, 0xe6, 0xfc, 0x89, 0x59, 0x76, 0x54, 0x52, 0x7d, 0xab, 0xe7, 0x2e, 0x75, + 0xd2, 0xd2, 0xa1, 0x3a, 0x9f, 0xba, 0xa6, 0x37, 0x8e, 0x8a, 0x26, 0x43, 0x21, 0x08, + 0x7a, 0x19, 0x00, 0xef, 0xe3, 0xca, 0xd1, 0x4a, 0x57, 0x96, 0x86, 0xaa, 0x36, 0x36, + 0xbd, 0x37, 0x5b, 0xd3, 0x13, 0x6b, 0xee, 0x0b, 0xda, 0xab, 0xcf, 0xac, 0x88, 0x1b, + 0xc7, 0x01, 0x81, 0x27, 0x21, 0xe6, 0xfb, 0x75, 0xaa, 0x07, 0x2d, 0x2d, 0x18, 0x7e, + 0x62, 0x25, 0x8d, 0x65, 0xa1, 0x92, 0x15, 0x7c, 0xdf, 0x2e, 0xc3, 0x21, 0x40, 0x7f, + 
0x68, 0x2f, 0x5e, 0xec, 0x6a, 0x32, 0x97, 0xab, 0x20, 0xb7, 0x06, 0x1c, 0x62, 0x24, + 0x57, 0x16, 0xa4, 0x4f, 0x71, 0xfb, 0xfc, 0x34, 0xc7, 0x9b, 0x44, 0xe0, 0x9e, 0x42, + 0x12, 0xac, 0x26, 0x53, 0xf6, 0xc4, 0x03, 0x64, 0x3e, 0x1c, 0x5b, 0x9a, 0xd1, 0x34, + 0xd8, 0x9c, 0x68, 0x0b, 0x70, 0x72, 0x83, 0xaf, 0x54, 0x32, 0x6f, 0xc4, 0xf8, 0x4d, + 0x6a, 0x58, 0x29, 0xa0, 0xad, 0x48, 0x30, 0x80, 0x6c, 0x05, 0x75, 0x84, 0x92, 0xcd, + 0x6a, 0xc4, 0x6b, 0xa0, 0x1a, 0x2b, 0x37, 0x22, 0xb5, 0xe4, 0xcd, 0xaf, 0xbb, 0x3f, + 0x36, 0x78, 0x5f, 0x42, 0x4a, 0xf0, 0x44, 0xda, 0xc5, 0xdb, 0x5f, 0x7d, 0xf8, 0x39, + 0xeb, 0x63, 0xc0, 0xc1, 0x7d, 0x8b, 0x0c, 0x79, 0xdb, 0x86, 0x30, 0x94, 0x20, 0x15, + 0xbe, 0x13, 0xf7, 0x9a, 0xf6, 0xf4, 0x3e, 0x5a, 0xb0, 0x77, 0x81, 0x14, 0x79, 0x8f, + 0x44, 0x22, 0x58, 0xee, 0xdc, 0x43, 0x6f, 0xcc, 0x38, 0x6b, 0x36, 0xb5, 0x7e, 0x19, + 0x17, 0xd7, 0x20, 0x17, 0x73, 0x66, 0xf4, 0x24, 0xb0, 0xa5, 0x4b, 0x0b, 0x60, 0xf4, + 0xfb, 0x13, 0x58, 0xc2, 0x0a, 0xa4, 0x1d, 0xc5, 0x02, 0xe1, 0xdd, 0x8a, 0x16, 0x33, + 0xf3, 0xd8, 0xe3, 0x27, 0x6b, 0x59, 0xe7, 0xd2, 0xc4, 0xe6, 0x24, 0xa6, 0xf5, 0x36, + 0x95, 0xbc, 0xaf, 0x24, 0x7e, 0x36, 0x48, 0x3f, 0x13, 0xb2, 0x04, 0x42, 0x22, 0x37, + 0xfc, 0x6a, 0xb3, 0xeb, 0xa0, 0x2f, 0xc4, 0x14, 0x2b, 0x42, 0x97, 0xeb, 0xb5, 0x68, + 0x3d, 0xb8, 0xd2, 0x43, 0x19, 0x70, 0x6a, 0xd2, 0x6a, 0xaf, 0xd8, 0x1c, 0x53, 0xb7, + 0x40, 0xf3, 0x45, 0x43, 0xa6, 0xb3, 0xe9, 0xf5, 0xbb, 0x7d, 0x5c, 0x49, 0xe8, 0xc3, + 0x7f, 0x61, 0x49, 0x21, 0x25, 0x4f, 0x32, 0x12, 0x39, 0x4c, 0x79, 0x7d, 0x1c, 0xee, + 0x78, 0x99, 0xb7, 0xb4, 0xb6, 0x5b, 0x59, 0xb7, 0x34, 0x2f, 0x92, 0x53, 0x1c, 0x1d, + 0x59, 0xe1, 0x79, 0x70, 0xb7, 0x31, 0x74, 0x14, 0x43, 0x8c, 0xd8, 0x0b, 0xd0, 0xf9, + 0xa6, 0x7c, 0x9b, 0x9e, 0x55, 0x2f, 0x01, 0x3c, 0x11, 0x5a, 0x95, 0x4f, 0x35, 0xe0, + 0x61, 0x6c, 0x68, 0xd4, 0x31, 0x63, 0xd3, 0x34, 0xda, 0xc3, 0x82, 0x70, 0x33, 0xe5, + 0xad, 0x84, 0x88, 0xbf, 0xd9, 0xc4, 0xbb, 0xbe, 0x8f, 0x59, 0x35, 0xc6, 0xc5, 0xea, + 0x04, 0xc3, 0xad, 
0x49, 0xc7, 0x47, 0xa9, 0xe7, 0x23, 0x1b, 0xcd, 0x7d, 0x16, 0x21, + 0x5e, 0x6e, 0x80, 0x73, 0x7d, 0x6b, 0x54, 0xfe, 0xc8, 0xb8, 0x84, 0x02, 0xf0, 0x47, + 0x52, 0x45, 0xe1, 0x74, 0xa7, 0x45, 0xb8, 0x31, 0xf8, 0xfe, 0x03, 0xa7, 0x6f, 0xb9, + 0xce, 0xca, 0x4d, 0x22, 0xb7, 0x83, 0xc3, 0x28, 0xc6, 0x91, 0x5c, 0x43, 0x40, 0x50, + 0x64, 0xae, 0x56, 0xbc, 0x89, 0xe6, 0x4d, 0x15, 0x78, 0xe4, 0xd3, 0xa3, 0x4b, 0xb9, + 0x55, 0x91, 0xea, 0xf1, 0xd3, 0xda, 0x02, 0xa4, 0x54, 0x9f, 0xa8, 0x0d, 0xb0, 0xff, + 0x7c, 0xb0, 0x39, 0x93, 0xb6, 0x8a, 0xe1, 0x5a, 0x30, 0xe8, 0x79, 0x49, 0xaa, 0x08, + 0x0e, 0x94, 0xab, 0xde, 0x68, 0x89, 0x8c, 0x33, 0x92, 0xa2, 0x17, 0xd6, 0x49, 0x61, + 0x6b, 0xbe, 0x73, 0x9b, 0x13, 0xd1, 0x4d, 0xf0, 0x3f, 0xf2, 0x76, 0x71, 0x48, 0x9b, + 0xe0, 0xb4, 0xbe, 0xba, 0xaf, 0xa7, 0xd1, 0xe6, 0x39, 0xd5, 0xb3, 0xe9, 0x94, 0xff, + 0xb6, 0xb7, 0xa2, 0x09, 0xf6, 0xad, 0xfe, 0x8d, 0x1e, 0x5c, 0xcf, 0x01, 0x0c, 0x19, + 0x16, 0x8a, 0xeb, 0x18, 0xaa, 0x9d, 0x68, 0x7e, 0x24, 0xad, 0xc0, 0xb1, 0x13, 0x5c, + 0x70, 0xc9, 0x70, 0xe0, 0x90, 0x3a, 0xf6, 0xe1, 0x70, 0x81, 0xd5, 0x81, 0x8e, 0x88, + 0xb1, 0x4e, 0x4f, 0x60, 0x1b, 0x8c, 0x06, 0x3e, 0x3f, 0x43, 0x87, 0xff, 0xa2, 0x32, + 0x2a, 0x51, 0x81, 0x90, 0x9f, 0x09, 0x80, 0xd6, 0x89, 0xde, 0x7f, 0x8e, 0x6a, 0x5c, + 0x62, 0xa7, 0x77, 0xd1, 0x75, 0x00, 0x2a, 0x13, 0x7d, 0xe8, 0x5b, 0x88, 0x88, 0x92, + 0x91, 0x98, 0x11, 0x7a, 0xa5, 0xd6, 0x19, 0x93, 0xe1, 0xdc, 0xf7, 0x58, 0x76, 0xdc, + 0xa6, 0x09, 0xf9, 0xd2, 0x84, 0x71, 0xf9, 0x97, 0xfa, 0x11, 0xf9, 0x9d, 0x42, 0x3f, + 0x9c, 0xf1, 0x73, 0x4b, 0xe8, 0xa5, 0xff, 0x99, 0x7d, 0x45, 0x1e, 0xb3, 0xcf, 0x4b, + 0x3d, 0xfd, 0xd9, 0xd4, 0x54, 0x5c, 0x35, 0xb2, 0xb5, 0xa7, 0xdc, 0x17, 0xa8, 0x36, + 0xb1, 0x2b, 0x43, 0xbe, 0xfc, 0x0b, 0xe0, 0xa1, 0xbd, 0x36, 0x97, 0x72, 0x33, 0x80, + 0x78, 0xb4, 0xff, 0x7d, 0x8e, 0x2d, 0x97, 0x9a, 0x34, 0x41, 0xe1, 0xc8, 0xf5, 0xaf, + 0xe4, 0x7b, 0x1e, 0x7d, 0xa5, 0x6c, 0xf0, 0x06, 0x02, 0xd0, 0x1b, 0x11, 0x0c, 0x05, + 0xcf, 0x48, 0xfd, 0xa3, 0xe6, 0xcc, 
0xe3, 0x2a, 0x04, 0x40, 0x00, 0xf4, 0x5c, 0x6d, + 0x1e, 0x69, 0x6d, 0x24, 0x5c, 0xbd, 0x31, 0x2b, 0xdc, 0x3a, 0x3a, 0x21, 0xc9, 0x92, + 0xd0, 0xeb, 0xc8, 0xcc, 0x8f, 0xa6, 0x30, 0x6d, 0x7e, 0x13, 0x0a, 0x2b, 0xa4, 0x20, + 0x18, 0xfe, 0x59, 0x69, 0x49, 0xfd, 0x82, 0x26, 0x7b, 0xcc, 0x59, 0xdd, 0x46, 0x26, + 0xef, 0xc3, 0xea, 0x74, 0x38, 0xd0, 0x5c, 0x91, 0xb0, 0xf8, 0xe0, 0x92, 0x55, 0x4c, + 0x46, 0xea, 0x5c, 0x3a, 0x27, 0x0f, 0xaa, 0x67, 0xf7, 0xe3, 0x73, 0x3a, 0x5e, 0x1b, + 0x47, 0x1c, 0xe5, 0xf5, 0x38, 0xcd, 0xcd, 0xdd, 0xb5, 0x7f, 0x7e, 0xda, 0x08, 0x1f, + 0x2a, 0xdc, 0x28, 0x3f, 0x2a, 0xa6, 0xac, 0x35, 0xfc, 0x19, 0xfd, 0xc1, 0x12, 0x30, + 0x77, 0x84, 0x51, 0xfa, 0x27, 0xff, 0x86, 0x2e, 0x8f, 0xb8, 0x59, 0x83, 0x8a, 0xfe, + 0xa9, 0xd8, 0xdf, 0x84, 0x9b, 0x06, 0x11, 0x5b, 0x9c, 0x39, 0xad, 0xff, 0x0b, 0x25, + 0x19, 0xf4, 0x63, 0xd7, 0xc4, 0xa6, 0x33, 0xaf, 0x1e, 0xbc, 0x13, 0xd6, 0x44, 0xf7, + 0x00, 0x55, 0x8b, 0xa1, 0x09, 0x2e, 0xa2, 0x44, 0xd8, 0xd8, 0x6a, 0xa2, 0x5f, 0x7b, + 0x37, 0xd7, 0x66, 0x4f, 0x9b, 0x97, 0x09, 0x43, 0x3e, 0x6e, 0x70, 0x21, 0x18, 0xa4, + 0xab, 0x9e, 0x7a, 0x7a, 0x3e, 0x62, 0x59, 0x12, 0x99, 0x37, 0xd2, 0x9d, 0x0d, 0xb2, + 0x60, 0x70, 0x52, 0x3e, 0x8b, 0x06, 0x43, 0x13, 0x0a, 0xbe, 0xfe, 0x94, 0x3b, 0x40, + 0x12, 0x98, 0xae, 0x01, 0xa3, 0xab, 0x00, 0xab, 0xbc, 0x60, 0xd7, 0xdb, 0x93, 0x3c, + 0x7f, 0x07, 0xa8, 0xbf, 0x0f, 0x7c, 0xe1, 0x66, 0x0b, 0xcc, 0xb4, 0x5e, 0x04, 0x2b, + 0x45, 0x1b, 0x93, 0x50, 0x02, 0xce, 0xce, 0x27, 0xf3, 0x6a, 0xba, 0x56, 0x47, 0xac, + 0x28, 0xd8, 0x18, 0x6c, 0xdd, 0x1f, 0xb9, 0x5d, 0xc1, 0x35, 0xd4, 0x89, 0x92, 0xf6, + 0x8d, 0xa1, 0x2a, 0xd6, 0x1a, 0xc7, 0x56, 0x68, 0x0d, 0xd7, 0xf8, 0xd0, 0x77, 0x4a, + 0xbd, 0x6c, 0xfd, 0xa2, 0xf0, 0x32, 0xaf, 0x3b, 0xe1, 0x39, 0xa6, 0x33, 0xd6, 0x73, + 0x3c, 0x75, 0xd1, 0xab, 0xa8, 0x90, 0x18, 0xc8, 0x57, 0x2b, 0x99, 0xcd, 0x30, 0xc5, + 0x37, 0x06, 0x79, 0x41, 0xdf, 0x1c, 0x4b, 0xc1, 0xfd, 0x57, 0x0f, 0x7b, 0x4d, 0xdc, + 0x97, 0x51, 0x86, 0x23, 0xe3, 0xae, 0x4a, 0x87, 0xbd, 
0xb9, 0x66, 0xc9, 0x4d, 0x86, + 0x1e, 0x80, 0xde, 0x88, 0xc2, 0x92, 0xae, 0xe9, 0x38, 0x71, 0x94, 0xe2, 0x56, 0xc6, + 0x70, 0x07, 0x52, 0x30, 0x1c, 0x73, 0xfc, 0x95, 0x65, 0xa4, 0x04, 0x80, 0xd8, 0x12, + 0x6e, 0x9d, 0x08, 0x58, 0x79, 0xe2, 0x4b, 0x16, 0xe9, 0xc4, 0x85, 0xd8, 0xf0, 0xd6, + 0x18, 0xca, 0x0d, 0xd1, 0x21, 0xb5, 0x1a, 0x7c, 0xab, 0x23, 0x0c, 0x5b, 0x45, 0x67, + 0x2b, 0xdb, 0x8e, 0xa3, 0xa0, 0x40, 0xf7, 0xaa, 0xa0, 0x98, 0xba, 0x26, 0x02, 0x5d, + 0x2e, 0xab, 0x79, 0x48, 0x69, 0x3d, 0xd5, 0xf6, 0xd3, 0x09, 0x65, 0x01, 0xe9, 0xe0, + 0x71, 0x25, 0xd7, 0xeb, 0x29, 0x3b, 0x3a, 0xba, 0xd5, 0x7f, 0xd5, 0xf0, 0x11, 0x64, + 0x70, 0x2d, 0xae, 0x64, 0xbd, 0xba, 0x8c, 0x92, 0x4f, 0xb0, 0x79, 0x96, 0x79, 0xd7, + 0x7f, 0x98, 0xd3, 0x03, 0x91, 0x9f, 0xb4, 0xa7, 0xff, 0x26, 0xa9, 0x6f, 0x13, 0x7a, + 0x5e, 0x5c, 0xb9, 0x5b, 0xc4, 0xc6, 0xff, 0x99, 0x93, 0x52, 0x6b, 0xda, 0x15, 0x03, + 0x16, 0x8a, 0xb4, 0x8c, 0xbd, 0x45, 0x15, 0x39, 0x27, 0xd3, 0x04, 0x30, 0x42, 0x3d, + 0xbd, 0xf0, 0x66, 0x05, 0xf5, 0xb5, 0x4b, 0x80, 0x8f, 0xeb, 0x22, 0xb2, 0x08, 0xb0, + 0x64, 0x58, 0x18, 0x47, 0xb2, 0xf6, 0x4c, 0xa6, 0x48, 0x37, 0x00, 0x72, 0x16, 0xde, + 0x6e, 0xca, 0xff, 0xeb, 0x4b, 0x69, 0xe6, 0x33, 0x47, 0xf8, 0x4a, 0xbc, 0xad, 0x8f, + 0x2e, 0x75, 0x7d, 0x58, 0x61, 0xce, 0x77, 0xee, 0x46, 0x51, 0x3d, 0xa7, 0x41, 0x68, + 0x37, 0xdc, 0xb2, 0x3d, 0x33, 0xea, 0x72, 0xaf, 0x23, 0xd0, 0xad, 0x8c, 0x93, 0x07, + 0xd0, 0xb5, 0x85, 0x8d, 0xa9, 0x5b, 0x77, 0xff, 0xf9, 0x02, 0x7b, 0x88, 0x59, 0xe1, + 0x1d, 0xcb, 0xd5, 0x98, 0x35, 0x0e, 0xee, 0x50, 0x93, 0x94, 0x81, 0x70, 0x8e, 0xa7, + 0x08, 0xeb, 0x9f, 0x66, 0x43, 0x88, 0xb9, 0xc6, 0x4d, 0x6a, 0xf0, 0xf9, 0x66, 0x90, + 0x34, 0x24, 0x00, 0x34, 0x8e, 0x92, 0x9e, 0x07, 0x46, 0x02, 0x53, 0xf3, 0x83, 0x90, + 0xf8, 0x7b, 0xd6, 0xc0, 0x53, 0x08, 0xc3, 0xbd, 0xe2, 0x52, 0x28, 0xe0, 0xfa, 0x08, + 0x80, 0xb0, 0x8e, 0xf3, 0x4a, 0x5a, 0x9c, 0xc0, 0xea, 0x0a, 0x67, 0xca, 0x65, 0xb6, + 0xff, 0xd0, 0x05, 0x57, 0x29, 0x09, 0xf1, 0xc4, 0x2d, 0xd7, 0x45, 0xee, 
0xee, 0x9d, + 0xd6, 0xb4, 0x43, 0x9c, 0x9f, 0x3f, 0x98, 0xa1, 0x18, 0xfe, 0x16, 0x69, 0x8e, 0x9c, + 0xef, 0xf5, 0x58, 0xf1, 0x60, 0x66, 0x97, 0x5f, 0xe3, 0x95, 0x83, 0xe9, 0xb5, 0x85, + 0x3b, 0x13, 0x11, 0x39, 0x15, 0x80, 0x01, 0x9f, 0xe5, 0x5d, 0x59, 0xd1, 0xc8, 0x28, + 0xd3, 0xfe, 0xb6, 0xa3, 0xb9, 0xce, 0x92, 0xd0, 0x89, 0xae, 0x4b, 0x40, 0x8e, 0x23, + 0xd6, 0xa4, 0x37, 0xd4, 0x98, 0x9b, 0x51, 0x9b, 0x7a, 0x9e, 0xb0, 0x8a, 0xe6, 0xd4, + 0x48, 0xa7, 0xa1, 0x6e, 0x8a, 0xed, 0x26, 0xa2, 0xec, 0xd0, 0xca, 0xd8, 0x08, 0x44, + 0xfd, 0x06, 0x50, 0xd8, 0xc4, 0xe4, 0xd2, 0xaf, 0x90, 0x65, 0x67, 0x48, 0xd8, 0x09, + 0x9a, 0x0c, 0x75, 0x6f, 0xc1, 0x6c, 0xca, 0x06, 0xa3, 0x34, 0x43, 0x07, 0x02, 0xae, + 0x19, 0x61, 0x66, 0x5b, 0x48, 0x45, 0xac, 0xd1, 0xa8, 0xe3, 0x41, 0x01, 0xe6, 0x8b, + 0xb6, 0x44, 0xac, 0x03, 0x4d, 0xc6, 0x3e, 0x6e, 0x34, 0x4c, 0x3d, 0x63, 0x76, 0x2a, + 0x7a, 0x5b, 0xf5, 0x9f, 0x13, 0x09, 0x54, 0x10, 0x98, 0x1d, 0x6b, 0x6b, 0x16, 0xbc, + 0xd4, 0xc9, 0xfa, 0x68, 0xaf, 0x6e, 0x53, 0x65, 0xe9, 0x4e, 0xcb, 0xe7, 0xab, 0x8b, + 0x80, 0x43, 0xdf, 0xba, 0xcb, 0x23, 0xc8, 0x4d, 0x71, 0xa8, 0xfe, 0x5d, 0x9a, 0xc5, + 0x50, 0x2c, 0xe9, 0xf7, 0x3f, 0x40, 0x8e, 0x14, 0x37, 0x6d, 0xb8, 0x6e, 0xf5, 0x7c, + 0xc3, 0x7d, 0x09, 0x89, 0x6f, 0xa9, 0x06, 0x97, 0x2e, 0x55, 0x71, 0x80, 0xa4, 0xab, + 0x5a, 0xd0, 0x9d, 0x88, 0x46, 0xdd, 0x6d, 0xa7, 0x48, 0x76, 0x54, 0x36, 0xe0, 0x16, + 0x02, 0x40, 0xf8, 0xd4, 0x1c, 0x0a, 0xc7, 0x83, 0xf9, 0x39, 0xf2, 0xd0, 0xed, 0x26, + 0x2c, 0xe8, 0x59, 0xc1, 0x31, 0xeb, 0xc9, 0x3f, 0xf2, 0xe6, 0xe4, 0x07, 0xd4, 0xe2, + 0x43, 0xe1, 0xe9, 0x31, 0xd5, 0x3a, 0x45, 0x43, 0xb6, 0xe2, 0x6d, 0x82, 0x59, 0x6f, + 0xc5, 0x3b, 0x52, 0x31, 0x2c, 0x77, 0x6d, 0x12, 0xeb, 0x2b, 0x65, 0x9b, 0x4f, 0xb0, + 0x98, 0xdf, 0x87, 0xd6, 0x83, 0xcf, 0x9e, 0x54, 0x12, 0xee, 0x56, 0xc3, 0xfe, 0x98, + 0x41, 0xd7, 0x3f, 0xd0, 0x70, 0xdf, 0xa5, 0x1f, 0x5b, 0xaf, 0xed, 0xf2, 0x06, 0xf1, + 0x3c, 0x52, 0x4e, 0x5c, 0x50, 0xca, 0xc9, 0x90, 0x6e, 0xfa, 0x39, 0x32, 0x90, 0x04, + 0x2e, 
0x3b, 0xc5, 0x9f, 0x96, 0x0b, 0x7d, 0x24, 0x0a, 0xe4, 0x43, 0xfc, 0x49, 0x26, + 0x9c, 0xe0, 0x00, 0x61, 0xe6, 0x5c, 0x6d, 0x74, 0x81, 0x2a, 0x30, 0xdd, 0x5f, 0x5f, + 0xe7, 0x4e, 0xff, 0x61, 0xe0, 0xcb, 0xab, 0x3c, 0xec, 0x00, 0xd0, 0xae, 0xf9, 0x50, + 0x83, 0x18, 0x94, 0x52, 0xdd, 0x3d, 0x9e, 0xdf, 0x44, 0x87, 0xbc, 0x73, 0x4c, 0x8b, + 0x24, 0xf2, 0x12, 0x96, 0xe4, 0xe9, 0xef, 0x11, 0x7d, 0x7f, 0xb9, 0x77, 0xe3, 0xb0, + 0xe6, 0x40, 0x6e, 0x63, 0x08, 0x59, 0x06, 0x33, 0x1a, 0x93, 0x03, 0x3d, 0x1c, 0xb8, + 0x36, 0x0f, 0xe6, 0xfe, 0xa6, 0x1a, 0x68, 0x26, 0xdf, 0x36, 0x25, 0x57, 0x89, 0xf9, + 0x2e, 0x40, 0xba, 0xfc + ], + }, + TestVector { + description: "Sapling transaction #3", + version: 4, + lock_time: 1867882187, + expiry_height: 204228972, + txid: [ + 0x51, 0x73, 0x60, 0xc8, 0x1b, 0xaa, 0xf5, 0xab, 0x3c, 0xea, 0xe7, 0xf7, 0x36, 0xb7, + 0x29, 0x17, 0xce, 0x68, 0xfe, 0xe8, 0x4a, 0xb1, 0x8e, 0x67, 0x8e, 0x78, 0x96, 0xc3, + 0xff, 0x57, 0x67, 0x76 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 0, + transparent_outputs: 0, + tx: vec![ + 0x04, 0x00, 0x00, 0x80, 0x85, 0x20, 0x2f, 0x89, 0x00, 0x00, 0xcb, 0x9e, 0x55, 0x6f, + 0x6c, 0x49, 0x2c, 0x0c, 0xf1, 0xaa, 0xe1, 0x7f, 0x8f, 0xda, 0x00, 0x00, 0x02, 0x5f, + 0xec, 0x66, 0x74, 0x6f, 0xeb, 0xe9, 0x62, 0x0b, 0xa1, 0x34, 0x94, 0xba, 0xeb, 0x65, + 0x12, 0xa8, 0x58, 0x64, 0x94, 0x13, 0x73, 0x49, 0x81, 0xf0, 0xe8, 0x6a, 0x78, 0x7b, + 0x75, 0x4d, 0x44, 0x8a, 0x0c, 0x95, 0x65, 0xc4, 0xc2, 0xdd, 0x63, 0x13, 0x02, 0x11, + 0xa5, 0x2d, 0xe2, 0x5c, 0x1d, 0x08, 0x20, 0x43, 0x78, 0xea, 0x99, 0xef, 0xec, 0xaf, + 0xfb, 0xe0, 0xe0, 0x99, 0x57, 0x74, 0x16, 0xff, 0x50, 0xa3, 0xb5, 0x62, 0x13, 0x88, + 0x4d, 0x62, 0x62, 0xc1, 0x1d, 0xeb, 0xf2, 0xba, 0x7e, 0x8a, 0xd6, 0x69, 0x2c, 0xb1, + 0x70, 0x78, 0x33, 0x14, 0x18, 0xda, 0x4b, 0xe0, 0x64, 0xff, 0x52, 0x70, 0x07, 0x39, + 0x34, 0xab, 0xcd, 0x2a, 0xb0, 0x46, 0x9e, 0xca, 0xf7, 0x27, 0x5b, 0x4b, 0xd7, 0x2b, + 0xc6, 0xed, 0x34, 0x47, 0x8e, 0xa4, 0x08, 0x9b, 0x73, 
0x6a, 0x16, 0xdd, 0x90, 0x6d, + 0x49, 0xf2, 0x5c, 0x33, 0x82, 0x7c, 0x57, 0x1c, 0xe0, 0xb5, 0xd7, 0x21, 0x77, 0xaa, + 0x35, 0x08, 0x80, 0x4b, 0xc0, 0xf8, 0xfa, 0xa9, 0x47, 0x12, 0x22, 0x31, 0x40, 0x2d, + 0x2f, 0x5c, 0xc9, 0xa0, 0xeb, 0x0e, 0x09, 0xd4, 0x27, 0xb4, 0x27, 0x28, 0x8d, 0x93, + 0x7d, 0x9d, 0x72, 0xb7, 0x74, 0x56, 0xf8, 0x86, 0x59, 0x4c, 0xd8, 0xc6, 0xa4, 0x62, + 0xf7, 0x7f, 0xd8, 0x30, 0x76, 0x46, 0x9c, 0xc0, 0xec, 0xba, 0x3c, 0xc4, 0x0c, 0xad, + 0x69, 0xe5, 0xb5, 0x41, 0x12, 0xea, 0xb3, 0x33, 0x96, 0xae, 0xcf, 0xbc, 0x21, 0x1f, + 0x1f, 0x79, 0xcf, 0x33, 0x10, 0x8e, 0x93, 0xd9, 0x53, 0x78, 0xba, 0xe6, 0x95, 0x82, + 0x74, 0xb3, 0x10, 0x88, 0xfb, 0xd8, 0xb3, 0xa3, 0xa0, 0xd1, 0x54, 0xa7, 0x89, 0x73, + 0x5b, 0x03, 0x49, 0xc4, 0xd5, 0x1c, 0x88, 0x9d, 0x08, 0x95, 0x2d, 0xdd, 0x54, 0x88, + 0xbe, 0x95, 0x56, 0x05, 0x94, 0xe6, 0x73, 0xfa, 0x05, 0x1b, 0xf9, 0xb6, 0x14, 0xa1, + 0x5e, 0x10, 0x0b, 0x60, 0xa0, 0xfe, 0x9a, 0x7e, 0x12, 0xa9, 0xb2, 0x56, 0xdf, 0x58, + 0x9b, 0x3e, 0x48, 0xe5, 0xb8, 0x0f, 0xb8, 0xcf, 0xf0, 0x3e, 0x86, 0xf6, 0x0c, 0xc0, + 0x70, 0xfb, 0x23, 0xc9, 0x7d, 0x4c, 0x14, 0xfa, 0x3a, 0x73, 0x46, 0xff, 0x55, 0x6b, + 0xc6, 0x85, 0x5a, 0x5f, 0x83, 0xe3, 0xdc, 0xd9, 0xf6, 0xea, 0xb3, 0xda, 0xbc, 0xd4, + 0x77, 0x50, 0xe3, 0x4e, 0x7c, 0x09, 0x38, 0xf6, 0x4d, 0x45, 0x1e, 0x39, 0x50, 0x9e, + 0x90, 0x27, 0x47, 0xa7, 0x07, 0x55, 0x12, 0x20, 0x95, 0x08, 0x2a, 0xb7, 0x98, 0x59, + 0x19, 0x07, 0x31, 0x41, 0xb6, 0xd3, 0x70, 0x20, 0x91, 0xab, 0x71, 0x72, 0x80, 0xbd, + 0xc5, 0x5e, 0x79, 0x9c, 0x01, 0xad, 0x86, 0x41, 0x90, 0x4e, 0x3b, 0x1d, 0xd2, 0x9e, + 0x1a, 0x96, 0x4c, 0x73, 0x7d, 0xb5, 0x82, 0xb9, 0x15, 0x1e, 0x87, 0xec, 0x00, 0x0d, + 0xae, 0x54, 0x04, 0xf8, 0x95, 0xc4, 0x03, 0xf1, 0x00, 0x05, 0x75, 0x4c, 0x7a, 0x04, + 0x6e, 0xa2, 0xe0, 0xfe, 0x6b, 0xe4, 0x10, 0x34, 0x9d, 0x73, 0x2f, 0x4d, 0x37, 0x81, + 0x0a, 0x91, 0xac, 0xef, 0x1e, 0x03, 0x8b, 0x81, 0xd7, 0x36, 0xd9, 0x8e, 0xad, 0xa9, + 0xcd, 0x7e, 0x0c, 0x2b, 0xe2, 0x7a, 0xb8, 0x50, 0x32, 0x06, 0x60, 0x91, 
0x22, 0x4e, + 0xdf, 0x87, 0x2f, 0x79, 0x63, 0x7d, 0xda, 0x39, 0x16, 0x79, 0x6a, 0x5c, 0x62, 0xf5, + 0x7f, 0x1d, 0xe3, 0x76, 0x78, 0xb6, 0xde, 0xa0, 0x08, 0x69, 0x93, 0x36, 0x74, 0xf8, + 0x8e, 0x41, 0xa9, 0x18, 0x08, 0x07, 0x3b, 0x0f, 0x43, 0x6e, 0xbe, 0x25, 0xa5, 0xf4, + 0x4a, 0x60, 0x10, 0x33, 0xe2, 0x18, 0x4b, 0x88, 0xdb, 0x79, 0xe9, 0x68, 0xca, 0x6d, + 0x89, 0xb7, 0x49, 0x01, 0xbe, 0x6c, 0x6d, 0xb3, 0x63, 0x65, 0x80, 0x18, 0x2e, 0x65, + 0x8d, 0xfc, 0x68, 0x67, 0x67, 0xd6, 0xd8, 0x19, 0xfa, 0x92, 0x3e, 0x0c, 0xdf, 0x3e, + 0xa3, 0x65, 0x76, 0xf8, 0x52, 0xbc, 0xd4, 0xe1, 0x96, 0xa7, 0x1a, 0x13, 0x29, 0xf6, + 0xc3, 0xff, 0x8e, 0x42, 0xe3, 0x09, 0x5a, 0xbd, 0x8e, 0xc1, 0x97, 0x99, 0x07, 0x13, + 0xee, 0x89, 0x39, 0x4c, 0x57, 0x19, 0xb2, 0x76, 0xde, 0x8f, 0x81, 0x8a, 0x34, 0xa7, + 0xbe, 0xc1, 0xf2, 0x68, 0x68, 0x2e, 0x91, 0x42, 0xc7, 0xd3, 0x87, 0x89, 0xf6, 0x76, + 0xcc, 0x12, 0xb7, 0x1a, 0xb6, 0x66, 0x35, 0xc5, 0x02, 0xe6, 0x9d, 0x05, 0xb9, 0xc7, + 0xef, 0x01, 0x52, 0x97, 0x75, 0xc6, 0x23, 0xa4, 0x8e, 0x4c, 0xc5, 0xc4, 0x15, 0xc9, + 0xfd, 0x56, 0x53, 0x65, 0xa4, 0x16, 0x37, 0x68, 0x78, 0x51, 0x53, 0x88, 0x7f, 0xb5, + 0xf9, 0x63, 0xe7, 0xac, 0xc1, 0x62, 0xf2, 0x80, 0x5f, 0x45, 0xf4, 0x44, 0x87, 0xf8, + 0x5e, 0x19, 0x9c, 0x1d, 0xf4, 0xa0, 0xfc, 0xa4, 0xd4, 0x4b, 0xaa, 0x62, 0xda, 0x7a, + 0xf5, 0xed, 0x69, 0x68, 0x41, 0x12, 0xd3, 0x5f, 0x36, 0x73, 0x73, 0x2f, 0x5a, 0x1a, + 0xc3, 0xe4, 0xf0, 0x21, 0xba, 0x5c, 0x2c, 0x32, 0xf0, 0x6e, 0x6b, 0x90, 0xfa, 0xe2, + 0xd2, 0x54, 0xcf, 0x09, 0xe7, 0x69, 0x0c, 0xf4, 0xe3, 0xaa, 0x70, 0x30, 0x98, 0x74, + 0x48, 0xe1, 0x47, 0xf9, 0x43, 0xba, 0xb5, 0xca, 0xb5, 0x58, 0x02, 0x9a, 0x36, 0x02, + 0x4d, 0x2e, 0x79, 0x0f, 0xc6, 0xfd, 0x66, 0x7f, 0x17, 0x6e, 0x0a, 0xa9, 0x9d, 0xd1, + 0xd7, 0x2b, 0x57, 0x36, 0x8f, 0x01, 0xb6, 0x6c, 0x4a, 0x96, 0xc1, 0x56, 0xf3, 0xf2, + 0x85, 0x41, 0xab, 0x4c, 0xa4, 0x96, 0x69, 0x60, 0x21, 0x82, 0x08, 0x46, 0x69, 0x61, + 0x12, 0x94, 0x90, 0xa7, 0xd8, 0xb6, 0x5c, 0x14, 0x70, 0xba, 0xd8, 0x03, 0x03, 0x27, + 0xa0, 
0x8d, 0x37, 0xce, 0x65, 0x71, 0x09, 0xe2, 0x8a, 0xd4, 0xe7, 0x21, 0x4d, 0x13, + 0x93, 0x09, 0x63, 0x3d, 0xf7, 0xa3, 0x1b, 0xce, 0xbf, 0x06, 0x19, 0x87, 0xa6, 0x35, + 0x52, 0x24, 0xfe, 0xf1, 0x2e, 0xf9, 0xd5, 0xf5, 0xb0, 0x45, 0x86, 0xbd, 0x7d, 0x3b, + 0x39, 0x24, 0x2b, 0x04, 0xd3, 0x47, 0x7f, 0xe5, 0x80, 0x47, 0xb3, 0x20, 0xf4, 0x14, + 0x32, 0x23, 0x5d, 0x3a, 0xa2, 0x73, 0x66, 0xf5, 0xb1, 0xff, 0xb4, 0xe9, 0x37, 0x04, + 0x00, 0x45, 0xd5, 0x89, 0x13, 0x05, 0xbb, 0x30, 0x70, 0xa2, 0xe7, 0x82, 0xd1, 0xe9, + 0xbc, 0xc9, 0x15, 0x8b, 0x93, 0x88, 0xbf, 0xb6, 0x00, 0x4b, 0x6f, 0x92, 0x9d, 0x3e, + 0x0d, 0xa1, 0x64, 0xb2, 0x36, 0x19, 0xaf, 0x1d, 0xe4, 0x56, 0xfd, 0xd0, 0x37, 0xbf, + 0x1e, 0xa7, 0xfa, 0xb2, 0x9a, 0x67, 0x61, 0xef, 0x4d, 0xed, 0xc8, 0x6c, 0x2f, 0x17, + 0x62, 0xad, 0x64, 0x48, 0x4c, 0x08, 0xff, 0xea, 0x77, 0x5a, 0x90, 0x4d, 0xec, 0x82, + 0x7f, 0xd8, 0x7a, 0x18, 0x86, 0x0d, 0x6e, 0x8a, 0x4a, 0x52, 0xb5, 0xcf, 0x44, 0xbe, + 0x28, 0xa6, 0x2d, 0x41, 0x59, 0x02, 0x09, 0x3a, 0x0c, 0x36, 0x5d, 0x29, 0x9e, 0xde, + 0xba, 0x53, 0x13, 0x6c, 0x62, 0x6e, 0x16, 0x0a, 0xcb, 0x00, 0x44, 0xce, 0x6f, 0x2b, + 0xb8, 0xdd, 0xe1, 0xfd, 0xda, 0x5b, 0x47, 0x4d, 0x5b, 0x35, 0x07, 0x47, 0x4e, 0x3d, + 0x52, 0x77, 0x24, 0x12, 0x01, 0xb8, 0x26, 0x1a, 0x49, 0xd4, 0x91, 0xaf, 0x04, 0x9b, + 0x39, 0xe2, 0x6d, 0x13, 0x57, 0xc3, 0x06, 0x92, 0x64, 0x16, 0x77, 0x6d, 0x7d, 0x13, + 0xf8, 0x40, 0xbd, 0x82, 0xac, 0xa0, 0x1c, 0x83, 0x1c, 0x98, 0x3f, 0x19, 0x85, 0xee, + 0x0a, 0xda, 0xe8, 0xdb, 0x84, 0x47, 0xc0, 0xe5, 0x1c, 0x09, 0xdf, 0xe3, 0xde, 0xe3, + 0x88, 0x0a, 0x97, 0x13, 0xce, 0xb7, 0x45, 0xab, 0xfd, 0xd9, 0xf1, 0xc7, 0xea, 0xd7, + 0x63, 0x08, 0xcd, 0xee, 0xa2, 0x1c, 0x8b, 0x09, 0x57, 0x02, 0x7c, 0x5d, 0x00, 0xe5, + 0x0a, 0x43, 0x88, 0xc7, 0xaf, 0x2b, 0xd6, 0x43, 0xcb, 0x5e, 0xae, 0x49, 0x27, 0x4d, + 0x12, 0x30, 0xa4, 0xcd, 0x49, 0x23, 0x7a, 0xe3, 0x7b, 0x38, 0x10, 0xc2, 0xc3, 0x95, + 0x8a, 0x7d, 0xee, 0x02, 0x34, 0x30, 0x1b, 0x89, 0xa2, 0xdf, 0x2a, 0x78, 0xef, 0x0b, + 0xfb, 0x4b, 0xf6, 0xb3, 
0x87, 0xdf, 0x2c, 0x6c, 0x86, 0xe6, 0x1c, 0xd1, 0x0c, 0xa1, + 0x1f, 0x81, 0x13, 0x01, 0x26, 0x07, 0xf1, 0x5b, 0x28, 0x56, 0x24, 0x0f, 0xdc, 0x52, + 0x06, 0x5a, 0x10, 0x28, 0xc8, 0xa2, 0xdd, 0xfd, 0xd1, 0x5c, 0xf5, 0x26, 0x5f, 0x87, + 0x38, 0x8a, 0xb9, 0xbf, 0x21, 0xc9, 0xa7, 0x8c, 0x59, 0x03, 0x8a, 0x98, 0xab, 0x64, + 0xfd, 0x67, 0x10, 0x77, 0xd4, 0x72, 0xc2, 0x09, 0xdd, 0x72, 0x9b, 0xd7, 0xf8, 0x48, + 0x09, 0x45, 0xfb, 0xa7, 0x52, 0x09, 0x8a, 0x94, 0xcc, 0xb2, 0x4c, 0xf3, 0xbc, 0x09, + 0x2d, 0x42, 0x36, 0x46, 0x11, 0xa2, 0x93, 0xaf, 0xf3, 0xc5, 0x79, 0x37, 0x2c, 0x12, + 0xe1, 0x50, 0x90, 0xaa, 0x27, 0x23, 0x20, 0x57, 0xf2, 0xed, 0xde, 0x4e, 0x1d, 0xb2, + 0x92, 0xf7, 0xb1, 0x86, 0x47, 0x22, 0x67, 0x35, 0x17, 0x6d, 0x90, 0xf1, 0x26, 0x5b, + 0x37, 0x98, 0xcc, 0xab, 0xac, 0x0b, 0x8d, 0x79, 0xb1, 0x77, 0x20, 0xb2, 0xba, 0x71, + 0xd7, 0x85, 0x0c, 0xc2, 0xa0, 0x87, 0x2b, 0xf0, 0xf4, 0xb8, 0x14, 0x36, 0x78, 0x59, + 0xf8, 0x99, 0x48, 0xf0, 0xa1, 0xa3, 0x83, 0x60, 0x4b, 0x9e, 0xf0, 0x7e, 0xa9, 0x3d, + 0xbb, 0x98, 0x71, 0xc0, 0x09, 0xaa, 0x6a, 0x31, 0xd8, 0xea, 0xf1, 0x43, 0x0b, 0x7b, + 0xc0, 0xac, 0x26, 0x4e, 0x2f, 0x97, 0x6a, 0xd3, 0x97, 0xf2, 0x7f, 0x48, 0x37, 0x8f, + 0x8a, 0x4e, 0xd9, 0x02, 0xc6, 0x6e, 0x49, 0x18, 0xfa, 0xee, 0x8d, 0xc0, 0x06, 0x72, + 0x46, 0x96, 0x0d, 0xb1, 0xf8, 0xcd, 0x07, 0xbf, 0x90, 0xd7, 0x53, 0x7c, 0xc2, 0x7b, + 0xbb, 0x8c, 0x9d, 0x5b, 0x29, 0x62, 0xc4, 0x7e, 0xd1, 0x82, 0xa2, 0xfc, 0xe0, 0x5f, + 0x8e, 0x03, 0xc4, 0xe2, 0x5e, 0x49, 0x6d, 0xd5, 0x7d, 0x6a, 0xb3, 0x45, 0x8f, 0xac, + 0xbd, 0x91, 0xea, 0x22, 0x72, 0xff, 0xda, 0x47, 0xb0, 0x73, 0x59, 0x5e, 0x78, 0xdd, + 0x84, 0xb7, 0x1f, 0xf8, 0x8b, 0x74, 0x21, 0x02, 0x88, 0xf0, 0xea, 0xf8, 0xe7, 0x1a, + 0xeb, 0xa4, 0x4c, 0x5e, 0xc3, 0x82, 0xe3, 0x59, 0x33, 0xe1, 0x7b, 0xa7, 0xef, 0xd6, + 0x64, 0x90, 0xf6, 0x72, 0x03, 0x2d, 0x4e, 0xbc, 0xf7, 0xcd, 0x55, 0x7a, 0xe0, 0xdb, + 0xb7, 0x25, 0x00, 0x4e, 0xcb, 0x05, 0x7a, 0x5a, 0x2b, 0x15, 0x7a, 0x1a, 0xbf, 0xb9, + 0x83, 0x87, 0x08, 0xba, 0x28, 0xe7, 0xea, 
0xa2, 0x12, 0xa9, 0x04, 0x22, 0xc1, 0x27, + 0x17, 0x53, 0xb9, 0xf3, 0x0f, 0x8f, 0xf8, 0xe5, 0x33, 0xa9, 0x93, 0xf0, 0x69, 0xbd, + 0x82, 0x2b, 0xf7, 0x24, 0xd1, 0xb7, 0x38, 0xc7, 0x3d, 0x4b, 0x46, 0xe9, 0x90, 0x28, + 0xde, 0x1e, 0xaa, 0xdf, 0x9a, 0xb0, 0x89, 0xdd, 0x46, 0x6c, 0xa1, 0x85, 0xa8, 0x0a, + 0xfc, 0xfd, 0x44, 0x68, 0x5c, 0xf8, 0xec, 0xe5, 0x58, 0xd7, 0xbf, 0xd0, 0x17, 0x39, + 0x20, 0xd7, 0x17, 0x51, 0x30, 0xf0, 0xe4, 0xd0, 0x93, 0x74, 0x41, 0xbc, 0xe9, 0x8c, + 0xfa, 0x5b, 0x33, 0x3b, 0x66, 0x19, 0x0f, 0x2b, 0x44, 0x71, 0x38, 0xe8, 0xc2, 0x6d, + 0x84, 0x12, 0xca, 0xc8, 0x20, 0x86, 0xd6, 0x1b, 0x5d, 0x2c, 0x8c, 0xf0, 0xbb, 0xeb, + 0xac, 0x5b, 0x89, 0xbf, 0xe8, 0x2b, 0x58, 0x91, 0x76, 0x64, 0xba, 0xb9, 0x1c, 0xe2, + 0xec, 0xe2, 0x90, 0xb2, 0x7b, 0x60, 0x52, 0xd4, 0xbf, 0x99, 0x1a, 0x33, 0xf4, 0x58, + 0x1a, 0x63, 0x36, 0x25, 0x78, 0x79, 0x58, 0x89, 0x7f, 0xca, 0x4b, 0x98, 0xb7, 0xe7, + 0x27, 0x7c, 0x5e, 0x6a, 0x1d, 0x88, 0x59, 0x48, 0xc9, 0xd4, 0x84, 0xdd, 0x0c, 0xef, + 0xef, 0x85, 0x4e, 0x81, 0x76, 0xc3, 0x97, 0xdc, 0xfa, 0x77, 0x2e, 0x71, 0x14, 0x72, + 0xe7, 0x90, 0xba, 0x8d, 0x39, 0x35, 0xd5, 0x7c, 0xa3, 0x13, 0x49, 0x37, 0x9e, 0x62, + 0x83, 0xa6, 0xaa, 0x8f, 0xc9, 0x91, 0xef, 0xc7, 0xd3, 0xb7, 0xef, 0x66, 0xb9, 0x2f, + 0xe0, 0x9d, 0x35, 0x16, 0x27, 0x0a, 0xe1, 0x9a, 0x99, 0x92, 0x16, 0xee, 0xae, 0x16, + 0x21, 0x44, 0xac, 0xea, 0x56, 0x0d, 0x17, 0x72, 0x05, 0xf2, 0x6c, 0x97, 0x03, 0xb5, + 0x4e, 0x80, 0xaf, 0x1a, 0x87, 0x94, 0xd6, 0xd3, 0xf1, 0xc5, 0xee, 0xad, 0x22, 0x0b, + 0x11, 0x9f, 0x06, 0xb2, 0x00, 0x98, 0x6c, 0x91, 0x21, 0x32, 0xcb, 0x08, 0xa9, 0x8e, + 0x0f, 0xee, 0x35, 0xe7, 0xf7, 0x7f, 0xc8, 0x52, 0x1d, 0x38, 0x77, 0x3e, 0x61, 0x4e, + 0xee, 0xb8, 0xa3, 0xea, 0xd8, 0x6a, 0x02, 0x48, 0x32, 0xe6, 0x4a, 0x4c, 0x75, 0x72, + 0x0c, 0xdc, 0xdd, 0xf9, 0xd0, 0x77, 0x09, 0xa1, 0x3b, 0x0d, 0xb0, 0xea, 0xd3, 0x4e, + 0xfc, 0xbd, 0x01, 0xbb, 0x9b, 0x44, 0xd7, 0xa2, 0xb1, 0x12, 0x8c, 0x10, 0x65, 0x28, + 0x98, 0x9e, 0x46, 0x29, 0x74, 0x43, 0x54, 0x0f, 0x50, 0xc9, 
0xc1, 0x58, 0x30, 0xdf, + 0x3d, 0x77, 0x6c, 0x4a, 0x7b, 0x00, 0xe5, 0x05, 0x93, 0x69, 0x79, 0xd7, 0xd2, 0xb4, + 0x94, 0x0e, 0x7b, 0xa1, 0x36, 0x00, 0xb2, 0x96, 0xf0, 0x6a, 0x76, 0x33, 0x08, 0x8e, + 0x9a, 0x0d, 0xbd, 0x56, 0x8a, 0x1d, 0x61, 0x01, 0x11, 0xe7, 0x04, 0xfc, 0xed, 0x47, + 0x99, 0x89, 0x8f, 0xab, 0x77, 0x71, 0x60, 0xc7, 0xce, 0x3d, 0xf2, 0x23, 0xf6, 0x9d, + 0x5b, 0x30, 0xc2, 0x93, 0x5e, 0xee, 0x50, 0x8e, 0x0c, 0xee, 0x4f, 0xbc, 0xb0, 0xd1, + 0x3b, 0xf6, 0x24, 0x37, 0xdc, 0xf0, 0x5a, 0x63, 0x13, 0x45, 0xef, 0xbe, 0x0d, 0x7b, + 0xb9, 0x01, 0x61, 0x66, 0x55, 0x4f, 0xf3, 0x8a, 0x1d, 0x77, 0xf2, 0xfd, 0xa4, 0xe7, + 0xeb, 0xa7, 0xa7, 0x8a, 0xb3, 0x1f, 0x38, 0x29, 0x42, 0x52, 0xa2, 0xb1, 0x0f, 0xd2, + 0x86, 0x5b, 0x57, 0x05, 0x05, 0x5d, 0xfe, 0x9b, 0x3e, 0x9e, 0x8f, 0x7a, 0xd5, 0xf4, + 0x00, 0x7d, 0xbe, 0x42, 0x2b, 0x3a, 0xa0, 0xbe, 0xb9, 0xd1, 0xc8, 0x9d, 0x37, 0x46, + 0x08, 0x54, 0xff, 0x6e, 0x5f, 0x03, 0xe5, 0xff, 0x3d, 0x4f, 0x18, 0x48, 0xf4, 0xcc, + 0x64, 0x21, 0x8a, 0x01, 0xf2, 0x47, 0x2b, 0xb0, 0x55, 0x80, 0x2f, 0x97, 0xf3, 0x20, + 0x41, 0xa7, 0x92, 0x79, 0x0b, 0x7c, 0x22, 0x6b, 0x04, 0xa6, 0xea, 0xe8, 0x5f, 0x1b, + 0x71, 0xca, 0x19, 0xa1, 0x71, 0x89, 0x02, 0xb4, 0xc3, 0xa3, 0xb5, 0x06, 0xd8, 0xc1, + 0xb7, 0xae, 0x72, 0x8c, 0x9b, 0x6c, 0xc3, 0x17, 0xe5, 0xe0, 0xde, 0xe5, 0x33, 0xe2, + 0xe9, 0x99, 0x73, 0xd8, 0x83, 0xa4, 0x0c, 0x6e, 0x68, 0xf2, 0x31, 0xd2, 0xcb, 0x01, + 0x2f, 0x60, 0xc1, 0x43, 0xcc, 0xab, 0xdd, 0x40, 0x45, 0x59, 0x0d, 0x9e, 0x43, 0xfb, + 0xa3, 0x6f, 0xe4, 0xcf, 0xd9, 0x7b, 0x4b, 0xdd, 0x0c, 0x4d, 0x2c, 0x93, 0xc5, 0x72, + 0x8b, 0x12, 0x87, 0xfd, 0x25, 0x41, 0x72, 0x2c, 0x69, 0x9b, 0xc1, 0xa0, 0x05, 0x83, + 0xdb, 0xc9, 0x48, 0xd5, 0x32, 0x4a, 0xc5, 0xbd, 0x7a, 0x68, 0x09, 0x64, 0x67, 0x3e, + 0xdf, 0x2c, 0x6d, 0xeb, 0xb1, 0xc8, 0xe1, 0xd0, 0x24, 0x16, 0xe6, 0xbd, 0xb2, 0xa7, + 0x68, 0x1b, 0xf4, 0x29, 0x92, 0x25, 0xc2, 0x1b, 0x5d, 0xb6, 0xa8, 0x45, 0xad, 0x10, + 0x4d, 0x34, 0x29, 0xcd, 0xc5, 0x9e, 0x3b, 0xca, 0xcf, 0x6d, 0xbc, 0x88, 0xaf, 
0x0f, + 0x67, 0xdc, 0xbd, 0xf3, 0xa0, 0x72, 0x3e, 0x4d, 0x4b, 0xce, 0x32, 0x85, 0x1b, 0xb5, + 0x19, 0x7a, 0x8f, 0x43, 0x30, 0xb2, 0x72, 0x27, 0xf0, 0xb7, 0x71, 0xd0, 0xaf, 0x17, + 0x5e, 0x9c, 0x3f, 0x6e, 0x1f, 0x68, 0x46, 0x2e, 0xe7, 0xfe, 0x17, 0x97, 0xd9, 0x28, + 0x40, 0x6f, 0x92, 0x38, 0xa3, 0xf3, 0xfd, 0x83, 0x6a, 0x27, 0x56, 0xdd, 0x0a, 0x11, + 0xe1, 0xab, 0x94, 0x9d, 0x5e, 0x30, 0x89, 0x4f, 0x56, 0x29, 0x95, 0x25, 0xe6, 0x5d, + 0x95, 0x0f, 0x2e, 0xb5, 0x0b, 0x3a, 0x8e, 0xa7, 0xac, 0xad, 0xbc, 0x3c, 0x77, 0xeb, + 0x53, 0xe7, 0xde, 0x9b, 0xa8, 0x2f, 0x7d, 0xd5, 0xf6, 0x13, 0xcd, 0xa6, 0x29, 0xfc, + 0xd2, 0xf6, 0x36, 0x6b, 0x2e, 0x1e, 0xc2, 0x40, 0xd4, 0x82, 0xc3, 0xa6, 0xf9, 0xd9, + 0x8d, 0xab, 0x1c, 0x86, 0x4c, 0x00, 0xb8, 0xfd, 0x36, 0x46, 0xf0, 0xd5, 0x96, 0xfe, + 0x18, 0x0f, 0x70, 0xb1, 0x94, 0x84, 0x25, 0x63, 0xe9, 0xf3, 0xf4, 0xdc, 0xf5, 0x2b, + 0x89, 0x3a, 0x70, 0x9e, 0x1d, 0xd4, 0xa7, 0xca, 0x1c, 0x49, 0xec, 0x81, 0x4e, 0x8f, + 0xe6, 0xe0, 0xe0, 0xde, 0x54, 0x6a, 0x4f, 0xbe, 0x7d, 0x25, 0x67, 0x0b, 0x2f, 0xc6, + 0x8a, 0x8f, 0xb2, 0xc4, 0xa6, 0x3d, 0xef, 0xec, 0x6f, 0xe0, 0x1d, 0x8c, 0xe0, 0xf5, + 0x1d, 0x3c, 0x65, 0xa4, 0x28, 0x90, 0x97, 0x5f, 0xa1, 0xed, 0xed, 0x70, 0x56, 0x20, + 0xdf, 0xcd, 0x1d, 0x0c, 0xde, 0xad, 0x2a, 0xbf, 0xa6, 0xdf, 0xe2, 0x6d, 0x79, 0xc9, + 0x0c, 0x63, 0xff, 0x96, 0xe5, 0x40, 0xb7, 0x61, 0x5d, 0x43, 0xa6, 0x26, 0x1d, 0x57, + 0x73, 0x03, 0x06, 0xb6, 0x63, 0x2c, 0x8e, 0xe6, 0x1b, 0xaa, 0x4a, 0xb4, 0xd3, 0x08, + 0x4d, 0x65, 0x9c, 0xab, 0xcf, 0xc4, 0x06, 0x4c, 0x09, 0xd2, 0x42, 0x69, 0xb3, 0x03, + 0x17, 0x10, 0xb6, 0x7d, 0x3b, 0x0b, 0x73, 0x6f, 0xac, 0xbc, 0x18, 0x1e, 0xb1, 0xdc, + 0x8c, 0x49, 0x3f, 0x10, 0xdb, 0xe6, 0xfe, 0x45, 0xfd, 0xd4, 0xab, 0x60, 0x22, 0xfa, + 0xbd, 0xd3, 0x4c, 0x09, 0xf7, 0x51, 0x04, 0xc3, 0x85, 0xc9, 0x26, 0x83, 0x41, 0xc1, + 0x6e, 0xbe, 0x80, 0xf8, 0xc8, 0x0e, 0x8e, 0x06, 0x23, 0x06, 0x03, 0x99, 0x5a, 0xde, + 0x55, 0x61, 0xfe, 0xd4, 0x5c, 0xf8, 0xd1, 0x14, 0xd4, 0xcf, 0x02, 0x42, 0x0c, 0x4b, + 0x96, 0x2d, 
0xc2, 0x02, 0xf8, 0xa5, 0x07, 0xf3, 0xd8, 0xe8, 0xa3, 0x44, 0xfb, 0xa1, + 0x0a, 0x32, 0x7f, 0xf2, 0x22, 0x54, 0xf6, 0xc3, 0xac, 0x8f, 0x3c, 0xf9, 0x70, 0x0b, + 0x1f, 0xd2, 0xec, 0xbe, 0x9f, 0x4e, 0x91, 0xe4, 0x3a, 0x65, 0x4f, 0xff, 0x02, 0x7c, + 0xd9, 0x17, 0x4b, 0x63, 0x8e, 0x6e, 0xfe, 0xc4, 0xab, 0xfb, 0xa1, 0x87, 0xf8, 0xf3, + 0xdb, 0xa0, 0x45, 0x9d, 0xa6, 0xc3, 0xf8, 0x00, 0xcb, 0x6b, 0x61, 0x33, 0xa8, 0xb4, + 0xac, 0x1e, 0xf6, 0x58, 0xd1, 0x11, 0xc0, 0x3f, 0x07, 0x22, 0x08, 0xdc, 0xc2, 0x07, + 0xa2, 0x22, 0x3a, 0x70, 0x22, 0x92, 0x43, 0x2e, 0x83, 0x06, 0xfc, 0x03, 0x04, 0x63, + 0xe7, 0x54, 0xff, 0x0f, 0x15, 0x3d, 0x97, 0xbc, 0x9c, 0xe9, 0x6d, 0xff, 0x4b, 0xed, + 0x2f, 0x1e, 0xa5, 0xb8, 0xea, 0x87, 0x6d, 0x2e, 0xe4, 0xe4, 0xf6, 0xe4, 0x9a, 0x4a, + 0x85, 0xa9, 0xcf, 0x4a, 0x33, 0xdc, 0xd9, 0x36, 0x60, 0xa4, 0x25, 0x43, 0xe5, 0x34, + 0x22, 0x39, 0x0d, 0x66, 0x5b, 0xdd, 0x30, 0x24, 0x78, 0xb3, 0x3c, 0x8d, 0x57, 0x47, + 0x92, 0x41, 0x4c, 0x5f, 0xe5, 0xb7, 0x4f, 0xe1, 0xd1, 0x69, 0x52, 0x5c, 0x99, 0x30, + 0x1a, 0x3a, 0x68, 0xa0, 0xc8, 0x5f, 0x99, 0x08, 0xed, 0x24, 0x25, 0x51, 0x5d, 0x45, + 0xca, 0xe5, 0xca, 0xe7, 0xce, 0x0e, 0x98, 0xb5, 0x82, 0x9e, 0xd6, 0x96, 0xbe, 0x2c, + 0x3d, 0xb4, 0x59, 0xe0, 0xad, 0x5b, 0x5d, 0xf7, 0x4a, 0xa1, 0x7b, 0x43, 0x44, 0x65, + 0x42, 0xaf, 0x17, 0x84, 0x40, 0x1e, 0xfe, 0xc9, 0xf1, 0x25, 0x6d, 0xaf, 0x71, 0x91, + 0x59, 0xd8, 0xa1, 0x83, 0x3f, 0xc0, 0x5c, 0xdb, 0x01, 0xf6, 0x88, 0xef, 0x49, 0x81, + 0xc7, 0x4a, 0x7f, 0xf4, 0x3d, 0xe3, 0x55, 0xc3, 0xc4, 0x66, 0x1c, 0x36, 0xfa, 0x24, + 0xec, 0x10, 0x99, 0xa8, 0xad, 0xf4, 0xe3, 0x11, 0x48, 0x78, 0x20, 0xb5, 0xa7, 0x76, + 0xea, 0x06, 0x42, 0xef, 0x86, 0x65, 0x29, 0xb9, 0xe4, 0x8c, 0x36, 0x82, 0xd0, 0xa4, + 0x2c, 0x24, 0x84, 0x09, 0x64, 0xb0, 0x9d, 0x5a, 0x7e, 0x64, 0x94, 0x86, 0xe2, 0x9d, + 0x56, 0x82, 0xf1, 0x49, 0x8a, 0xb4, 0x2a, 0xa3, 0x98, 0x20, 0xdb, 0x99, 0x65, 0xd3, + 0x2b, 0xad, 0xb7, 0xc3, 0x4d, 0xf6, 0xad, 0x7e, 0x19, 0x6e, 0x4e, 0xbb, 0x76, 0x3f, + 0x5c, 0x66, 0x51, 0x66, 0xa2, 
0x63, 0x8b, 0x25, 0x38, 0x5a, 0x27, 0x40, 0x6c, 0xd0, + 0xbf, 0x91, 0x38, 0xd2, 0x58, 0x77, 0x92, 0x34, 0x95, 0x01, 0x80, 0xfc, 0x15, 0xdc, + 0x89, 0xc9, 0xc8, 0xff, 0xe1, 0xa6, 0x91, 0x7e, 0x5c, 0xdc, 0xde, 0x93, 0x9f, 0x90, + 0xbf, 0x96, 0x63, 0x1a, 0xe4, 0x59, 0x7e, 0x9c, 0xc0, 0xbe, 0xe7, 0xb3, 0x02, 0x5f, + 0x95, 0x56, 0x10, 0x6a, 0x84, 0x3a, 0x18, 0x22, 0x7f, 0x5a, 0xb9, 0x61, 0x7d, 0x7b, + 0xcb, 0x1a, 0xf5, 0x28, 0xfa, 0xa7, 0xa0, 0x52, 0xea, 0x4f, 0x52, 0xca, 0x59, 0x45, + 0x57, 0xfd, 0xad, 0x33, 0x05, 0x2b, 0xc8, 0x2b, 0x39, 0xc6, 0xa6, 0x09, 0xa0, 0x70, + 0x75, 0x3d, 0x78, 0x8b, 0x2c, 0x4a, 0x2c, 0xae, 0xbb, 0xe7, 0x9f, 0xf0, 0x12, 0x07, + 0x1c, 0x07, 0x08, 0x10, 0x94, 0xad, 0x60, 0x59, 0xc2, 0x8f, 0x48, 0xe5, 0x56, 0xc4, + 0xe8, 0xd8, 0xc5, 0x37, 0x8b, 0xc2, 0x93, 0x07, 0x6b, 0xb4, 0x97, 0x07, 0x5f, 0x9c, + 0xa0, 0xba, 0x13, 0x11, 0x55, 0x0f, 0xa2, 0x17, 0x3d, 0x0e, 0xb1, 0xf0, 0xbd, 0xdd, + 0xf3, 0xb3, 0xd5, 0xc2, 0x43, 0xff, 0xea, 0xbe, 0xe8, 0x23, 0xcd, 0x63, 0xb4, 0x39, + 0x39, 0xce, 0x95, 0x46, 0xed, 0x4c, 0x41, 0xe6, 0x0c, 0xcc, 0x7e, 0x1c, 0x54, 0x3c, + 0xb3, 0xe2, 0xd3, 0x50, 0xe2, 0xe2, 0xe9, 0x74, 0x21, 0x5c, 0xf7, 0xaa, 0x96, 0x9b, + 0x66, 0x81, 0x14, 0xac, 0xdb, 0x29, 0xf4, 0xcd, 0xcf, 0xdc, 0xec, 0x2a, 0x8c, 0xe4, + 0xf5, 0x95, 0xf4, 0xff, 0x5f, 0x70, 0x7e, 0x7f, 0xa4, 0xde, 0xe8, 0xbf, 0x8f, 0x39, + 0x52, 0xae, 0x32, 0xe7, 0x7f, 0x34, 0xf8, 0xb3, 0xab, 0xaa, 0xe9, 0x69, 0x28, 0xba, + 0x4a, 0x6c, 0x0f, 0xbf, 0x5b, 0x29, 0x19, 0x2d, 0xae, 0x80, 0x0d, 0xfa, 0x79, 0x57, + 0x0c, 0xaf, 0x0b, 0xb8, 0x33, 0xbd, 0x37, 0xa3, 0xd4, 0xbe, 0xaf, 0x09, 0x1f, 0x6b, + 0x3e, 0x55, 0xaa, 0xe5, 0x25, 0xf4, 0x13, 0xac, 0x80, 0x4c, 0x34, 0x7d, 0x54, 0x1d, + 0x2c, 0x09, 0xec, 0x6e, 0x54, 0x03, 0x5d, 0xf1, 0xd8, 0x30, 0x28, 0x4d, 0x9b, 0x46, + 0xff, 0xd2, 0xb2, 0xeb, 0x04, 0x0b, 0x61, 0x77, 0xd0, 0xa0, 0x9c, 0x16, 0x60, 0x34, + 0xa9, 0x57, 0xb1, 0x8f, 0xf6, 0x2e, 0x43, 0x4a, 0x3e, 0xc7, 0x32, 0x62, 0xe4, 0xb2, + 0x3f, 0xec, 0x9d, 0x29, 0x0a, 0x81, 0xc5, 0xb1, 
0xf7, 0x3c, 0xb4, 0xcd, 0x1c, 0x47, + 0x2b, 0x86, 0xe5, 0x34, 0xab, 0x9e, 0x65, 0x53, 0x29, 0x5d, 0xb0, 0xcf, 0x34, 0xe1, + 0x39, 0x2a, 0xad, 0x5a, 0xbc, 0xf3, 0x98, 0x64, 0x16, 0xa7, 0x0a, 0x9d, 0xbe, 0x59, + 0xbb, 0x95, 0x8e, 0xbc, 0x71, 0x1c, 0x3a, 0xe0, 0x8c, 0xaf, 0x52, 0xec, 0xa9, 0xcb, + 0x54, 0xc4, 0x58, 0xbe, 0x7f, 0x5e, 0x62, 0x14, 0xec, 0xa0, 0xf0, 0xa3, 0x81, 0x52, + 0x62, 0x20, 0x01, 0x32, 0xe6, 0x14, 0x54, 0x37, 0xec, 0xd2, 0x1f, 0xc8, 0x03, 0x6c, + 0xb0, 0x0a, 0x49, 0x13, 0x84, 0xc3, 0x41, 0xd8, 0x72, 0xdc, 0xda, 0x31, 0xb1, 0x42, + 0x96, 0x73, 0xd9, 0xc4, 0xf5, 0x7b, 0x81, 0xa0, 0x23, 0x6d, 0xa5, 0xec, 0x55, 0x02, + 0xee, 0x29, 0x63, 0x15, 0x0a, 0x00, 0x26, 0xbd, 0x63, 0xef, 0x67, 0x9e, 0x8c, 0x25, + 0xb8, 0xec, 0xee, 0x06, 0x56, 0x4a, 0xf3, 0xb0, 0x2d, 0xea, 0xb1, 0x06, 0x97, 0xa2, + 0x4d, 0xe6, 0x7d, 0x4f, 0x65, 0x04, 0xae, 0x27, 0x37, 0xb8, 0xe1, 0x73, 0x25, 0xc2, + 0xff, 0x15, 0x0c, 0x62, 0xe3, 0x79, 0x83, 0x44, 0xa1, 0xad, 0x3c, 0xbb, 0x75, 0xb7, + 0xf2, 0xa1, 0x57, 0x38, 0xf6, 0x01, 0xcf, 0x00, 0xf7, 0xe8, 0xbc, 0x08, 0xb6, 0x89, + 0x56, 0x7e, 0x4c, 0x7c, 0x01, 0x05, 0x8b, 0xee, 0xc2, 0x90, 0x3c, 0x5c, 0xa6, 0xb4, + 0xc4, 0xa5, 0x71, 0xf4, 0x60, 0xd6, 0x05, 0x87, 0x36, 0x29, 0x96, 0xc6, 0xe1, 0x25, + 0x54, 0xe8, 0xe3, 0x4e, 0x68, 0x3a, 0x27, 0xf8, 0xa5, 0xff, 0x97, 0x1d, 0x5a, 0x0d, + 0xc2, 0xf3, 0xef, 0xd3, 0x88, 0x99, 0x87, 0xc1, 0xcc, 0x39, 0xce, 0x5d, 0x4b, 0x6b, + 0x54, 0x4c, 0xe0, 0x4c, 0x71, 0xee, 0x4b, 0xfa, 0xe5, 0x04, 0x0d, 0x61, 0xf0, 0x57, + 0xe4, 0xf7, 0x70, 0x17, 0x28, 0xf1, 0x20, 0x04, 0xa7, 0xf7, 0xed, 0xeb, 0x3a, 0xb2, + 0x26, 0x09, 0xed, 0x33, 0xb0, 0xab, 0x5d, 0x69, 0xb1, 0x2d, 0x45, 0x76, 0x57, 0x77, + 0x14, 0xdf, 0xc6, 0xdd, 0xa7, 0x1f, 0xf6, 0x01, 0x7b, 0x55, 0xb3, 0x35, 0x4d, 0x11, + 0xe9, 0x21, 0x67, 0x92, 0xe5, 0x60, 0x9f, 0xc0, 0x67, 0x88, 0xec, 0x66, 0x8e, 0xef, + 0x64, 0x5e, 0x63, 0xb3, 0x7e, 0x2d, 0x0c, 0xd2, 0x63, 0x04, 0x08, 0x00, 0xbc, 0x8a, + 0xa2, 0x80, 0x15, 0x6a, 0x79, 0x4f, 0x62, 0xa5, 0xf6, 0x93, 0xeb, 
0xd9, 0x07, 0x4b, + 0x5d, 0x35, 0x4a, 0x71, 0xc8, 0xe3, 0x36, 0xde, 0x04, 0x08, 0xac, 0x70, 0x80, 0xa2, + 0xae, 0xee, 0x36, 0x6c, 0x58, 0x14, 0x6f, 0x32, 0xe3, 0x49, 0xa9, 0xbc, 0x65, 0x7e, + 0xc9, 0xe5, 0x7a, 0x89, 0xa0, 0x4c, 0xce, 0xee, 0x21, 0xbd, 0xf3, 0x79, 0x3e, 0x49, + 0xa5, 0xcf, 0x71, 0x3a, 0x42, 0xd0, 0x29, 0xdd, 0xdb, 0x3d, 0xb4, 0x95, 0x09, 0x2c, + 0x37, 0xce, 0x81, 0x4b, 0xe7, 0x3e, 0xf4, 0xec, 0x8d, 0x70, 0xe8, 0x69, 0xbd, 0x2b, + 0x78, 0x8f, 0x15, 0x00, 0xfe, 0x5e, 0xe5, 0x6c, 0x0c, 0xe7, 0x04, 0xeb, 0xa2, 0xc1, + 0xa3, 0xa3, 0x29, 0x0d, 0xe6, 0xec, 0x68, 0xcc, 0xb5, 0xef, 0x7c, 0xd0, 0x21, 0x2a, + 0x3f, 0x09, 0x96, 0x92, 0xcf, 0x00, 0x04, 0x8d, 0xe5, 0x01, 0x26, 0x19, 0xe7, 0x41, + 0x69, 0x2b, 0xfc, 0x74, 0x05, 0xba, 0x3e, 0x87, 0x5e, 0x98, 0xb7, 0xca, 0x31, 0xe9, + 0x65, 0xa1, 0x6f, 0xdd, 0xb5, 0xb0, 0xb7, 0x72, 0xa3, 0xf5, 0xd0, 0x50, 0xd8, 0xad, + 0x7f, 0x60, 0x7f, 0x71, 0xc5, 0x36, 0x3f, 0x7b, 0x7d, 0x2c, 0x34, 0x38, 0xab, 0xe6, + 0xb8, 0xcd, 0x3b, 0xb4, 0x21, 0x8b, 0x4d, 0x7f, 0x55, 0x65, 0x0b, 0x80, 0x13, 0x80, + 0xc7, 0xb5, 0xc6, 0x10, 0x07, 0x9e, 0x51, 0x37, 0x16, 0xc4, 0x6f, 0xaf, 0xcf, 0x3c, + 0x8c, 0x27, 0x15, 0x38, 0x27, 0x83, 0xae, 0xe6, 0x69, 0xa9, 0xdf, 0x47, 0x17, 0x70, + 0x71, 0xb5, 0x43, 0x98, 0xce, 0xcf, 0xd6, 0x86, 0xa0, 0xbc, 0x9a, 0xd3, 0x7f, 0x44, + 0xb5, 0x38, 0x87, 0x75, 0x87, 0x51, 0x66, 0x00, 0x6d, 0x25, 0xdf, 0x4b, 0x5e, 0xd1, + 0xc4, 0x1f, 0x12, 0x1b, 0x9e, 0x16, 0xfc, 0xa6, 0xe0, 0x15, 0xa9, 0x01, 0xe1, 0xe7, + 0x00, 0xc0, 0x99, 0x4e, 0x42, 0x7b, 0xeb, 0xd3, 0x56, 0xe4, 0x17, 0x6d, 0xec, 0x83, + 0xe6, 0xfe, 0x80, 0x02, 0x9c, 0xfc, 0x47, 0x8b, 0x88, 0xb6, 0xfd, 0x38, 0xc0, 0x39, + 0xe0, 0x8b, 0x6f, 0xd9, 0x5d, 0xab, 0xcf, 0xb2, 0x5f, 0x23, 0x8b, 0x26, 0x62, 0x06, + 0xb0, 0xa2, 0xf9, 0xa2, 0xee, 0xa1, 0xc0, 0x83, 0xfa, 0xc8, 0x08, 0xaa, 0xfa, 0x03, + 0x65, 0x66, 0xcc, 0xd2, 0x02, 0xbc, 0xfa, 0x41, 0x4e + ], + }, + TestVector { + description: "Sapling transaction #4", + version: 4, + lock_time: 3595314650, + 
expiry_height: 122835128, + txid: [ + 0x0a, 0xb2, 0x57, 0x99, 0x70, 0xc8, 0x47, 0x1f, 0x85, 0x44, 0x80, 0x4a, 0x83, 0x03, + 0x79, 0x75, 0xb7, 0x33, 0x8a, 0x96, 0x15, 0x7d, 0xbd, 0xd6, 0xd5, 0x46, 0x5d, 0x61, + 0x8e, 0x79, 0x35, 0x89 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 2, + transparent_outputs: 2, + tx: vec![ + 0x04, 0x00, 0x00, 0x80, 0x85, 0x20, 0x2f, 0x89, 0x02, 0xc8, 0xb4, 0x89, 0x33, 0xc8, + 0xed, 0x45, 0x28, 0x7e, 0x1b, 0x43, 0x9b, 0x61, 0x06, 0xa5, 0x50, 0x94, 0x73, 0xf5, + 0x7b, 0x87, 0x88, 0xaf, 0x52, 0x7c, 0xf9, 0xa7, 0xab, 0xa5, 0x93, 0xdc, 0x9f, 0x5e, + 0x5a, 0xca, 0x1a, 0x00, 0x8e, 0xe4, 0x88, 0xf3, 0x6d, 0xeb, 0x4a, 0x3f, 0xdb, 0x0f, + 0xf6, 0xf5, 0xa3, 0x04, 0x4a, 0x63, 0xe1, 0x7f, 0x70, 0xa4, 0x30, 0x38, 0x24, 0x60, + 0x3a, 0xb5, 0x0e, 0x9b, 0xf7, 0x5b, 0xae, 0xb5, 0x7b, 0xfd, 0xc8, 0x9b, 0xfd, 0xbc, + 0x27, 0x27, 0x01, 0x00, 0x73, 0xbf, 0x7f, 0x95, 0x02, 0x2b, 0x29, 0x84, 0x72, 0xed, + 0x8b, 0x00, 0x00, 0x02, 0x6a, 0x63, 0x2c, 0xf8, 0x75, 0x50, 0x39, 0x41, 0x03, 0x00, + 0x00, 0xda, 0x25, 0x4c, 0xd6, 0xb8, 0x50, 0x52, 0x07, 0x1d, 0x8e, 0xe5, 0x7f, 0xeb, + 0x91, 0x00, 0x00, 0x04, 0x96, 0x14, 0x04, 0xec, 0x12, 0x4e, 0xec, 0x2a, 0x10, 0xc3, + 0x9c, 0x0c, 0x41, 0x8b, 0xe2, 0x06, 0xe6, 0x05, 0x20, 0x87, 0xa2, 0xd3, 0x37, 0x4a, + 0x8e, 0x46, 0xea, 0xe1, 0xfd, 0x0d, 0x9b, 0x21, 0x6e, 0x7d, 0xf6, 0x15, 0x3a, 0xd9, + 0x73, 0x55, 0x83, 0x79, 0x28, 0x40, 0x4c, 0xd5, 0x81, 0xbc, 0x9c, 0xf9, 0xdc, 0xd6, + 0x67, 0x47, 0xdc, 0x97, 0x0a, 0x9f, 0x00, 0xde, 0xb4, 0x4b, 0xd6, 0x34, 0xab, 0x04, + 0x2e, 0x01, 0x04, 0xc1, 0xce, 0x74, 0x7f, 0x53, 0x75, 0x1b, 0xc3, 0x3e, 0x38, 0x4c, + 0x6b, 0x55, 0x76, 0x39, 0x9e, 0x16, 0xf8, 0xf0, 0xcb, 0x08, 0xde, 0x35, 0x08, 0x37, + 0x33, 0x95, 0xd7, 0x21, 0x78, 0x7e, 0xdc, 0x4c, 0x6b, 0x39, 0x35, 0x66, 0x25, 0x10, + 0x77, 0x10, 0x00, 0x68, 0x0d, 0x78, 0xbb, 0x49, 0xc5, 0x66, 0xef, 0x27, 0xdf, 0x61, + 0xc9, 0xfe, 0xb9, 0x2c, 0x08, 0x97, 0x59, 0x44, 0x87, 0x27, 0xa9, 0x34, 0xe3, 0x57, + 0x95, 
0x3d, 0xe1, 0xe9, 0xe9, 0x0f, 0xd8, 0xdf, 0xfe, 0x40, 0xb8, 0x73, 0xbc, 0xd5, + 0xb9, 0x82, 0x08, 0xdf, 0x4b, 0x2c, 0xa2, 0x89, 0x7a, 0xf9, 0x0d, 0x8c, 0x8a, 0x23, + 0x62, 0x30, 0x02, 0xa9, 0xd8, 0xbc, 0x02, 0xe8, 0x06, 0x25, 0x4f, 0x41, 0x0e, 0x3b, + 0x02, 0x40, 0x9c, 0xbe, 0xbf, 0xce, 0x8a, 0xcf, 0x65, 0xcf, 0x39, 0x42, 0x6b, 0x64, + 0xa6, 0xba, 0x93, 0x74, 0xa1, 0x3d, 0x72, 0x59, 0x62, 0x3f, 0x65, 0xe9, 0x3e, 0x10, + 0xbf, 0x1f, 0x16, 0xba, 0x7a, 0xe0, 0x7d, 0xa9, 0x20, 0x58, 0x1c, 0x70, 0x40, 0x9e, + 0xdc, 0x7b, 0x9e, 0x21, 0x4e, 0x95, 0x91, 0x92, 0x82, 0x4c, 0x1d, 0xa6, 0x5d, 0x33, + 0x7b, 0x73, 0x75, 0xf5, 0x03, 0x2f, 0xea, 0xd3, 0xb4, 0xf3, 0x28, 0x48, 0x11, 0x95, + 0x0c, 0x7a, 0x90, 0xae, 0xc9, 0x75, 0xd4, 0xe3, 0x62, 0x9f, 0x52, 0xd1, 0x9a, 0x16, + 0x4e, 0x51, 0x16, 0xef, 0x3a, 0xd0, 0x22, 0x44, 0x2d, 0x1e, 0xec, 0x76, 0xb8, 0x88, + 0x73, 0x8b, 0x53, 0xe5, 0x05, 0x58, 0xa7, 0x0f, 0x20, 0xc8, 0xac, 0xb5, 0x8d, 0xee, + 0x63, 0x27, 0x15, 0xe4, 0x78, 0xe2, 0xbc, 0x21, 0xbc, 0xfb, 0xe3, 0x15, 0x59, 0x96, + 0xca, 0xe7, 0xbd, 0x97, 0xf0, 0x2b, 0x51, 0x6d, 0x32, 0x00, 0xfb, 0x3c, 0x17, 0x39, + 0x7c, 0xc1, 0x2b, 0xb7, 0xa1, 0x9f, 0xd4, 0x36, 0xe6, 0x7a, 0xbc, 0xe6, 0x6d, 0x30, + 0xfe, 0xc0, 0x47, 0xfb, 0x27, 0x70, 0x82, 0x0e, 0x47, 0x6f, 0x3e, 0x32, 0xbc, 0x48, + 0x3b, 0xf5, 0x31, 0x64, 0xae, 0x49, 0x70, 0xf1, 0x1b, 0x9c, 0xae, 0xe4, 0xed, 0x6c, + 0xb8, 0xd2, 0xd7, 0x0f, 0x69, 0x13, 0xd8, 0xe0, 0x2a, 0xf8, 0xfb, 0xb1, 0xe4, 0x09, + 0xb4, 0xef, 0x08, 0x04, 0x48, 0xe5, 0x3b, 0xe6, 0xe5, 0xe6, 0x27, 0x02, 0xa2, 0x87, + 0xa1, 0xe4, 0xf2, 0xc7, 0x72, 0x41, 0x76, 0xa3, 0xdd, 0x2c, 0xb7, 0x58, 0xc3, 0x1b, + 0xe1, 0x1c, 0xfb, 0xdb, 0xbf, 0x6c, 0x16, 0xfd, 0x19, 0xc3, 0xa5, 0x6e, 0x3b, 0xd9, + 0x42, 0x9c, 0xaf, 0x17, 0xf3, 0x8e, 0xda, 0x94, 0x94, 0xd2, 0xf0, 0x28, 0x50, 0xaa, + 0x58, 0xf5, 0xc8, 0x20, 0xe4, 0x36, 0xf4, 0xd9, 0x6c, 0x0b, 0xc9, 0xde, 0xba, 0xf8, + 0x5b, 0xfd, 0x52, 0x54, 0xd7, 0x9d, 0x66, 0xf9, 0xbb, 0x1f, 0x48, 0xe1, 0x14, 0x0b, + 0x06, 0xec, 0x87, 0x18, 
0x3c, 0xbc, 0x6e, 0x95, 0xf6, 0xcd, 0x5f, 0x7e, 0xbc, 0xad, + 0xb8, 0x97, 0xc7, 0x7b, 0x4a, 0xfb, 0x36, 0x7b, 0xd6, 0xe3, 0x6b, 0xd3, 0x3a, 0x00, + 0xc1, 0xb8, 0x93, 0xd6, 0xff, 0x8f, 0x90, 0x01, 0x44, 0x15, 0x1b, 0xee, 0x34, 0xc7, + 0x94, 0x4b, 0x99, 0xed, 0x6e, 0x79, 0x45, 0xe7, 0xf0, 0xde, 0x87, 0x26, 0x3d, 0x0b, + 0xba, 0x6e, 0x55, 0xac, 0x96, 0xa9, 0x6d, 0x49, 0x95, 0x12, 0x9b, 0xcf, 0xa9, 0xd9, + 0xda, 0x6d, 0xe6, 0xdd, 0x48, 0x26, 0x39, 0x15, 0x3a, 0x81, 0x69, 0xa4, 0xab, 0x46, + 0x4e, 0x39, 0x0b, 0x7f, 0x0a, 0x96, 0xd1, 0x4a, 0x73, 0xf7, 0x69, 0x7f, 0x7e, 0xce, + 0x3c, 0xd7, 0x81, 0xd3, 0x5d, 0xd2, 0x2a, 0xdd, 0xdd, 0x2f, 0x5d, 0x34, 0x52, 0x04, + 0xe4, 0xbb, 0x55, 0x7e, 0x88, 0x45, 0x3f, 0x18, 0x8c, 0xac, 0xbe, 0x92, 0x29, 0x87, + 0xbb, 0xe3, 0xb3, 0xd9, 0x76, 0x82, 0x61, 0x35, 0xc1, 0x03, 0xb6, 0xca, 0x18, 0x2b, + 0x63, 0xe9, 0xe6, 0x7f, 0x83, 0xdc, 0x9f, 0x48, 0x93, 0x33, 0xd5, 0x2a, 0x7f, 0xd7, + 0x68, 0x8a, 0x58, 0xd6, 0x62, 0x0b, 0x67, 0xe9, 0xc7, 0xb0, 0x91, 0x6f, 0xef, 0x90, + 0xf1, 0x5d, 0x8e, 0x4e, 0xb8, 0x0c, 0xf5, 0x99, 0x68, 0x2f, 0x95, 0x4f, 0xf4, 0xe0, + 0xb3, 0x71, 0x83, 0x13, 0x0c, 0xa2, 0xee, 0xd0, 0x91, 0x3f, 0x46, 0xa4, 0xdb, 0x99, + 0x2a, 0x1c, 0x3b, 0xf3, 0x19, 0xdc, 0x86, 0x75, 0x94, 0x01, 0x01, 0x53, 0x7c, 0xff, + 0xc4, 0xa8, 0x2d, 0x59, 0x9b, 0xbe, 0xa0, 0xd4, 0x7e, 0x7a, 0xbf, 0xa9, 0x92, 0xb4, + 0x99, 0x8c, 0xb2, 0x50, 0x09, 0x55, 0xe6, 0x1c, 0x0d, 0x46, 0xb3, 0x21, 0x17, 0xfb, + 0xb9, 0x7f, 0x7a, 0x76, 0x32, 0xd8, 0x72, 0x4b, 0x5d, 0xff, 0x67, 0xf7, 0x5e, 0x2d, + 0x31, 0x74, 0x06, 0xa0, 0xce, 0xc2, 0x89, 0xed, 0x08, 0x3b, 0x7c, 0x58, 0x19, 0x81, + 0x8c, 0x50, 0x47, 0x93, 0xde, 0x53, 0xb6, 0xbf, 0xdb, 0x51, 0x0e, 0x7c, 0xa7, 0x29, + 0xba, 0x74, 0x3d, 0x10, 0xb3, 0xe9, 0x95, 0x7e, 0xfa, 0x84, 0x20, 0x13, 0x39, 0x47, + 0x7c, 0xf3, 0x5f, 0xbb, 0x6a, 0x27, 0x9b, 0xad, 0x9e, 0x8f, 0x42, 0xb9, 0xb3, 0xfd, + 0x6f, 0x3b, 0x86, 0x61, 0x6c, 0x09, 0xdc, 0x8c, 0x31, 0xe2, 0xb0, 0x42, 0xb6, 0x25, + 0x50, 0x98, 0xf8, 0x0a, 0x99, 0xee, 0xc5, 
0x8b, 0xfc, 0xec, 0x71, 0xe5, 0xd4, 0xc6, + 0x97, 0x13, 0x6f, 0x47, 0x69, 0x1b, 0xf5, 0xc8, 0x40, 0xb6, 0x1a, 0xff, 0x38, 0x8d, + 0x82, 0xa0, 0x42, 0x44, 0x9d, 0x31, 0x74, 0x50, 0xb6, 0x6c, 0x27, 0x34, 0x01, 0xc5, + 0x76, 0x57, 0x53, 0x46, 0xa9, 0x76, 0x63, 0x88, 0x28, 0x1b, 0xfb, 0xdb, 0x73, 0x93, + 0x66, 0xbb, 0x53, 0x5d, 0xde, 0x66, 0xc2, 0xc1, 0x28, 0x7b, 0x3b, 0x27, 0x85, 0xae, + 0xd6, 0x4c, 0xc4, 0x0c, 0xbc, 0x7d, 0x33, 0xcb, 0xa4, 0xa9, 0xf3, 0xfc, 0xf5, 0xf8, + 0x31, 0x36, 0xa4, 0x39, 0x2d, 0x21, 0xa7, 0xf9, 0xeb, 0x1c, 0xe4, 0xb6, 0xe1, 0x7e, + 0x6f, 0x4a, 0x85, 0xa5, 0x79, 0x66, 0x9e, 0xfd, 0x0f, 0xb0, 0x98, 0x78, 0xe0, 0x88, + 0xe3, 0x22, 0xe9, 0x06, 0xe8, 0x0d, 0x27, 0xf8, 0xd0, 0xca, 0x7e, 0x79, 0x15, 0xab, + 0x40, 0x96, 0x59, 0xa6, 0xd8, 0x0f, 0xde, 0xd1, 0x0a, 0xff, 0x9f, 0xb7, 0x73, 0x74, + 0x9d, 0x79, 0x28, 0x57, 0xf6, 0x8c, 0x7e, 0x8c, 0xf5, 0x18, 0x26, 0x0a, 0x61, 0x08, + 0x6d, 0xe3, 0x2f, 0xff, 0x82, 0x39, 0xf4, 0x53, 0x61, 0x7a, 0x19, 0xf6, 0xfe, 0xc2, + 0x20, 0x67, 0x60, 0x65, 0xeb, 0xe2, 0x75, 0x7e, 0xfc, 0xac, 0xcb, 0x77, 0xfc, 0x61, + 0xe5, 0x9b, 0x97, 0x63, 0x7e, 0x92, 0x0d, 0xee, 0x5e, 0x7e, 0x7a, 0x12, 0xe9, 0xd6, + 0xd2, 0x28, 0xb2, 0x6b, 0x2f, 0xa8, 0x36, 0xf4, 0x72, 0x83, 0x69, 0xad, 0xcd, 0xfc, + 0xd0, 0x04, 0xdc, 0xf1, 0x9e, 0x27, 0xc0, 0xc0, 0x84, 0x44, 0xd2, 0x9a, 0x12, 0x2b, + 0x23, 0x09, 0xf7, 0x16, 0x3c, 0x99, 0x0e, 0xb9, 0x26, 0x1f, 0xd4, 0x15, 0xc0, 0x45, + 0x4a, 0x56, 0xaa, 0x3e, 0xaf, 0x9c, 0x1f, 0x9b, 0xff, 0xf6, 0x04, 0x77, 0x6a, 0x4d, + 0x25, 0xe7, 0xd3, 0xcd, 0xc5, 0xc5, 0xf1, 0x9c, 0xd2, 0xa8, 0x79, 0x4a, 0x4f, 0x57, + 0x16, 0x7f, 0xbc, 0x7e, 0xaa, 0x06, 0x16, 0x4d, 0x51, 0xc4, 0x53, 0x06, 0x14, 0xbc, + 0xf5, 0x20, 0xb2, 0x63, 0x82, 0x0a, 0xa1, 0x7b, 0x20, 0xb4, 0x8c, 0xbf, 0x59, 0xd8, + 0xe3, 0x09, 0x32, 0x2e, 0xbe, 0x56, 0x6f, 0xbe, 0x46, 0xe0, 0xaa, 0x29, 0x76, 0x6a, + 0xdf, 0xdf, 0x01, 0x7a, 0x71, 0x05, 0x10, 0x3c, 0x7f, 0xca, 0xb7, 0xb0, 0x76, 0x48, + 0xc7, 0xc1, 0x16, 0x04, 0x84, 0xf7, 0x7a, 0x6c, 0x70, 0xa5, 
0x38, 0x1b, 0x82, 0x56, + 0x40, 0xa1, 0xbe, 0x48, 0xe4, 0x15, 0xa1, 0xe6, 0xa2, 0x7d, 0x78, 0x02, 0x2a, 0x8a, + 0x2f, 0xf0, 0x70, 0xab, 0xf1, 0x23, 0x94, 0xe3, 0xae, 0x5a, 0x8c, 0x23, 0xe3, 0x73, + 0x3e, 0xa4, 0x7a, 0x44, 0xcb, 0x2c, 0x96, 0x8b, 0xd1, 0x34, 0x71, 0xfb, 0x2c, 0x33, + 0xff, 0x77, 0x88, 0x8b, 0xbf, 0x43, 0x08, 0x4d, 0xf9, 0x25, 0x86, 0x5c, 0x0b, 0x33, + 0x3b, 0x77, 0x9f, 0x86, 0x89, 0x81, 0xab, 0x3f, 0x72, 0xe5, 0x2a, 0xc4, 0x74, 0x3a, + 0xce, 0xa9, 0x29, 0xed, 0x0f, 0x7c, 0x90, 0x15, 0xb0, 0xe8, 0x1e, 0x21, 0x29, 0xdb, + 0x05, 0x0d, 0x5e, 0x78, 0xe6, 0x82, 0xc8, 0x19, 0x93, 0xea, 0x87, 0x53, 0xc9, 0x91, + 0xb0, 0x2e, 0x61, 0x81, 0x0e, 0x74, 0x61, 0xed, 0x87, 0xb3, 0x80, 0xdb, 0x96, 0xab, + 0xe3, 0xbe, 0xad, 0x0f, 0x4b, 0x22, 0x12, 0xdb, 0x65, 0x8c, 0x11, 0xb8, 0x3f, 0x53, + 0x11, 0x47, 0x85, 0x27, 0x65, 0x98, 0xb0, 0x19, 0x7a, 0x7f, 0x1c, 0x25, 0x62, 0x7d, + 0x79, 0x62, 0x4d, 0xac, 0xee, 0x97, 0x7d, 0x9f, 0x4e, 0x1a, 0x35, 0xed, 0x2e, 0xaa, + 0xd3, 0xcb, 0x68, 0x25, 0x0a, 0xa9, 0xb3, 0xab, 0x1a, 0x83, 0x45, 0x72, 0x8e, 0x7d, + 0x1a, 0x78, 0xbe, 0x1f, 0xe4, 0x62, 0xce, 0x8e, 0xad, 0x52, 0x8f, 0x7c, 0x05, 0x0f, + 0x1f, 0x6e, 0x02, 0x2b, 0xa8, 0xb0, 0xce, 0xdf, 0x6e, 0x29, 0x7a, 0xb5, 0x64, 0xca, + 0x1a, 0x1f, 0xaa, 0xf4, 0xcf, 0xf1, 0xe4, 0x20, 0x32, 0xfb, 0xbb, 0x38, 0x9d, 0x3f, + 0x66, 0xd5, 0x75, 0x55, 0xef, 0x3f, 0x3e, 0x9e, 0x49, 0xc2, 0xac, 0x4e, 0x85, 0xbb, + 0x75, 0x1d, 0x62, 0x66, 0xc9, 0x03, 0x5b, 0x77, 0x9d, 0x76, 0x9d, 0x49, 0x5c, 0x91, + 0x8a, 0x05, 0x5e, 0x77, 0x67, 0xfb, 0xb4, 0xbb, 0xac, 0x3f, 0x96, 0x3d, 0xe9, 0x97, + 0x46, 0xec, 0x4d, 0xfb, 0x64, 0x2d, 0x9c, 0x2b, 0x86, 0x38, 0xe1, 0x6c, 0x16, 0xe7, + 0x27, 0x70, 0x79, 0x3b, 0x7e, 0xa1, 0xd0, 0x70, 0xc4, 0xe1, 0x1c, 0xbc, 0x20, 0xd8, + 0xff, 0x3b, 0xea, 0xd1, 0x0d, 0xb9, 0xc9, 0x4a, 0xe0, 0x48, 0x27, 0x21, 0xe1, 0xf2, + 0x2c, 0xef, 0xe0, 0xdf, 0x7c, 0x57, 0x7a, 0xa3, 0x8e, 0xc0, 0xe6, 0xc7, 0x8c, 0x9b, + 0xa1, 0x64, 0xe9, 0xdd, 0x00, 0x55, 0xdd, 0xe8, 0x3e, 0x8a, 0xd2, 0x40, 0xe6, 
0xdf, + 0xdb, 0xfb, 0xe1, 0x76, 0xe4, 0x55, 0x1f, 0xdd, 0xe9, 0x2d, 0xb1, 0x67, 0x27, 0x42, + 0x04, 0x41, 0x70, 0x06, 0x58, 0xb5, 0x0e, 0xbb, 0x5a, 0x16, 0x13, 0x26, 0x7e, 0xac, + 0x51, 0xc8, 0x0b, 0x19, 0xec, 0xb7, 0x86, 0xab, 0x3b, 0xb9, 0x37, 0xf0, 0xd9, 0x8e, + 0x08, 0xb9, 0xc9, 0xcd, 0x4d, 0xf1, 0x53, 0x4e, 0xfe, 0xe3, 0x8a, 0x8f, 0x87, 0x8c, + 0x9f, 0x3b, 0xdc, 0x7e, 0xfb, 0x2d, 0x53, 0xff, 0x84, 0xfb, 0x83, 0xea, 0xe7, 0xc9, + 0x9e, 0xff, 0xa6, 0x3c, 0x96, 0x49, 0xa1, 0xf1, 0x70, 0xd2, 0x9a, 0xf0, 0x3a, 0x3b, + 0x45, 0x58, 0x9f, 0xae, 0x81, 0xeb, 0x0b, 0x5d, 0x8e, 0x0d, 0x38, 0x02, 0x1d, 0x3b, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xe8, 0x8c, 0x99, 0x04, 0x37, 0x6d, 0x27, 0xf1, 0x3e, 0x44, + 0x41, 0xd5, 0x38, 0x74, 0x42, 0xc5, 0xea, 0x0a, 0xf5, 0xa2, 0x0a, 0x38, 0x32, 0xbc, + 0x3b, 0x9c, 0x59, 0xb8, 0x4b, 0xca, 0x39, 0xb5, 0x2c, 0xd6, 0xb1, 0xfa, 0x29, 0x32, + 0xba, 0x9d, 0x66, 0xc4, 0x12, 0xf5, 0xcd, 0x39, 0x35, 0x1e, 0x13, 0x33, 0xef, 0x85, + 0xd0, 0xee, 0xe5, 0x45, 0xa7, 0xe4, 0x06, 0xf6, 0xeb, 0x3b, 0xf8, 0x93, 0xf3, 0xed, + 0xac, 0x94, 0x64, 0x33, 0x92, 0xa2, 0x8b, 0x0e, 0x49, 0x0c, 0x51, 0xe4, 0xb7, 0x16, + 0x3c, 0x1c, 0xf7, 0x57, 0xd2, 0x24, 0x18, 0xdd, 0x63, 0x38, 0x1b, 0xa2, 0xf2, 0x98, + 0x28, 0x83, 0x6f, 0xe9, 0x78, 0xda, 0xb5, 0x20, 0x1b, 0x2d, 0xb0, 0x8c, 0x3b, 0x38, + 0x9b, 0xa4, 0xb6, 0xac, 0xf7, 0x78, 0xc2, 0xbf, 0x91, 0x02, 0xbe, 0x0c, 0x3e, 0x12, + 0xd7, 0x7a, 0xea, 0x6d, 0xf7, 0x53, 0x8e, 0x8c, 0xf3, 0x62, 0xba, 0xaa, 0xad, 0x1d, + 0xc5, 0x60, 0x42, 0xc6, 0xf2, 0x4c, 0xaf, 0x46, 0xbe, 0xd6, 0x6a, 0xbf, 0x4c, 0x40, + 0x2a, 0x74, 0x92, 0x4e, 0xcf, 0xd0, 0xa0, 0x8d, 0xed, 0xee, 0xa0, 0xef, 0xce, 0xcd, + 0x35, 0x2c, 0x27, 0x5f, 0x13, 0xed, 0x20, 0x76, 0x03, 0x82, 0x2b, 0x1e, 0xf9, 0x97, + 0xb7, 0xed, 0x42, 0xf4, 0xa5, 0x76, 0xb9, 0xe4, 0xc0, 0x07, 0x38, 0x56, 0x3f, 0x82, + 0xa7, 0x62, 0x85, 0x46, 0x7d, 0xa2, 0x95, 0xc2, 0x3b, 0xa1, 0xc5, 0x87, 0xeb, 0xef, + 0xaf, 0x13, 
0xcd, 0x4d, 0x50, 0xf2, 0x3c, 0xa5, 0x74, 0x3c, 0x22, 0x5c, 0x38, 0x6d, + 0x46, 0xd4, 0xac, 0x70, 0x83, 0x79, 0xef, 0x99, 0x96, 0x74, 0x4b, 0x39, 0x12, 0x04, + 0x4b, 0x35, 0x5f, 0x92, 0x7a, 0x67, 0xaf, 0x1e, 0xf2, 0x6a, 0x71, 0x7f, 0xb5, 0xa8, + 0x46, 0xac, 0x9d, 0xa1, 0x5e, 0xa3, 0xf1, 0x8f, 0x8c, 0x36, 0x18, 0x3f, 0x87, 0x9b, + 0xb9, 0xa3, 0xb2, 0x98, 0xff, 0xf9, 0xa4, 0x89, 0x64, 0x6e, 0x77, 0x8e, 0x6d, 0x67, + 0x01, 0xf9, 0xad, 0xac, 0x7a, 0xe8, 0x82, 0x09, 0xa8, 0x43, 0xba, 0x8a, 0x55, 0xd1, + 0x19, 0x2b, 0xbe, 0xef, 0x31, 0xd0, 0x71, 0x45, 0x37, 0xf7, 0xa0, 0x35, 0xb0, 0x79, + 0xc6, 0xad, 0xd4, 0xab, 0x50, 0x61, 0x2d, 0x35, 0x89, 0x7a, 0x93, 0x3d, 0x49, 0xe8, + 0xef, 0x08, 0x6c, 0xdf, 0x96, 0xc8, 0x0d, 0x28, 0x56, 0xcc, 0xc7, 0xe4, 0x5f, 0xc4, + 0xef, 0xd4, 0xbf, 0x1b, 0x98, 0xab, 0x28, 0x89, 0x1b, 0x4a, 0xea, 0x7e, 0xf8, 0x4c, + 0xf7, 0x36, 0x93, 0x5c, 0x46, 0x6b, 0x24, 0x97, 0x4d, 0xf8, 0xf5, 0x35, 0x5b, 0x8b, + 0xa3, 0x20, 0xac, 0x5f, 0xbc, 0x47, 0x5a, 0xa2, 0xcf, 0x5a, 0xd3, 0x77, 0x80, 0xbd, + 0x9f, 0x9d, 0x46, 0x42, 0xcf, 0x6c, 0x2d, 0xc6, 0xb8, 0x2f, 0x91, 0x7d, 0x09, 0xc4, + 0xf7, 0x28, 0x88, 0xf9, 0x15, 0x53, 0x44, 0x7f, 0xc5, 0x70, 0x26, 0x6d, 0xaa, 0xfd, + 0x4b, 0x96, 0xcf, 0xe2, 0xa0, 0xb0, 0x67, 0x92, 0x46, 0x9a, 0x72, 0x7d, 0xbe, 0xd0, + 0x55, 0x91, 0xea, 0x60, 0x57, 0x32, 0x20, 0x5e, 0x26, 0x05, 0x97, 0x8a, 0x3a, 0x90, + 0x2c, 0x3c, 0xd6, 0x5f, 0x94, 0x83, 0x00, 0xf7, 0x37, 0x51, 0x88, 0x15, 0xf4, 0x63, + 0xd3, 0xc6, 0x1a, 0x18, 0x9b, 0xc3, 0xbc, 0x84, 0xb0, 0x22, 0xf6, 0x3d, 0x65, 0x4f, + 0x52, 0x0e, 0x3a, 0x7a, 0xd8, 0x8e, 0x5d, 0x8d, 0xa1, 0x50, 0x14, 0xbe, 0x4b, 0xb9, + 0x67, 0x99, 0x27, 0xdc, 0x7e, 0x0f, 0xba, 0xf0, 0x58, 0xd9, 0x3f, 0x37, 0xc7, 0x2b, + 0x28, 0x6b, 0x02, 0xb7, 0x5f, 0x3c, 0xdb, 0xfb, 0x85, 0x0e, 0xed, 0x90, 0xcb, 0x23, + 0x39, 0x24, 0x32, 0xeb, 0xc3, 0x6b, 0xd2, 0x47, 0x54, 0x46, 0x9c, 0x03, 0x73, 0x1a, + 0x7e, 0xbb, 0xed, 0x28, 0x57, 0x78, 0x49, 0x81, 0xa0, 0x71, 0x67, 0x05, 0xd9, 0xcb, + 0x47, 0xd9, 0x87, 0xf8, 0x3d, 
0x34, 0x21, 0xb1, 0x07, 0xd1, 0x55, 0xdb, 0xb6, 0x61, + 0xed, 0x08, 0xf2, 0xfc, 0x2e, 0x6b, 0x4a, 0x5b, 0x09, 0x77, 0x64, 0x51, 0xd8, 0x73, + 0xb2, 0xfc, 0x63, 0x68, 0x1c, 0xe3, 0x08, 0xc8, 0x08, 0xf5, 0x38, 0x8c, 0xb1, 0xaa, + 0x55, 0x89, 0xa1, 0x87, 0x73, 0xdb, 0x39, 0x07, 0xa0, 0x6b, 0xef, 0x62, 0xd1, 0x29, + 0x60, 0xaa, 0xe7, 0x2a, 0x2b, 0x89, 0x7e, 0x26, 0xb5, 0x75, 0xfd, 0x04, 0x8a, 0x57, + 0x22, 0x2c, 0x7c, 0x68, 0x0d, 0x54, 0xdc, 0x73, 0x28, 0xd0, 0xf0, 0xf2, 0xd7, 0x0b, + 0x43, 0x10, 0x8c, 0xb2, 0x0c, 0x5c, 0x31, 0x16, 0x46, 0x31, 0xb0, 0xe5, 0xb3, 0xbd, + 0x31, 0xb7, 0xdf, 0x8f, 0x4c, 0x1f, 0xe1, 0x43, 0x4f, 0xa7, 0x47, 0x56, 0x70, 0x6f, + 0x83, 0x10, 0x60, 0xa5, 0xb7, 0x03, 0xdf, 0x9c, 0xd4, 0x2e, 0x24, 0x96, 0x0e, 0x50, + 0x8a, 0x04, 0x36, 0x11, 0x8d, 0x4a, 0x92, 0x07, 0xb6, 0xd8, 0x50, 0x59, 0x6d, 0xde, + 0xbe, 0x30, 0xf9, 0x28, 0xee, 0xea, 0xe7, 0x35, 0x98, 0xfb, 0x3d, 0x86, 0x9d, 0x2d, + 0x18, 0x15, 0xa9, 0xe1, 0x4d, 0x12, 0x79, 0xf7, 0xb4, 0xb6, 0x3f, 0x4b, 0xca, 0x0f, + 0x56, 0x68, 0x9b, 0xf8, 0x73, 0x3b, 0x03, 0x06, 0x49, 0x64, 0xa4, 0xb0, 0x20, 0xb0, + 0x60, 0xdc, 0xf4, 0x54, 0x71, 0xfa, 0x1d, 0x41, 0xe5, 0xee, 0x03, 0xf9, 0xbd, 0x90, + 0x65, 0x2b, 0x53, 0x72, 0x30, 0x3a, 0x3a, 0xb9, 0xbb, 0x2e, 0xe3, 0x79, 0xb9, 0xaf, + 0xcd, 0x1f, 0x6a, 0x3c, 0xb9, 0x00, 0x0b, 0xb1, 0x4e, 0xfc, 0x33, 0x3d, 0x3d, 0x64, + 0x75, 0x4a, 0x2b, 0xfc, 0x0c, 0x08, 0xe1, 0x9f, 0x5a, 0xb8, 0x29, 0x59, 0xb5, 0xcb, + 0x96, 0x49, 0x97, 0x9e, 0x3c, 0xcf, 0x75, 0xa8, 0xda, 0xd0, 0x54, 0x60, 0x26, 0x1f, + 0xcd, 0xcb, 0x00, 0x7a, 0xeb, 0xc1, 0x5e, 0x11, 0x67, 0x5c, 0x2d, 0xb4, 0xa6, 0xcb, + 0x79, 0x38, 0xe1, 0xfe, 0xb5, 0xcd, 0xdc, 0x27, 0xd6, 0xd0, 0x75, 0x44, 0x1e, 0x16, + 0xc7, 0x07, 0xf0, 0x97, 0x14, 0x47, 0x4c, 0x96, 0x16, 0x0a, 0xa6, 0x8e, 0xaa, 0x12, + 0x31, 0x79, 0x06, 0x9c, 0xd2, 0x20, 0x44, 0x06, 0x26, 0xcd, 0xfe, 0xed, 0x65, 0xf9, + 0xfa, 0xbd, 0xaa, 0x6d, 0xb1, 0x76, 0x0d, 0xa5, 0xd8, 0x4c, 0xfd, 0x60, 0x03, 0xcf, + 0xfe, 0x52, 0xfd, 0xd0, 0xd2, 0xa9, 0x80, 0x34, 
0x8f, 0x26, 0x9f, 0x5a, 0x07, 0x64, + 0x2e, 0x89, 0xce, 0x26, 0x27, 0xba, 0x0e, 0x87, 0x13, 0x9e, 0xc2, 0xdb, 0x57, 0x2d, + 0x1c, 0xec, 0x82, 0x76, 0xd1, 0xa6, 0x2a, 0x47, 0x2f, 0x61, 0x2a, 0xc9, 0xda, 0x09, + 0x3a, 0x9c, 0x5f, 0xcc, 0x78, 0x11, 0x9c, 0x82, 0xbe, 0xfd, 0x7b, 0x30, 0xff, 0x2c, + 0x00, 0x59, 0x41, 0x0b, 0xfd, 0x5b, 0x32, 0x2c, 0xa5, 0xdb, 0x69, 0x39, 0x39, 0xfa, + 0x89, 0x76, 0x6f, 0xf0, 0x98, 0xad, 0x4b, 0xc6, 0x40, 0x37, 0xa3, 0x4a, 0x73, 0x12, + 0x86, 0x05, 0x72, 0x3a, 0x24, 0x1f, 0x0e, 0xb1, 0x54, 0x0f, 0x5f, 0x5b, 0x55, 0x5b, + 0x75, 0x79, 0x98, 0x0f, 0x97, 0x50, 0x46, 0x9b, 0x58, 0xcb, 0x10, 0x70, 0x0b, 0xdf, + 0xcf, 0xc6, 0x28, 0xac, 0x85, 0xc0, 0x7f, 0xb3, 0xc0, 0x42, 0x00, 0x32, 0x70, 0x9c, + 0x0e, 0xb6, 0xef, 0x2c, 0x14, 0xb4, 0x37, 0x2b, 0x58, 0xa0, 0xde, 0x19, 0x78, 0x9c, + 0x91, 0xfc, 0x99, 0x31, 0xec, 0xbc, 0xac, 0x64, 0x19, 0xca, 0x0e, 0x5d, 0x97, 0xa3, + 0xb4, 0x1c, 0x76, 0xc8, 0xa1, 0x96, 0xc7, 0xa3, 0xad, 0xf5, 0x5b, 0xdb, 0xe6, 0x0e, + 0x85, 0x59, 0x26, 0x4b, 0x6d, 0x8e, 0xf7, 0x5d, 0x26, 0xdc, 0x72, 0x0f, 0xe5, 0xec, + 0x1f, 0x59, 0x66, 0x2d, 0x95, 0xd0, 0x8e, 0x78, 0x9e, 0x3a, 0xd1, 0x82, 0x9e, 0x40, + 0x11, 0x9a, 0xa7, 0x89, 0x7d, 0x89, 0x40, 0x4d, 0xc4, 0x96, 0x60, 0x46, 0x68, 0xf5, + 0x59, 0xca, 0x67, 0x43, 0x7d, 0x2b, 0xfb, 0xb7, 0xf5, 0x1f, 0x36, 0xe0, 0xa5, 0xb7, + 0x22, 0x8f, 0x05, 0xb6, 0xec, 0x57, 0x89, 0xc1, 0x3f, 0xc2, 0x71, 0x95, 0x56, 0x15, + 0x52, 0x63, 0x96, 0x6e, 0x81, 0xf5, 0x21, 0x51, 0xe2, 0xf6, 0xe3, 0x68, 0x69, 0xd8, + 0xa3, 0xc4, 0xc4, 0x96, 0xa5, 0x13, 0x63, 0x2c, 0xaa, 0x8a, 0xbe, 0x1f, 0x27, 0x35, + 0xeb, 0x60, 0xfc, 0x12, 0x85, 0x82, 0x8e, 0xad, 0xdc, 0x54, 0x41, 0xa4, 0x02, 0xa3, + 0xbf, 0x5b, 0xcd, 0x22, 0x7c, 0xd8, 0x04, 0xe3, 0xc8, 0xca, 0x21, 0x24, 0x3c, 0xdf, + 0xcd, 0x53, 0xd8, 0x66, 0x05, 0xf3, 0xf8, 0xaf, 0x1a, 0x9c, 0xc5, 0x69, 0x33, 0x15, + 0x53, 0x28, 0x28, 0x01, 0x43, 0xfa, 0xdb, 0x3a, 0x1f, 0xc3, 0x3d, 0x76, 0x9f, 0x07, + 0xff, 0xc0, 0x1e, 0x35, 0x79, 0xe1, 0x18, 0x1f, 0x19, 0x15, 0xdb, 
0x89, 0xd8, 0x2e, + 0x50, 0xbd, 0x74, 0x24, 0x08, 0x7c, 0x79, 0x7d, 0x9b, 0x7b, 0x3b, 0x7d, 0x2a, 0x53, + 0xb8, 0xff, 0xf9, 0xf2, 0xd9, 0x28, 0xab, 0x99, 0x6d, 0xce, 0x5e, 0xd2, 0x71, 0x58, + 0x98, 0xe4, 0x85, 0x8e, 0xec, 0x60, 0x78, 0xa9, 0x48, 0x8d, 0x2d, 0xa6, 0xd1, 0x73, + 0x05, 0xd0, 0xa3, 0x47, 0x18, 0x62, 0xa2, 0x22, 0x38, 0xb9, 0xbe, 0xc2, 0x3e, 0xf2, + 0xe2, 0x04, 0x1d, 0x50, 0x08, 0x73, 0x3e, 0x9e, 0xa5, 0x66, 0x2c, 0x9f, 0xea, 0x0e, + 0x4a, 0xfd, 0xf3, 0x27, 0x0c, 0x11, 0x32, 0x3b, 0xa4, 0x8b, 0x35, 0x50, 0x85, 0x74, + 0x40, 0x97, 0xf3, 0xf6, 0xc5, 0x2e, 0xe4, 0x04, 0x31, 0x73, 0x9c, 0x5c, 0xa8, 0xdb, + 0x2b, 0xda, 0x13, 0xda, 0x9b, 0x33, 0x0b, 0x62, 0x00, 0x0b, 0x79, 0xfd, 0x35, 0x44, + 0xb1, 0x31, 0x83, 0x15, 0x9d, 0x17, 0x4f, 0xfe, 0xd2, 0x54, 0x85, 0x40, 0xa5, 0x2e, + 0xe4, 0xb6, 0x2d, 0x35, 0xaa, 0x5a, 0x58, 0x63, 0xf2, 0xba, 0xa4, 0x47, 0x5f, 0x3e, + 0xb6, 0xc7, 0x35, 0x9d, 0xc8, 0x39, 0xdb, 0xc8, 0x68, 0x90, 0xd1, 0x99, 0xd8, 0xea, + 0x6c, 0x9d, 0x97, 0xf1, 0x9e, 0x79, 0x2c, 0x7b, 0xcb, 0x66, 0x25, 0xff, 0x32, 0xb7, + 0x31, 0x57, 0x5f, 0x62, 0xd9, 0x44, 0xc8, 0x06, 0xb3, 0xf9, 0x3c, 0x04, 0xb7, 0x3a, + 0x98, 0xb2, 0x73, 0x43, 0xeb, 0x25, 0xa0, 0x6c, 0x87, 0x53, 0x60, 0xde, 0x1a, 0x14, + 0x38, 0x84, 0x0a, 0xd0, 0x66, 0x1d, 0xeb, 0xdc, 0x9b, 0x82, 0x8a, 0xd0, 0xcb, 0xc0, + 0x01, 0x1b, 0x32, 0x35, 0xb2, 0xc7, 0x53, 0x77, 0x78, 0xf4, 0x58, 0x82, 0x1b, 0x83, + 0xaa, 0x4c, 0xb3, 0xe5, 0x4e, 0xd0, 0x61, 0x3e, 0x32, 0xe6, 0x3e, 0xf9, 0x85, 0xf9, + 0x35, 0xbd, 0x7f, 0xf8, 0xc7, 0x70, 0x5c, 0x89, 0xc0, 0xbb, 0xcc, 0xda, 0x9e, 0x66, + 0x5e, 0x3b, 0x06, 0xba, 0x87, 0x9f, 0xdd, 0xf3, 0x5e, 0x0b, 0x2f, 0x60, 0xc2, 0xa7, + 0x0c, 0xb8, 0xeb, 0x9d, 0xe2, 0xf5, 0xd7, 0x38, 0xc0, 0x5e, 0x34, 0xe5, 0x0f, 0x1f, + 0x26, 0x19, 0x25, 0x8b, 0x89, 0xe5, 0x73, 0xda, 0x55, 0x75, 0x46, 0x3d, 0x2e, 0x3b, + 0xce, 0x39, 0xf7, 0x0e, 0xb4, 0x55, 0x26, 0xcd, 0x99, 0xfa, 0xd9, 0x0f, 0x97, 0x92, + 0xd0, 0xcd, 0x59, 0x3b, 0xa8, 0x6a, 0xa1, 0xae, 0xa5, 0x03, 0xdd, 0xca, 0x5e, 0x3e, + 
0x57, 0x37, 0xe6, 0xfc, 0x7b, 0xab, 0x27, 0x85, 0x12, 0x69, 0x20, 0xc4, 0x47, 0xd5, + 0xe5, 0x6a, 0x75, 0xdb, 0xe8, 0x9d, 0x68, 0x8b, 0xc0, 0xda, 0xa7, 0x9a, 0xa6, 0x2d, + 0xe9, 0xea, 0x29, 0x55, 0xf7, 0x1e, 0x1a, 0x61, 0x68, 0x2a, 0x61, 0x78, 0xf8, 0x0b, + 0xca, 0xda, 0x3b, 0x97, 0xae, 0xec, 0x77, 0xd9, 0xc8, 0x56, 0x3b, 0x06, 0x9e, 0xa0, + 0x13, 0x2f, 0x72, 0x3f, 0xbe, 0x75, 0x60, 0x2d, 0xd6, 0x29, 0xac, 0x48, 0x09, 0x93, + 0xd3, 0x71, 0x4f, 0xf0, 0x2c, 0x97, 0x0e, 0xbd, 0x83, 0xe6, 0xd6, 0xcb, 0xbe, 0x39, + 0x08, 0x6b, 0x03, 0x54, 0x20, 0xe0, 0xc2, 0x75, 0x62, 0x86, 0x58, 0xa3, 0xba, 0x92, + 0x30, 0x5c, 0xc0, 0x76, 0x98, 0xf1, 0x2e, 0xe1, 0xe4, 0x17, 0x13, 0x70, 0xac, 0x39, + 0xdf, 0x0e, 0x46, 0x6d, 0xc8, 0xec, 0xc3, 0x9d, 0xa5, 0xee, 0x47, 0xb6, 0x82, 0x9d, + 0xbb, 0xa9, 0x97, 0x0f, 0x03, 0x58, 0xed, 0x68, 0x26, 0x49, 0x60, 0x5c, 0x7b, 0xfe, + 0xe6, 0x93, 0x1a, 0x29, 0x5b, 0x14, 0xa3, 0x40, 0x76, 0x00, 0x07, 0x4e, 0xdc, 0x79, + 0xfa, 0x61, 0xe6, 0x80, 0x6f, 0x11, 0x08, 0xd3, 0x34, 0xb4, 0xa5, 0x90, 0xf7, 0xa0, + 0x26, 0xb0, 0xeb, 0x02, 0x80, 0x4d, 0x39, 0x17, 0x46, 0x6e, 0x99, 0x91, 0x20, 0x64, + 0x1c, 0xe0, 0x7e, 0xbc, 0xdc, 0x99, 0x42, 0x60, 0x82, 0xe0, 0x77, 0x1f, 0x15, 0x9c, + 0x82, 0x6a, 0x9b, 0xe6, 0xce, 0xd7, 0x2d, 0x0e, 0x9c, 0xfa, 0x5b, 0x4b, 0x8a, 0x86, + 0x40, 0xca, 0x34, 0x88, 0xa1, 0xeb, 0x2b, 0x6e, 0x37, 0x4e, 0x8c, 0x2e, 0x00, 0x3c, + 0xdf, 0xa2, 0x32, 0x10, 0x37, 0x48, 0xb5, 0xc9, 0xdc, 0x11, 0xbb, 0x30, 0xf6, 0x46, + 0xb9, 0x73, 0xd7, 0x83, 0xf5, 0x99, 0x14, 0x17, 0x4e, 0x48, 0xbd, 0x6a, 0x84, 0xfa, + 0xd8, 0x9d, 0xbc, 0xa5, 0xc7, 0x6d, 0x0a, 0xb4, 0x14, 0x5a, 0xbd, 0x08, 0xe4, 0xd0, + 0xf2, 0xc7, 0x60, 0x25, 0xfc, 0x85, 0xfc, 0x11, 0x6c, 0xca, 0x8d, 0x30, 0x2c, 0x8a, + 0x3b, 0xeb, 0x26, 0x60, 0x3a, 0x1a, 0xf1, 0xb5, 0x93, 0x91, 0xea, 0xf4, 0x71, 0x75, + 0x9a, 0xdf, 0x19, 0x4c, 0x40, 0xc2, 0x09, 0x29, 0x8c, 0xc0, 0x51, 0xfc, 0x79, 0x03, + 0xfe, 0x40, 0x90, 0x2c, 0x35, 0x6f, 0x28, 0x27, 0x9f, 0x27, 0x94, 0xbb, 0xb9, 0xe0, + 0x0b, 0x1e, 0x22, 
0x0e, 0x55, 0xb6, 0x76, 0xa1, 0x8a, 0x9c, 0xad, 0xb8 + ], + }, + TestVector { + description: "Sapling transaction #5", + version: 4, + lock_time: 2090508281, + expiry_height: 325986117, + txid: [ + 0xfa, 0x29, 0x02, 0x95, 0xd0, 0x96, 0xcb, 0xca, 0xc8, 0x49, 0xb1, 0xc4, 0xaf, 0x0a, + 0xa6, 0xe1, 0x1e, 0xfc, 0x56, 0x87, 0xc6, 0xa3, 0x1c, 0x04, 0xff, 0xfd, 0x10, 0x1d, + 0x9e, 0x7e, 0xc2, 0xc8 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 0, + transparent_outputs: 1, + tx: vec![ + 0x04, 0x00, 0x00, 0x80, 0x85, 0x20, 0x2f, 0x89, 0x00, 0x01, 0xaf, 0x32, 0xdc, 0xb1, + 0x88, 0x9f, 0x01, 0x00, 0x06, 0x6a, 0xac, 0x00, 0x65, 0x51, 0xac, 0xf9, 0x9f, 0x9a, + 0x7c, 0x45, 0x27, 0x6e, 0x13, 0x1e, 0x22, 0xca, 0x6c, 0x41, 0x40, 0x01, 0x00, 0x01, + 0x13, 0xde, 0xae, 0x9e, 0xea, 0xee, 0x94, 0xe0, 0x89, 0xfc, 0x29, 0xc4, 0xa8, 0xe1, + 0x2a, 0xd9, 0x6e, 0x6c, 0xc6, 0x9f, 0xf1, 0x09, 0x35, 0xd9, 0xc7, 0x19, 0x97, 0xd0, + 0x4e, 0x54, 0xa8, 0x69, 0xf4, 0xc1, 0xcf, 0x87, 0x71, 0xc8, 0xed, 0x44, 0xaf, 0xe8, + 0xe4, 0x60, 0x89, 0x00, 0xe1, 0xc5, 0x6a, 0xb7, 0x47, 0x29, 0xf0, 0x38, 0xf4, 0x6c, + 0xba, 0xa1, 0xb6, 0xaf, 0xe5, 0xc8, 0x29, 0x12, 0x37, 0x67, 0xf4, 0x6d, 0x4b, 0xab, + 0x70, 0x28, 0xb0, 0x9b, 0x20, 0x38, 0xfc, 0x1b, 0x72, 0x7f, 0x61, 0x9e, 0x61, 0xc4, + 0xfc, 0x16, 0xbf, 0xfe, 0x65, 0x7e, 0x99, 0x12, 0x6a, 0xc5, 0x18, 0x4f, 0xc8, 0x7f, + 0x5e, 0x53, 0x01, 0x88, 0x64, 0x23, 0xb3, 0x56, 0x87, 0x59, 0x09, 0xec, 0x92, 0xb3, + 0x2d, 0x33, 0x08, 0x42, 0x53, 0xa1, 0xb9, 0x7c, 0x5d, 0x2e, 0xd6, 0x6c, 0x7e, 0x22, + 0xd1, 0x85, 0x58, 0xfe, 0x82, 0xb5, 0xec, 0x88, 0xc6, 0x07, 0x05, 0x82, 0xfa, 0xcf, + 0x75, 0x6d, 0x70, 0x32, 0x38, 0xd9, 0xaf, 0x94, 0x19, 0x96, 0x6b, 0xe4, 0x62, 0xdf, + 0xbd, 0x31, 0x5c, 0x5b, 0xfa, 0xf0, 0x44, 0xaa, 0x69, 0x5a, 0x05, 0xe6, 0x9d, 0x3d, + 0x41, 0xe7, 0x73, 0x78, 0x75, 0x1d, 0x4e, 0x02, 0xc2, 0x66, 0xdf, 0xb5, 0xcb, 0x6a, + 0x7c, 0x40, 0x08, 0xf9, 0x44, 0x88, 0x83, 0x11, 0xe6, 0xde, 0x37, 0xdc, 0x7b, 0xdf, + 0x65, 0xd7, 0x0c, 
0xab, 0x3e, 0x07, 0x8a, 0xb4, 0x4e, 0x23, 0x2b, 0x41, 0x1c, 0xaf, + 0xb2, 0x88, 0x4e, 0x26, 0x45, 0x95, 0xbe, 0xed, 0xf9, 0xd4, 0x9a, 0x79, 0x36, 0xbb, + 0x28, 0x7f, 0xe2, 0x8e, 0x1c, 0x29, 0x63, 0x5e, 0xae, 0xca, 0x74, 0x7d, 0x06, 0x87, + 0xcf, 0x46, 0x59, 0x02, 0xd2, 0x5f, 0x5e, 0x51, 0x58, 0x48, 0x1d, 0xaa, 0xcd, 0xd3, + 0x00, 0xb4, 0x77, 0x40, 0xbc, 0x0c, 0x62, 0x77, 0xb4, 0x47, 0xcc, 0x26, 0x64, 0x04, + 0x42, 0x43, 0xdd, 0x48, 0x11, 0x40, 0x4e, 0xcb, 0xd7, 0xc7, 0xa6, 0x3c, 0x9f, 0xb7, + 0xd9, 0x37, 0xbc, 0xd8, 0x12, 0xc2, 0x34, 0x59, 0x23, 0xb5, 0x90, 0x26, 0x83, 0xbd, + 0x2e, 0xd5, 0x4c, 0x01, 0xae, 0x04, 0x19, 0xa7, 0xf5, 0x4e, 0x8a, 0x3a, 0x59, 0xc6, + 0xa6, 0xda, 0xcf, 0x89, 0xc7, 0x37, 0x0e, 0x79, 0xb5, 0x60, 0x13, 0x6a, 0x2b, 0x00, + 0xdd, 0xb6, 0x07, 0x4d, 0x74, 0xff, 0xc5, 0xc5, 0xdf, 0xd0, 0x6b, 0x6c, 0x51, 0x9a, + 0xbe, 0xc3, 0x59, 0x6a, 0x47, 0x61, 0x13, 0xbe, 0x41, 0x38, 0xee, 0xad, 0x5f, 0xfd, + 0xe8, 0x6b, 0x1e, 0x32, 0x40, 0x1f, 0xa3, 0x84, 0x62, 0x32, 0xd0, 0xb3, 0xc9, 0xbd, + 0x56, 0x88, 0xb6, 0x4a, 0x33, 0x09, 0x38, 0x16, 0x2a, 0x8b, 0x89, 0x29, 0xd7, 0x0c, + 0x1b, 0x67, 0x53, 0x62, 0xf4, 0xc2, 0x03, 0x7e, 0x80, 0xa1, 0xc4, 0x70, 0x92, 0xd3, + 0x09, 0x7a, 0xbb, 0xe8, 0x2e, 0x58, 0x97, 0x23, 0x55, 0x39, 0xac, 0xd5, 0x13, 0xd8, + 0x09, 0xd3, 0x5d, 0x46, 0x2f, 0x5c, 0x18, 0x9b, 0x5d, 0xb4, 0xd6, 0x7b, 0x61, 0xe4, + 0x03, 0x5e, 0xd6, 0xa4, 0x77, 0x46, 0x7f, 0x4a, 0x32, 0x0b, 0x8a, 0x4e, 0xba, 0x0a, + 0xb5, 0x6c, 0x26, 0x3e, 0x4b, 0xfb, 0xe2, 0x6a, 0x41, 0x8e, 0xd1, 0xcd, 0xe6, 0x18, + 0x4b, 0xde, 0x54, 0x88, 0x37, 0xbe, 0x47, 0x15, 0x4e, 0x2f, 0x08, 0x37, 0x00, 0x65, + 0x85, 0x6f, 0xda, 0x06, 0x79, 0x48, 0x3b, 0x68, 0x28, 0x8e, 0xfe, 0xb5, 0xa6, 0x21, + 0x7e, 0xe2, 0x9d, 0x7d, 0x4d, 0xb8, 0x69, 0x26, 0x3b, 0x5f, 0x9b, 0x3a, 0x7a, 0x83, + 0x3b, 0x6e, 0x4c, 0xa7, 0x90, 0xcc, 0xf9, 0xfd, 0xae, 0x80, 0x79, 0xe5, 0x56, 0x09, + 0x27, 0x2c, 0x63, 0xb5, 0x49, 0xb0, 0xc8, 0x5f, 0x11, 0x0c, 0xc9, 0xc9, 0x58, 0x68, + 0x01, 0x14, 0xb3, 0x11, 0x74, 0x80, 
0xaf, 0x57, 0xcb, 0x15, 0x9e, 0xdf, 0xbe, 0x5c, + 0xb9, 0xc6, 0x2b, 0xce, 0x2c, 0xf2, 0xab, 0x29, 0xb6, 0x67, 0x11, 0xac, 0x7a, 0xa5, + 0x3a, 0x74, 0x9f, 0xfa, 0x83, 0x90, 0x7e, 0xcb, 0x69, 0x12, 0xaa, 0x56, 0x96, 0x38, + 0xde, 0xa1, 0x9e, 0x54, 0x41, 0x61, 0x1e, 0xfc, 0xa3, 0x20, 0x99, 0x65, 0x3e, 0x8a, + 0x5c, 0xa1, 0xfb, 0xbd, 0xba, 0xb1, 0xd6, 0x44, 0x71, 0xec, 0x32, 0x0e, 0xc3, 0x8e, + 0xa4, 0x88, 0x40, 0x0c, 0x9b, 0x1f, 0x4e, 0x8c, 0xb5, 0x48, 0x0c, 0x0e, 0x92, 0x42, + 0xb0, 0x86, 0xa8, 0x0e, 0xee, 0xd4, 0x90, 0xae, 0x32, 0x00, 0x0c, 0x80, 0x09, 0xec, + 0xb7, 0x1f, 0xfa, 0x39, 0xf4, 0xf3, 0xb5, 0x74, 0x9c, 0xfd, 0x1b, 0xef, 0xe0, 0xd9, + 0x66, 0x7a, 0xb3, 0x02, 0x20, 0xc2, 0xdc, 0x04, 0x39, 0x36, 0x98, 0xb2, 0xcf, 0xa2, + 0x04, 0x92, 0xf2, 0x50, 0xce, 0x14, 0x32, 0x35, 0x81, 0x58, 0x70, 0x3d, 0xf7, 0xb1, + 0x39, 0xd7, 0x45, 0xce, 0x1f, 0xc3, 0x40, 0x78, 0x77, 0x01, 0xfb, 0x51, 0xdd, 0x5e, + 0x48, 0xb8, 0x95, 0x09, 0x41, 0x7d, 0x88, 0x89, 0x00, 0x80, 0x63, 0xf9, 0xba, 0x01, + 0x5a, 0x07, 0xd8, 0xd3, 0x9b, 0xbd, 0x00, 0x76, 0x2f, 0x59, 0x5a, 0xfa, 0xd8, 0xd8, + 0x59, 0xea, 0xab, 0xf0, 0xd8, 0x2d, 0x46, 0x33, 0xcf, 0x82, 0x98, 0xb0, 0x9b, 0xea, + 0x3f, 0x22, 0x28, 0x55, 0xa9, 0x2a, 0x08, 0x43, 0xf5, 0x2f, 0xa5, 0x8d, 0xb3, 0xa1, + 0x75, 0xc3, 0x0d, 0x2a, 0xbe, 0x64, 0x82, 0x64, 0x90, 0xcb, 0xe6, 0xca, 0x14, 0x88, + 0xfe, 0x3a, 0x01, 0x5a, 0x94, 0x6d, 0xc9, 0xc4, 0x5a, 0xc3, 0x09, 0x25, 0x72, 0x7a, + 0x13, 0xe0, 0x89, 0x78, 0xf7, 0x24, 0x03, 0x47, 0x20, 0x8a, 0x4d, 0x25, 0x38, 0xc2, + 0xd5, 0x61, 0x24, 0x37, 0x8c, 0x22, 0xc0, 0x4e, 0x23, 0xdc, 0x28, 0xb1, 0x50, 0x19, + 0xbe, 0x77, 0x6d, 0x70, 0xbf, 0xc1, 0xd2, 0x64, 0x5b, 0x5e, 0x80, 0xd1, 0xfd, 0x84, + 0x19, 0xdf, 0x72, 0x90, 0x43, 0x80, 0xe2, 0xe1, 0xfc, 0x4d, 0xd1, 0xdf, 0x1b, 0xa3, + 0xdf, 0xe4, 0x80, 0xcc, 0x84, 0x6d, 0x51, 0x51, 0x4a, 0x06, 0x5e, 0xd7, 0x62, 0x78, + 0x7a, 0xfd, 0x6e, 0xb9, 0x0b, 0xdf, 0x8f, 0xbb, 0xad, 0x5e, 0xb3, 0xd2, 0x3f, 0xdc, + 0x8c, 0x54, 0xcc, 0xa1, 0x0f, 0xa1, 0xfe, 0x54, 0x64, 
0x82, 0xf5, 0xe1, 0x42, 0x4b, + 0xfd, 0xa8, 0x7a, 0xa7, 0xfb, 0x78, 0x6e, 0x26, 0x0f, 0x26, 0x14, 0xbe, 0x08, 0x11, + 0xee, 0x16, 0xb8, 0xd2, 0x9d, 0xf9, 0xa0, 0xf3, 0x30, 0xe9, 0x70, 0x9f, 0x63, 0xc9, + 0x50, 0xfb, 0xd9, 0x03, 0xff, 0x7d, 0x5b, 0x0c, 0xa2, 0x9f, 0xd6, 0x3b, 0x0f, 0x97, + 0x51, 0x77, 0x69, 0x02, 0x5c, 0xc3, 0x6a, 0x52, 0xe0, 0xc1, 0x15, 0x93, 0x4a, 0x3c, + 0xa2, 0x58, 0xb8, 0xba, 0xb9, 0x00, 0x16, 0xa4, 0x01, 0xd5, 0xd8, 0xd7, 0xc3, 0xb9, + 0x44, 0x92, 0x5b, 0x35, 0xa9, 0x34, 0x9a, 0x1a, 0xc7, 0xd9, 0x85, 0x21, 0x61, 0x0c, + 0x2f, 0xad, 0x8b, 0x5c, 0x8b, 0x31, 0x9c, 0xd6, 0xe0, 0x5f, 0x9b, 0xbe, 0xd3, 0x53, + 0xf1, 0xd0, 0xc8, 0x65, 0xa9, 0x4a, 0xa4, 0x56, 0xdc, 0xd1, 0x8a, 0x39, 0xe2, 0xf5, + 0x85, 0xd9, 0xbe, 0xa8, 0x4e, 0xb5, 0xf0, 0xaf, 0x8b, 0x45, 0x77, 0x94, 0x98, 0xc9, + 0xae, 0x1f, 0x75, 0x5d, 0x9f, 0x90, 0xa2, 0xc3, 0x27, 0x3e, 0x52, 0xaa, 0xd3, 0xca, + 0x34, 0xb4, 0x43, 0x79, 0x1b, 0x02, 0x99, 0x94, 0xb1, 0xee, 0x4c, 0x40, 0xfc, 0xa0, + 0x05, 0x35, 0x2b, 0x8d, 0x6d, 0x28, 0x69, 0x83, 0x17, 0x7d, 0x65, 0x5b, 0x6f, 0x34, + 0xc4, 0x99, 0x32, 0x2b, 0x65, 0xda, 0x6e, 0xb6, 0xb9, 0xe1, 0xf4, 0xd5, 0x90, 0x21, + 0x25, 0xb6, 0x4c, 0x93, 0xda, 0x74, 0xcc, 0x1a, 0x35, 0x60, 0x18, 0xb0, 0x09, 0x3b, + 0xb5, 0xcc, 0x82, 0x05, 0xb2, 0x69, 0x2f, 0x6d, 0x3e, 0x9c, 0x1c, 0xc8, 0x85, 0x41, + 0xb4, 0xd9, 0x83, 0x84, 0x54, 0x85, 0xb4, 0x50, 0xcd, 0x4b, 0x98, 0x2a, 0xba, 0x8d, + 0x2e, 0x91, 0xf4, 0x1f, 0x22, 0xee, 0xe7, 0xf3, 0x6d, 0x79, 0xcc, 0xa9, 0xc0, 0xe0, + 0x1b, 0x26, 0xc4, 0x65, 0x11, 0x18, 0xea, 0x77, 0x15, 0x14, 0xc7, 0x7e, 0xd6, 0x0c, + 0xd5, 0x24, 0x51, 0x94, 0x2d, 0xc8, 0x5b, 0x3f, 0xba, 0x44, 0x8b, 0x2d, 0x63, 0x10, + 0xf2, 0x77, 0x79, 0x42, 0x83, 0x2e, 0x21, 0xcf, 0x3d, 0x44, 0x87, 0x4f, 0x8d, 0x04, + 0xa8, 0x05, 0x26, 0xc6, 0x9f, 0xd3, 0xb5, 0x10, 0x49, 0xe6, 0x92, 0xba, 0x45, 0xa7, + 0x02, 0xee, 0x12, 0x51, 0x4a, 0xc2, 0xe1, 0x89, 0x4f, 0x9b, 0x83, 0xd7, 0x56, 0xd0, + 0x93, 0x96, 0x97, 0xca, 0x98, 0x2b, 0x68, 0x7c, 0x9e, 0xd7, 0xe0, 0xb2, 
0x32, 0x77, + 0x07, 0x3c, 0x19, 0x30, 0xa4, 0x73, 0xd1, 0x66, 0x8e, 0xf2, 0xe9, 0xae, 0x96, 0x63, + 0xcf, 0xf0, 0x58, 0x16, 0x62, 0x6c, 0xd3, 0xc5, 0xbf, 0x77, 0x16, 0x53, 0xd7, 0x78, + 0x51, 0x81, 0x35, 0x5c, 0x05, 0xae, 0xd2, 0x4a, 0x99, 0xc4, 0xb6, 0x74, 0xd2, 0x4a, + 0x0f, 0x08, 0xf4, 0xb0, 0xcf, 0xbe, 0x90, 0xf2, 0xfd, 0xba, 0xb4, 0x24, 0x82, 0xe9, + 0x8f, 0x13, 0xff, 0xfc, 0xd1, 0xad, 0x33, 0xf4, 0xf4, 0xc0, 0x4d, 0xeb, 0xc8, 0x9f, + 0x40, 0xb5, 0xdb, 0xf6, 0x45, 0x46, 0xc5, 0x20, 0xdc, 0xa5, 0xd0, 0xec, 0xf3, 0xf6, + 0x5d, 0x3a, 0x77, 0xd0, 0x12, 0x9f, 0x60, 0x03, 0x71, 0x10, 0x8a, 0xac, 0x30, 0xa9, + 0xec, 0xa8, 0xbe, 0xe5, 0x52, 0x4f, 0xab, 0x67, 0x1f, 0xc0, 0x86, 0x58, 0x76, 0x2c, + 0x87, 0x38, 0xab, 0xc9, 0xfa, 0x76, 0x93, 0xe3, 0x9d, 0x39, 0xd7, 0x03, 0xd5, 0xcd, + 0x94, 0x2b, 0x5a, 0x55, 0xfe, 0xda, 0xfe, 0xcc, 0xae, 0xf7, 0x02, 0x17, 0x69, 0xe9, + 0x2c, 0xc9, 0xd3, 0xac, 0x7b, 0x4c, 0x23, 0xb3, 0x3f, 0xc2, 0x23, 0x21, 0x85, 0x4b, + 0xa3, 0x3f, 0x49, 0xc7, 0xce, 0x75, 0x88, 0xee, 0x75, 0xba, 0x56, 0xc1, 0x7b, 0x65, + 0x4f, 0xa2, 0x6e, 0x23, 0xef, 0xe1, 0x89, 0xd0, 0xd3, 0x3b, 0xce, 0x29, 0x80, 0xd3, + 0xad, 0xd7, 0x35, 0xc8, 0x56, 0xb7, 0x0f, 0xfa, 0x35, 0x9b, 0xa8, 0x27, 0xe5, 0xb8, + 0xe2, 0xe3, 0xd7, 0x15, 0xcf, 0x23, 0x01, 0x48, 0x6b, 0xeb, 0x8c, 0x09, 0xc4, 0x97, + 0xeb, 0x9a, 0xff, 0x28, 0xce, 0x7b, 0x1c, 0x42, 0x4c, 0xfc, 0x05, 0x16, 0xc8, 0xae, + 0xee, 0xcc, 0x40, 0x9a, 0xd7, 0x3b, 0xef, 0x70, 0xa1, 0x0c, 0xa4, 0x19, 0xab, 0x71, + 0xca, 0xf3, 0xda, 0xa7, 0x5f, 0x20, 0xba, 0xb3, 0x76, 0x54, 0x53, 0x90, 0x6f, 0x43, + 0xbe, 0xd3, 0xe8, 0xe0, 0x8d, 0x42, 0xb5, 0x13, 0x48, 0x97, 0xb4, 0x36, 0xbf, 0xf3, + 0xa1, 0xbc, 0xef, 0xc5, 0x3a, 0xec, 0x30, 0xed, 0x89, 0x11, 0x0f, 0x90, 0x10, 0x97, + 0x8d, 0xf7, 0x0c, 0xe4, 0xac, 0x6f, 0x1d, 0x60, 0x25, 0x50, 0xcf, 0x20, 0xe4, 0x44, + 0x36, 0x06, 0x3e, 0x3a, 0x15, 0xb5, 0x1e, 0xcb, 0xaa, 0x4a, 0x59, 0xdf, 0x2f, 0xe0, + 0x15, 0xcb, 0x36, 0x37, 0xf3, 0x72, 0x83, 0x04, 0xec, 0x3a, 0x72, 0x4f, 0x31, 0x49, + 0x27, 
0x5e, 0x7b, 0x63, 0x4b, 0xd8, 0x82, 0x78, 0xd9, 0x3f, 0xab, 0x6b, 0x94, 0x16, + 0x68, 0xd9, 0x13, 0xdb, 0xcd, 0x89, 0x21, 0x3f, 0x3b, 0xac, 0xfc, 0xfd, 0x20, 0x02, + 0xea, 0x86, 0x6f, 0x3f, 0x17, 0x07, 0x35, 0x12, 0x64, 0xb6, 0x67, 0x88, 0xf4, 0xeb, + 0x7f, 0x68, 0xc5, 0xa5, 0x36, 0xfa, 0x9c, 0x13, 0x0d, 0x8f, 0x6d, 0xa1, 0xbb, 0x03, + 0x1d, 0xf9, 0xe2, 0x20, 0xd8, 0xca, 0x8b, 0xab, 0x46, 0xdd, 0xcf, 0x9c, 0x35, 0xfa, + 0x63, 0x48, 0x09, 0xa7, 0x3d, 0xcd, 0x91, 0xb7, 0x9f, 0x5b, 0xcb, 0x98, 0x7b, 0x20, + 0x54, 0x4b, 0xb5, 0x2a, 0xaf, 0x0d, 0x9e, 0x3a, 0xea, 0x91, 0x18, 0x3b, 0x8c, 0x48, + 0x12, 0x78, 0x6c, 0x8d, 0xc9, 0xb9, 0x30, 0x73, 0xa3, 0x05, 0x26, 0x71, 0xb3, 0x71, + 0x50, 0x52, 0x5d, 0x59, 0x24, 0xaa, 0x6e, 0xe5, 0xe0, 0x36, 0xc1, 0xbe, 0xb9, 0xda, + 0xf6, 0xf9, 0x4d, 0x05, 0x10, 0x0b, 0x2d, 0xdd, 0x36, 0xb1, 0x3c, 0x4d, 0xf9, 0xd4, + 0x56, 0xf6, 0x48, 0x0b, 0xb1, 0xaf, 0xa6, 0x20, 0x26, 0xea, 0x80, 0x97, 0x94, 0xd3, + 0xb7, 0x4d, 0x78, 0x01, 0x7e, 0xe0, 0xfb, 0xca, 0x83, 0xcc, 0x7e, 0x5c, 0xbd, 0x52, + 0x7a, 0xcd, 0xe7, 0x46, 0x53, 0x73, 0x51, 0x2c, 0x07, 0x64, 0x6a, 0x62, 0xc6, 0x0f, + 0x5c, 0x16, 0xc2, 0xef, 0x9f, 0x41, 0x8d, 0x8c, 0x7d, 0x18, 0x8f, 0x7b, 0x13, 0xdd, + 0x45, 0x38, 0xa5, 0x5d, 0x18, 0x6a, 0xd6, 0x36, 0x2a, 0x58, 0x9a, 0x9f, 0x52, 0xb2, + 0x5e, 0x61, 0x6f, 0xb2, 0xa3, 0x57, 0xac, 0xca, 0xde, 0x63, 0x57, 0xfa, 0x5a, 0x42, + 0xa7, 0x98, 0xe4, 0x17, 0x13, 0x11, 0xad, 0xe9, 0xcc, 0xfd, 0x15, 0xf2, 0x7c, 0x8c, + 0x19, 0x72, 0x17, 0x9d, 0x26, 0x1f, 0xb9, 0xb0, 0x9b, 0xc7, 0xa0, 0x36, 0xc1, 0x05, + 0x55, 0x9b, 0x04, 0x38, 0x9d, 0xfd, 0x8a, 0x7b, 0xe2, 0xa3, 0xae, 0x2b, 0xba, 0x2a, + 0xfb, 0xd1, 0xe9, 0xbf, 0x90, 0x05, 0xc8, 0xb3, 0x66, 0x35, 0x4f, 0x90, 0x9b, 0xe7, + 0x1e, 0x52, 0xc0, 0x90, 0x80, 0xfb, 0xa7, 0x45, 0x23, 0x77, 0xe8, 0xf1, 0x2c, 0x18, + 0x4f, 0xe7, 0xed, 0x46, 0x5b, 0x32, 0xc9, 0xf9, 0xb2, 0x81, 0x9e, 0xa1, 0xd1, 0x19, + 0xfc, 0x26, 0x7c, 0x8a, 0x75, 0x33, 0x81, 0xeb, 0x51, 0xac, 0xf8, 0x54, 0xc1, 0x9e, + 0x8d, 0x58, 0xff, 0x42, 
0x74, 0xeb, 0xa8, 0xc6, 0x3f, 0x0f, 0xa1, 0x70, 0xa6, 0x3c, + 0xbf, 0xce, 0x2c, 0xf8, 0x7b, 0xdc, 0xdf, 0x32, 0xb7, 0xe1, 0x98, 0x04, 0x54, 0x1c, + 0x2c, 0x58, 0x97, 0x24, 0xef, 0xc6, 0x9b, 0xc4, 0x65, 0xd0, 0x90, 0x8e, 0x09, 0xb8, + 0x4d, 0x1f, 0x50, 0x41, 0x2b, 0xb0, 0x7f, 0x47, 0xfb, 0x9f, 0x0d, 0x47, 0x29, 0x28, + 0x16, 0x14, 0xca, 0xca, 0xb6, 0x14, 0xef, 0x65, 0xce, 0xba, 0x13, 0x96, 0xb5, 0x24, + 0x9d, 0x2c, 0x61, 0x70, 0x4f, 0xb6, 0xf3, 0x48, 0x44, 0x71, 0x83, 0xf9, 0x88, 0x2a, + 0x98, 0xae, 0x9c, 0x71, 0xa7, 0x66, 0x33, 0xe0, 0x5b, 0x33, 0x3a, 0x1b, 0xce, 0xee, + 0xc9, 0xbd, 0x44, 0xb8, 0x87, 0x6f, 0xab, 0x6c, 0xd7, 0x2a, 0x5e, 0x33, 0x5c, 0x97, + 0x7a, 0x8c, 0x56, 0xca, 0x16, 0x7b, 0x1a, 0x19, 0x8e, 0x93, 0x1b, 0xf2, 0x85, 0xf6, + 0x86, 0x81, 0xfc, 0x5a, 0xca, 0x84, 0x66, 0x76, 0xe8, 0x9b, 0x17, 0xee, 0x76, 0x9a, + 0x08, 0xf9, 0xb4, 0x60, 0xfe, 0x4e, 0x48, 0x81, 0xf9, 0xb2, 0x0f, 0xed, 0xb3, 0x9d, + 0x1f, 0xc6, 0x66, 0x5d, 0x10, 0x6b, 0xaa, 0x5a, 0x93, 0x14, 0x0d, 0x1d, 0xda, 0xca, + 0xe4, 0xa7, 0x59, 0x0f, 0x5a, 0xb0, 0x78, 0x52, 0xc1, 0x81, 0x1f, 0x1a, 0x03, 0x5c, + 0x3f, 0x1a, 0x60, 0xb1, 0x54, 0x22, 0x6c, 0x9d, 0xb0, 0x8f, 0xfd, 0xd0, 0xb6, 0xde, + 0xee, 0x72, 0x2a, 0x90, 0x07, 0x6c, 0xa7, 0xc6, 0xd6, 0x04, 0xfe, 0x83, 0x32, 0x86, + 0x8e, 0x1d, 0x59, 0x32, 0x2f, 0x26, 0x2b, 0xbf, 0xbe, 0x95, 0xcc, 0x5b, 0x9b, 0x1e, + 0x20, 0x31, 0x0b, 0x76, 0x35, 0x0b, 0x4d, 0x60, 0x4c, 0xd1, 0xa4, 0x58, 0x66, 0x1d, + 0xc4, 0x74, 0xfe, 0x4c, 0x58, 0x79, 0x04, 0xc0, 0x53, 0x47, 0x5e, 0x17, 0x61, 0xb8, + 0x0a, 0x60, 0xcc, 0x48, 0xed, 0xd9, 0x54, 0x34, 0xdf, 0x02, 0x3b, 0x94, 0xa5, 0x8a, + 0x99, 0xd6, 0x25, 0x66, 0xe0, 0x0f, 0x67, 0x77, 0x90, 0xdc, 0xa0, 0x76, 0xa4, 0xf1, + 0x67, 0x47, 0x0c, 0x43, 0xa8, 0x1e, 0x6c, 0x32, 0xf0, 0xd0, 0x0d, 0x23, 0x65, 0x6b, + 0xa7, 0x48, 0x28, 0xb8, 0xe4, 0xd4, 0x75, 0x38, 0xe5, 0x0c, 0x0e, 0xce, 0xe2, 0xcd, + 0xfe, 0x0d, 0x59, 0x43, 0xe2, 0x3e, 0x3f, 0x17, 0x33, 0x82, 0x9d, 0x3e, 0x1b, 0x80, + 0x53, 0x93, 0x30, 0xe0, 0x6c, 0x6a, 0xe3, 
0xd0, 0xec, 0xe7, 0x38, 0xc0, 0xdd, 0x74, + 0x2a, 0xa5, 0x86, 0x0f, 0x43, 0xb5, 0x30, 0xf0, 0x3d, 0xc5, 0x5d, 0xeb, 0xf7, 0x20, + 0x12, 0x3f, 0x8f, 0xba, 0xf2, 0xe5, 0x68, 0x59, 0xa5, 0x34, 0x3d, 0x46, 0x12, 0xee, + 0x21, 0x46, 0x4d, 0xb2, 0x50, 0x1d, 0x4f, 0x35, 0x31, 0x47, 0xf3, 0xe1, 0xa5, 0xab, + 0xb8, 0x93, 0x85, 0x08, 0x16, 0xc8, 0x0a, 0xf2, 0x9d, 0x88, 0x92, 0x48, 0xc9, 0x2a, + 0x72, 0x9a, 0x0e, 0x2b, 0xe2, 0xb6, 0x6c, 0xc1, 0x3a, 0xc5, 0xd9, 0x96, 0xb2, 0x50, + 0x14, 0x66, 0x6d, 0xdc, 0x63, 0x8a, 0x1f, 0xd2, 0xa0, 0xaf, 0xee, 0x93, 0xd9, 0x8e, + 0x31, 0xdc, 0x1e, 0xa8, 0x58, 0xd7, 0x2b, 0x84, 0xbb, 0xd3, 0x2f, 0xc0, 0xc6, 0x16, + 0xe7, 0xd4, 0xab, 0xda, 0xf3, 0xc1, 0x8f, 0xf9, 0x60, 0x13, 0x24, 0x5d, 0x83, 0xb3, + 0xbd, 0xf9, 0x21, 0xf4, 0x03, 0xf1, 0xae, 0xcf, 0xdd, 0xd8, 0x85, 0xfd, 0xcf, 0xc2, + 0x23, 0x34, 0x33, 0x65, 0x92, 0xb0, 0xe7, 0x18, 0x9a, 0xa0, 0x7f, 0xb1, 0xdb, 0x8a, + 0x1f, 0xe6, 0x4f, 0x26, 0xe5, 0xf2, 0xbe, 0x5c, 0xf8, 0x00, 0x00, 0x6a, 0xda, 0xe8, + 0x18, 0x56, 0x04, 0x14, 0x19, 0x7a, 0x9f, 0x82, 0x07, 0x05, 0xea, 0xa1, 0x28, 0x3a, + 0xc7, 0x93, 0x16, 0x83, 0x08, 0x3f, 0x22, 0xfc, 0x4d, 0xc7, 0xff, 0x68, 0x1a, 0xb8, + 0x46, 0x18, 0x6f, 0x22, 0xd5, 0x73, 0x08, 0xac, 0x4f, 0xb3, 0x02, 0x23, 0x3d, 0xa1, + 0xaa, 0x62, 0xd2, 0xf2, 0x32, 0x3c, 0x90, 0x94, 0x43, 0xae, 0x8d, 0xa1, 0x19, 0x03, + 0x6e, 0xe4, 0x2e, 0x0e, 0xa7, 0x88, 0xeb, 0xe2, 0x5e, 0x69, 0x8a, 0x6d, 0xb0, 0x1a, + 0x87, 0xa8, 0xda, 0xdb, 0x82, 0x7a, 0x1b, 0xe8, 0xb5, 0x79, 0x9b, 0x33, 0xc9, 0x9a, + 0x82, 0x2b, 0x73, 0xf7, 0xe6, 0x62, 0xed, 0x6f, 0x86, 0x03, 0x45, 0xa2, 0x62, 0x83, + 0xc1, 0xb4, 0x08, 0x0e, 0xcd, 0xf5, 0x79, 0xd7, 0x0e, 0x7b, 0x0c, 0x0a, 0xb7, 0x1e, + 0x11, 0x6e, 0xe2, 0xd9, 0xda, 0x27, 0x46, 0x1e, 0x28, 0x12, 0x2a, 0x09, 0xca, 0x04, + 0xde, 0x38, 0x76, 0x50, 0x2f, 0xd2, 0x4d, 0xff, 0x92, 0x09, 0x55, 0x2f, 0x91, 0x13, + 0x87, 0x70, 0x78, 0xa0, 0x94, 0xe0, 0xe5, 0xf8, 0xce, 0xbb, 0x41, 0x54, 0xe0, 0x3a, + 0x6b, 0x56, 0xf6, 0x04, 0xdf, 0x98, 0x4b, 0xd2, 0x9e, 0xfd, 
0x4f, 0x88, 0xc3, 0xf6, + 0x29, 0xea, 0x2b, 0xba, 0x91, 0x27, 0xea, 0x5a, 0x6c, 0xc5, 0xa3, 0x9d, 0x74, 0x1e, + 0xdd, 0x71, 0x1a, 0x24, 0x44, 0x7f, 0xe0, 0x6c, 0xf8, 0x45, 0x5a, 0x44, 0x06, 0x5e, + 0x24, 0x52, 0x76, 0x3b, 0x0d, 0x93, 0xf8, 0x6a, 0x31, 0x47, 0xbd, 0x08, 0x75, 0x7a, + 0x4f, 0x7a, 0xa7, 0x79, 0x3c, 0x97, 0x82, 0x1c, 0x2b, 0x57, 0x22, 0xc9, 0xdb, 0xad, + 0x20, 0xf6, 0xa1, 0xe7, 0xad, 0xf6, 0x8b, 0xf2, 0x22, 0x7b, 0xe5, 0x12, 0x04, 0xe9, + 0xde, 0xca, 0x8d, 0x9e, 0xb6, 0x26, 0x6f, 0x65, 0x9b, 0x33, 0x55, 0xc8, 0x97, 0x7e, + 0xae, 0x7e, 0x9e, 0xd5, 0x39, 0xd1, 0x79, 0x39, 0xf0, 0xc6, 0x16, 0x6b, 0x01, 0x13, + 0x2d, 0xb0, 0x01, 0x66, 0x25, 0x0e, 0xa9, 0x64, 0xe3, 0x9d, 0x9d, 0x55, 0xab, 0x43, + 0x9a, 0x29, 0xbb, 0x0b, 0xcf, 0xd3, 0xa9, 0x99, 0xb3, 0x1f, 0xe7, 0xa9, 0x51, 0x00, + 0x2e, 0xe5, 0xdc, 0x01, 0x27, 0x03, 0x24, 0xb1, 0x10, 0x10, 0x37, 0x89, 0x29, 0x42, + 0x90, 0x7c, 0x6e, 0x19, 0x50, 0x9a, 0x6c, 0x5f, 0x66, 0x59, 0xba, 0xf7, 0xf4, 0x36, + 0x3c, 0x49, 0x15, 0xe6, 0x1b, 0xda, 0x34, 0x06, 0x9b, 0xd9, 0x86, 0xb6, 0x37, 0x7f, + 0xf6, 0x04, 0xed, 0xe5, 0xa7, 0x42, 0x5d, 0xb2, 0x88, 0x86, 0xb1, 0xa2, 0x61, 0x36, + 0x6d, 0xa8, 0xa1, 0x39, 0x86, 0x65, 0xbe, 0xed, 0x3b, 0xe9, 0xbc, 0x2e, 0x05, 0x5e, + 0x71, 0x1b, 0x7d, 0x36, 0xdd, 0xbd, 0xd3, 0x65, 0xcc, 0xdc, 0xd7, 0xfc, 0xba, 0xfe, + 0x71, 0x29, 0x66, 0x95, 0x08, 0xda, 0xc0, 0xad, 0x2d, 0x55, 0xee, 0x7f, 0xc6, 0x0b, + 0xce, 0x22, 0x88, 0x50, 0xba, 0x7b, 0x94, 0x3a, 0x8d, 0x50, 0xff, 0xcb, 0x2a, 0x67, + 0x06, 0x51, 0xd3, 0x15, 0xd8, 0x71, 0x9c, 0x7b, 0x57, 0xf6, 0x37, 0xa3, 0x7e, 0xdd, + 0x32, 0x6a, 0xbc, 0x76, 0xf0, 0xa7, 0x69, 0x0c, 0x23, 0x68, 0x80, 0x16, 0x01, 0x07, + 0xc2, 0xb4, 0xc8, 0x5e, 0xcf, 0x2a, 0xd9, 0xf5, 0xdd, 0x26, 0x45, 0x62, 0x6e, 0x40, + 0x90, 0xf1, 0x00, 0x47, 0xcc, 0x13, 0x15, 0x40, 0xca, 0x58, 0x03, 0x04, 0x5a, 0x6a, + 0xee, 0x91, 0xea, 0x0b, 0x3f, 0x9b, 0x77, 0xc4, 0x43, 0x40, 0x69, 0xc5, 0x32, 0x0c, + 0xf5, 0xb7, 0x01, 0x82, 0xd9, 0xfb, 0xbf, 0x30, 0x98, 0x30, 0x60, 0x11, 0x75, 
0x9d, + 0x0d, 0x64, 0xa8, 0x84, 0x14, 0x1e, 0xa0, 0x21, 0xcd, 0xd9, 0x5e, 0xfa, 0x32, 0x63, + 0xa5, 0x05, 0xb8, 0x52, 0x29, 0xd1, 0x54, 0xec, 0xaa, 0x23, 0x5e, 0x8f, 0xa1, 0x07, + 0x95, 0xc9, 0xda, 0x27, 0x41, 0xcd, 0x98, 0x71, 0x90, 0x16, 0xa9, 0x01, 0x17, 0xa7, + 0x6f, 0x84, 0xf0, 0x0b, 0x5c, 0x3d, 0x4b, 0xce, 0xd7, 0x9a, 0x73, 0xbf, 0xb3, 0xa1, + 0xc7, 0x8a, 0xd1, 0xad, 0xea, 0x50, 0x78, 0xf2, 0xf1, 0xb0, 0x0f, 0x81, 0x5b, 0xc7, + 0xa3, 0x0e, 0xf8, 0x58, 0x40, 0x07, 0x77, 0x32, 0xdc, 0xb1, 0xa6, 0x1e, 0x9f, 0x31, + 0x76, 0x3d, 0x52, 0x2d, 0x04, 0xc4, 0x90, 0x37, 0x1a, 0xea, 0xbc, 0xa9, 0x49, 0x9b, + 0x05, 0x13, 0x17, 0x8d, 0x54, 0x31, 0x14, 0x8a, 0x72, 0x80, 0x5d, 0x09, 0x32, 0x9e, + 0xa5, 0xd9, 0x41, 0xf3, 0x32, 0xd5, 0xc6, 0xd3, 0x2b, 0xa2, 0xef, 0x9f, 0x87, 0x23, + 0xb6, 0xae, 0xa4, 0x5f, 0x94, 0xb6, 0xb2, 0x1a, 0xab, 0x7d, 0x16, 0x06, 0x46, 0xc3, + 0x76, 0x0e, 0x7a, 0xcd, 0xa1, 0xff, 0xdd, 0x8f, 0x54, 0xf4, 0xa2, 0xc3, 0x1a, 0xfe, + 0x9b, 0x48, 0x19, 0x23, 0x3b, 0xfe, 0x8e, 0xf8, 0x91, 0x64, 0xfa, 0x0e, 0xcb, 0xf1, + 0xcc, 0xe8, 0x66, 0x62, 0xe7, 0x47, 0x34, 0x44, 0x65, 0x9f, 0xc8, 0xcb, 0xc9, 0xf3, + 0x61, 0x7e, 0xe8, 0x19, 0x5f, 0xe1, 0xbc, 0xf5, 0xbb, 0x1b, 0x63, 0x4c, 0xd4, 0x3f, + 0x62, 0xea, 0x93, 0xa4, 0x6d, 0x88, 0xf2, 0xfc, 0xbc, 0x3e, 0x28, 0x40, 0x84, 0xe7, + 0x04, 0xfb, 0x1d, 0x7d, 0x0d, 0x9a, 0xcb, 0x91, 0x96, 0x1e, 0x2e, 0xeb, 0xe2, 0xdc, + 0x9e, 0xbe, 0x36, 0x5b, 0x25, 0xb5, 0x66, 0x75, 0x97, 0x3d, 0x0c, 0x38, 0xf4, 0x76, + 0x30, 0x57, 0x47, 0x23, 0xcd, 0x3e, 0xc6, 0x6c, 0x8f, 0x3b, 0x12, 0x82, 0x21, 0xa7, + 0x90, 0xd9, 0x2c, 0x89, 0x5b, 0x94, 0x27, 0x0f, 0xe9, 0x40, 0x51, 0xa1, 0x70, 0xe9, + 0x5b, 0x8b, 0xe7, 0x16, 0x34, 0x86, 0xec, 0x8c, 0x0b, 0xee, 0xbe, 0xf6, 0x5e, 0x16, + 0x26, 0xb0, 0x46, 0xd7, 0xe7, 0xf8, 0x26, 0x37, 0x2b, 0x6a, 0xa1, 0x0b, 0xae, 0xfb, + 0x84, 0x8f, 0xa1, 0xdf, 0x6b, 0xb1, 0xdc, 0x43, 0x95, 0x40, 0xf6, 0x3c, 0x9c, 0x7a, + 0x9d, 0x5f, 0x88, 0x13, 0x40, 0x29, 0x62, 0x65, 0x1e, 0xe9, 0x84, 0x39, 0x02, 0xb6, + 0xc3, 0x98, 
0x2d, 0xce, 0x50, 0xa6, 0x17, 0x8a, 0x55, 0xa1, 0xad, 0xc0, 0x1c, 0xe7, + 0xdc, 0x6c, 0x83, 0x38, 0xe1, 0xa9, 0xce, 0xef, 0xc1, 0x78, 0xdc, 0x43, 0x14, 0xf6, + 0x74, 0x9a, 0x81, 0xa7, 0x31, 0xee, 0x3c, 0x7f, 0xc0, 0xc3, 0x5d, 0x1c, 0xe3, 0x63, + 0xce, 0xf1, 0x13, 0x28, 0xf3, 0x87, 0xc4, 0x01, 0xfe, 0xf2, 0x7a, 0x67, 0xa6, 0x29, + 0x2f, 0x6f, 0x72, 0xb0, 0xa1, 0xd6, 0xc3, 0x89, 0x16, 0x2d, 0x16, 0x2e, 0xf0, 0x50, + 0xae, 0x5f, 0x3d, 0xdb, 0xb5, 0x5c, 0xaa, 0xbc, 0xa9, 0xa1, 0xbe, 0x89, 0xb4, 0x63, + 0x49, 0x4d, 0x74, 0x39, 0xfb, 0x56, 0x47, 0xa9, 0x18, 0x12, 0x8b, 0x96, 0x25, 0xd3, + 0x3e, 0xac, 0xa6, 0x19, 0xd5, 0x2f, 0x03, 0x5f, 0xe6, 0x02, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0xe8, + 0xd8, 0xb9, 0x0f, 0xe3, 0x67, 0x0d, 0x8c, 0x5a, 0x2e, 0x3e, 0x05, 0x49, 0x69, 0xa3, + 0xd9, 0x7e, 0x61, 0xb5, 0xe6, 0x30, 0x67, 0x4f, 0xc7, 0x08, 0x57, 0xf1, 0xbb, 0xf1, + 0x0f, 0xdc, 0x40, 0x49, 0xef, 0xf5, 0x60, 0xeb, 0xa5, 0xf2, 0x2a, 0xcc, 0x8d, 0x77, + 0xdb, 0xee, 0x0b, 0x20, 0x55, 0x7f, 0xa4, 0xd0, 0x33, 0x31, 0x72, 0xcb, 0xb5, 0xcb, + 0xcc, 0x2b, 0x13, 0x5f, 0x2c, 0xcd, 0xe0, 0x14, 0xe6, 0x3e, 0xbe, 0x4e, 0xdf, 0x92, + 0x5e, 0x61, 0xba, 0x2a, 0x32, 0x0c, 0xd3, 0x99, 0x91, 0x5a, 0xdd, 0xfc, 0xeb, 0x1a, + 0xd0, 0x69, 0xa9, 0xfd, 0x5b, 0x62, 0x10, 0xa4, 0xb6, 0xe5, 0x04, 0x52, 0xb1, 0xf9, + 0x06, 0xdd, 0x16, 0xf0, 0x16, 0x68, 0xf0, 0xaf, 0x56, 0x6a, 0x28, 0x7c, 0xce, 0xfc, + 0xd8, 0x94, 0x73, 0x41, 0x85, 0x9a, 0xe7, 0xdc, 0x3a, 0x06, 0xf6, 0xbf, 0x15, 0x74, + 0xfe, 0xb9, 0x31, 0xf9, 0x27, 0xe2, 0xd5, 0x05, 0xf6, 0x08, 0x59, 0x9e, 0x23, 0xb0, + 0x5a, 0xf7, 0xc3, 0x23, 0x69, 0x83, 0x97, 0xa8, 0x01, 0xdc, 0x7f, 0x78, 0x82, 0x5c, + 0xc7, 0xeb, 0x9f, 0xcc, 0xe6, 0xc6, 0xc4, 0xf8, 0xf6, 0x88, 0x39, 0xd3, 0x0a, 0xc5, + 0x67, 0x14, 0x8e, 0x70, 0x84, 0xdb, 0x2b, 0x37, 0x58, 0x30, 0xa0, 0x7b, 0x30, 0x5f, + 0xed, 0xd6, 0x07, 0xa3, 0x47, 0xfa, 0x65, 0xde, 0xf0, 0x1d, 0x4e, 0x1f, 0xd6, 0xc1, + 0x6b, 0x4b, 0x47, 0xf5, 0xb0, 
0x1b, 0x43, 0x65, 0xb7, 0x72, 0x26, 0xe6, 0x0f, 0xdd, + 0x40, 0xf2, 0x2a, 0x39, 0x5a, 0xa2, 0x35, 0xf0, 0xdf, 0xda, 0x8f, 0xb4, 0xd3, 0xde, + 0x65, 0xb0, 0xcf, 0x4f, 0x4c, 0x22, 0x0b, 0x3b, 0x4a, 0x9e, 0x32, 0xbc, 0x0d, 0xb6, + 0x4f, 0x16, 0x2c, 0x07, 0xdf, 0x42, 0xa1, 0x01, 0x99, 0x03, 0xa6, 0x7c, 0xda, 0x69, + 0x3d, 0xde, 0xb5, 0xca, 0x39, 0xa0, 0xfe, 0x50, 0x08, 0x50, 0xec, 0x7c, 0x06, 0xbe, + 0xe7, 0x18, 0x66, 0xb3, 0x55, 0xcc, 0xbc, 0x07, 0x8c, 0xd4, 0xdc, 0x03, 0x6f, 0xda, + 0xa8, 0x1c, 0xb2, 0xde, 0x99, 0xcc, 0x88, 0xf6, 0x0a, 0x49, 0x46, 0x42, 0x87, 0xf5, + 0x9f, 0xc7, 0x14, 0x8b, 0x1a, 0xfb, 0x4a, 0x2f, 0x9b, 0xb8, 0x97, 0x14, 0xe1, 0xeb, + 0x8c, 0x03, 0x61, 0xe5, 0x99, 0x2a, 0x5b, 0x79, 0xcd, 0xbb, 0x91, 0xd9, 0xbf, 0x29, + 0xeb, 0x59, 0x8c, 0xbb, 0x4b, 0xda, 0x92, 0x3d, 0x26, 0x7f, 0xea, 0xcb, 0x91, 0xce, + 0x72, 0xd6, 0x1a, 0xb1, 0xea, 0x00, 0xf5, 0x6a, 0xa6, 0x76, 0x6e, 0xab, 0xc4, 0x7d, + 0xca, 0xa6, 0x9a, 0x02, 0x4b, 0xbf, 0xf2, 0xf2, 0x96, 0x91, 0x7f, 0x17, 0xa3, 0xf8, + 0xc9, 0x3e, 0x1b, 0xf2, 0x9c, 0x3c, 0xfc, 0x99, 0x1a, 0x2b, 0xe8, 0xcf, 0xa7, 0x0e, + 0x5d, 0xe3, 0xf2, 0xdd, 0x52, 0xa7, 0x55, 0x01, 0x38, 0x68, 0x7a, 0xec, 0x28, 0x92, + 0x6f, 0xa1, 0x68, 0xb1, 0x81, 0xdb, 0x72, 0x82, 0xbd, 0x60, 0xda, 0xd3, 0x31, 0x0d, + 0xfe, 0x54, 0x2c, 0xeb, 0xe6, 0x94, 0x74, 0x00, 0x25, 0xc7, 0xec, 0x2a, 0x20, 0x43, + 0xfe, 0xbb, 0x77, 0x9f, 0x7f, 0x37, 0x89, 0xa5, 0xe2, 0x42, 0xdb, 0x48, 0x03, 0xee, + 0x36, 0x72, 0x52, 0xc4, 0x63, 0xc9, 0xa8, 0x8b, 0x41, 0x7b, 0x70, 0x86, 0x6d, 0x9a, + 0xfb, 0x7a, 0x08, 0x27, 0x68, 0x01, 0xf9, 0x22, 0x7c, 0x63, 0x81, 0xf1, 0x5c, 0xc0, + 0x94, 0xac, 0x7b, 0xd1, 0x54, 0xa4, 0xce, 0xf9, 0x0b, 0x48, 0x47, 0xdc, 0x16, 0x8a, + 0x01, 0xf1, 0xe3, 0x1e, 0xec, 0x74, 0xa7, 0xef, 0xce, 0xba, 0x11, 0xf5, 0x07, 0x69, + 0xf5, 0xd8, 0xf5, 0x4d, 0x36, 0x20, 0xc2, 0x3e, 0xc8, 0x99, 0x3f, 0x7a, 0xef, 0x27, + 0xc1, 0xd3, 0x51, 0x96, 0xb1, 0x02, 0xb3, 0xcf, 0x3f, 0xed, 0x8b, 0xf8, 0x5d, 0x8a, + 0x45, 0xf6, 0x96, 0x83, 0xec, 0xdd, 0x1a, 0x23, 
0x44, 0xef, 0xb8, 0x48, 0x07, 0xd9, + 0x0f, 0x18, 0x35, 0xb4, 0xf2, 0xf2, 0x4d, 0x8f, 0xf8, 0x12, 0x30, 0x47, 0xeb, 0x9f, + 0x7d, 0x30, 0x62, 0x3e, 0x14, 0x29, 0x0d, 0x56, 0x17, 0x96, 0x3b, 0x42, 0x21, 0x40, + 0x4a, 0xe7, 0x61, 0xc8, 0x6b, 0xec, 0x7a, 0x07, 0xbf, 0x81, 0xa0, 0xb9, 0xa7, 0xf7, + 0xd0, 0x87, 0xac, 0x26, 0xce, 0x3d, 0xfa, 0x9c, 0x93, 0xfe, 0xea, 0xeb, 0xd1, 0x0d, + 0xc1, 0x88, 0xc6, 0x27, 0xd4, 0xb9, 0x1d, 0x2a, 0x79, 0x01, 0xdc, 0x39, 0x4e, 0x52, + 0x39, 0x05, 0x0a, 0x17, 0xec, 0xd5, 0x33, 0x20, 0xa5, 0xd7, 0x72, 0x4c, 0xd4, 0xf9, + 0x82, 0xc9, 0x3d, 0x6b, 0xbd, 0x01, 0xce, 0xc3, 0xe1, 0xf7, 0x1a, 0x0f, 0x12, 0xde, + 0xa3, 0xd1, 0x42, 0xff, 0x0f, 0xff, 0xd7, 0xa1, 0xb8, 0xf9, 0xeb, 0x82, 0xcc, 0x72, + 0x10, 0x3c, 0x71, 0x97, 0x55, 0x3d, 0x07, 0x2a, 0xe1, 0xad, 0xf7, 0x0c, 0xa4, 0x00, + 0x7a, 0x3d, 0x07, 0xff, 0xf5, 0xec, 0x82, 0xe6, 0x64, 0x71, 0x01, 0x0c, 0xf9, 0x8a, + 0x3a, 0x2a, 0x5b, 0xe1, 0x6b, 0x86, 0x2d, 0x29, 0xc7, 0x70, 0x12, 0x72, 0x47, 0x61, + 0xe9, 0xcb, 0xbe, 0x42, 0x62, 0xcc, 0xa5, 0xb0, 0xb9, 0x31, 0xe8, 0xbb, 0x72, 0x67, + 0x1f, 0xe4, 0xb4, 0xb5, 0x88, 0xc9, 0x0a, 0xd5, 0xc0, 0x0b, 0x55, 0xdc, 0x8c, 0x8a, + 0xf9, 0xb0, 0xf6, 0xa3, 0xca, 0x1e, 0x07, 0xef, 0xf1, 0x58, 0x11, 0x39, 0x1c, 0x53, + 0xf7, 0xe4, 0x3b, 0x1b, 0x81, 0x16, 0xda, 0xdc, 0x01, 0x6d, 0x19, 0x26, 0xc8, 0x48, + 0x0d, 0x4e, 0xe3, 0x4e, 0x76, 0x19, 0x1b, 0x79, 0xbe, 0xd0, 0xce, 0x95, 0x97, 0x3a, + 0x4c, 0x7c, 0xf2, 0xf0, 0x57, 0xc7, 0x14, 0x7e, 0xdb, 0x01, 0x3d, 0x20, 0x5d, 0x81, + 0xe2, 0x36, 0x08, 0x88, 0xa2, 0xab, 0xdd, 0xcc, 0xf0, 0xf6, 0xf3, 0xd8, 0xf8, 0xba, + 0x11, 0x1d, 0x64, 0x2c, 0x52, 0xd0, 0x4e, 0xbd, 0x3c, 0xe1, 0x7c, 0x60, 0xd9, 0x22, + 0x57, 0xea, 0x58, 0x69, 0x09, 0x45, 0x01, 0xbb, 0x67, 0x12, 0x68, 0xb2, 0x24, 0x47, + 0x7a, 0x8e, 0x01, 0x41, 0xd6, 0xff, 0x37, 0xe2, 0x4f, 0xf1, 0xc7, 0x65, 0xe8, 0x4d, + 0x26, 0x4d, 0xb8, 0x8f, 0x00, 0x92, 0x8e, 0x64, 0xc4, 0x12, 0xbd, 0x59, 0x15, 0x1a, + 0x65, 0x71, 0xc6, 0x67, 0x09, 0x16, 0xb0, 0x70, 0x6b, 0x04, 0x4f, 
0xc5, 0xc2, 0xbd, + 0x93, 0xad, 0xe3, 0x96, 0x79, 0x57, 0xcd, 0xb9, 0x41, 0x27, 0x4c, 0xc6, 0xbd, 0xb4, + 0xe0, 0x36, 0xb7, 0x67, 0xb9, 0x50, 0xc0, 0x9e, 0x46, 0x26, 0xa1, 0xd0, 0x05, 0xbc, + 0xf4, 0x83, 0x6e, 0xf6, 0xa1, 0xde, 0x48, 0x09, 0x5d, 0xcb, 0x46, 0x12, 0x78, 0xb1, + 0x6c, 0x45, 0x68, 0x90, 0xb2, 0x3d, 0x40, 0xbd, 0x36, 0x04, 0x10, 0xf0, 0x01, 0x0a, + 0x55, 0xf5, 0x05, 0xfe, 0x5e, 0x2d, 0xb2, 0x01, 0xc7, 0x52, 0xe9, 0xb5, 0xb1, 0x5b, + 0xf8, 0xaa, 0x9e, 0x82, 0xd6, 0x49, 0xab, 0x11, 0x73, 0xba, 0x2a, 0x51, 0x32, 0xe0, + 0xcc, 0x50, 0x51, 0xcc, 0xf7, 0x4c, 0x7a, 0x6a, 0x37, 0x07, 0xab, 0x59, 0x83, 0xf7, + 0xcc, 0x27, 0x5c, 0x99, 0x1a, 0xbe, 0x4d, 0x7c, 0xee, 0x5f, 0x28, 0x9e, 0xfe, 0x72, + 0x7e, 0xb3, 0xda, 0x86, 0xfa, 0x21, 0xa2, 0x8d, 0x6b, 0x8a, 0x2a, 0xff, 0xd4, 0x2d, + 0xb9, 0x8b, 0xb2, 0xa4, 0x6c, 0xd8, 0xa3, 0x29, 0x31, 0x2f, 0xa9, 0x45, 0x39, 0xd9, + 0xcb, 0x35, 0xdc, 0xb6, 0x04, 0x67, 0x8b, 0x63, 0x90, 0x64, 0xd9, 0x20, 0x05, 0xdf, + 0x2d, 0x10, 0x68, 0x1c, 0x64, 0xb9, 0xed, 0x8c, 0xe4, 0x7d, 0x7e, 0xba, 0x0f, 0x2b, + 0x50, 0x2b, 0x20, 0x6a, 0xd4, 0xb2, 0xe9, 0x2b, 0xbe, 0x45, 0x86, 0xf6, 0xd7, 0x50, + 0x9e, 0x57, 0xa6, 0x37, 0x7f, 0xea, 0xbe, 0x38, 0xb3, 0xcc, 0x6c, 0x95, 0x5d, 0x5e, + 0x7b, 0xdf, 0x7e, 0xb1, 0x32, 0xd8, 0x6b, 0xc0, 0x7a, 0x30, 0x98, 0xb4, 0x13, 0xe4, + 0x40, 0x5d, 0xaa, 0xa2, 0x55, 0x29, 0x1d, 0x55, 0x2b, 0x2c, 0x80, 0x07, 0xbe, 0xd4, + 0x1e, 0x22, 0xf1, 0xcf, 0x79, 0x11, 0x82, 0x12, 0x00, 0x55, 0x5e, 0x9c, 0x4f, 0xfb, + 0x09, 0xef, 0xc1, 0x22, 0x38, 0x11, 0x75, 0x03, 0x1c, 0x38, 0x28, 0x0b, 0x53, 0x26, + 0xeb, 0xbe, 0xaf, 0x33, 0x4f, 0xdc, 0xf0, 0xdc, 0x44, 0x4e, 0x62, 0x9f, 0x93, 0x95, + 0x51, 0x54, 0x0b, 0xcb, 0xbb, 0xb1, 0xab, 0x9c, 0x23, 0x1a, 0x86, 0x6b, 0x32, 0x9e, + 0x85, 0x24, 0xab, 0x25, 0xf9, 0x3e, 0x5e, 0x33, 0x4a, 0x05, 0x27, 0x2a, 0x3f, 0x82, + 0x6f, 0x9d, 0x05, 0xa4, 0x50, 0x58, 0xdf, 0xcd, 0xf6, 0x88, 0x43, 0xa8, 0xb9, 0x36, + 0xa0, 0xcf, 0x5e, 0x6a, 0xa8, 0xae, 0x1b, 0x80, 0xf6, 0x01, 0x61, 0xbf, 0x41, 0x4f, + 
0x28, 0x02, 0x11, 0x11, 0x09, 0x21, 0xa9, 0xc8, 0x5f, 0x51, 0x04, 0xa0, 0x16, 0x8e, + 0x8e, 0x72, 0xde, 0x4f, 0x8a, 0xa0, 0x41, 0x32, 0xeb, 0x25, 0x88, 0x76, 0xf1, 0x9d, + 0x7b, 0xe5, 0xf2, 0xdd, 0x2b, 0x0b, 0x30, 0x4b, 0x92, 0x3b, 0x29, 0x52, 0xd9, 0x1f, + 0xde, 0xe7, 0xe5, 0x52, 0x05, 0xdb, 0xb1, 0x94, 0xeb, 0xba, 0x32, 0x2f, 0xdc, 0x67, + 0xb2, 0x52, 0x2c, 0x92, 0x61, 0x21, 0xc7, 0xfa, 0x1a, 0xf1, 0x7e, 0xd0, 0x6c, 0x47, + 0x27, 0x8f, 0x96, 0x08, 0x92, 0x96, 0x08, 0x7a, 0x70, 0x4b, 0x7d, 0x0f, 0x84, 0x7d, + 0x51, 0xd6, 0xcc, 0x68, 0xac, 0xc5, 0x22, 0x07, 0x74, 0x73, 0x41, 0xf6, 0xb9, 0x8c, + 0xb1, 0xcd, 0x4f, 0xaf, 0xcd, 0x2b, 0xb0, 0xd0, 0x5b, 0xc7, 0x9b, 0xb8, 0x0d, 0x7c, + 0x4b, 0x8a, 0x1a, 0x11, 0xbc, 0x0a, 0x3b, 0xde, 0xca, 0x45, 0x41, 0x86, 0x9b, 0x4d, + 0xc9, 0xd6, 0xb4, 0x8c, 0xd7, 0x86, 0x9b, 0xf7, 0x63, 0xb9, 0xdc, 0x42, 0x45, 0x27, + 0x3c, 0x70, 0x4b, 0x0d, 0x8d, 0xec, 0x4b, 0x85, 0xd1, 0x6d, 0xd4, 0x38, 0xce, 0xd6, + 0x22, 0x0f, 0xa6, 0x69, 0x26, 0x66, 0x3f, 0xcc, 0x22, 0x8f, 0xc6, 0xc4, 0xd2, 0x7e, + 0x17, 0xe3, 0x27, 0x83, 0x4b, 0x67, 0x57, 0x91, 0x4d, 0x1b, 0xcb, 0xf3, 0x4b, 0x65, + 0xd8, 0x58, 0xab, 0x8b, 0x5c, 0x12, 0x0c, 0xb0, 0x85, 0x05, 0x22, 0xf5, 0x42, 0x89, + 0x3f, 0xdd, 0xb1, 0x79, 0xe8, 0x7f, 0x83, 0x2d, 0xaa, 0xa1, 0x52, 0xc8, 0x31, 0xf1, + 0x35, 0x64, 0x00, 0x9c, 0x41, 0x81, 0x23, 0x53, 0x3d, 0xe2, 0xc6, 0x79, 0x49, 0xe3, + 0xaf, 0x2d, 0xcb, 0x60, 0xd6, 0xbd, 0xbd, 0xda, 0xda, 0x63, 0xa3, 0x0b, 0x4b, 0x54, + 0xcd, 0x1c, 0xe5, 0xa5, 0xa0, 0x0f, 0x8e, 0x85, 0x57, 0xeb, 0xa9, 0x23, 0x4e, 0x81, + 0x17, 0x8d, 0x0f, 0xca, 0xb5, 0x61, 0x0f, 0xba, 0x96, 0x69, 0xcf, 0xeb, 0x1b, 0xd0, + 0x8c, 0xd9, 0x65, 0x33, 0x49, 0x8b, 0x27, 0x2c, 0x57, 0x79, 0xa9, 0xf9, 0x39, 0x69, + 0x1d, 0xe1, 0xad, 0x88, 0x1c, 0x80, 0x87, 0x8d, 0x6c, 0x29, 0x42, 0x15, 0x23, 0x0b, + 0xbb, 0x61, 0x90, 0x69, 0xb4, 0xdc, 0x17, 0xb3, 0xe5, 0x9d, 0xbd, 0x24, 0x2c, 0xd8, + 0x8e, 0xcc, 0x3b, 0xe3, 0xa2, 0x69, 0x6b, 0xf7, 0xf2, 0xd9, 0xe5, 0xb8, 0xc1, 0x52, + 0xcc, 0x0d, 0x99, 
0xa0, 0xa5, 0xe9, 0xa3, 0x8b, 0x1b, 0x8e, 0xb1, 0xa0, 0x13, 0xeb, + 0x76, 0x51, 0x33, 0x37, 0xa7, 0xb0, 0xda, 0xdb, 0x4e, 0x81, 0x7b, 0x6f, 0x49, 0x78, + 0x02, 0xbd, 0x47, 0xe9, 0x3a, 0x82, 0x0c, 0x4f, 0xad, 0x6c, 0x65, 0x09, 0x74, 0x42, + 0xb9, 0xca, 0xc1, 0x61, 0xb6, 0x4d, 0x77, 0xb3, 0x05, 0x7d, 0x13, 0x36, 0xb5, 0x5a, + 0x48, 0xa7, 0xb7, 0xf6, 0x89, 0xec, 0x9f, 0x6c, 0x6a, 0x81, 0xf3, 0xee, 0xe6, 0xdd, + 0xc5, 0xcc, 0xc9, 0x6f, 0xef, 0x4c, 0x4c, 0x89, 0x07, 0xbb, 0xff, 0xe3, 0x2c, 0xb6, + 0x12, 0x92, 0x05, 0x3d, 0xb8, 0x6d, 0x36, 0x6b, 0x7e, 0x6b, 0x30, 0x13, 0xd1, 0x4b, + 0x20, 0x5f, 0xb4, 0x5d, 0x06, 0x7e, 0x37, 0x50, 0x2e, 0x37, 0x9c, 0x4a, 0xa1, 0x38, + 0x2f, 0xf9, 0x22, 0x0c, 0x4f, 0x38, 0x9c, 0xa2, 0xc4, 0x14, 0x99, 0x55, 0x60, 0x52, + 0x3e, 0x6d, 0xde, 0x86, 0xa3, 0x7f, 0x3f, 0x86, 0xda, 0x8e, 0x7c, 0x03, 0x4f, 0x4b, + 0x6d, 0x79, 0x43, 0xce, 0xf1, 0x20, 0x30, 0xc4, 0x00, 0x99, 0xd8, 0x77, 0xca, 0xbe, + 0x81, 0xb0, 0x87, 0x50, 0xe3, 0xfb, 0xfe, 0x63, 0x12, 0xf6, 0x38, 0x0b, 0x98, 0xfb, + 0x85, 0x0a, 0x2a, 0x14, 0x2b, 0x91, 0x4a, 0xdc, 0x71, 0x54, 0x47, 0xc5, 0x79, 0x1a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x1b, 0x67, 0xae, 0x65, 0x6c, 0xad, 0xdd, 0x21, 0xe1, 0xb4, 0x6d, 0xc9, + 0xa7, 0x64, 0x12, 0x7b, 0xc0, 0xa3, 0x01, 0xb4, 0x80, 0x04, 0xa9, 0xc5, 0x27, 0x6b, + 0xcf, 0x08, 0xe7, 0xfe, 0x4a, 0xe5, 0x2d, 0x76, 0xe4, 0x31, 0x48, 0x8a, 0x5b, 0x9d, + 0x43, 0x1f, 0xa1, 0x36, 0x34, 0x6e, 0x5a, 0x53, 0xab, 0x3f, 0x68, 0x12, 0xf2, 0xd9, + 0x70, 0xf7, 0xb3, 0x98, 0x98, 0xcf, 0x8b, 0x62, 0xf2, 0xdb, 0xf6, 0x1e, 0x99, 0xa2, + 0x91, 0x5d, 0xfb, 0x75, 0xae, 0x22, 0xb7, 0x9f, 0x84, 0xcf, 0x25, 0x97, 0xeb, 0x34, + 0xec, 0x3d, 0x29, 0x2e, 0x6b, 0x5d, 0x84, 0xeb, 0xac, 0x4d, 0x92, 0xde, 0x52, 0xe1, + 0xf8, 0xbf, 0x6b, 0xfd, 0xba, 0xda, 0x63, 0x44, 0x09, 0xf2, 0x0e, 0xf2, 0xcc, 0x6e, + 0x3c, 0x39, 0x0e, 0x43, 0x5f, 0x47, 0xe3, 0x47, 0x23, 0x8d, 0xb4, 0x86, 0x90, 0x84, + 0x04, 0x73, 0xb0, 0xa0, 0x83, 0x1a, 
0x5a, 0x8a, 0x58, 0xc4, 0xdc, 0xfc, 0x4e, 0xab, + 0x7b, 0x41, 0x8c, 0xba, 0x2a, 0x41, 0x4f, 0x95, 0x57, 0x71, 0x90, 0xff, 0x88, 0xd7, + 0x27, 0xf7, 0x3e, 0x2f, 0xff, 0x97, 0xaa, 0xbd, 0x11, 0x14, 0xb7, 0x64, 0xe3, 0xed, + 0xbc, 0x18, 0x3e, 0x60, 0x3a, 0xcf, 0xb7, 0xc0, 0x9b, 0xf1, 0x32, 0xbb, 0x01, 0xef, + 0xc7, 0x17, 0x8d, 0x4f, 0x9a, 0x2d, 0xba, 0xf4, 0x92, 0x4f, 0xd8, 0x0f, 0xbe, 0x0e, + 0x60, 0x4f, 0x60, 0x39, 0x08, 0x32, 0xeb, 0x98, 0x04, 0x79, 0xe0, 0x4e, 0x9c, 0x9a, + 0x2b, 0xb2, 0xfb, 0x36, 0x84, 0xd8, 0xf8, 0x06, 0x48, 0xd5, 0x80, 0x78, 0x38, 0x54, + 0x58, 0x4f, 0x62, 0xbe, 0x0c, 0xc9, 0x21, 0x88, 0x32, 0x38, 0x56, 0x10, 0xd9, 0x62, + 0x36, 0x5f, 0x50, 0x71, 0xfa, 0x3d, 0x36, 0x8f, 0xfb, 0x67, 0x1b, 0xa2, 0xc2, 0xf9, + 0xa0, 0xfc, 0x68, 0xd8, 0x07, 0x22, 0x19, 0xa7, 0x7b, 0xef, 0x2d, 0x6b, 0x4a, 0x19, + 0xf1, 0x6d, 0xd5, 0x30, 0x74, 0x22, 0x47, 0x46, 0xbb, 0xa5, 0xf1, 0x72, 0x82, 0x20, + 0xb1, 0x96, 0xe4, 0x0f, 0x93, 0x7c, 0x47, 0x05, 0x42, 0x9d, 0x04, 0xaa, 0x3c, 0x50, + 0x5c, 0x95, 0x60, 0x3e, 0x05, 0xff, 0x55, 0x2e, 0xc1, 0x86, 0x42, 0xd5, 0x67, 0x05, + 0x02, 0x67, 0xb9, 0xf9, 0x92, 0x9c, 0x2e, 0x13, 0x80, 0x14, 0xb5, 0xef, 0x1b, 0xa7, + 0x1d, 0x9a, 0x71, 0x86, 0xe3, 0xd1, 0x3c, 0x8a, 0x8e, 0x40, 0x8c, 0x2a, 0x9d, 0x12, + 0x01, 0xa7, 0xfe, 0xbb, 0x83, 0x34, 0x51, 0x2b, 0x44, 0xb8, 0x2b, 0xb2, 0x01, 0x78, + 0x9f, 0x63, 0x58, 0x04, 0x89, 0x6e, 0x3e, 0xb2, 0x1b, 0x5b, 0xd8, 0xc4, 0x21, 0xf0, + 0xb4, 0xcf, 0xba, 0x04, 0xde, 0x92, 0x52, 0x8f, 0x04, 0xfb, 0x4b, 0x52, 0x6b, 0x73, + 0x7e, 0xe3, 0x2d, 0xa8, 0x63, 0xf5, 0x98, 0x45, 0x61, 0x31, 0x98, 0x3a, 0x01, 0x35, + 0x8f, 0xb0, 0x7d, 0xe6, 0x75, 0x21, 0x11, 0x58, 0x5a, 0x86, 0x25, 0x6c, 0xe0, 0x34, + 0xc0, 0xd8, 0x57, 0x5a, 0x42, 0x76, 0x13, 0x61, 0xb1, 0x18, 0x77, 0x05, 0x0b, 0xc6, + 0xaf, 0xc3, 0x16, 0x15, 0x64, 0xe9, 0x6f, 0xd8, 0xcf, 0x04, 0x8f, 0xeb, 0xeb, 0x2a, + 0x92, 0x20, 0x07, 0x1c, 0xff, 0x18, 0x2d, 0x6c, 0xa0, 0x37, 0xce, 0x2c, 0x2d, 0xed, + 0x91, 0x6b, 0xd7, 0xb8, 0x4d, 0xe2, 0x8a, 0xc0, 0x17, 
0x1d, 0x97, 0xfc, 0x24, 0x95, + 0x6c, 0x26, 0x66, 0x69, 0xc1, 0x03, 0x6b, 0x2b, 0x1a, 0x23, 0xda, 0xbc, 0xf3, 0x4e, + 0x38, 0xf3, 0x51, 0x45, 0x12, 0xae, 0x8a, 0x47, 0xb3, 0x53, 0xb4, 0x16, 0x69, 0x96, + 0x75, 0xe4, 0xd3, 0x1a, 0x2f, 0xe0, 0x34, 0x08, 0xe4, 0x24, 0xa7, 0x82, 0x9a, 0x06, + 0xad, 0xe6, 0x36, 0x53, 0x61, 0xd8, 0xa9, 0x61, 0x25, 0x7c, 0xbe, 0x25, 0xb0, 0xcd, + 0xe3, 0x3e, 0x96, 0x48, 0x77, 0xdf, 0x5e, 0x57, 0xc5, 0x3d, 0xb2, 0x83, 0x51, 0x77, + 0x34, 0x3e, 0x2d, 0x87, 0x6d, 0x51, 0x4c, 0x62, 0xfb, 0xb3, 0xb4, 0xa7, 0x08, 0xce, + 0x62, 0x62, 0x05, 0xcc, 0xf9, 0x2f, 0x24, 0x0d, 0x60, 0x2c, 0xdb, 0x5d, 0x68, 0x41, + 0xfd, 0x29, 0xda, 0x63, 0x08, 0xb6, 0xca, 0x40, 0x97, 0xd8, 0x52, 0x54, 0x10, 0x46, + 0x54, 0x52, 0x23, 0x9b, 0x04, 0x51, 0xa8, 0xdb, 0xed, 0xac, 0x1e, 0x41, 0xed, 0xdd, + 0x0f, 0x6b, 0xe0, 0xe3, 0xd8, 0x89, 0x69, 0x07, 0x03, 0xa3, 0x14, 0x57, 0x07, 0xe0, + 0xb3, 0xf5, 0xdb, 0x91, 0xb8, 0x19, 0x37, 0x56, 0xe0, 0xe3, 0x47, 0xb6, 0x64, 0xa1, + 0xcc, 0xcb, 0xd7, 0x86, 0x9a, 0x40, 0x22, 0xea, 0xdf, 0x3f, 0x87, 0x3c, 0x10, 0xec, + 0xab, 0x9a, 0x93, 0xf2, 0xca, 0xdc, 0xa7, 0xa3, 0x33, 0xb8, 0x1b, 0xb6, 0x10, 0x4e, + 0x82, 0xea, 0x14, 0xfe, 0x74, 0x1e, 0xb0, 0x62, 0x08, 0x0d, 0xc8, 0x5a, 0xcb, 0xc8, + 0xcc, 0x3a, 0x9b, 0xc8, 0x0c, 0x03, 0xd9, 0x1f, 0xfb, 0x3c, 0x25, 0xf9, 0xe4, 0x2b, + 0xc2, 0x5c, 0xf7, 0x7d, 0x73, 0x90, 0xc3, 0xab, 0xaf, 0x26, 0x10, 0xf4, 0xec, 0xdb, + 0x01, 0x9b, 0x15, 0x8d, 0xa2, 0x15, 0x5b, 0xef, 0xec, 0xb9, 0xc2, 0x29, 0x6d, 0x03, + 0xf8, 0x23, 0xea, 0xac, 0x0c, 0x74, 0x0d, 0x2a, 0x44, 0x89, 0xb8, 0x28, 0x4c, 0x7e, + 0x7b, 0x3a, 0x72, 0x9a, 0xfb, 0x69, 0xbd, 0x5b, 0xfa, 0x5f, 0x62, 0xf9, 0xb5, 0x27, + 0x37, 0x97, 0xdd, 0x24, 0xa0, 0x18, 0x30, 0x7f, 0xc6, 0x20, 0xe6, 0x42, 0xaa, 0x27, + 0xe7, 0x50, 0x6e, 0x17, 0xb1, 0x98, 0xdc, 0xa4, 0x79, 0x0e, 0x8d, 0xe1, 0xbf, 0xb6, + 0x71, 0xd8, 0xdc, 0x75, 0x13, 0x91, 0x0e, 0x95, 0x43, 0x10, 0x72, 0x1b, 0x4f, 0xb5, + 0x37, 0x33, 0xc9, 0x18, 0xf0, 0xd1, 0x89, 0x85, 0x18, 0x89, 0x62, 0x73, 
0x22, 0xd5, + 0x20, 0xca, 0xcc, 0x9d, 0xd7, 0x03, 0x6b, 0xb4, 0x39, 0xa1, 0x69, 0xef, 0x2c, 0xdd, + 0x6c, 0xdb, 0xae, 0xa5, 0xa9, 0x1b, 0xc2, 0x4a, 0xb3, 0xfc, 0xa1, 0x57, 0x4c, 0x12, + 0xc9, 0x31, 0xe7, 0xaa, 0x3e, 0xd3, 0xc6, 0x49, 0x66, 0xc0, 0x6b, 0x62, 0x2d, 0x23, + 0xc8, 0x8d, 0xb2, 0xfd, 0x4b, 0x8f, 0xa5, 0x0b, 0xe3, 0x61, 0x94, 0x3b, 0x79, 0x6d, + 0x14, 0x85, 0x5f, 0x20, 0x71, 0xd3, 0x20, 0xd4, 0x3d, 0x6c, 0x49, 0x4c, 0x9e, 0xda, + 0x35, 0xcf, 0x9b, 0xf3, 0x7d, 0xc5, 0x4b, 0x40, 0x2e, 0xb2, 0x87, 0x64, 0xa0, 0xb9, + 0x17, 0x6c, 0xf9, 0x49, 0xb2, 0xa7, 0x78, 0x64, 0x19, 0x83, 0x89, 0x2f, 0xfb, 0x5c, + 0x7b, 0xfa, 0x68, 0xe6, 0x36, 0xde, 0xfe, 0xfc, 0xb2, 0xfa, 0x07, 0x94, 0x45, 0xec, + 0xd3, 0xad, 0xdf, 0x0c, 0x22, 0xb2, 0x61, 0x72, 0x49, 0x92, 0xe2, 0xf0, 0xd2, 0x7c, + 0xff, 0x23, 0xa6, 0x46, 0x15, 0x30, 0xdc, 0x05, 0xf4, 0x9e, 0x97, 0x2d, 0xa3, 0x71, + 0x6f, 0x41, 0x91, 0xbf, 0xf4, 0xed, 0x29, 0x02, 0x67, 0x46, 0xf0, 0x9e, 0xfa, 0x9d, + 0xfc, 0xbc, 0xde, 0xc5, 0xa6, 0x95, 0xb1, 0xf7, 0x31, 0x36, 0x14, 0x64, 0xec, 0x42, + 0xe3, 0xb5, 0x26, 0x7e, 0xb6, 0x5f, 0x55, 0x6b, 0x26, 0x7a, 0xf3, 0x59, 0x71, 0xb4, + 0x14, 0x9b, 0xb3, 0xe5, 0xaa, 0x03, 0xa4, 0x95, 0xfb, 0xeb, 0x90, 0x15, 0xac, 0x3f, + 0xf1, 0x3a, 0x5c, 0x1c, 0x2a, 0x5f, 0x81, 0x96, 0x47, 0x3d, 0x5b, 0xfe, 0x70, 0x48, + 0xdf, 0x27, 0x7f, 0x0b, 0x5c, 0xf4, 0xe6, 0xc7, 0x1c, 0xa9, 0x36, 0x6e, 0xca, 0x3b, + 0x9c, 0xf1, 0xe6, 0x06, 0x9d, 0x53, 0x9e, 0x5c, 0xe4, 0x3f, 0xd9, 0xaa, 0x25, 0xc2, + 0x11, 0xd3, 0x79, 0x92, 0xc3, 0x40, 0xad, 0xea, 0x8b, 0x24, 0x9f, 0x28, 0xab, 0x23, + 0x49, 0x39, 0x17, 0xc4, 0x9d, 0xeb, 0x28, 0x3b, 0x4c, 0x8a, 0x64, 0x90, 0x41, 0x88, + 0x7e, 0x66, 0x83, 0x8d, 0x1c, 0x42, 0x9d, 0xec, 0xdb, 0x31, 0x59, 0xcb, 0x30, 0xaf, + 0xe4, 0xfb, 0x31, 0x68, 0xcc, 0xec, 0x44, 0x98, 0x2e, 0x05, 0xf8, 0x71, 0x13, 0x2e, + 0xfa, 0x63, 0xd6, 0x5a, 0x24, 0x93, 0xcd, 0xf2, 0x39, 0xe8, 0xb2, 0xc8, 0x09, 0x05, + 0xe8, 0x04, 0xa8, 0x4d, 0xd7, 0x6a, 0xfe, 0xaa, 0x68, 0x94, 0x79, 0x1d, 0x49, 0xb1, + 0xe4, 
0x00, 0xb3, 0xfc, 0xaa, 0x82, 0x73, 0x99, 0x60, 0xad, 0xda, 0x36, 0x45, 0xbb, + 0x85, 0x75, 0x6c, 0x63, 0x00, 0x5c, 0x01, 0x6f, 0x65, 0x8b, 0xa6, 0xab, 0x52, 0x57, + 0xc4, 0x86, 0xaf, 0x13, 0xed, 0xc9, 0xb4, 0x6b, 0xf6, 0x29, 0x34, 0xaa, 0x71, 0x4f, + 0x00, 0x36, 0x05, 0x96, 0x5a, 0xc5, 0x4d, 0x82, 0x50, 0xa5, 0x53, 0x52, 0x00, 0xd1, + 0x20, 0x2a, 0xcc, 0xca, 0xaa, 0x9e, 0x42, 0xea, 0x98, 0x2a, 0x21, 0x61, 0x8e, 0xdb, + 0xb1, 0x34, 0xc3, 0x3b, 0xc8, 0x4e, 0x35, 0xfc, 0x76, 0x56, 0x05, 0x86, 0xa3, 0xc3, + 0x43, 0x8e, 0x8f, 0x2b, 0x0c, 0xe7, 0x0d, 0x86, 0x31, 0x71, 0xdf, 0x23, 0x8e, 0x12, + 0x60, 0xd5, 0x9f, 0x82, 0x40, 0x37, 0xa7, 0x71, 0x7b, 0x2e, 0x21, 0xa9, 0x6e, 0x4d, + 0x79, 0x9b, 0x8e, 0xc4, 0xc9, 0x8b, 0x8d, 0x16, 0x83, 0x6c, 0x18, 0x22, 0xb2, 0x45, + 0x62, 0x66, 0x46, 0x59, 0x86, 0x85, 0x0d, 0x23, 0x31, 0xc7, 0x29, 0x34, 0xbd, 0xb6, + 0x71, 0x54, 0xab, 0xa0, 0xad, 0x49, 0xbe, 0x0e, 0x52, 0xd8, 0xb0, 0x78, 0x41, 0x11, + 0x7c, 0x0e, 0xb7, 0x6a, 0x39, 0x54, 0x96, 0x39, 0xf7, 0xad, 0xe7, 0x6a, 0x90, 0x71, + 0x0e, 0x79, 0x83, 0x97, 0x8e, 0x9b, 0x23, 0x34, 0x9b, 0xee, 0x22, 0xcd, 0x0c, 0x71, + 0xa1, 0xf0, 0x72, 0x70, 0xe2, 0xce, 0x8b, 0x36, 0x05, 0x1b, 0x00, 0x55, 0xba, 0x97, + 0x05, 0xab, 0x22, 0x2e, 0x8e, 0x85, 0x8d, 0xc4, 0x5b, 0x66, 0xc1, 0xef, 0x3f, 0xe2, + 0x66, 0x55, 0x03, 0xe7, 0x8b, 0x30, 0x29, 0xef, 0xfb, 0xd5, 0xbb, 0x13, 0x9e, 0x85, + 0x2c, 0x3b, 0xf9, 0x07, 0x13, 0x2e, 0x54, 0xc3, 0xed, 0xad, 0x03, 0xf7, 0xe8, 0x68, + 0xf5, 0x23, 0x15, 0x5f, 0x9f, 0x6b, 0xce, 0xf4, 0x50, 0xbc, 0x9b, 0x56, 0x31, 0x0c, + 0xda, 0x17, 0x3e, 0x50, 0xe9, 0x5a, 0x6e, 0xe5, 0xf0, 0x68, 0xb2, 0x5e, 0x32, 0x9c, + 0x35, 0x48, 0xfc, 0x24, 0x99, 0x37, 0x3c, 0xde, 0x29, 0x36, 0x0f, 0xbb, 0xfa, 0x5b, + 0x64, 0xb5, 0x74, 0x4a, 0xb0, 0x3a, 0x4b, 0xd5, 0xd9, 0x48, 0xc1, 0xbe, 0xf8, 0xcf, + 0x4e, 0x6b, 0xd9, 0x4c, 0x32, 0x80, 0x9b, 0x18, 0xf1, 0x18, 0x9c, 0x32, 0xbb, 0x8f, + 0xae, 0x27, 0x53, 0xe4, 0x85, 0x1c, 0x31, 0x96, 0xf5, 0xbb, 0x1d, 0xa0, 0x78, 0x51, + 0xb5, 0xd3, 0x1f, 0x20, 
0xa0, 0xfd, 0x3a, 0x7a, 0x4b, 0x45, 0x01, 0xf3, 0x18, 0x5d, + 0x26, 0x7b, 0x1c, 0x8b, 0xb3, 0x59, 0x5d, 0x85, 0xc5, 0x3c, 0xae, 0x18, 0x9e, 0xc9, + 0xdb, 0x6f, 0x14, 0x53, 0xb3, 0xc6, 0xad, 0x4f, 0x3b, 0x93, 0xdd, 0x10, 0x6a, 0x3a, + 0x39, 0x0d, 0xb2, 0x7a, 0x1a, 0x75, 0x0e, 0x7e, 0xd0, 0x89, 0x7e, 0xbb, 0x61, 0x98, + 0x48, 0x4d, 0xcc, 0xdf, 0xa7, 0xa7, 0xe1, 0xd8, 0xeb, 0x2f, 0x23, 0x66, 0x8d, 0x54, + 0xe9, 0x8f, 0x9e, 0xd3, 0xae, 0x90, 0xfe, 0x0c, 0x27, 0x5f, 0x17, 0x7e, 0xcf, 0x70, + 0x1f, 0xd3, 0x0b, 0x92, 0xf6, 0x1b, 0x3c, 0x12, 0x53, 0xcc, 0x31, 0x78, 0x95, 0xfe, + 0x5e, 0x39, 0xc4, 0xea, 0x03, 0x24, 0x8e, 0x83, 0x20, 0x2e, 0xa5, 0x89, 0xa0, 0xe8, + 0xfc, 0xaf, 0xc4, 0x34, 0x07, 0xb5, 0x71, 0x9c, 0x08, 0x6a, 0xc2, 0xf5, 0x8c, 0x1c, + 0x4e, 0x05, 0x63, 0x69, 0x56, 0xb6, 0x30, 0x4e, 0x31, 0x7f, 0x4f, 0x65, 0xb4, 0xe2, + 0xb9, 0x9f, 0x25, 0xe8, 0xd7, 0xbb, 0x53, 0x28, 0xea, 0x1f, 0x31, 0x13, 0x25, 0x6a, + 0x45, 0x08, 0x01, 0x6a, 0x3e, 0x9d, 0x01, 0x2e, 0xf8, 0x19, 0xfa, 0x36, 0xa5, 0xdb, + 0xce, 0x7e, 0x3a, 0xff, 0x47, 0x42, 0xc0, 0xcd, 0x3d, 0x5d, 0x9e, 0xb8, 0x40, 0x44, + 0xa0, 0x03, 0x23, 0x39, 0x40, 0x69, 0x9b, 0xc2, 0x79, 0x45, 0xb9, 0xac, 0x93, 0x82, + 0x23, 0xc1, 0x17, 0x3f, 0x34, 0xd1, 0x7e, 0x7e, 0x2e, 0x7b, 0xbc, 0xad, 0x2d, 0x91, + 0x9d, 0x1a, 0xf5, 0x54, 0x94, 0x0b, 0x68, 0xd7, 0x43, 0x3a, 0x6d, 0x67, 0xe8, 0x5c, + 0xd3, 0x35, 0x66, 0xb0, 0x60, 0xe4, 0x48, 0xb4, 0xa2, 0xa0, 0x52, 0xa8, 0xb7, 0x9e, + 0x27, 0x57, 0x8d, 0xce, 0x6e, 0x09, 0x88, 0x6e, 0xf0, 0x92, 0xef, 0x09, 0x67, 0x97, + 0x47, 0x8b, 0xb5, 0x4b, 0x9a, 0xbb, 0xa5, 0xae, 0x26, 0x79, 0x9b, 0x07, 0xcd, 0xc8, + 0x8c, 0x80, 0x2e, 0x6a, 0xf5, 0xcb, 0xfd, 0x41, 0x24, 0x29, 0x57, 0x00, 0xac, 0x12, + 0xd9, 0x10, 0xa0, 0x2a, 0x74, 0xc8, 0xab, 0xd2, 0x4d, 0x39, 0x88, 0x72, 0xdd, 0x9d, + 0x3a, 0xb3, 0xc5, 0x4c, 0x63, 0xa0, 0x9e, 0x51, 0xbb, 0x51, 0x62, 0x54, 0x01, 0x03, + 0xab, 0x0c, 0xae, 0xfc, 0x6e, 0x5b, 0x88, 0x05, 0x21, 0xf4, 0x9c, 0x55, 0x93, 0xa7, + 0xec, 0xe1, 0xef, 0xdc, 0x00, 0xad, 0x96, 
0xc3, 0x82, 0xfe, 0xcf, 0x0f, 0x9c, 0x1c, + 0x8e, 0xcd, 0xcb, 0xc2, 0x2e, 0x89, 0x07, 0xce, 0x99, 0xdf, 0x99, 0x4a, 0x33, 0x0a, + 0x90, 0x44, 0x6d, 0xae, 0xec, 0xab, 0x71, 0xf0, 0x02, 0x35, 0xdd, 0x70, 0x23, 0x3c, + 0x43, 0x17, 0xd6, 0x4e, 0xf6, 0xba, 0x3f, 0x65, 0x76, 0x42, 0xba, 0xad, 0x97, 0x35, + 0xe5, 0x48, 0x68, 0xc1, 0x97, 0x54, 0x56, 0x89, 0xa0, 0x57, 0x0b, 0xd4, 0x58, 0x4a, + 0xad, 0xe4, 0x1a, 0x59, 0x08, 0xb8, 0xaa, 0x33, 0x54, 0x95, 0x72, 0xc7, 0x20, 0x9f, + 0x63, 0xad, 0x0b, 0x80, 0x4c, 0x76, 0x02, 0xf4, 0x8d, 0xed + ], + }, + TestVector { + description: "NU5 transaction #1", + version: 5, + lock_time: 2107650444, + expiry_height: 174774902, + txid: [ + 0xfb, 0xb8, 0x48, 0x32, 0xd0, 0x50, 0x7d, 0xfa, 0x41, 0xdc, 0xbe, 0x67, 0x89, 0x5c, + 0x68, 0x74, 0xa1, 0x7e, 0xc1, 0x68, 0xc6, 0x7b, 0x78, 0x0f, 0xb8, 0x36, 0x05, 0x1f, + 0x6b, 0xff, 0xcd, 0x2e + ], + is_coinbase: 0, + has_sapling: 1, + has_orchard: 0, + transparent_inputs: 0, + transparent_outputs: 1, + tx: vec![ + 0x05, 0x00, 0x00, 0x80, 0x0a, 0x27, 0xa7, 0x26, 0xb4, 0xd0, 0xd6, 0xc2, 0x8c, 0x31, + 0xa0, 0x7d, 0x76, 0xda, 0x6a, 0x0a, 0x00, 0x01, 0xc7, 0x25, 0xcc, 0x95, 0xb4, 0xcf, + 0x00, 0x00, 0x01, 0x52, 0x01, 0x19, 0x6a, 0x18, 0x6f, 0x9d, 0x08, 0x96, 0x28, 0xbb, + 0x67, 0xe0, 0x4d, 0xda, 0x46, 0xf8, 0xd3, 0xd1, 0xd0, 0x79, 0x51, 0x93, 0xda, 0x7e, + 0xec, 0x25, 0x61, 0x0e, 0x4f, 0x02, 0x7b, 0x78, 0x8a, 0x40, 0x6f, 0xfe, 0xec, 0x4b, + 0xfd, 0x7a, 0x9f, 0xd8, 0x77, 0xce, 0x52, 0x03, 0xfe, 0x6b, 0x05, 0x8d, 0x23, 0x1e, + 0xc7, 0x1a, 0xf9, 0xca, 0x18, 0xed, 0x5c, 0x73, 0x55, 0x06, 0xd7, 0xba, 0x28, 0x96, + 0xed, 0x05, 0x2d, 0xff, 0x00, 0x83, 0x61, 0xfc, 0x59, 0xfd, 0x9c, 0x48, 0xd2, 0x62, + 0xb9, 0x3a, 0xee, 0x45, 0x65, 0x2c, 0x78, 0x78, 0x05, 0xdf, 0xac, 0xe8, 0x3d, 0x04, + 0xe5, 0x24, 0x40, 0x00, 0x65, 0xb0, 0x9a, 0x50, 0xfc, 0xc2, 0x05, 0x00, 0x03, 0xac, + 0x84, 0xd0, 0xfe, 0x14, 0xba, 0x6a, 0xc1, 0x9a, 0xaf, 0x94, 0x00, 0xf2, 0xe3, 0x58, + 0x3f, 0xb1, 0x68, 0xd3, 0x03, 0xca, 0x7a, 0x88, 0x71, 0xdd, 
0xd9, 0xa2, 0x95, 0x04, + 0x1b, 0x30, 0x3a, 0x25, 0xa1, 0x66, 0xa1, 0xf4, 0x8e, 0xcc, 0x8f, 0xff, 0x84, 0x4f, + 0x09, 0xde, 0x67, 0x48, 0x04, 0x52, 0xa6, 0x78, 0x9d, 0x48, 0xb7, 0xbd, 0xbd, 0x81, + 0x1f, 0x0e, 0xda, 0xda, 0xa8, 0xee, 0x8e, 0xb9, 0x16, 0x17, 0x99, 0x2e, 0xad, 0x6f, + 0x8a, 0x8b, 0x9e, 0xf4, 0xc5, 0xad, 0xb6, 0xf2, 0x52, 0x48, 0xb2, 0x13, 0xf3, 0xd6, + 0x93, 0xf6, 0x3c, 0x0d, 0x5d, 0x15, 0xab, 0x54, 0x32, 0x88, 0x07, 0x14, 0x27, 0x35, + 0x79, 0x37, 0x3c, 0x49, 0xcb, 0xf1, 0x47, 0xf9, 0x4a, 0x84, 0xad, 0xe6, 0x48, 0x49, + 0xeb, 0x5a, 0x94, 0x04, 0x40, 0x13, 0x38, 0x96, 0xa2, 0x45, 0x55, 0xe4, 0x01, 0x55, + 0x99, 0xc0, 0x46, 0xdf, 0xa6, 0xf1, 0x4a, 0x28, 0x70, 0x53, 0x3a, 0xe4, 0x7d, 0x33, + 0xff, 0x81, 0x6b, 0x8e, 0x46, 0x63, 0xf0, 0x70, 0xc8, 0x0d, 0x8d, 0xb0, 0x1b, 0x43, + 0xc6, 0x0f, 0x5f, 0xc0, 0x2c, 0x85, 0xac, 0xf5, 0xe1, 0x06, 0xd3, 0xba, 0x71, 0xea, + 0x69, 0x3b, 0xa4, 0x65, 0xdd, 0x61, 0xff, 0x1d, 0x80, 0xfe, 0xee, 0xa1, 0xb6, 0xd5, + 0xa1, 0x63, 0xd0, 0xc9, 0x62, 0x43, 0x16, 0x36, 0xe1, 0xed, 0x62, 0x19, 0x66, 0xfe, + 0x28, 0x5b, 0xc9, 0x70, 0xa2, 0x66, 0xbb, 0x40, 0x8d, 0x4d, 0x48, 0xd5, 0x5e, 0xf7, + 0x17, 0x04, 0xf5, 0xb7, 0x98, 0x62, 0xbd, 0x80, 0x6a, 0x6a, 0x33, 0xe1, 0x60, 0x26, + 0x5b, 0xfd, 0x51, 0x91, 0x7c, 0x19, 0x22, 0x83, 0x7d, 0x00, 0xe2, 0x6b, 0x6d, 0xf4, + 0x37, 0xbe, 0x53, 0xdb, 0xd8, 0x88, 0x77, 0x83, 0xfa, 0x7a, 0x29, 0xee, 0x21, 0x57, + 0x23, 0x84, 0x31, 0xee, 0x2e, 0x11, 0xfd, 0x6e, 0x48, 0x39, 0x0f, 0xf8, 0xb8, 0xa2, + 0x44, 0x0a, 0xd2, 0x40, 0x67, 0x1f, 0x1f, 0x9a, 0x74, 0x3e, 0xb5, 0xdd, 0x54, 0x05, + 0xa0, 0xdf, 0x06, 0xda, 0xda, 0x01, 0x72, 0x5a, 0x19, 0x29, 0xaf, 0xf7, 0x0b, 0xb9, + 0x98, 0xa8, 0xff, 0x47, 0x13, 0xe4, 0x76, 0x76, 0xa0, 0xfe, 0x8c, 0x00, 0x3a, 0x70, + 0x36, 0x7f, 0x70, 0xf5, 0xe5, 0x19, 0xe7, 0xec, 0x6e, 0x0c, 0x2f, 0x55, 0xa3, 0x35, + 0x25, 0x69, 0x5f, 0xf8, 0x18, 0x40, 0xae, 0x26, 0xeb, 0xfb, 0xee, 0xdc, 0x7e, 0x98, + 0xb1, 0xd1, 0x88, 0x3f, 0x40, 0xe3, 0xde, 0xa5, 0x1c, 0x28, 0x01, 0x35, 0xe7, 
0x0c, + 0x00 + ], + }, + TestVector { + description: "NU5 transaction #2", + version: 5, + lock_time: 2944908881, + expiry_height: 379832470, + txid: [ + 0x1b, 0x92, 0x98, 0x61, 0xe8, 0x07, 0x60, 0x99, 0xd1, 0xdf, 0xe4, 0x6a, 0x42, 0x49, + 0x19, 0xef, 0x2e, 0xba, 0xf0, 0x1f, 0x68, 0x38, 0xa6, 0x08, 0x9e, 0xb5, 0x1f, 0x16, + 0x92, 0xd1, 0x20, 0x38 + ], + is_coinbase: 0, + has_sapling: 1, + has_orchard: 0, + transparent_inputs: 1, + transparent_outputs: 3, + tx: vec![ + 0x05, 0x00, 0x00, 0x80, 0x0a, 0x27, 0xa7, 0x26, 0xb4, 0xd0, 0xd6, 0xc2, 0x51, 0xbe, + 0x87, 0xaf, 0x96, 0xc8, 0xa3, 0x16, 0x01, 0x31, 0x04, 0xca, 0x6e, 0xdd, 0x29, 0x28, + 0x0c, 0xda, 0x86, 0x55, 0x67, 0xbd, 0xd4, 0xb4, 0xba, 0x47, 0x37, 0xe6, 0x1c, 0x3f, + 0x0a, 0xd8, 0x75, 0xa8, 0xde, 0xe6, 0xe6, 0xcd, 0xff, 0x26, 0x81, 0x88, 0x08, 0xff, + 0x9b, 0x05, 0x63, 0x6a, 0x63, 0x65, 0x63, 0x2a, 0x95, 0xb4, 0x56, 0x03, 0x22, 0x2b, + 0xf4, 0x14, 0x9d, 0xad, 0x03, 0x00, 0x08, 0x53, 0x53, 0x65, 0x53, 0x63, 0x6a, 0x53, + 0x53, 0xc2, 0xae, 0xb3, 0x9a, 0x15, 0xe0, 0x06, 0x00, 0x08, 0x51, 0x65, 0x52, 0x52, + 0x6a, 0x65, 0x6a, 0x65, 0xe8, 0xc0, 0x29, 0x78, 0x4c, 0x02, 0x02, 0x00, 0x06, 0x51, + 0x63, 0x51, 0x52, 0x65, 0x00, 0x02, 0x08, 0x61, 0xec, 0x7e, 0xde, 0x66, 0xa9, 0x55, + 0x50, 0x7b, 0x08, 0x98, 0x22, 0x19, 0x91, 0x7b, 0xdb, 0x2c, 0xde, 0x57, 0x50, 0x2f, + 0x9c, 0x27, 0xbe, 0x5f, 0x26, 0x87, 0x8f, 0x80, 0x97, 0xda, 0xe1, 0xd3, 0xa4, 0xb9, + 0x9c, 0xd6, 0x6c, 0x7a, 0xfd, 0xe6, 0x7f, 0xac, 0x47, 0xf0, 0x35, 0x8e, 0xc7, 0x83, + 0xbe, 0x35, 0x95, 0x47, 0x96, 0xe5, 0x97, 0x3a, 0xcf, 0xf6, 0x31, 0x98, 0xa3, 0x55, + 0xfc, 0x9f, 0xf5, 0xa0, 0x8f, 0x48, 0x66, 0x9e, 0x1c, 0x99, 0x68, 0xf5, 0x21, 0x7a, + 0xd3, 0x45, 0x2d, 0xad, 0x04, 0x78, 0x39, 0x07, 0x44, 0xe9, 0xd1, 0x67, 0x85, 0xcd, + 0x54, 0xa5, 0x03, 0x98, 0x37, 0xb1, 0x6f, 0x6a, 0x78, 0x86, 0x4a, 0xcb, 0xfd, 0xa2, + 0xf3, 0xc8, 0x8b, 0x20, 0x9d, 0xea, 0xd0, 0x7b, 0x2e, 0x10, 0xce, 0x8a, 0xaf, 0x9d, + 0x56, 0xfc, 0xd7, 0x9d, 0x7c, 0x76, 0x36, 0xc0, 0x60, 0xc4, 
0xa7, 0x56, 0x19, 0x1d, + 0xcf, 0x50, 0xbb, 0x0f, 0x97, 0x6f, 0x00, 0xe4, 0x36, 0x36, 0xa6, 0x83, 0x08, 0x69, + 0x2f, 0x40, 0x24, 0x4c, 0x39, 0x15, 0x34, 0x4b, 0x6f, 0x1f, 0x5e, 0xe7, 0x0e, 0x51, + 0xe1, 0x2b, 0x28, 0x53, 0x85, 0x53, 0x40, 0x3b, 0xe1, 0x49, 0x8e, 0x00, 0x75, 0xdb, + 0xda, 0x3e, 0x66, 0x6d, 0x9e, 0xbd, 0x18, 0xa1, 0x27, 0x21, 0xc9, 0x73, 0x49, 0xac, + 0x10, 0xe8, 0x01, 0x76, 0xce, 0xf2, 0x00, 0xde, 0xd6, 0x45, 0xea, 0x17, 0x1e, 0x59, + 0xc0, 0x03, 0xe9, 0xbd, 0x96, 0x90, 0x46, 0x51, 0x96, 0x3b, 0xa4, 0x46, 0xd0, 0x06, + 0xed, 0xf8, 0x4c, 0x1e, 0x75, 0xf4, 0xa9, 0x8e, 0xf9, 0x76, 0x21, 0xae, 0xfc, 0xe8, + 0x97, 0x28, 0x10, 0xa4, 0xc7, 0xfc, 0x1b, 0x3c, 0x7e, 0xaa, 0x83, 0xd4, 0xa6, 0x2b, + 0xd7, 0x10, 0x98, 0x96, 0x11, 0xdd, 0x7e, 0x2f, 0x4b, 0xdf, 0x15, 0x11, 0x36, 0xcc, + 0xa1, 0x4c, 0x55, 0xbb, 0xc5, 0x9d, 0x4e, 0x04, 0xc5, 0xfd, 0x9a, 0x61, 0x7c, 0x0b, + 0x70, 0x34, 0x1c, 0x98, 0xda, 0x53, 0xb6, 0x91, 0xa9, 0x65, 0xe0, 0x85, 0xa2, 0x25, + 0x20, 0x25, 0x21, 0x62, 0x26, 0x3f, 0x3a, 0x4b, 0xa5, 0x65, 0x7b, 0x8d, 0x0e, 0xcf, + 0x03, 0x86, 0x44, 0x1f, 0x87, 0x30, 0xd0, 0xf1, 0x4e, 0x86, 0x8a, 0x32, 0x46, 0x37, + 0xb0, 0xd3, 0x4a, 0x9d, 0x1d, 0xd6, 0xc3, 0x9f, 0x28, 0xfd, 0x9a, 0xf3, 0x50, 0xdc, + 0x23, 0x93, 0x79, 0x29, 0xe3, 0x79, 0x70, 0xf8, 0x87, 0x37, 0x01, 0xd3, 0xfa, 0x47, + 0x10, 0x10, 0xa7, 0x21, 0x40, 0x68, 0xad, 0x1b, 0x89, 0x02, 0x52, 0x26, 0x1d, 0xd9, + 0x0d, 0x89, 0xc5, 0xa6, 0xf2, 0x90, 0x4b, 0xc6, 0x16, 0xb0, 0x27, 0xd7, 0xbe, 0xc8, + 0x79, 0xb7, 0xa1, 0x78, 0x25, 0x4f, 0xdc, 0xaa, 0x99, 0x1b, 0x42, 0x2b, 0x7a, 0x96, + 0x93, 0xe7, 0x64, 0xa1, 0x27, 0xb1, 0x72, 0xa0, 0xdc, 0xca, 0xc4, 0x4f, 0x15, 0x27, + 0x08, 0x6c, 0x48, 0x89, 0x85, 0xf9, 0x23, 0x5e, 0x28, 0x82, 0xb4, 0x78, 0x16, 0x44, + 0xeb, 0xa9, 0xed, 0x09, 0x61, 0xca, 0x7a, 0x68, 0x45, 0xb5, 0x73, 0x65, 0xd8, 0x75, + 0x4b, 0xdc, 0x79, 0x1f, 0x81, 0xc8, 0x09, 0xd0, 0x12, 0xbd, 0x32, 0x9b, 0x6a, 0x44, + 0xbd, 0x3d, 0xfa, 0x34, 0x73, 0x5c, 0xe4, 0xc7, 0x38, 0xed, 0xef, 0xa4, 0x2d, 
0x3c, + 0x74, 0x09, 0x2b, 0x5c, 0xba, 0x9c, 0x35, 0x81, 0x57, 0xd2, 0xab, 0x8a, 0x68, 0x83, + 0x04, 0x0f, 0x40, 0xce, 0xc7, 0x98, 0xa6, 0x9d, 0x7e, 0x0e, 0xa3, 0xb4, 0x76, 0xd9, + 0x93, 0xd6, 0x96, 0xdb, 0x0a, 0xdd, 0xd5, 0x43, 0x3f, 0x9e, 0x7a, 0x0f, 0xfb, 0xe0, + 0x24, 0x26, 0x1e, 0x79, 0x8d, 0xad, 0x05, 0x8e, 0xc8, 0xde, 0x26, 0x7c, 0x94, 0x78, + 0xc8, 0x01, 0xff, 0x37, 0x1e, 0x41, 0xc0, 0xbc, 0x0c, 0xf4, 0x6a, 0x4a, 0x84, 0xd0, + 0xac, 0xa4, 0x73, 0xe8, 0x80, 0xde, 0x96, 0x29, 0x69, 0xe9, 0xde, 0x23, 0x99, 0xa2, + 0x99, 0x56, 0x80, 0xdd, 0x76, 0x8f, 0xd7, 0x6b, 0xc6, 0x89, 0x6f, 0xe0, 0x2a, 0xa4, + 0x82, 0xf7, 0x6c, 0x72, 0x52, 0xe6, 0x65, 0x04, 0xe8, 0x80, 0xd2, 0x76, 0xbf, 0x7d, + 0x55, 0x7b, 0x39, 0x6a, 0xde, 0x3b, 0xb4, 0x7a, 0x6b, 0x0e, 0x0d, 0xcf, 0x06, 0x3b, + 0x1a, 0xd8, 0x56, 0x69, 0x4f, 0x8e, 0xef, 0x54, 0xca, 0x7d, 0xf4, 0x2b, 0x41, 0xf9, + 0xc6, 0x15, 0x3e, 0xa7, 0x47, 0x1c, 0xd5, 0x4f, 0x90, 0x54, 0x7c, 0xc4, 0xd4, 0xef, + 0x5f, 0xb1, 0xbf, 0xe5, 0x82, 0x88, 0x22, 0x59, 0xc7, 0x77, 0xef, 0xc4, 0xeb, 0x8f, + 0x5d, 0x75, 0x53, 0x1c, 0x1b, 0x80, 0x1b, 0x72, 0x12, 0xc6, 0xf1, 0x45, 0x09, 0x78, + 0x40, 0x20, 0xcb, 0xc3, 0xb0, 0x0e, 0xb5, 0x31, 0xc5, 0x62, 0x44, 0x36, 0x89, 0x28, + 0xa8, 0x51, 0xae, 0x53, 0x7c, 0x74, 0x80, 0xee, 0x6e, 0x45, 0x1b, 0x29, 0x74, 0x32, + 0xee, 0x17, 0x58, 0x22, 0x99, 0x50, 0xcf, 0x78, 0x08, 0x49, 0x32, 0x6c, 0x3f, 0x28, + 0xdd, 0x53, 0xd6, 0x81, 0x19, 0xd2, 0x96, 0x95, 0x50, 0x12, 0xa2, 0x6f, 0x83, 0x3c, + 0xdd, 0x29, 0xc6, 0xf4, 0xc7, 0x16, 0xf1, 0xd3, 0x37, 0xd3, 0xf4, 0xd2, 0x1c, 0x7a, + 0x63, 0xf8, 0x54, 0xc9, 0xf4, 0xc1, 0xc4, 0xcc, 0xf1, 0x81, 0xad, 0x43, 0x16, 0xca, + 0xb1, 0x36, 0x46, 0x7c, 0x01, 0xd9, 0x6d, 0x36, 0xe2, 0x98, 0x1c, 0x86, 0xc4, 0x76, + 0x56, 0x7d, 0x83, 0x77, 0x6b, 0x73, 0x37, 0x35, 0xd5, 0x65, 0x8a, 0x48, 0xf9, 0x89, + 0x7c, 0xf1, 0xe5, 0x05, 0x2b, 0x37, 0xec, 0x1c, 0x88, 0x91, 0x47, 0x36, 0xd9, 0xf9, + 0x7c, 0x54, 0x99, 0xd7, 0x3d, 0x92, 0x3b, 0x45, 0x00, 0x69, 0x4f, 0xfa, 0x57, 0x35, + 0xc9, 0x3c, 
0xdb, 0x87, 0xb3, 0x5d, 0x82, 0x95, 0x49, 0xb1, 0xc6, 0x38, 0x3e, 0x95, + 0xfd, 0x19, 0x02, 0xad, 0x29, 0x80, 0xf2, 0xa3, 0xa2, 0x48, 0x3a, 0xce, 0x74, 0xb7, + 0x64, 0x3d, 0x8e, 0xae, 0x8d, 0x07, 0x9a, 0xa0, 0x06, 0x75, 0x41, 0x00, 0x6b, 0x94, + 0xa6, 0xf9, 0x13, 0xdc, 0xff, 0x13, 0xd6, 0x7c, 0xd9, 0xa8, 0xcf, 0xdf, 0x30, 0xb0, + 0xc3, 0xd1, 0x5a, 0xaa, 0x47, 0x0b, 0x3f, 0x89, 0x56, 0x10, 0x51, 0x42, 0xfa, 0x26, + 0x11, 0xfe, 0xda, 0xa4, 0x3f, 0xac, 0xbb, 0x3f, 0x05, 0x96, 0xf6, 0x78, 0x87, 0xcd, + 0xee, 0x91, 0x42, 0xc5, 0x09, 0x0a, 0x84, 0xe6, 0x25, 0x29, 0x31, 0xff, 0xcf, 0x61, + 0xa5, 0x0a, 0x4b, 0x92, 0x85, 0x30, 0x60, 0xe8, 0xb8, 0x7e, 0x10, 0xce, 0xa8, 0xce, + 0x00, 0xe4, 0x66, 0x5e, 0x5f, 0x93, 0x1f, 0x0e, 0x08, 0xdc, 0x52, 0x47, 0xbe, 0x1a, + 0xed, 0xc7, 0x9e, 0xbb, 0x7c, 0x20, 0x16, 0x2f, 0xca, 0x7b, 0xf9, 0x0e, 0x58, 0x83, + 0x02, 0x5f, 0xc9, 0x24, 0x36, 0x8d, 0x42, 0x45, 0x0b, 0x4f, 0xb7, 0xa7, 0xe1, 0x91, + 0x0e, 0xdd, 0x8d, 0x29, 0x5f, 0x03, 0xd4, 0xde, 0x03, 0xde, 0x60, 0x51, 0xd1, 0xfc, + 0xf2, 0x87, 0xf5, 0x09, 0xfd, 0x5a, 0x1e, 0x20, 0x2f, 0x07, 0x00, 0x9f, 0x24, 0x9e, + 0x2f, 0x82, 0x32, 0x73, 0x7b, 0x8b, 0x33, 0x6b, 0x5f, 0x55, 0x40, 0x0b, 0x06, 0x79, + 0xba, 0x0c, 0x1e, 0xf0, 0x20, 0xc9, 0x26, 0x85, 0xa4, 0x24, 0x91, 0x79, 0x95, 0xea, + 0x63, 0xb0, 0x14, 0xa0, 0x7b, 0x78, 0x45, 0x99, 0x7a, 0x5b, 0x11, 0x6b, 0xb2, 0xc2, + 0xf4, 0xc4, 0xe5, 0x64, 0x6e, 0x63, 0x08, 0x2c, 0x5e, 0x3f, 0xee, 0x50, 0x92, 0xff, + 0x2f, 0xa8, 0x9a, 0xe3, 0x2a, 0xd6, 0x99, 0x07, 0x50, 0x4d, 0x68, 0x85, 0xb5, 0xbd, + 0x72, 0xc8, 0x23, 0xd4, 0xc7, 0x0d, 0x5e, 0xd4, 0x5c, 0xb0, 0x0c, 0x3e, 0x04, 0x05, + 0x89, 0x2c, 0x88, 0x83, 0x74, 0x53, 0xfe, 0xf2, 0xef, 0xb7, 0x51, 0x37, 0xf3, 0xc2, + 0xab, 0xbc, 0x35, 0x47, 0xdf, 0x86, 0xee, 0x01, 0x36, 0xb6, 0xe8, 0x5f, 0x33, 0xc5, + 0x25, 0x58, 0x3f, 0xfe, 0x27, 0xe6, 0xff, 0x48, 0xa8, 0x0d, 0x12, 0x4e, 0xf8, 0x01, + 0xd3, 0x24, 0x75, 0x4e, 0x16, 0x1d, 0x8b, 0xd6, 0x77, 0x44, 0xdf, 0x8a, 0xc5, 0x84, + 0x9b, 0x65, 0x5a, 0xcf, 0x9f, 
0xa7, 0xb2, 0xea, 0x84, 0x62, 0x1d, 0x8e, 0x4d, 0xd8, + 0x57, 0x6d, 0xa7, 0x5e, 0xd1, 0xb4, 0x8a, 0xcb, 0x91, 0x08, 0x03, 0x27, 0x3e, 0x48, + 0x37, 0x73, 0xa9, 0x9d, 0x58, 0xcb, 0x70, 0x40, 0x8f, 0x3f, 0x23, 0xa3, 0xea, 0x71, + 0xd6, 0x73, 0x23, 0xb8, 0xf9, 0xfd, 0x51, 0x93, 0xb8, 0xdb, 0x90, 0x6a, 0x18, 0x86, + 0xe4, 0x26, 0xd0, 0xd3, 0x21, 0x6e, 0x7f, 0x0f, 0x42, 0xa9, 0xaa, 0xe0, 0x0f, 0xc3, + 0x79, 0x12, 0x20, 0xdb, 0xb1, 0x03, 0x15, 0x19, 0xbc, 0x1e, 0xcc, 0xfa, 0x2d, 0x6a, + 0x59, 0xb2, 0x23, 0x56, 0xa7, 0x71, 0x96, 0x18, 0xaa, 0xb5, 0xc7, 0x57, 0xf8, 0x82, + 0x1e, 0xfc, 0x3e, 0x07, 0x1b, 0x75, 0xf2, 0x15, 0xb2, 0x00, 0xb7, 0xd2, 0x99, 0x98, + 0xed, 0x7a, 0xe0, 0x05, 0x7f, 0xb2, 0x32, 0x9c, 0xa9, 0x13, 0x6d, 0xd2, 0xbc, 0x51, + 0xa6, 0x59, 0x01, 0x71, 0xdf, 0xca, 0x3b, 0xcb, 0x93, 0x6b, 0x11, 0xc6, 0x3c, 0x03, + 0xbb, 0x7f, 0xce, 0x30, 0xa0, 0x5f, 0x9b, 0x6f, 0x8f, 0xf3, 0x54, 0x06, 0x04, 0x50, + 0xa3, 0x45, 0x2d, 0xa1, 0x86, 0xe9, 0x3d, 0x6c, 0x32, 0xda, 0x62, 0x72, 0xb8, 0x9b, + 0xc4, 0xd6, 0xd5, 0xe8, 0x47, 0x8f, 0x29, 0x91, 0x01, 0x98, 0x97, 0x11, 0xa9, 0xd2, + 0x20, 0x97, 0xcd, 0xb7, 0x0c, 0x15, 0x0e, 0xd2, 0x6d, 0xf4, 0x7b, 0x0c, 0xdd, 0xee, + 0x52, 0x1b, 0x4f, 0x1e, 0x98, 0x96, 0xa1, 0xb6, 0x97, 0x86, 0x53, 0xa4, 0xe3, 0x8b, + 0x0d, 0x28, 0x52, 0x6e, 0x1e, 0x3a, 0x87, 0x43, 0x5a, 0xc4, 0xfd, 0x30, 0x97, 0xaf, + 0xe3, 0x21, 0xe7, 0x2d, 0x40, 0xc4, 0x70, 0xf3, 0xb5, 0x3f, 0x5c, 0x35, 0x8d, 0x2e, + 0x53, 0x69, 0x7c, 0xaf, 0x66, 0x9d, 0xea, 0xa1, 0x1d, 0xe7, 0x7c, 0x98, 0x4a, 0x73, + 0x0e, 0x5b, 0xf7, 0xb3, 0x8e, 0xf6, 0x58, 0x9a, 0x5a, 0xa7, 0x55, 0x81, 0xbf, 0xd3, + 0xc0, 0x07, 0x8a, 0x63, 0xa3, 0x92, 0x96, 0xe9, 0xbf, 0xa8, 0x0e, 0x6e, 0x89, 0x85, + 0xce, 0x8d, 0x0b, 0x32, 0xd0, 0x7c, 0xea, 0x70, 0xe6, 0x79, 0xe0, 0x27, 0x45, 0xc1, + 0xa6, 0x3e, 0x93, 0x14, 0xd8, 0xd9, 0x98, 0xd7, 0x34, 0x57, 0xa6, 0xb9, 0x22, 0x25, + 0xe1, 0xaf, 0x36, 0x9f, 0x67, 0xc5, 0x0f, 0xc1, 0xed, 0x2a, 0x0a, 0x6b, 0x3a, 0x16, + 0x88, 0xda, 0x4c, 0x88, 0x00, 0xe7, 0x77, 0x98, 
0x0e, 0x28, 0x60, 0xf2, 0x71, 0x60, + 0x0c, 0x68, 0x1e, 0xdb, 0xe6, 0xbb, 0x01, 0xf0, 0x6c, 0x6e, 0xb4, 0x7c, 0x0a, 0x69, + 0xf0, 0xeb, 0x40, 0x43, 0xdc, 0xea, 0x0e, 0x60, 0x82, 0x25, 0x67, 0x74, 0x33, 0xa5, + 0x06, 0xbd, 0x1a, 0x69, 0x4b, 0xfc, 0x01, 0x59, 0x62, 0xba, 0xad, 0x62, 0x96, 0xb8, + 0xcf, 0x2f, 0xe2, 0x8d, 0x34, 0x9a, 0xfc, 0x7f, 0x80, 0x55, 0x22, 0x21, 0x74, 0xa0, + 0x39, 0x3a, 0x1e, 0x7d, 0xb3, 0xb3, 0x4d, 0xb4, 0x0b, 0x4f, 0x38, 0x24, 0x41, 0xdd, + 0xe0, 0x0c, 0xb6, 0x83, 0xa4, 0x04, 0x8c, 0xe5, 0x4d, 0x42, 0x20, 0x90, 0x57, 0x24, + 0xb3, 0x09, 0xc7, 0x99, 0x92, 0x4b, 0x85, 0x4a, 0xfa, 0x37, 0x7b, 0x80, 0x1a, 0x03, + 0x52, 0xfc, 0x44, 0x50, 0xb3, 0x35, 0x27, 0x7a, 0xda, 0xd7, 0x61, 0xe4, 0x8a, 0x1d, + 0x1d, 0xd3, 0x78, 0x93, 0x6a, 0x49, 0x1e, 0x28, 0x6c, 0xaf, 0xc7, 0x00, 0xb4, 0x8e, + 0xdf, 0x15, 0xf1, 0xc2, 0xd6, 0xed, 0xf1, 0xa2, 0x4e, 0x0e, 0x51, 0xb3, 0x98, 0x55, + 0x64, 0xeb, 0xa9, 0x69, 0xcd, 0x6e, 0xe6, 0x59, 0xba, 0xae, 0xf7, 0x46, 0xe1, 0x3a, + 0xba, 0x64, 0xaf, 0xad, 0x58, 0xaf, 0x52, 0xf4, 0x28, 0x17, 0x36, 0x45, 0x75, 0x7a, + 0x40, 0x7e, 0x1f, 0xdf, 0xd9, 0x89, 0x38, 0x0c, 0x02, 0xbc, 0xc3, 0xc3, 0x7f, 0x48, + 0x90, 0xc0, 0x8e, 0xb9, 0x31, 0x62, 0xcf, 0x78, 0xbc, 0x3c, 0x74, 0x53, 0xf3, 0xf9, + 0x92, 0xa7, 0x94, 0x53, 0x4c, 0x07, 0xe3, 0x96, 0x8d, 0x82, 0x70, 0xaa, 0x19, 0x1f, + 0x67, 0x80, 0x0a, 0x0b, 0xb3, 0xe7, 0xbf, 0xa5, 0x4b, 0x0f, 0x6f, 0xa5, 0x3e, 0xe8, + 0xfb, 0x13, 0x69, 0x82, 0xce, 0x71, 0xf4, 0x08, 0x64, 0xb5, 0x4d, 0x00, 0x45, 0x1a, + 0xf3, 0xf5, 0x32, 0x74, 0x22, 0x42, 0x16, 0x06, 0xea, 0x10, 0xc0, 0xd6, 0x12, 0x7c, + 0x02, 0xf9, 0x1a, 0xd3, 0xae, 0x96, 0x76, 0xdc, 0xf5, 0xae, 0x00, 0xba, 0xef, 0xa7, + 0x08, 0x47, 0xe0, 0x37, 0xb5, 0xb4, 0x8d, 0x0f, 0x20, 0x9b, 0x35, 0xa3, 0x91, 0xc3, + 0x93, 0x8a, 0xe1, 0x45, 0x6e, 0x53, 0x93, 0xb9, 0xa7, 0x54, 0x69, 0xd8, 0x4b, 0xd0, + 0x2c, 0x7f, 0xc4, 0x1b, 0xfc, 0xdf, 0x98, 0x95, 0x1f, 0x50, 0xe8, 0x3f, 0x19, 0xa0, + 0x00, 0xa9, 0xe4, 0x53, 0xf6, 0x21, 0x67, 0xe7, 0x35, 0x0f, 0x92, 
0x36, 0x08, 0x00 + ], + }, + TestVector { + description: "NU5 transaction #3", + version: 5, + lock_time: 575831229, + expiry_height: 351879606, + txid: [ + 0x8c, 0xab, 0x39, 0xac, 0xc5, 0x34, 0xce, 0xc8, 0xfe, 0x21, 0x7c, 0x1d, 0x25, 0x47, + 0xc4, 0x4e, 0xca, 0x2b, 0x46, 0xf8, 0x6a, 0x94, 0xd0, 0x5b, 0x8d, 0x69, 0xe3, 0xe5, + 0xbf, 0x1b, 0xad, 0x18 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 0, + transparent_inputs: 1, + transparent_outputs: 0, + tx: vec![ + 0x05, 0x00, 0x00, 0x80, 0x0a, 0x27, 0xa7, 0x26, 0xb4, 0xd0, 0xd6, 0xc2, 0xbd, 0x7c, + 0x52, 0x22, 0xb6, 0x41, 0xf9, 0x14, 0x01, 0x6c, 0xa8, 0xa2, 0x35, 0x50, 0xca, 0xd8, + 0xac, 0x0d, 0xdb, 0x76, 0x45, 0xe2, 0xb9, 0x71, 0x3b, 0xe7, 0xe1, 0xd7, 0x68, 0xf8, + 0x8e, 0xeb, 0x74, 0xc5, 0xd3, 0x2d, 0x8a, 0x2f, 0x42, 0x7f, 0x72, 0x16, 0xc7, 0x67, + 0xa6, 0x07, 0x00, 0x65, 0xac, 0x51, 0x65, 0x53, 0xac, 0x24, 0xbe, 0x7b, 0x9d, 0x00, + 0x00, 0x00, 0x00 + ], + }, + TestVector { + description: "NU5 transaction #4", + version: 5, + lock_time: 307746698, + expiry_height: 111487689, + txid: [ + 0x5a, 0xcc, 0xf8, 0x36, 0xec, 0x98, 0x67, 0xc6, 0xbb, 0x92, 0xab, 0xdf, 0x3d, 0x7f, + 0x30, 0x82, 0xc7, 0x64, 0x9a, 0xcc, 0x8e, 0xfb, 0x35, 0xeb, 0x97, 0x6c, 0xfe, 0xe1, + 0xa8, 0xc0, 0xb3, 0x41 + ], + is_coinbase: 0, + has_sapling: 0, + has_orchard: 1, + transparent_inputs: 0, + transparent_outputs: 0, + tx: vec![ + 0x05, 0x00, 0x00, 0x80, 0x0a, 0x27, 0xa7, 0x26, 0xb4, 0xd0, 0xd6, 0xc2, 0x8a, 0xd7, + 0x57, 0x12, 0xc9, 0x2a, 0xa5, 0x06, 0x00, 0x00, 0x00, 0x00, 0x04, 0x32, 0x0d, 0x2f, + 0x68, 0x0a, 0xe2, 0xa0, 0x90, 0x0a, 0x5c, 0xe0, 0x72, 0xf0, 0x01, 0xa2, 0x16, 0x51, + 0x9d, 0x70, 0xb4, 0x4f, 0x5c, 0xb8, 0x43, 0x22, 0xc1, 0xd2, 0x64, 0x9e, 0x91, 0xa9, + 0x15, 0xa8, 0xa2, 0xc0, 0x49, 0xde, 0xf0, 0x97, 0x7f, 0x61, 0xea, 0x11, 0x23, 0x14, + 0x06, 0xcd, 0x10, 0x95, 0x6d, 0x16, 0x55, 0x78, 0xbb, 0x29, 0xe4, 0x76, 0x96, 0x76, + 0x9a, 0x58, 0x0e, 0x07, 0x01, 0x05, 0x40, 0x76, 0x5a, 0x2a, 0xe1, 0x9c, 0xf2, 0x09, + 0x9f, 0x34, 
0x14, 0xbb, 0x01, 0xd2, 0x6e, 0x6e, 0x29, 0x6c, 0x81, 0x18, 0xc6, 0x36, + 0xae, 0x6e, 0x20, 0x08, 0xca, 0x61, 0x24, 0x35, 0xba, 0xf8, 0x3e, 0xff, 0x94, 0x6b, + 0x09, 0xfc, 0x66, 0xfe, 0x02, 0x55, 0xd3, 0x2d, 0x51, 0x8f, 0x69, 0x5d, 0x78, 0xc7, + 0xea, 0xe2, 0x55, 0x7c, 0xc6, 0xe8, 0xec, 0x67, 0xa3, 0x65, 0x7e, 0x80, 0x21, 0xe1, + 0x8c, 0x92, 0xc8, 0xd5, 0xd8, 0xa7, 0xc3, 0xb8, 0xcd, 0x74, 0xf6, 0xe5, 0x1d, 0x68, + 0xbe, 0xda, 0x41, 0xec, 0xb5, 0x7b, 0x6f, 0x67, 0x87, 0x82, 0xb5, 0x39, 0x3f, 0xfc, + 0xdc, 0x67, 0x8b, 0xca, 0xb0, 0x28, 0x33, 0xce, 0x47, 0xd4, 0xa0, 0x98, 0x65, 0x72, + 0x94, 0xec, 0x10, 0xb2, 0x99, 0x74, 0x22, 0x22, 0xd0, 0xbf, 0x74, 0x3f, 0xfa, 0xa3, + 0x60, 0xec, 0xad, 0xc3, 0x7b, 0x86, 0x2c, 0x62, 0xad, 0xa0, 0x6c, 0x8e, 0x73, 0x2c, + 0x71, 0x53, 0xb4, 0x29, 0x19, 0x66, 0x29, 0x35, 0x86, 0xaf, 0xb5, 0x01, 0x98, 0xca, + 0xce, 0x9b, 0x40, 0xc8, 0xea, 0x97, 0x14, 0x32, 0x5c, 0x8a, 0x37, 0x05, 0x08, 0x24, + 0xfa, 0x75, 0x62, 0xd2, 0xc9, 0x25, 0x2c, 0x34, 0xa9, 0x84, 0x50, 0x27, 0xd6, 0x63, + 0x90, 0xe9, 0x56, 0xb2, 0x5e, 0x16, 0x6c, 0x44, 0x95, 0xd3, 0xde, 0xd3, 0xf7, 0xac, + 0xcf, 0x74, 0x76, 0x38, 0x99, 0x47, 0x35, 0x11, 0x34, 0x12, 0x98, 0xfe, 0xb1, 0x89, + 0xb7, 0xed, 0x34, 0xe5, 0x67, 0xd7, 0x2f, 0x1d, 0xf4, 0xbf, 0x69, 0x7f, 0x71, 0x46, + 0x49, 0x3f, 0xa5, 0xc2, 0x36, 0x91, 0x22, 0x7b, 0x90, 0xb2, 0x51, 0x22, 0xc5, 0x40, + 0xdf, 0x0a, 0x6f, 0x2e, 0xc0, 0x6f, 0x9d, 0x89, 0xa3, 0xf7, 0x71, 0xe9, 0xb8, 0xed, + 0x74, 0x79, 0x40, 0x85, 0x51, 0x06, 0xd5, 0xea, 0x71, 0xba, 0x89, 0xe8, 0xf2, 0x0c, + 0xde, 0xa6, 0x9a, 0x77, 0x8a, 0x59, 0xe4, 0xdf, 0x79, 0x28, 0xc0, 0x35, 0x56, 0x23, + 0x31, 0xc8, 0xe1, 0x62, 0xb8, 0xfd, 0x5e, 0xbb, 0xd5, 0xe2, 0xb3, 0x7b, 0xea, 0x7a, + 0xf0, 0x69, 0x07, 0x10, 0x40, 0xc3, 0x7c, 0x1a, 0x1c, 0x37, 0xf0, 0x76, 0x0f, 0xed, + 0x7d, 0xb7, 0xfa, 0x70, 0xa9, 0x48, 0x94, 0x03, 0x00, 0x45, 0x76, 0xa2, 0xcc, 0xe9, + 0x0a, 0x39, 0x4b, 0x5e, 0xc5, 0x8b, 0x2e, 0x5d, 0x0e, 0x1a, 0xf8, 0xb0, 0x29, 0x6d, + 0x0b, 0xf0, 0x2c, 0x55, 0x97, 
0xa4, 0x33, 0x54, 0x14, 0x43, 0x35, 0xe0, 0x6a, 0x80, + 0x1c, 0x6e, 0x7c, 0x73, 0x29, 0x7d, 0xfe, 0x0b, 0x32, 0xfc, 0xb8, 0x75, 0x33, 0x81, + 0x71, 0xdd, 0x1e, 0xeb, 0xeb, 0x12, 0x3f, 0xea, 0xfa, 0x32, 0xa5, 0xd8, 0xc7, 0xce, + 0x58, 0x39, 0x0e, 0xa2, 0xdf, 0x26, 0xc6, 0x88, 0x88, 0xda, 0xf3, 0x81, 0x6b, 0x7d, + 0x02, 0x97, 0xa1, 0x7b, 0x5f, 0x5d, 0x20, 0x8d, 0xe9, 0x22, 0xe7, 0x73, 0x97, 0x2b, + 0x95, 0xe6, 0x96, 0x5e, 0x58, 0xfb, 0xf6, 0x4f, 0xae, 0x06, 0xf0, 0xc3, 0x89, 0x6e, + 0x0b, 0x57, 0x89, 0x0d, 0xd7, 0xf3, 0xc6, 0x4c, 0x3d, 0x5c, 0xeb, 0xb6, 0xa7, 0x44, + 0xc5, 0x93, 0x38, 0x61, 0x22, 0x71, 0x82, 0x08, 0x04, 0x95, 0x7a, 0xfd, 0x67, 0xc5, + 0x07, 0x79, 0x90, 0x3e, 0x7c, 0x53, 0xd1, 0x15, 0x54, 0x5f, 0x06, 0x6c, 0x73, 0x41, + 0x67, 0x18, 0x9b, 0x55, 0x70, 0x50, 0xee, 0x81, 0x4a, 0x88, 0xf9, 0x93, 0xd9, 0xca, + 0x56, 0x08, 0x4d, 0x09, 0xb7, 0x84, 0x88, 0xa3, 0xba, 0x1d, 0x8a, 0x3d, 0x6b, 0x48, + 0x9a, 0xfd, 0xf2, 0x32, 0xd6, 0xd0, 0x70, 0xa1, 0xb5, 0x06, 0x0c, 0xaa, 0x44, 0x3d, + 0x0c, 0x7e, 0xe5, 0x19, 0x04, 0x54, 0x7f, 0xaf, 0x53, 0x95, 0xcb, 0xd0, 0xba, 0x99, + 0x48, 0x0a, 0xd0, 0x4a, 0xe0, 0xe1, 0x91, 0x5b, 0xd7, 0x7f, 0xa2, 0x6d, 0x04, 0x17, + 0x5b, 0x00, 0xfd, 0xc8, 0x1e, 0xf6, 0xf3, 0x79, 0x23, 0x72, 0x49, 0x27, 0xf0, 0x82, + 0x66, 0xb6, 0x86, 0x40, 0x93, 0x13, 0xdc, 0x13, 0xbc, 0x39, 0x9d, 0x19, 0x77, 0xb8, + 0xf6, 0x58, 0x8c, 0x0e, 0x08, 0x72, 0x10, 0xf0, 0x51, 0xcf, 0x6e, 0x36, 0xe1, 0x4e, + 0x32, 0xaa, 0x23, 0xba, 0x6a, 0xe4, 0x33, 0x1f, 0x22, 0x39, 0xe7, 0x05, 0xf6, 0x79, + 0x54, 0x2f, 0xbd, 0x4e, 0xd2, 0xbf, 0x31, 0x91, 0x24, 0x36, 0x81, 0xf8, 0x27, 0x89, + 0x6b, 0x1b, 0xb1, 0xc4, 0xb7, 0x8b, 0x34, 0xc4, 0x87, 0xa4, 0xed, 0xfa, 0x97, 0xd3, + 0x6d, 0x62, 0xee, 0x32, 0x49, 0xef, 0xe0, 0x94, 0xc3, 0x87, 0x8a, 0xde, 0xdf, 0x9f, + 0x2b, 0x17, 0xd5, 0x11, 0x99, 0x80, 0x4f, 0x42, 0x9c, 0xd7, 0x04, 0xa7, 0xc8, 0x6c, + 0x85, 0x0c, 0xe1, 0x5d, 0x3c, 0x5f, 0x01, 0xd1, 0xad, 0x17, 0xeb, 0xb6, 0xc2, 0x88, + 0x3f, 0x28, 0xe8, 0x15, 0xbc, 0x45, 0x2a, 0x56, 
0x07, 0x98, 0x05, 0xa5, 0xdd, 0x69, + 0x00, 0xe5, 0x5f, 0x47, 0x7e, 0xca, 0xc2, 0x14, 0x3f, 0x02, 0xee, 0x98, 0xc8, 0xd9, + 0xb1, 0xb7, 0x03, 0x93, 0xa1, 0x70, 0xba, 0x25, 0x48, 0x06, 0xb4, 0x08, 0x5b, 0x8d, + 0xf9, 0xca, 0x04, 0x07, 0x18, 0x42, 0xa3, 0xaf, 0x93, 0x33, 0x16, 0x83, 0x0d, 0x53, + 0xa7, 0xcb, 0x88, 0xd2, 0xa9, 0x82, 0x3b, 0xcd, 0xfb, 0xec, 0x8f, 0x18, 0xc8, 0x6a, + 0xc3, 0xdf, 0x89, 0x42, 0x38, 0x00, 0x1b, 0xa8, 0xfa, 0x31, 0x3f, 0x80, 0xcf, 0xe7, + 0x5f, 0x7c, 0xb5, 0xd9, 0x73, 0xcc, 0x77, 0xf3, 0x21, 0xf1, 0x95, 0x2f, 0x30, 0x50, + 0x18, 0xc0, 0xbf, 0x23, 0x8b, 0x92, 0x30, 0x79, 0xa1, 0x89, 0x66, 0x3a, 0xca, 0xc1, + 0x29, 0xa2, 0x36, 0xda, 0x19, 0x03, 0x6b, 0x1c, 0x2b, 0xc1, 0xea, 0x96, 0x75, 0xda, + 0xfc, 0x71, 0x6b, 0x4c, 0xf3, 0x9b, 0xcb, 0x1c, 0xbf, 0x6d, 0x1b, 0xd7, 0x68, 0xb5, + 0xf2, 0x38, 0x12, 0xd9, 0xcc, 0x96, 0x7d, 0x2a, 0xcf, 0x53, 0xe9, 0xd5, 0xc0, 0xf5, + 0xf8, 0x07, 0xd6, 0x86, 0x53, 0xb4, 0xd7, 0x77, 0xda, 0xca, 0x65, 0x91, 0x2f, 0xe0, + 0x90, 0x26, 0x75, 0x0d, 0x5d, 0xdd, 0x41, 0xb5, 0xc8, 0xce, 0x33, 0xfa, 0x10, 0x35, + 0x83, 0x58, 0xdc, 0xf8, 0x3f, 0xf5, 0x41, 0x39, 0xf9, 0xe6, 0x8c, 0xe8, 0x42, 0x0c, + 0xe7, 0xa1, 0xb7, 0x46, 0x27, 0x9d, 0x01, 0x67, 0x5b, 0xab, 0x09, 0x60, 0xbd, 0x84, + 0x57, 0xd1, 0x8d, 0x75, 0xc0, 0x14, 0x3b, 0xd0, 0x23, 0x9d, 0x5f, 0x72, 0x79, 0xef, + 0x02, 0xa1, 0x63, 0xa4, 0xd5, 0x2f, 0x28, 0x15, 0x82, 0x72, 0x6b, 0x89, 0x3b, 0x86, + 0x01, 0xbb, 0x07, 0xac, 0xf0, 0xbb, 0xe7, 0x32, 0x89, 0xa2, 0x7d, 0x16, 0x0c, 0x9b, + 0x74, 0x84, 0x4a, 0xa7, 0x23, 0x65, 0x7e, 0x37, 0xe7, 0xe0, 0xb9, 0x1b, 0x11, 0xf7, + 0x99, 0x35, 0x28, 0x5b, 0xfa, 0x5e, 0x79, 0x08, 0xea, 0x28, 0x13, 0x4a, 0xf6, 0x12, + 0x5d, 0x36, 0x84, 0xa8, 0x95, 0x3f, 0x49, 0x8c, 0xc9, 0x68, 0x19, 0xa8, 0x9f, 0x16, + 0x0a, 0x08, 0x7b, 0xa7, 0xdb, 0x00, 0x1a, 0x34, 0xfa, 0xc2, 0xd1, 0xf5, 0x0e, 0x7d, + 0xb2, 0xcc, 0xcc, 0xaf, 0xb6, 0x01, 0xd3, 0x53, 0xa6, 0x46, 0x38, 0xc9, 0x08, 0x47, + 0xe2, 0x4c, 0xe1, 0xd7, 0xa8, 0xd0, 0xaf, 0xab, 0x74, 0xdb, 0x88, 
0xdb, 0x5c, 0xa1, + 0x02, 0xd5, 0xe0, 0xe1, 0xaa, 0xc7, 0xcc, 0xf9, 0x66, 0xb0, 0xa8, 0x13, 0x67, 0x09, + 0x5d, 0xa2, 0x1d, 0xc4, 0xb7, 0x36, 0x55, 0x95, 0x30, 0x80, 0xe3, 0x54, 0xbd, 0x22, + 0x09, 0xf2, 0x66, 0x82, 0x10, 0xe9, 0x47, 0x41, 0x27, 0x31, 0x1d, 0x93, 0x45, 0xce, + 0x1e, 0xbd, 0x3a, 0xe5, 0x24, 0x24, 0x5b, 0xbb, 0x44, 0x7a, 0x44, 0x50, 0x80, 0xb5, + 0xfa, 0x23, 0xcd, 0xfe, 0x98, 0xb3, 0xf6, 0xf6, 0x3c, 0x44, 0xeb, 0xe7, 0x22, 0xb9, + 0x7a, 0x79, 0x10, 0xdf, 0x7e, 0xa6, 0x22, 0x5e, 0xd9, 0xdc, 0xb4, 0x49, 0x84, 0x93, + 0xe8, 0xef, 0x55, 0x31, 0xf9, 0xf9, 0x77, 0x31, 0x84, 0xd7, 0xb4, 0xf5, 0x36, 0x77, + 0xb1, 0xd0, 0x44, 0xf6, 0xf1, 0x44, 0x07, 0xde, 0x5d, 0x67, 0xe0, 0x77, 0xd2, 0x0f, + 0x2e, 0x9d, 0x7f, 0xd7, 0x15, 0xbf, 0x9b, 0x19, 0x9b, 0x93, 0xb9, 0x84, 0x02, 0x46, + 0xef, 0x9c, 0x07, 0x35, 0xe4, 0x88, 0xff, 0x7c, 0x80, 0xb9, 0x41, 0x78, 0xac, 0xa3, + 0x1b, 0x13, 0xc3, 0x7c, 0x9a, 0xeb, 0x7f, 0x62, 0xe2, 0xd8, 0x58, 0x97, 0xea, 0x2e, + 0x2a, 0x23, 0x28, 0xee, 0x03, 0xc9, 0x7f, 0x2f, 0x3f, 0x4d, 0x20, 0xa8, 0xe7, 0x30, + 0x24, 0xc5, 0x50, 0x8e, 0xee, 0xbd, 0x3a, 0x12, 0x67, 0x31, 0xcd, 0xbf, 0x21, 0xfd, + 0xad, 0xb1, 0x4b, 0x4e, 0x59, 0x1c, 0xba, 0xb1, 0x44, 0xbe, 0xc3, 0x5a, 0x72, 0xac, + 0xbf, 0x94, 0x84, 0xf4, 0x7a, 0x10, 0xb9, 0x1e, 0xfc, 0x04, 0x27, 0xfe, 0xcf, 0x3f, + 0xfc, 0xf1, 0x69, 0xd7, 0x00, 0x59, 0xb4, 0x02, 0x79, 0xff, 0xa0, 0x2c, 0x51, 0x06, + 0x74, 0x27, 0xa0, 0xda, 0xea, 0xd6, 0xf9, 0x4b, 0xaf, 0xe4, 0xc1, 0x23, 0x3a, 0x22, + 0x25, 0xeb, 0x56, 0x00, 0x3f, 0xc3, 0x85, 0x42, 0x0d, 0x5a, 0x9f, 0xf3, 0xd5, 0x91, + 0x55, 0x23, 0xa0, 0x8c, 0x87, 0xeb, 0x2e, 0xa6, 0x69, 0x17, 0x23, 0x3a, 0x73, 0x25, + 0xfe, 0x79, 0x3f, 0x41, 0x07, 0x6d, 0x64, 0x25, 0x5a, 0xbd, 0x15, 0x21, 0x47, 0x66, + 0x60, 0xe9, 0x5e, 0xc1, 0x08, 0x1e, 0x14, 0x79, 0x33, 0x94, 0xfd, 0xc0, 0xf4, 0xdc, + 0x57, 0xfa, 0xff, 0xc5, 0xb3, 0x31, 0xe9, 0x3a, 0x82, 0x15, 0x1e, 0xba, 0xbb, 0x77, + 0x86, 0x9c, 0x32, 0xae, 0x6e, 0x88, 0x1b, 0xbc, 0xd3, 0x6f, 0x9f, 0xde, 0x0b, 0x35, + 
0x38, 0xb2, 0xf8, 0xac, 0x5f, 0xf9, 0xb2, 0x17, 0x1a, 0xbc, 0xe1, 0x11, 0x7c, 0x4b, + 0xec, 0xa7, 0xe8, 0x7e, 0x87, 0x84, 0x23, 0x93, 0x20, 0x89, 0xe0, 0x0c, 0x06, 0xfc, + 0xee, 0x78, 0x40, 0x0d, 0xe6, 0xce, 0x30, 0x7a, 0x7d, 0xb5, 0xf0, 0x0e, 0xc5, 0xaf, + 0x91, 0x66, 0x96, 0x1b, 0xdc, 0x5d, 0x4a, 0xea, 0x19, 0xd9, 0x2a, 0x1a, 0xf7, 0x4b, + 0xa3, 0x98, 0x67, 0x3e, 0xed, 0x9c, 0x76, 0xe3, 0x38, 0x10, 0x49, 0x47, 0x18, 0xd0, + 0x5d, 0xdf, 0xdc, 0x00, 0x7a, 0x54, 0xbc, 0xd1, 0xcc, 0x4c, 0x97, 0x40, 0xf7, 0xe5, + 0x3a, 0x31, 0x68, 0x1d, 0x2b, 0x2c, 0x6e, 0xde, 0x79, 0x28, 0x11, 0x49, 0xea, 0xc3, + 0x0f, 0x6e, 0xe5, 0x83, 0x60, 0x5a, 0xc2, 0xff, 0xae, 0xc1, 0x55, 0x00, 0x35, 0xdc, + 0x5a, 0xbb, 0x35, 0x89, 0x44, 0x68, 0xf1, 0x2d, 0x5d, 0x08, 0xd7, 0x34, 0x36, 0xa8, + 0x59, 0xe5, 0x50, 0x7f, 0xdd, 0x1a, 0x46, 0x38, 0xfb, 0xe6, 0x81, 0xb0, 0xa0, 0xef, + 0xfb, 0xbb, 0xf7, 0x4c, 0x99, 0x39, 0x9d, 0xca, 0x69, 0x02, 0xa0, 0x74, 0xc8, 0x33, + 0x35, 0x60, 0x7a, 0x0c, 0x0d, 0xb0, 0x1c, 0xa3, 0xca, 0x2f, 0xa8, 0x18, 0x57, 0x24, + 0x02, 0xe2, 0xfa, 0xef, 0xb3, 0x07, 0xbe, 0x22, 0xc7, 0xd5, 0x61, 0x1f, 0xf6, 0xfb, + 0x5a, 0x31, 0xb4, 0x62, 0x16, 0x59, 0xd8, 0x4d, 0x8a, 0x7a, 0x1a, 0xdc, 0xa2, 0xfc, + 0x4e, 0xb8, 0xb8, 0x97, 0x04, 0x43, 0x93, 0x27, 0x64, 0x46, 0x31, 0xa7, 0xbb, 0xc1, + 0xa8, 0x41, 0xf3, 0x65, 0x83, 0x0d, 0x27, 0xc8, 0xaa, 0x4d, 0x75, 0xc8, 0x07, 0x87, + 0xbd, 0x10, 0xb7, 0x14, 0xcb, 0x97, 0x9c, 0x1b, 0x0f, 0x3f, 0x0b, 0x41, 0xee, 0x94, + 0x22, 0x94, 0x24, 0x8c, 0x48, 0x5c, 0xf9, 0x9c, 0x6b, 0xc4, 0x63, 0x20, 0x7a, 0xf3, + 0x83, 0x61, 0x97, 0x83, 0x57, 0x41, 0x41, 0x5d, 0xe6, 0x1f, 0xf2, 0x9f, 0xad, 0xc3, + 0xc5, 0xbd, 0xd7, 0x71, 0x45, 0x10, 0x39, 0x11, 0x41, 0x07, 0x09, 0x70, 0x6b, 0xe2, + 0x81, 0xf7, 0x38, 0xc7, 0x81, 0x04, 0xb7, 0x86, 0x94, 0x5c, 0xfd, 0x62, 0x04, 0xfb, + 0xb9, 0xfd, 0x8a, 0x0c, 0xa2, 0xc0, 0xe5, 0x3f, 0x21, 0x22, 0xd5, 0x40, 0xea, 0xa1, + 0x1e, 0x5e, 0x71, 0x1c, 0xee, 0x3a, 0x5f, 0x92, 0x64, 0x16, 0xc0, 0xbd, 0x5d, 0xa1, + 0xae, 0x65, 0x69, 
0xd3, 0xa4, 0xee, 0x0d, 0xa1, 0xac, 0xa5, 0xc3, 0x7b, 0x91, 0xf8, + 0xa7, 0x76, 0xdc, 0x88, 0xe7, 0x36, 0x33, 0x02, 0xd6, 0xb6, 0x3d, 0xd3, 0x7c, 0x1e, + 0xe6, 0x37, 0x56, 0x91, 0x3c, 0x63, 0x0e, 0x0d, 0x75, 0xf8, 0xbd, 0xbe, 0xf7, 0xab, + 0x5d, 0xdc, 0x61, 0xce, 0x96, 0xeb, 0x18, 0xaf, 0x66, 0xdc, 0xc7, 0x44, 0x35, 0xb7, + 0x9e, 0xcf, 0xd4, 0x3c, 0x3b, 0xf1, 0x0e, 0x7f, 0x2c, 0x8d, 0x4d, 0xe3, 0xa7, 0x36, + 0x38, 0x77, 0x63, 0x9f, 0x5b, 0x0f, 0x2c, 0xef, 0xff, 0xa2, 0xfb, 0xbe, 0xa4, 0x84, + 0x08, 0x73, 0xd0, 0x24, 0x15, 0xc7, 0x9a, 0x60, 0xfb, 0x25, 0xfe, 0x07, 0x6c, 0x27, + 0x29, 0x81, 0x42, 0x2d, 0x84, 0x53, 0x5c, 0xf3, 0x05, 0xce, 0xcc, 0x78, 0x56, 0xa4, + 0xd4, 0x8a, 0x6d, 0xec, 0x17, 0xa2, 0x4b, 0x6d, 0x27, 0xfe, 0x26, 0x64, 0xbc, 0x2b, + 0x2b, 0x71, 0x1d, 0x67, 0x13, 0x90, 0x6c, 0xed, 0x8a, 0x80, 0x66, 0x62, 0x18, 0x40, + 0xd9, 0x0c, 0x23, 0xae, 0x33, 0x77, 0x30, 0x67, 0x9d, 0x2c, 0xde, 0x32, 0x69, 0xab, + 0x1f, 0x42, 0xac, 0x03, 0xff, 0xdb, 0xa0, 0x32, 0xd3, 0x2c, 0xa8, 0x79, 0x63, 0x82, + 0x56, 0x56, 0x5d, 0xe1, 0xd2, 0xde, 0x39, 0xf5, 0x6f, 0x94, 0x57, 0x95, 0xd6, 0xe9, + 0x58, 0xe6, 0x93, 0xdc, 0x8c, 0xbf, 0x6d, 0x04, 0x30, 0x00, 0xcc, 0x7a, 0x40, 0x15, + 0xf0, 0x2d, 0x0f, 0xe3, 0x97, 0xec, 0x57, 0xf8, 0xfe, 0x29, 0x2e, 0x85, 0x14, 0x24, + 0xe8, 0x40, 0x6d, 0x38, 0xdd, 0xb8, 0xd1, 0xde, 0x9d, 0xef, 0x67, 0x2e, 0x92, 0x7d, + 0x3d, 0xc1, 0xf4, 0x11, 0xdc, 0x78, 0xad, 0xa7, 0x61, 0x00, 0x91, 0xbf, 0xe2, 0x63, + 0xcd, 0x79, 0x96, 0xd1, 0x80, 0x5e, 0xe4, 0x91, 0xe9, 0x95, 0x91, 0xd6, 0xef, 0xdb, + 0x2e, 0x3c, 0x79, 0x71, 0x57, 0x41, 0xd0, 0xd4, 0x72, 0xac, 0x11, 0xdb, 0x78, 0x64, + 0x4f, 0x3d, 0x23, 0xe5, 0x8f, 0x0b, 0x01, 0xa8, 0x61, 0xe0, 0x85, 0x65, 0x53, 0x52, + 0x07, 0xcd, 0x5e, 0x71, 0x0f, 0xc3, 0x3e, 0xb2, 0xf8, 0x92, 0x8b, 0xc7, 0xd4, 0x01, + 0x7e, 0x4e, 0x56, 0xc0, 0xc2, 0xeb, 0x95, 0x85, 0xd6, 0x99, 0x74, 0x5e, 0x3b, 0xb9, + 0x61, 0x8b, 0x2c, 0x1b, 0x90, 0xf2, 0x35, 0x1b, 0xaf, 0x27, 0x6a, 0x70, 0x17, 0xb0, + 0xfc, 0xfa, 0xcb, 0x52, 0xea, 0x27, 
0x31, 0x95, 0xa8, 0xde, 0xe1, 0x67, 0x79, 0x13, + 0xc7, 0x86, 0xcc, 0x3a, 0xcb, 0x06, 0xa9, 0xec, 0x7a, 0x37, 0xb0, 0x58, 0x98, 0x0c, + 0xeb, 0x3c, 0x82, 0xaa, 0xb0, 0x3e, 0xaf, 0xc1, 0xbb, 0x88, 0xcf, 0x7a, 0xb7, 0x98, + 0xf1, 0x65, 0x1d, 0x67, 0xbf, 0x22, 0x30, 0xd5, 0x34, 0xec, 0x55, 0x23, 0x1d, 0x21, + 0x31, 0x7b, 0x1c, 0xb3, 0x0b, 0x3c, 0x38, 0xff, 0x8d, 0x21, 0x1b, 0x76, 0x36, 0x70, + 0x2a, 0x25, 0xca, 0x7c, 0xa1, 0xbf, 0xf1, 0xf2, 0xc1, 0x58, 0xc6, 0xef, 0x22, 0x13, + 0xff, 0xab, 0xb9, 0xc0, 0x9f, 0x5c, 0x47, 0xe7, 0x3b, 0xbe, 0xbb, 0xd3, 0x7f, 0x3d, + 0x3e, 0xbc, 0x24, 0xa6, 0x65, 0xb2, 0x9f, 0x10, 0xde, 0x8b, 0x9c, 0xf1, 0x94, 0x2d, + 0x90, 0xb4, 0xc3, 0x1d, 0x89, 0xa9, 0x88, 0x3b, 0xf5, 0xa0, 0x27, 0xe9, 0x20, 0xd1, + 0xb8, 0x51, 0x19, 0xf2, 0xf2, 0xf9, 0x5f, 0xd5, 0x5e, 0xda, 0x85, 0x75, 0xa4, 0xdb, + 0x62, 0x69, 0x05, 0x68, 0x1c, 0x29, 0xe8, 0xd8, 0xe7, 0x41, 0xd4, 0x20, 0xa8, 0x34, + 0x42, 0xa9, 0xd3, 0x8a, 0xf4, 0x19, 0x9e, 0xf9, 0x5c, 0xb3, 0x0b, 0xc4, 0x4e, 0x93, + 0xfe, 0x4d, 0x0e, 0xb7, 0x42, 0x22, 0xfc, 0x10, 0xac, 0x8d, 0x40, 0x0e, 0x10, 0xed, + 0x4e, 0x56, 0xfa, 0x39, 0xda, 0x01, 0x2a, 0xc1, 0x8d, 0xee, 0x4d, 0x99, 0x42, 0x5c, + 0x8f, 0x71, 0x4c, 0x51, 0xac, 0x0c, 0x59, 0x91, 0x7e, 0xcc, 0x66, 0x65, 0xdc, 0x88, + 0x56, 0x97, 0x02, 0xe2, 0xad, 0xb0, 0x0a, 0xe8, 0xd4, 0x2d, 0x7b, 0x76, 0x57, 0xaa, + 0xa3, 0x63, 0xa4, 0x7b, 0x55, 0x98, 0x0b, 0xe0, 0x13, 0xb9, 0x13, 0xa6, 0x41, 0x15, + 0xbc, 0xcb, 0xbb, 0x2e, 0xcc, 0x89, 0x81, 0x55, 0x21, 0xe5, 0x6e, 0x07, 0xc8, 0x8b, + 0xbb, 0x4a, 0x55, 0xe9, 0x94, 0x5d, 0x03, 0xdb, 0x2d, 0xa0, 0xfc, 0xae, 0x3c, 0x64, + 0xe2, 0xcd, 0x3c, 0x99, 0xb6, 0x28, 0xe4, 0x49, 0x43, 0x29, 0xa8, 0x56, 0x1d, 0x6b, + 0xd9, 0x20, 0x82, 0x55, 0x23, 0x24, 0x60, 0x9c, 0x23, 0xcb, 0x62, 0xe1, 0x45, 0x57, + 0x50, 0x23, 0x54, 0x43, 0x22, 0xbe, 0x0f, 0xb2, 0x49, 0xbf, 0xd3, 0xc5, 0xe7, 0xfb, + 0x38, 0x37, 0xe9, 0xff, 0x21, 0x35, 0x07, 0x3a, 0xe1, 0x36, 0x0d, 0xcf, 0xaf, 0x5f, + 0xb6, 0x78, 0x56, 0x8f, 0xd8, 0x4d, 0x99, 0xa5, 0x1f, 
0x32, 0xeb, 0x94, 0xcc, 0xf5, + 0xf2, 0x39, 0x02, 0x5b, 0x2b, 0x97, 0xbe, 0xf6, 0x25, 0xdb, 0xb6, 0x7f, 0x20, 0xc3, + 0xe0, 0xd9, 0x51, 0x73, 0x12, 0x9c, 0x06, 0x37, 0x50, 0x39, 0x52, 0x13, 0x41, 0x49, + 0x24, 0xe0, 0xa3, 0xfd, 0xd3, 0x66, 0xff, 0xd4, 0x69, 0xc9, 0xeb, 0xea, 0x79, 0xfb, + 0x76, 0xaf, 0x10, 0xea, 0x45, 0xb5, 0x66, 0xf1, 0xfc, 0x92, 0xaf, 0x48, 0xce, 0xe2, + 0x11, 0xf8, 0xe1, 0xb0, 0x58, 0xfb, 0x72, 0x1a, 0x8b, 0x22, 0xce, 0x43, 0x0c, 0x54, + 0x94, 0x0e, 0x24, 0xb3, 0x30, 0x8e, 0x57, 0x0a, 0xb8, 0x57, 0x25, 0x0d, 0x10, 0xcd, + 0xec, 0xe1, 0x05, 0x07, 0x1b, 0xc8, 0x66, 0xea, 0x4d, 0x6d, 0x5c, 0x69, 0xf9, 0x59, + 0x28, 0xf3, 0x9f, 0x7f, 0x1f, 0xcd, 0xf1, 0x1c, 0xeb, 0x26, 0xf2, 0x6f, 0x39, 0x37, + 0x29, 0xab, 0x9a, 0x5f, 0xd0, 0x7c, 0x52, 0x7b, 0xb8, 0x3d, 0xda, 0xbe, 0x32, 0xed, + 0x67, 0xc5, 0x1f, 0x18, 0xdb, 0xaf, 0x27, 0xa5, 0x12, 0x03, 0xb1, 0x6a, 0x28, 0xae, + 0x4b, 0xb3, 0xa4, 0x73, 0xa1, 0x8d, 0xd3, 0x74, 0x3d, 0x88, 0x7e, 0xac, 0x54, 0x8e, + 0xb7, 0xca, 0x4d, 0x46, 0x15, 0x7c, 0x62, 0xb7, 0x29, 0xf3, 0x66, 0xa9, 0x56, 0x02, + 0x28, 0x5a, 0x61, 0xdf, 0x37, 0x7b, 0x32, 0xa6, 0xb9, 0x7e, 0xc7, 0x75, 0xbb, 0xc8, + 0x8a, 0xc5, 0xfa, 0xe9, 0xd6, 0x2b, 0xca, 0xfe, 0x47, 0x1b, 0x10, 0x97, 0xd1, 0xc2, + 0x1b, 0xdb, 0x5d, 0x82, 0xa9, 0xb1, 0x60, 0x0f, 0x12, 0x15, 0x98, 0x2b, 0xc9, 0xf7, + 0xb5, 0xb5, 0x55, 0xb5, 0xbc, 0x04, 0x0b, 0x3d, 0x99, 0xa4, 0xba, 0x9b, 0x9a, 0x19, + 0xa9, 0x21, 0x60, 0x6c, 0x56, 0x20, 0xc1, 0x67, 0x14, 0xb5, 0xc0, 0x49, 0x9c, 0x73, + 0x4d, 0x2a, 0xbb, 0x9f, 0x80, 0xe2, 0xbc, 0x92, 0xda, 0x93, 0xd7, 0x51, 0x88, 0xaf, + 0xb0, 0x08, 0x4d, 0xb0, 0x38, 0x8e, 0xdf, 0xde, 0xa2, 0x33, 0xb8, 0x90, 0x82, 0x6c, + 0x93, 0xb9, 0xf0, 0xc3, 0xdd, 0xd3, 0x2f, 0x7b, 0xec, 0xb2, 0xd7, 0x7d, 0x79, 0xa1, + 0x61, 0x8a, 0x79, 0xf7, 0x3c, 0x45, 0x9b, 0x0d, 0xf5, 0x29, 0x7f, 0x8e, 0xab, 0xd6, + 0xed, 0x06, 0xfd, 0x23, 0x40, 0xe8, 0x60, 0x0a, 0x95, 0xd7, 0x2c, 0xef, 0xd1, 0x2e, + 0x62, 0x2c, 0x57, 0xb4, 0x57, 0xa4, 0xe8, 0x39, 0x75, 0x93, 0x74, 0x6a, 
0x6b, 0xcf, + 0x04, 0xc4, 0x9c, 0x6d, 0xd4, 0xa3, 0x36, 0x68, 0xda, 0x53, 0x8d, 0x90, 0x93, 0xa4, + 0x50, 0xa4, 0xd8, 0x24, 0x51, 0xb6, 0x12, 0xff, 0x54, 0x70, 0x73, 0x8e, 0x62, 0xbf, + 0xdf, 0xc7, 0x9b, 0x3e, 0x31, 0xbb, 0x47, 0xfc, 0xa1, 0xe9, 0x87, 0x22, 0xa5, 0x98, + 0x3a, 0xff, 0xe5, 0xf6, 0x32, 0x84, 0x0b, 0x92, 0x3a, 0xb5, 0x6b, 0x1d, 0xa1, 0x53, + 0xd3, 0x5d, 0x82, 0x23, 0x24, 0xe7, 0xd5, 0x6d, 0x61, 0x3c, 0x73, 0xeb, 0xc6, 0x34, + 0x1e, 0xa0, 0x3b, 0xee, 0x3a, 0xb9, 0x73, 0xe8, 0x4d, 0x8f, 0xfc, 0x4a, 0x7c, 0x58, + 0x13, 0x83, 0xe2, 0x14, 0x2d, 0x29, 0x2a, 0x58, 0x0b, 0x6d, 0x30, 0x83, 0x43, 0xdc, + 0xf1, 0xef, 0x49, 0x29, 0xa9, 0xe3, 0xe6, 0x15, 0x32, 0xfc, 0xff, 0xb7, 0x4d, 0x30, + 0x19, 0xf4, 0xe2, 0xd6, 0xd3, 0x11, 0x78, 0x57, 0x5a, 0xca, 0x94, 0x12, 0x99, 0x22, + 0x50, 0x44, 0xe1, 0xd3, 0x7b, 0xab, 0x9f, 0x10, 0xe2, 0x9f, 0xd9, 0x6f, 0x9c, 0xf6, + 0x84, 0xaf, 0x98, 0xed, 0x64, 0x8b, 0x83, 0xd6, 0x1e, 0x52, 0x5b, 0xe3, 0x2c, 0xdb, + 0x45, 0x3d, 0x2d, 0x38, 0x93, 0x5f, 0xee, 0xb3, 0x22, 0xce, 0xb9, 0xd2, 0xa2, 0xe9, + 0x5e, 0xb7, 0xfc, 0x61, 0x2d, 0x89, 0xf4, 0xcf, 0xe8, 0x93, 0x22, 0x8e, 0x88, 0x28, + 0xb1, 0x89, 0x00, 0x90, 0x45, 0x62, 0x90, 0x75, 0xc0, 0xc2, 0x03, 0x9d, 0x5a, 0x73, + 0x32, 0xfd, 0xbc, 0xd7, 0xc7, 0xb0, 0x91, 0x01, 0x5c, 0x45, 0x69, 0xa3, 0x00, 0x53, + 0x23, 0x56, 0xbb, 0xad, 0x08, 0xff, 0xa3, 0xbb, 0x16, 0x7a, 0x3e, 0xbe, 0xb4, 0x62, + 0x66, 0xb7, 0x06, 0x06, 0x49, 0x4a, 0xda, 0xe9, 0x14, 0x9e, 0x1a, 0x64, 0xc0, 0xa0, + 0xaa, 0x5d, 0xaa, 0x53, 0x62, 0xd3, 0xc7, 0xa8, 0x96, 0xfd, 0x52, 0x78, 0x08, 0xd0, + 0xa3, 0xc1, 0xcf, 0x70, 0x61, 0xba, 0x67, 0x89, 0x39, 0x80, 0x78, 0x85, 0x0b, 0xe4, + 0xb9, 0x94, 0x0e, 0x01, 0xae, 0xbb, 0x93, 0x6d, 0xd8, 0x1a, 0x31, 0x82, 0x04, 0x28, + 0x1d, 0x43, 0x97, 0x6f, 0x4e, 0x0f, 0xa2, 0x07, 0xe4, 0xbe, 0x1f, 0xb8, 0x2c, 0x91, + 0xbb, 0x26, 0x42, 0xf7, 0x36, 0x85, 0x6d, 0xcd, 0x5a, 0xeb, 0x75, 0xc5, 0x0a, 0xf2, + 0x00, 0xe1, 0x4b, 0xe5, 0xb7, 0x8c, 0xe6, 0x9a, 0x88, 0x51, 0x54, 0xef, 0xe3, 0x0e, + 0xdd, 
0x09, 0xae, 0x8c, 0x5e, 0xb5, 0x3f, 0x4b, 0x8b, 0x7c, 0x75, 0x35, 0x37, 0x3c, + 0x0f, 0xe6, 0xcf, 0xe4, 0x48, 0xa9, 0xb9, 0xf4, 0xd9, 0xe3, 0x10, 0x93, 0x03, 0xd6, + 0xce, 0xe9, 0x10, 0x6a, 0xa2, 0x2b, 0xd5, 0x9a, 0xe0, 0xe0, 0x27, 0xd3, 0x25, 0x6a, + 0x75, 0xb9, 0xc5, 0xd6, 0x07, 0x09, 0x09, 0x97, 0x53, 0xce, 0x57, 0x2c, 0x9e, 0x29, + 0xdc, 0x92, 0x56, 0x2d, 0x1c, 0x3f, 0x4a, 0x0b, 0x4d, 0x36, 0xa6, 0xfe, 0xc2, 0x1b, + 0xa4, 0x94, 0x17, 0x3e, 0x44, 0xd7, 0x9b, 0xc2, 0x34, 0x18, 0x95, 0xbd, 0x0c, 0x70, + 0x96, 0xf0, 0x97, 0x4f, 0x12, 0x67, 0xfe, 0xf6, 0x72, 0x1d, 0x58, 0xb8, 0xc4, 0xe3, + 0x34, 0xf1, 0x4d, 0x86, 0xc0, 0xee, 0x3b, 0xc4, 0x59, 0x8c, 0x62, 0x64, 0xf9, 0x3a, + 0x18, 0x7b, 0x1d, 0x4d, 0xcc, 0xd1, 0x8f, 0x7c, 0x1e, 0x62, 0x0b, 0xef, 0xa5, 0x8f, + 0x97, 0xc1, 0x2c, 0xd6, 0xd7, 0x3e, 0xe4, 0x5c, 0xbb, 0x3d, 0x5a, 0xe8, 0x9d, 0x5d, + 0xe3, 0x24, 0x09, 0x10, 0xc5, 0x9c, 0x36, 0xec, 0x8f, 0xe9, 0x9b, 0x32, 0x49, 0x16, + 0x30, 0xab, 0x35, 0xb1, 0x24, 0x53, 0x1d, 0x9c, 0x29, 0xe0, 0x46, 0xc4, 0x78, 0xe6, + 0x2a, 0xc0, 0xc0, 0xe9, 0x7a, 0x70, 0x87, 0xab, 0x69, 0x4a, 0x63, 0x15, 0x87, 0x04, + 0xbf, 0x6f, 0x97, 0x23, 0xdb, 0xc4, 0xc7, 0x09, 0x18, 0xe8, 0x90, 0x4b, 0x22, 0x4d, + 0xce, 0x84, 0x95, 0x77, 0xa4, 0x88, 0xb7, 0x61, 0x3d, 0x4c, 0x39, 0x3c, 0xac, 0x21, + 0x05, 0xb2, 0x8f, 0xd0, 0x46, 0x7a, 0x0b, 0xf0, 0x23, 0xf0, 0x0d, 0x1a, 0x17, 0xf6, + 0x53, 0xcd, 0xb6, 0xb5, 0xa8, 0x3e, 0x4c, 0xf1, 0x5c, 0x34, 0x7b, 0x34, 0xb9, 0x7f, + 0xbf, 0xe6, 0xea, 0xee, 0x13, 0xbb, 0x90, 0x15, 0x3a, 0xfd, 0xc9, 0x11, 0x26, 0x37, + 0xfa, 0xd1, 0xcf, 0xe1, 0x7e, 0xdd, 0xcb, 0x0c, 0x81, 0x9e, 0x60, 0xd3, 0x50, 0x39, + 0x34, 0x9b, 0x69, 0xf7, 0xca, 0x9b, 0xa6, 0x4d, 0xf9, 0xf5, 0xe4, 0x71, 0x11, 0x5c, + 0xd6, 0x00, 0x7a, 0xb4, 0x60, 0x09, 0xb0, 0x9b, 0x05, 0x00, 0x01, 0x12, 0x63, 0xbb, + 0x71, 0xe7, 0xcd, 0x2a, 0x39, 0x6c, 0x8a, 0xd5, 0x21, 0xf4, 0xf8, 0xa6, 0xc8, 0xff, + 0x85, 0x98, 0xd2, 0x4b, 0x51, 0x23, 0x2a, 0x99, 0x38, 0x56, 0x5d, 0x0f, 0x68, 0x3e, + 0x9f, 0x3a, 0x53, 0x36, 
0x4a, 0xcc, 0x69, 0x21, 0xa3, 0x5b, 0xc5, 0x99, 0x10, 0xbb, + 0x71, 0xfb, 0x58, 0xb8, 0x67, 0x37, 0x3c, 0xe9, 0x5f, 0x19, 0x84, 0x09, 0xaa, 0xef, + 0x97, 0xf4, 0x01, 0xe4, 0x33, 0x00, 0x4b, 0x99, 0x19, 0x04, 0x9f, 0x93, 0x7f, 0xd7, + 0x76, 0xc4, 0xb6, 0x31, 0xa5, 0x91, 0x2a, 0x08, 0xd4, 0x9f, 0xdf, 0x65, 0x28, 0xf8, + 0x1a, 0x6f, 0x32, 0x00, 0x09, 0x37, 0x67, 0xbb, 0x77, 0x89, 0xd9, 0x5a, 0x75, 0x03, + 0x0a, 0xc1, 0xd2, 0x4c, 0x2c, 0x75, 0xbd, 0x60, 0x38, 0x25, 0x52, 0x86, 0x3f, 0x09, + 0x8d, 0x36, 0xbd, 0x48, 0x33, 0x28, 0x3d, 0x3a, 0x2d, 0x21, 0x5d, 0x10, 0xc7, 0xff, + 0xe9, 0xc8, 0x40, 0x37, 0x23, 0x14, 0x45, 0x58, 0x33, 0x29, 0x26, 0x16, 0x74, 0x19, + 0x3b, 0xdd, 0x1c, 0x64, 0x81, 0xbe, 0xf9, 0xf2, 0x26, 0xe1, 0xe6, 0x0b, 0xb1, 0xc7, + 0x76, 0xa4, 0xbe, 0x7d, 0xc6, 0x9b, 0x44, 0x30, 0xa7, 0x5a, 0x0c, 0xbd, 0x55, 0x86, + 0x7a, 0x6f, 0x46, 0xff, 0x93, 0x03, 0xf9, 0xa2, 0x9b, 0x6f, 0x3f, 0x7c, 0x7a, 0x9c, + 0x9f, 0xbc, 0xf7, 0x47, 0xb2, 0x3f, 0x95, 0xe3, 0xd9, 0xf8, 0x16, 0xad, 0xd5, 0x24, + 0xa2, 0x3f, 0x75, 0x6e, 0xfa, 0x72, 0x9d, 0x29, 0x89, 0xe7, 0xea, 0x8e, 0x99, 0xa9, + 0xa0, 0x54, 0xf4, 0x87, 0xc7, 0x86, 0x91, 0xd8, 0x72, 0x9b, 0x5d, 0x16, 0x04, 0xa6, + 0xb3, 0x79, 0x52, 0xfb, 0x7c, 0x18, 0xb6, 0x62, 0xb7, 0xbc, 0x5c, 0xb2, 0x32, 0x91, + 0x45, 0x80, 0x3c, 0x6b, 0xfb, 0x82, 0xe2, 0x9a, 0xb0, 0x29, 0x9f, 0x91, 0x7a, 0x34, + 0x34, 0xab, 0x8e, 0xf5, 0xd8, 0xe6, 0xb0, 0x0a, 0xcc, 0xa2, 0xa2, 0x96, 0x35, 0xff, + 0xa2, 0x04, 0x78, 0xa6, 0x6f, 0xae, 0x1b, 0x8d, 0x05, 0x19, 0xe6, 0xbc, 0x3e, 0x26, + 0x0f, 0xf8, 0x57, 0x13, 0x54, 0x10, 0xe6, 0xad, 0xb6, 0x65, 0xf9, 0x9f, 0xa4, 0xca, + 0xfa, 0xc2, 0xe0, 0xf2, 0xc0, 0xf1, 0x34, 0xbd, 0xba, 0x83, 0x81, 0xc2, 0xbb, 0xac, + 0x43, 0x33, 0x2a, 0xcd, 0xcb, 0x10, 0x08, 0x2e, 0x15, 0x94, 0x9e, 0xee, 0x28, 0x48, + 0x9c, 0xfa, 0xd7, 0x3f, 0xc5, 0x31, 0x22, 0xbc, 0xaf, 0x3c, 0x4f, 0xb1, 0x07, 0x7a, + 0xde, 0x45, 0xbf, 0x79, 0xf1, 0x68, 0xa6, 0x2e, 0x54, 0x1d, 0xf1, 0x3e, 0x75, 0xb9, + 0x89, 0xbc, 0x15, 0x1d, 0xb9, 0x7f, 0x28, 
0xc1, 0x02, 0x81, 0x2c, 0xb3, 0x3d, 0xb1, + 0x1d, 0x59, 0x1a, 0x60, 0x9a, 0xbf, 0xf9, 0xa2, 0x00, 0xd3, 0x4e, 0x1a, 0xc6, 0x3a, + 0x38, 0x30, 0xfe, 0xb1, 0x52, 0x5f, 0x63, 0x6b, 0x98, 0xba, 0x53, 0x43, 0x3f, 0xf1, + 0x02, 0x44, 0x28, 0x34, 0x1b, 0x39, 0x1d, 0xd2, 0x09, 0x6a, 0xf3, 0x9a, 0xd1, 0xf3, + 0x6a, 0x1d, 0xd6, 0x37, 0x50, 0x3c, 0x92, 0x97, 0xa9, 0xf3, 0x75, 0xdb, 0xdd, 0x65, + 0xee, 0x98, 0xf0, 0xde, 0x4f, 0x6c, 0xc2, 0x46, 0x6d, 0x42, 0x3f, 0xa1, 0x5f, 0xbc, + 0xdf, 0xa3, 0xc9, 0x9b, 0x9a, 0x39, 0x70, 0xee, 0x74, 0x28, 0x59, 0x16, 0x94, 0x01, + 0x48, 0xb1, 0xdf, 0xbc, 0xd3, 0x07, 0x56, 0xd4, 0x8d, 0xfd, 0xe0, 0xb0, 0x88, 0x61, + 0x55, 0x4f, 0x96, 0xe8, 0xe8, 0xc4, 0x45, 0x1f, 0x5b, 0x42, 0x29, 0xdf, 0xcf, 0x11, + 0xa5, 0x1d, 0xbf, 0xbd, 0x59, 0xdb, 0x48, 0x3d, 0x5c, 0x01, 0xb6, 0xe2, 0xc3, 0x4d, + 0x3c, 0xf2, 0x47, 0xd4, 0xeb, 0x6c, 0xad, 0x41, 0x67, 0x33, 0xad, 0xfd, 0xcc, 0x87, + 0x08, 0xdd, 0xe8, 0x3e + ], + }, + TestVector { + description: "NU5 transaction #5", + version: 5, + lock_time: 3965571124, + expiry_height: 71596958, + txid: [ + 0xb6, 0x59, 0xcf, 0x8c, 0x59, 0x4b, 0x52, 0xfb, 0x76, 0xe8, 0x05, 0xd9, 0xd7, 0x01, + 0x57, 0x9a, 0x75, 0x18, 0x73, 0x94, 0x0e, 0xbc, 0x76, 0x5b, 0x3b, 0x59, 0xbd, 0x55, + 0xf0, 0x1f, 0x09, 0x43 + ], + is_coinbase: 0, + has_sapling: 1, + has_orchard: 0, + transparent_inputs: 3, + transparent_outputs: 2, + tx: vec![ + 0x05, 0x00, 0x00, 0x80, 0x0a, 0x27, 0xa7, 0x26, 0xb4, 0xd0, 0xd6, 0xc2, 0x34, 0xd0, + 0x5d, 0xec, 0x9e, 0x7b, 0x44, 0x04, 0x03, 0x5a, 0xda, 0xc4, 0x1a, 0x6d, 0x23, 0xa2, + 0x24, 0xa0, 0x4f, 0xdc, 0x0d, 0x96, 0x73, 0x87, 0x98, 0x0f, 0x95, 0xe6, 0x27, 0xe6, + 0xb3, 0xdc, 0xe1, 0x9c, 0xaf, 0x01, 0x09, 0x84, 0x8c, 0xa9, 0xda, 0xea, 0x2e, 0x24, + 0x6e, 0x08, 0x52, 0x63, 0x6a, 0x52, 0x65, 0x53, 0x53, 0x51, 0x18, 0xf1, 0xf6, 0xef, + 0x97, 0x6e, 0x4a, 0x31, 0xa0, 0xe4, 0x14, 0x3c, 0x43, 0x60, 0xd8, 0xb1, 0x79, 0xb3, + 0x0e, 0x4b, 0xfa, 0x7e, 0x16, 0x1b, 0x1e, 0x6c, 0x70, 0x7d, 0x8e, 0xae, 0x76, 0x28, + 0x71, 0x59, 
0x21, 0x94, 0x1e, 0x78, 0x54, 0xe1, 0x03, 0x51, 0x51, 0x52, 0x58, 0xc4, + 0x3f, 0xe6, 0xc4, 0x45, 0x29, 0xf6, 0x61, 0x4b, 0x58, 0x41, 0x61, 0x5d, 0x3e, 0x4e, + 0x77, 0xfb, 0x09, 0xa6, 0xf0, 0x20, 0xe0, 0xb8, 0x32, 0x28, 0xac, 0x17, 0x55, 0xad, + 0x47, 0x71, 0x16, 0xde, 0xca, 0xac, 0x51, 0x7b, 0xfb, 0xcf, 0x03, 0x6a, 0x63, 0x53, + 0x99, 0xe0, 0x07, 0xeb, 0x02, 0xae, 0xd7, 0x98, 0xbc, 0x73, 0xe6, 0x01, 0x00, 0x08, + 0x6a, 0x6a, 0xac, 0x63, 0x52, 0x65, 0x53, 0x51, 0xbe, 0x31, 0x8e, 0x90, 0x9b, 0xbe, + 0x06, 0x00, 0x06, 0x00, 0xac, 0x65, 0x65, 0x53, 0x65, 0x02, 0xae, 0x0b, 0x07, 0x77, + 0xab, 0x68, 0x77, 0x9a, 0x13, 0x2d, 0x7e, 0xbc, 0x77, 0x84, 0x01, 0xea, 0x77, 0xc4, + 0xa9, 0xe9, 0x8c, 0xfc, 0xe6, 0x4a, 0xe2, 0xc5, 0x69, 0x0b, 0x11, 0xe5, 0x3f, 0x72, + 0xfd, 0xf6, 0x33, 0x01, 0x8d, 0x21, 0x7c, 0x58, 0x8c, 0x52, 0x98, 0x6f, 0xc5, 0x24, + 0xe7, 0x97, 0x97, 0xab, 0x65, 0x58, 0x43, 0xc2, 0x61, 0xae, 0x7f, 0xc9, 0xcc, 0x3f, + 0x47, 0x05, 0x46, 0x00, 0xe4, 0xcd, 0x38, 0x5c, 0x46, 0x7a, 0x78, 0x8a, 0x9f, 0xff, + 0xc3, 0x7e, 0x9d, 0xdb, 0xb5, 0xd3, 0xe8, 0xa4, 0xbd, 0x0c, 0x4e, 0x8f, 0x56, 0xe5, + 0x69, 0x5a, 0xfa, 0x90, 0xfe, 0x50, 0xce, 0x0a, 0x95, 0x59, 0x34, 0x09, 0x8f, 0x10, + 0x0f, 0xe1, 0xc6, 0x1a, 0xf3, 0xb0, 0x88, 0xf4, 0x52, 0x8f, 0xc8, 0x8c, 0x17, 0xbd, + 0xd5, 0xc1, 0xf5, 0xb2, 0x13, 0x23, 0x53, 0x22, 0x83, 0x71, 0xe5, 0xbd, 0x04, 0x50, + 0x55, 0x06, 0x35, 0xaa, 0x21, 0x58, 0x18, 0xf7, 0xf5, 0x03, 0x78, 0x90, 0xf0, 0x53, + 0x23, 0x3f, 0x9a, 0xa5, 0x0a, 0xe2, 0x9c, 0x05, 0x56, 0xc3, 0x6d, 0x67, 0xb2, 0x64, + 0x7e, 0x54, 0xeb, 0xe7, 0x58, 0x8e, 0x1f, 0x02, 0xb3, 0xc7, 0x17, 0xdf, 0x02, 0x98, + 0x43, 0x0e, 0xc9, 0xd2, 0xbb, 0x11, 0x4b, 0x35, 0x42, 0xb7, 0x5d, 0x01, 0x0d, 0x93, + 0x4e, 0x58, 0x96, 0xe1, 0xd2, 0xd1, 0x02, 0x4a, 0x36, 0x3a, 0x1a, 0x19, 0x50, 0x1b, + 0xec, 0x45, 0x5c, 0x71, 0x65, 0xc6, 0x95, 0x96, 0x65, 0xbe, 0x88, 0x47, 0x66, 0x09, + 0x19, 0x52, 0x55, 0x8e, 0xa8, 0xc0, 0x87, 0xe2, 0x88, 0xe3, 0xc3, 0x8b, 0x54, 0xbb, + 0x33, 0x58, 0x75, 0x13, 0xc4, 
0x2e, 0x03, 0xb5, 0x2c, 0xeb, 0x9a, 0x19, 0x57, 0xa9, + 0xe9, 0x05, 0x84, 0x72, 0x37, 0xce, 0x44, 0x56, 0xe5, 0x33, 0x50, 0x68, 0x26, 0x49, + 0x0e, 0xd7, 0x38, 0xc5, 0xe9, 0x99, 0x4e, 0x8c, 0x12, 0xcc, 0xcf, 0x39, 0x86, 0xc5, + 0x86, 0x18, 0x8b, 0xd9, 0x08, 0x98, 0x52, 0xdf, 0x68, 0xd6, 0xad, 0x2d, 0x23, 0xae, + 0xa8, 0x0d, 0xb3, 0x96, 0x0a, 0xc0, 0x80, 0x44, 0x51, 0x18, 0x80, 0x1a, 0xc1, 0x0d, + 0xc0, 0xf5, 0x78, 0x8f, 0x47, 0x86, 0x69, 0x34, 0xb9, 0x8a, 0xad, 0xb9, 0xc6, 0x8d, + 0xd8, 0x84, 0x83, 0xc1, 0x5d, 0x47, 0xaf, 0x8f, 0xf4, 0x2e, 0x6b, 0xfb, 0xb8, 0xe0, + 0xe5, 0x3a, 0x04, 0x7e, 0x58, 0xe5, 0xba, 0x90, 0xd1, 0xdb, 0x1e, 0xa1, 0x26, 0x01, + 0x7c, 0x65, 0x6d, 0x01, 0x1c, 0x68, 0x7b, 0xb0, 0x4f, 0x47, 0xa5, 0x60, 0xef, 0x7c, + 0xed, 0x23, 0x1b, 0x24, 0x38, 0x7f, 0xf4, 0x01, 0x90, 0x43, 0xcf, 0xfd, 0x67, 0xfb, + 0x9d, 0x89, 0x20, 0x06, 0xc3, 0x91, 0x7f, 0xd7, 0xa9, 0x6f, 0xe0, 0x3d, 0x7b, 0xea, + 0xa2, 0x17, 0x12, 0x8d, 0x71, 0xf0, 0xa2, 0x8a, 0x83, 0x78, 0x7a, 0x86, 0xcf, 0xc9, + 0x33, 0x69, 0xd0, 0xdd, 0x54, 0x65, 0x32, 0x7f, 0xc4, 0x29, 0x4d, 0xae, 0x81, 0xc4, + 0x35, 0x1c, 0x42, 0xa6, 0xf0, 0xa8, 0x0e, 0xef, 0xa6, 0x1d, 0xb6, 0xa4, 0x0b, 0xb6, + 0x81, 0xf5, 0x58, 0xf8, 0x1b, 0x10, 0x1e, 0xb6, 0x57, 0xf6, 0x57, 0x27, 0xd6, 0x17, + 0x69, 0x1b, 0x8b, 0xee, 0x3a, 0xa7, 0xe5, 0x75, 0xb4, 0x11, 0xa0, 0x12, 0x8a, 0x3f, + 0x24, 0x75, 0x3e, 0x52, 0xee, 0x34, 0x90, 0x04, 0xcf, 0x6d, 0x25, 0xfa, 0xd6, 0xc4, + 0x68, 0x1b, 0x02, 0xa2, 0xe1, 0x96, 0x14, 0xe8, 0x0c, 0x95, 0x83, 0x81, 0x36, 0x2a, + 0x91, 0xd3, 0xcd, 0x3b, 0x4e, 0x76, 0x58, 0x32, 0x94, 0x31, 0x0c, 0x82, 0x41, 0x11, + 0x29, 0xac, 0x97, 0xf2, 0xad, 0x5a, 0x5b, 0x9f, 0xa8, 0x64, 0xa9, 0xc5, 0xd0, 0x2d, + 0x8c, 0x92, 0xd6, 0x42, 0x44, 0xfa, 0x6c, 0x40, 0x9c, 0x21, 0x69, 0x48, 0x62, 0xc4, + 0x42, 0x7d, 0xc5, 0x1a, 0xec, 0x57, 0x7f, 0x6e, 0xa3, 0x38, 0x05, 0x03, 0x13, 0x99, + 0x91, 0xe6, 0xe8, 0x89, 0x09, 0x87, 0x64, 0x9f, 0xa7, 0xc4, 0x3a, 0xc8, 0x03, 0xf6, + 0x89, 0xb6, 0x9d, 0x70, 0xab, 0xd7, 0xef, 0xa7, 
0x1c, 0xf9, 0xa0, 0xf2, 0xa4, 0x1d, + 0xf9, 0x41, 0x89, 0x76, 0xa4, 0xff, 0xa4, 0x4f, 0x43, 0x75, 0x92, 0xf1, 0x9c, 0x09, + 0xcb, 0x49, 0x31, 0xb3, 0xd3, 0xcd, 0x01, 0x59, 0x31, 0xcf, 0xfa, 0xe1, 0x71, 0xe0, + 0x8a, 0xc5, 0x92, 0x88, 0x61, 0xfc, 0xc3, 0x2e, 0x08, 0x81, 0x15, 0x59, 0x76, 0x49, + 0x66, 0xbe, 0xbc, 0x14, 0x14, 0x36, 0xb9, 0x17, 0xc5, 0x27, 0x1b, 0x2c, 0x68, 0x0c, + 0xdc, 0x50, 0x2c, 0xba, 0xd5, 0x27, 0xac, 0x08, 0x7b, 0x34, 0x65, 0x6f, 0x75, 0x5d, + 0xfb, 0xf0, 0xae, 0x5a, 0xed, 0xc8, 0x09, 0x85, 0xf6, 0x3d, 0x0c, 0xa4, 0x4a, 0x76, + 0x2f, 0x9b, 0x31, 0x1f, 0x15, 0x6d, 0xe6, 0x27, 0x74, 0x19, 0x19, 0x99, 0x8e, 0x67, + 0x44, 0x66, 0xc7, 0x77, 0x25, 0xfa, 0x04, 0xc4, 0x9e, 0xb1, 0x87, 0xfb, 0xf7, 0x5e, + 0x5f, 0x7c, 0xee, 0x26, 0x1e, 0x30, 0x75, 0xc2, 0xb2, 0xc2, 0x81, 0x2f, 0xe8, 0x32, + 0x32, 0xc4, 0x1a, 0x5f, 0x10, 0xf4, 0x0b, 0x91, 0x1e, 0xbc, 0xeb, 0xb7, 0x8c, 0x91, + 0xc2, 0x0b, 0x82, 0xc0, 0x05, 0x0f, 0xe2, 0xee, 0x10, 0x4b, 0x39, 0x20, 0xed, 0x0a, + 0x05, 0xd1, 0x7b, 0x06, 0x0d, 0x99, 0xd5, 0x87, 0x01, 0x98, 0xe6, 0x3c, 0xcf, 0x51, + 0xb1, 0x5d, 0xf8, 0x0e, 0x87, 0xac, 0xbd, 0x30, 0x12, 0x6c, 0xda, 0x2a, 0xff, 0xb8, + 0xf1, 0xce, 0xcb, 0x1b, 0xaa, 0x6a, 0x91, 0x9e, 0x0a, 0x97, 0x87, 0x91, 0x39, 0x69, + 0x04, 0x44, 0x9a, 0xde, 0x4b, 0x0b, 0x02, 0x92, 0x0f, 0xb8, 0xc0, 0xbf, 0x7f, 0xc0, + 0x82, 0xeb, 0x74, 0x98, 0x73, 0xc1, 0x0d, 0x17, 0xdb, 0xd9, 0x1f, 0xfe, 0xa9, 0x36, + 0x10, 0xee, 0xea, 0x62, 0x57, 0x90, 0xad, 0xa2, 0x8e, 0x3a, 0x2c, 0xf2, 0x2c, 0x0d, + 0x4e, 0xa2, 0xb9, 0x26, 0x41, 0xf2, 0x16, 0xd3, 0x92, 0x2c, 0x1f, 0xc3, 0x2d, 0xbc, + 0x1e, 0x0e, 0x99, 0x00, 0x38, 0x6c, 0xf8, 0x98, 0xcb, 0x8e, 0xd5, 0x6c, 0x06, 0x4e, + 0x5b, 0x12, 0xb0, 0x26, 0xbf, 0x03, 0x5d, 0xfb, 0xc4, 0xeb, 0x92, 0xce, 0x33, 0xf8, + 0x2b, 0xbe, 0x48, 0xca, 0x94, 0x5f, 0x12, 0x44, 0x83, 0x10, 0xd7, 0xb9, 0xdb, 0x85, + 0xf1, 0xb0, 0x46, 0xdc, 0x9c, 0x56, 0x51, 0x2f, 0x61, 0xe0, 0xa3, 0x96, 0x6f, 0xa4, + 0xab, 0x71, 0xd1, 0x5f, 0x4e, 0x23, 0xe4, 0xe3, 0x1c, 0xb9, 0x62, 
0x10, 0x60, 0x14, + 0xc4, 0xc2, 0x9e, 0xc3, 0xb9, 0x10, 0xe0, 0x72, 0x2d, 0xac, 0x38, 0xaa, 0x4d, 0xc8, + 0x1e, 0x17, 0x6d, 0x72, 0xfe, 0xaf, 0x2f, 0x93, 0xf9, 0xec, 0xd5, 0x04, 0xcb, 0xaf, + 0x95, 0x59, 0x83, 0x30, 0x09, 0xd9, 0x2c, 0x9d, 0x2f, 0x81, 0x68, 0x7b, 0xf5, 0x89, + 0xa4, 0x93, 0x66, 0xcd, 0x0a, 0xba, 0xe7, 0xa1, 0x74, 0xa4, 0x8f, 0xf7, 0x6c, 0xd7, + 0x2f, 0x02, 0xb1, 0x8a, 0xf8, 0x18, 0x75, 0xc7, 0x90, 0xc1, 0x40, 0x20, 0x8e, 0xc6, + 0x2a, 0x84, 0x98, 0x8e, 0x7c, 0x54, 0x97, 0x52, 0x88, 0xf5, 0x08, 0xd5, 0x02, 0xbb, + 0x7f, 0xad, 0xbc, 0x3e, 0xe8, 0x80, 0x6d, 0x6d, 0x59, 0xe3, 0xa6, 0x94, 0xf7, 0x85, + 0x70, 0xfd, 0x19, 0x56, 0xcb, 0x22, 0x7c, 0x65, 0x00, 0x01, 0xf2, 0x7f, 0x94, 0x23, + 0xf4, 0xed, 0x12, 0x56, 0x0b, 0x2e, 0x1c, 0x8d, 0xbc, 0xb4, 0xc3, 0x02, 0x15, 0xb2, + 0x16, 0x3a, 0x49, 0x29, 0x95, 0xef, 0xda, 0xd8, 0xc0, 0xe7, 0x6c, 0xc9, 0x98, 0x9f, + 0x56, 0x69, 0x23, 0x6f, 0xf9, 0x39, 0xb5, 0x8a, 0x7f, 0x47, 0x1b, 0x93, 0x00, 0x59, + 0x04, 0xc9, 0x9c, 0x48, 0xec, 0x6d, 0xed, 0x50, 0xa3, 0xf5, 0x1b, 0xc9, 0xe0, 0x17, + 0x07, 0xbf, 0x57, 0x95, 0x6f, 0x01, 0xb7, 0xda, 0x7c, 0x23, 0xe6, 0x93, 0x52, 0x06, + 0x57, 0x28, 0x6f, 0xe7, 0x3e, 0xee, 0x9e, 0xb1, 0xd5, 0x83, 0x75, 0x22, 0x03, 0xf3, + 0xd9, 0x2b, 0xd4, 0x04, 0x7b, 0x83, 0xfd, 0x38, 0xf5, 0x66, 0xdd, 0x25, 0xb9, 0x6d, + 0x11, 0xb7, 0x22, 0x2b, 0x67, 0x82, 0xda, 0xde, 0xf5, 0xee, 0x78, 0x82, 0x14, 0x7c, + 0xbb, 0x4f, 0xcf, 0xe7, 0x0d, 0x2c, 0xa7, 0xf3, 0x9a, 0x29, 0x7b, 0x21, 0xd5, 0x6d, + 0x66, 0x10, 0xe9, 0xda, 0x9d, 0x8e, 0xef, 0xdc, 0x69, 0x9e, 0x4a, 0x30, 0x06, 0x8a, + 0x14, 0x57, 0xcf, 0x5e, 0xaf, 0x69, 0x87, 0x78, 0x21, 0xd3, 0x9e, 0xa0, 0x85, 0x94, + 0xc2, 0xfb, 0x9e, 0xb9, 0xd8, 0x04, 0x64, 0x50, 0xe4, 0x13, 0x03, 0xf1, 0x95, 0xbd, + 0xc9, 0x05, 0xe4, 0xf2, 0x58, 0x3c, 0x6a, 0xe3, 0x86, 0x1b, 0x87, 0x19, 0xbb, 0xce, + 0xd1, 0xce, 0x58, 0xc4, 0x68, 0x81, 0x6d, 0x45, 0x15, 0xe6, 0x09, 0x7b, 0x3e, 0x2e, + 0x81, 0x82, 0x21, 0x0f, 0x6c, 0x1b, 0xb3, 0xaa, 0xa6, 0x2a, 0xe0, 0xf6, 0x9f, 0x79, + 
0xfc, 0xc5, 0x47, 0xba, 0xab, 0x31, 0x1d, 0x99, 0x7c, 0x84, 0x95, 0xd6, 0xab, 0xe3, + 0xa5, 0x1f, 0x56, 0x53, 0xf3, 0x1c, 0x5a, 0x2e, 0xea, 0x8d, 0x31, 0x90, 0x97, 0xf3, + 0x04, 0x5e, 0x6c, 0x3c, 0x3d, 0x8c, 0x87, 0xc9, 0xbd, 0x55, 0xb4, 0x19, 0x2e, 0xbf, + 0x00, 0xff, 0x8f, 0xc7, 0xf4, 0x1e, 0x18, 0x93, 0x0a, 0x99, 0x72, 0xa3, 0x4d, 0x9e, + 0x6a, 0xa9, 0xd9, 0x1d, 0x2e, 0x28, 0x17, 0xeb, 0x6d, 0xe9, 0xba, 0x38, 0x9e, 0x69, + 0xaa, 0x51, 0x2f, 0x3f, 0xb4, 0xdf, 0xf8, 0xca, 0x1c, 0xe7, 0xc9, 0xca, 0x39, 0x6e, + 0x8a, 0x9d, 0x99, 0xd4, 0x96, 0x51, 0xb0, 0x58, 0x2f, 0xc5, 0x86, 0xce, 0x92, 0x7e, + 0xa2, 0x64, 0x5b, 0xda, 0xa3, 0x79, 0x28, 0x6f, 0x95, 0xd3, 0x9b, 0x95, 0x81, 0xde, + 0xb2, 0xc5, 0x37, 0x75, 0xae, 0xef, 0x20, 0xe7, 0xbd, 0xbc, 0x3b, 0x19, 0xd8, 0x9b, + 0xac, 0xee, 0xa1, 0x3b, 0x74, 0xe6, 0xc7, 0xf5, 0x20, 0x89, 0x39, 0x7d, 0x11, 0x6e, + 0xbf, 0xac, 0x6a, 0x30, 0xed, 0x27, 0xd6, 0x27, 0x81, 0xa0, 0x3b, 0x66, 0xb0, 0x52, + 0xf7, 0x51, 0xfb, 0x36, 0x88, 0x2b, 0x9a, 0x14, 0x34, 0x23, 0xad, 0x02, 0xf3, 0x36, + 0x0a, 0xfa, 0x54, 0xc4, 0xcf, 0x23, 0x53, 0x0c, 0x68, 0xd6, 0x0e, 0x99, 0x56, 0x1c, + 0xce, 0x0d, 0x6a, 0x9c, 0x32, 0xef, 0xc7, 0x1f, 0xef, 0xaf, 0x23, 0x57, 0x86, 0x3f, + 0xa0, 0xb9, 0xf7, 0xbe, 0x76, 0xc2, 0xd1, 0xd3, 0x88, 0x49, 0xa0, 0x0a, 0xb0, 0x41, + 0xf1, 0x82, 0xad, 0x63, 0x35, 0xe9, 0x55, 0xcc, 0x65, 0xcd, 0xfd, 0x3b, 0x69, 0x1a, + 0x3d, 0x96, 0xc4, 0xbd, 0x56, 0xf5, 0x25, 0xce, 0xdb, 0x7f, 0xdc, 0xb7, 0x33, 0xe7, + 0x67, 0x06, 0x2f, 0xd8, 0xa4, 0xef, 0x1a, 0x4b, 0x71, 0x5e, 0x5e, 0xdf, 0x76, 0x26, + 0x14, 0x4e, 0x28, 0x5f, 0x2b, 0x3c, 0x4e, 0x2c, 0xb4, 0x1b, 0x7d, 0xb9, 0x66, 0x35, + 0x82, 0xad, 0x65, 0xa5, 0x41, 0x6e, 0x57, 0xf7, 0x48, 0x5f, 0x39, 0xc0, 0x5e, 0x8e, + 0x7a, 0xf9, 0x6b, 0x36, 0x78, 0xc8, 0x0a, 0x8d, 0x4b, 0xa2, 0xf9, 0x5d, 0x5f, 0xeb, + 0x0c, 0xcb, 0x0f, 0x71, 0x7b, 0x9d, 0xb7, 0x24, 0xab, 0xf4, 0xcc, 0xd4, 0x10, 0x49, + 0x00, 0x18, 0x6f, 0x4a, 0x93, 0x0d, 0x4b, 0x2a, 0xcb, 0x9f, 0x9a, 0x16, 0xaf, 0x89, + 0x77, 0x27, 0x7d, 
0x6f, 0x0b, 0xc9, 0x0a, 0xb8, 0x59, 0xc3, 0x33, 0x3b, 0x3d, 0xe8, + 0x6f, 0x41, 0xfa, 0x85, 0xd5, 0x70, 0xf1, 0x6c, 0x74, 0x82, 0x0a, 0x70, 0x41, 0xfe, + 0xa1, 0x5e, 0xe9, 0x50, 0xc3, 0x30, 0xac, 0xa3, 0xf1, 0xe5, 0x1c, 0x69, 0x44, 0x74, + 0x72, 0xf2, 0x6a, 0x3d, 0x67, 0x41, 0xbc, 0x67, 0xe9, 0x2e, 0x00, 0xa0, 0x83, 0xb6, + 0x95, 0x33, 0x03, 0xb3, 0x73, 0x1c, 0xf2, 0x84, 0x8d, 0x81, 0x7c, 0xeb, 0x77, 0xf1, + 0xcc, 0xa7, 0x1e, 0xc9, 0x13, 0x91, 0x20, 0x2b, 0x73, 0x4d, 0x54, 0x8f, 0xa3, 0x14, + 0x2c, 0x37, 0xe6, 0xfc, 0xac, 0x51, 0x92, 0xfc, 0xa2, 0x8d, 0x63, 0x98, 0x1f, 0x67, + 0xdd, 0xdc, 0x28, 0xb3, 0x1f, 0xd0, 0xb9, 0x3a, 0x7f, 0x21, 0x88, 0xc1, 0xec, 0xa2, + 0xc1, 0xef, 0xa4, 0x61, 0xd2, 0xdd, 0x73, 0x38, 0xdf, 0x07, 0x05, 0xae, 0x70, 0x10, + 0x62, 0xfb, 0xcd, 0x8d, 0x50, 0x29, 0x98, 0x85, 0xd8, 0xe3, 0xd4, 0xfb, 0xd6, 0xa4, + 0xf2, 0x15, 0x5d, 0xc8, 0xd8, 0xfd, 0x0b, 0x05, 0x8f, 0x3c, 0x77, 0x50, 0x83, 0xf5, + 0x96, 0x12, 0xac, 0x66, 0x02, 0xd9, 0xad, 0xfa, 0x49, 0xe2, 0x60, 0x2a, 0x12, 0xf2, + 0x90, 0x0d, 0x22, 0xb9, 0x9c, 0x0b, 0x8a, 0x0a, 0x25, 0x95, 0x1b, 0xfd, 0xf5, 0x05, + 0x00, 0x9e, 0x50, 0x0b, 0xa8, 0x77, 0x44, 0xb8, 0xc1, 0xd6, 0xd7, 0x26, 0x37, 0xa8, + 0x4e, 0xe2, 0x78, 0xa6, 0xb2, 0x41, 0xda, 0x80, 0x4f, 0xde, 0x4e, 0x23, 0x5e, 0xf0, + 0x93, 0x65, 0xaa, 0xb6, 0x03, 0x30, 0x04, 0xfe, 0xd7, 0x12, 0xb4, 0xde, 0x15, 0xad, + 0x5f, 0x01, 0x71, 0xad, 0x51, 0xed, 0xfa, 0x54, 0xdb, 0xd4, 0x8b, 0x1f, 0xcc, 0x5e, + 0xf6, 0xac, 0x73, 0xcf, 0x0a, 0x28, 0xe9, 0xd9, 0x3e, 0x0c, 0xaf, 0xad, 0x88, 0x16, + 0x76, 0x1b, 0x3b, 0xe6, 0x38, 0x39, 0x8c, 0x00, 0x14, 0x33, 0x38, 0xea, 0x27, 0xa9, + 0xff, 0xf2, 0x2e, 0xc4, 0x73, 0x16, 0x36, 0x96, 0x12, 0x25, 0xca, 0x49, 0xe0, 0x13, + 0xa6, 0xdc, 0x80, 0x2b, 0xc7, 0xfb, 0x77, 0xca, 0xd1, 0x0a, 0xca, 0xfe, 0xfc, 0xe5, + 0xfa, 0x9a, 0x37, 0x35, 0x63, 0xb3, 0x91, 0x7a, 0x3a, 0x37, 0x39, 0xcc, 0x97, 0x80, + 0xea, 0x81, 0x50, 0x73, 0xde, 0x8e, 0xb4, 0x2e, 0x3f, 0x66, 0x93, 0xe8, 0x52, 0xbe, + 0xfd, 0xde, 0xdd, 0x61, 0x91, 0x29, 
0xd0, 0xaa, 0x13, 0xc4, 0xbd, 0x83, 0x86, 0x22, + 0xb5, 0xe3, 0x28, 0x56, 0x35, 0x8e, 0x6d, 0x82, 0x78, 0x78, 0x95, 0x7e, 0x5d, 0x15, + 0x6a, 0x5f, 0x46, 0x6d, 0xed, 0x00, 0x3f, 0x79, 0x11, 0x87, 0x6a, 0xae, 0x40, 0xeb, + 0x9e, 0xcc, 0x76, 0xe3, 0x8b, 0x69, 0x04, 0x22, 0xab, 0xce, 0xfb, 0x38, 0x4a, 0x13, + 0x82, 0x0d, 0x04, 0x7f, 0x9f, 0x9e, 0x66, 0x0f, 0xfa, 0xa7, 0x71, 0x80, 0xcb, 0xa2, + 0x6f, 0x90, 0xda, 0x00, 0x7c, 0xda, 0x40, 0x57, 0xa6, 0xce, 0xa2, 0xe2, 0x6b, 0xfd, + 0xe5, 0x0a, 0x09, 0x20, 0x11, 0x9d, 0xf7, 0x29, 0x2c, 0x8c, 0x28, 0x47, 0x65, 0x0f, + 0xbf, 0x42, 0x80, 0x57, 0x12, 0x8a, 0x02, 0x04, 0x0e, 0xb3, 0xe3, 0x2d, 0xb5, 0x0c, + 0xa7, 0xd8, 0xda, 0x7f, 0xf4, 0xc4, 0xa7, 0xa0, 0xe9, 0xcf, 0x4b, 0x65, 0x2b, 0x65, + 0x3d, 0x42, 0x8f, 0x83, 0xf4, 0x85, 0x33, 0x57, 0x84, 0x1b, 0x28, 0x13, 0x80, 0x55, + 0xb9, 0x13, 0x81, 0x17, 0x79, 0x0a, 0x91, 0xe2, 0x8f, 0xaa, 0x41, 0x2f, 0xd7, 0xd0, + 0x73, 0x32, 0x56, 0x73, 0x44, 0x85, 0xd1, 0xd6, 0xd1, 0xa9, 0x8c, 0xc2, 0xd7, 0xc8, + 0x2b, 0x37, 0x9e, 0x60, 0x72, 0x5d, 0x31, 0x8c, 0x14, 0x77, 0xce, 0x49, 0x6c, 0x95, + 0x86, 0x31, 0x08, 0xa1, 0xc7, 0xe4, 0xf0, 0x20, 0x0b, 0x7a, 0x3c, 0x08, 0x8d, 0xe7, + 0x7e, 0xb4, 0xbc, 0x95, 0xa1, 0xc6, 0xc8, 0x39, 0xd7, 0x5f, 0xab, 0x59, 0x40, 0xd3, + 0x07, 0x94, 0x24, 0xd5, 0x23, 0xd6, 0xd9, 0xa4, 0x6b, 0xe5, 0x4e, 0x18, 0xf5, 0x29, + 0xdc, 0x9e, 0x56, 0x77, 0x6c, 0x5e, 0xc4, 0x51, 0xce, 0x28, 0x07, 0x9d, 0x37, 0x82, + 0x6a, 0xec, 0x40, 0x97, 0xca, 0x7a, 0xee, 0xc8, 0x08, 0x3f, 0xf5, 0xc4, 0x29, 0x56, + 0x9f, 0x91, 0x53, 0xf6, 0x96, 0xbe, 0x62, 0xbd, 0x38, 0xa3, 0xe7, 0x27, 0xa6, 0x8a, + 0xcc, 0xdf, 0xab, 0x02, 0x9b, 0x0b, 0x21, 0xe6, 0xd0, 0xcd, 0x46, 0xa7, 0x7e, 0x40, + 0x89, 0x00, 0x07, 0x39, 0x1c, 0x17, 0x8b, 0x39, 0xe5, 0x43, 0xf2, 0xba, 0xc0, 0xc9, + 0x0e, 0x54, 0xad, 0x2c, 0x8a, 0xd0, 0x1f, 0x4b, 0xdc, 0x02, 0xd8, 0x64, 0x35, 0xff, + 0x00, 0x16, 0xb8, 0xcf, 0x95, 0xe6, 0x78, 0xfb, 0x71, 0x09, 0x47, 0xcf, 0xd9, 0x04, + 0x3e, 0xc5, 0xe6, 0x0d, 0x3d, 0x4e, 0xd1, 0x1b, 0x3c, 
0x95, 0xdb, 0xa0, 0xd2, 0xf6, + 0xac, 0x67, 0xa6, 0x89, 0x0b, 0x34, 0x42, 0x88, 0xa1, 0xc3, 0xa1, 0xc9, 0xa0, 0x87, + 0xcd, 0x10, 0x7d, 0xf3, 0xbe, 0x99, 0x87, 0xcd, 0xb1, 0x1c, 0xd1, 0x30, 0xf1, 0x6f, + 0x37, 0x0d, 0x73, 0xd5, 0xf2, 0x11, 0x1d, 0x45, 0x86, 0x0e, 0x9f, 0xca, 0x0e, 0xae, + 0x7d, 0x70, 0xd9, 0xee, 0x8e, 0x49, 0x2a, 0xa6, 0x4f, 0x1c, 0x4c, 0x2f, 0x76, 0x8d, + 0xcb, 0xf6, 0x99, 0x49, 0x8c, 0x1e, 0x4a, 0x61, 0x77, 0x80, 0xde, 0xcb, 0x06, 0x26, + 0xd4, 0x70, 0x94, 0x9c, 0xb8, 0xd9, 0x3e, 0xfe, 0x6c, 0x5b, 0xc7, 0x91, 0xca, 0x93, + 0xb1, 0x10, 0xc1, 0x82, 0x5b, 0x6a, 0xfb, 0x04, 0x5d, 0x9d, 0x8c, 0xa3, 0x51, 0xf7, + 0xad, 0xa3, 0x28, 0xfd, 0xd5, 0x2a, 0xec, 0x29, 0x77, 0xd2, 0x94, 0x0e, 0x2c, 0xdc, + 0xb2, 0x66, 0x4d, 0x78, 0xb7, 0x6a, 0xc0, 0xe0, 0x6d, 0x78, 0x8e, 0x57, 0xf8, 0x24, + 0x4f, 0x44, 0x2c, 0x88, 0x6a, 0x8f, 0x31, 0x13, 0x7c, 0xd7, 0xf1, 0x9e, 0x82, 0x21, + 0xa3, 0x85, 0xcb, 0xfb, 0x3f, 0x7f, 0x2a, 0x1e, 0x79, 0x50, 0x4b, 0xcf, 0x1a, 0xe0, + 0x83, 0xb1, 0x29, 0x02, 0xa5, 0x01, 0x2c, 0xd5, 0xea, 0x2f, 0xc8, 0x56, 0x43, 0xdd, + 0xec, 0xee, 0xf4, 0xab, 0x95, 0x93, 0x43, 0x21, 0x9b, 0x0c, 0x63, 0xdd, 0x0a, 0x8b, + 0x0e, 0x23, 0x3e, 0xfc, 0x68, 0xfc, 0x63, 0x30, 0x73, 0xe6, 0x6c, 0x59, 0x97, 0x5f, + 0x23, 0x52, 0x4b, 0x6a, 0xa1, 0xab, 0x9a, 0xe7, 0xb1, 0x33, 0xd5, 0xf3, 0x0c, 0xf9, + 0xe1, 0xd0, 0xf9, 0xba, 0xd7, 0x1f, 0x67, 0x3f, 0x5b, 0x75, 0x4c, 0xf4, 0x00, 0x99, + 0x77, 0x57, 0xa6, 0x45, 0x8a, 0xd3, 0xb9, 0xdc, 0x8e, 0xc0, 0xc6, 0x9c, 0x66, 0x09, + 0x66, 0x3b, 0x42, 0xbb, 0xb0, 0xca, 0x1a, 0x55, 0x73, 0x37, 0x42, 0x81, 0x1f, 0x0d, + 0x71, 0x30, 0xe0, 0x13, 0xfe, 0x2f, 0x88, 0x05, 0x8e, 0x32, 0x68, 0xa0, 0x19, 0xc0, + 0xdd, 0xf3, 0x14, 0x3e, 0x8a, 0xf4, 0x13, 0x07, 0xd9, 0x26, 0x74, 0x02, 0x13, 0x08, + 0x59, 0xee, 0x92, 0x43, 0x4d, 0x23, 0x79, 0xe9, 0x4b, 0xcb, 0xbe, 0x56, 0x1d, 0xe0, + 0x42, 0x92, 0xb5, 0x32, 0xab, 0xc3, 0x5d, 0xde, 0x53, 0xd2, 0xad, 0x86, 0x7f, 0x7a, + 0xd9, 0x42, 0x00, 0xe4, 0x8e, 0x50, 0x3e, 0x7d, 0x41, 0x6b, 0xcf, 0x98, 
0x29, 0x9f, + 0x82, 0xfc, 0xba, 0xe2, 0xdc, 0x42, 0xae, 0xc1, 0x8a, 0x29, 0x3b, 0x63, 0x79, 0x5b, + 0x68, 0x63, 0xf3, 0x22, 0x49, 0xcd, 0x20, 0x5e, 0x54, 0xd7, 0xcb, 0x7c, 0x82, 0x3b, + 0x00, 0x74, 0x77, 0x35, 0x96, 0xc1, 0xc5, 0x33, 0x92, 0x1d, 0x3b, 0xae, 0x11, 0xfe, + 0x1c, 0x6b, 0xfb, 0x77, 0x74, 0xe1, 0x49, 0x88, 0x64, 0xf3, 0xb6, 0x26, 0xd4, 0xcb, + 0x14, 0x47, 0x95, 0xd8, 0xf3, 0x59, 0xf5, 0xc5, 0x5d, 0xa3, 0xd7, 0x11, 0x70, 0x4e, + 0x74, 0x29, 0x58, 0x95, 0x5e, 0xaf, 0xa4, 0xb7, 0xd0, 0x31, 0xb2, 0xd6, 0xda, 0x0c, + 0x52, 0x9d, 0x41, 0xf3, 0x16, 0x93, 0xe4, 0xe5, 0x10, 0xb6, 0xb1, 0xe4, 0xab, 0xb6, + 0x01, 0x5f, 0x0d, 0x6d, 0x12, 0x61, 0x5e, 0xc1, 0xea, 0xf2, 0x75, 0xd4, 0x62, 0x96, + 0x2f, 0x17, 0x68, 0x4a, 0x7a, 0x25, 0x30, 0x1a, 0x99, 0x55, 0x5d, 0xef, 0x47, 0x15, + 0xff, 0x62, 0xce, 0x3c, 0xa6, 0x71, 0x9e, 0x16, 0x78, 0x6f, 0x61, 0x07, 0x87, 0x56, + 0x76, 0x2c, 0xa5, 0x51, 0x2a, 0xef, 0xc8, 0x07, 0x59, 0xd6, 0xe3, 0x1c, 0x95, 0x26, + 0x71, 0x1b, 0x99, 0xb0, 0xc7, 0x8f, 0xb1, 0x98, 0x58, 0xa1, 0x11, 0xde, 0x43, 0xd7, + 0xe2, 0xd0, 0x05, 0x6c, 0x78, 0x48, 0x65, 0x69, 0x47, 0x6c, 0xd3, 0xa4, 0x47, 0x93, + 0x57, 0xf7, 0x8c, 0x29, 0xdb, 0x65, 0xd3, 0xf8, 0x8b, 0xec, 0xbb, 0xed, 0x02, 0x00 + ], + }, + ]; +} + +/// Returns reference to transaction test vectors +#[allow(missing_docs)] +pub fn get_test_vectors() -> &'static Vec { + &TEST_VECTORS +} diff --git a/zainod/Cargo.toml b/zainod/Cargo.toml index 8e3aaeb53..8e2302863 100644 --- a/zainod/Cargo.toml +++ b/zainod/Cargo.toml @@ -1,10 +1,12 @@ [package] name = "zainod" description = "Crate containing the Zaino Indexer binary." 
-edition = { workspace = true } authors = { workspace = true } -license = { workspace = true } repository = { workspace = true } +homepage = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +version = { workspace = true } [[bin]] name = "zainod" @@ -14,17 +16,50 @@ path = "src/main.rs" name = "zainodlib" path = "src/lib.rs" +[features] +# Removes network restrictions. +no_tls_use_unencrypted_traffic = ["zaino-serve/no_tls_use_unencrypted_traffic"] + +# **Experimental and alpha features** +# Exposes the **complete** set of experimental / alpha features currently implemented in Zaino. +experimental_features = ["transparent_address_history_experimental"] + +# Activates transparent address history capability in zaino +# +# NOTE: currently this is only implemented in the finalised state. +transparent_address_history_experimental = [ + "zaino-state/transparent_address_history_experimental", + "zaino-serve/transparent_address_history_experimental" +] + [dependencies] -zaino-fetch = { path = "../zaino-fetch" } -zaino-serve = { path = "../zaino-serve" } +zaino-common = { workspace = true } +zaino-fetch = { workspace = true } +zaino-state = { workspace = true } +zaino-serve = { workspace = true } + +# Zebra +zebra-chain = { workspace = true } +zebra-state = { workspace = true } -# Miscellaneous Workspace +# Runtime tokio = { workspace = true, features = ["full"] } + +# CLI +clap = { workspace = true, features = ["derive"] } + +# Tracing +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = ["fmt", "env-filter", "time"] } + +# Network / RPC http = { workspace = true } +serde = { workspace = true, features = ["derive"] } + +# Utility thiserror = { workspace = true } -# Miscellaneous Crate -serde = { workspace = true, features = ["derive"] } -ctrlc = { workspace = true } +# Formats toml = { workspace = true } -clap = { workspace = true, features = ["derive"] } +config = { workspace = true } +tempfile = { 
workspace = true } diff --git a/zainod/src/cli.rs b/zainod/src/cli.rs new file mode 100644 index 000000000..481ac2ad7 --- /dev/null +++ b/zainod/src/cli.rs @@ -0,0 +1,78 @@ +//! Command-line interface for Zaino. + +use std::path::PathBuf; + +use clap::{Parser, Subcommand}; +use zaino_common::xdg::resolve_path_with_xdg_config_defaults; + +/// Returns the default config path following XDG Base Directory spec. +/// +/// Uses `$XDG_CONFIG_HOME/zaino/zainod.toml` if set, +/// otherwise falls back to `$HOME/.config/zaino/zainod.toml`, +/// or `/tmp/zaino/.config/zaino/zainod.toml` if HOME is unset. +pub fn default_config_path() -> PathBuf { + resolve_path_with_xdg_config_defaults("zaino/zainod.toml") +} + +/// The Zcash Indexing Service. +#[derive(Parser, Debug)] +#[command( + name = "zainod", + version, + about = "Zaino - The Zcash Indexing Service", + long_about = None +)] +pub struct Cli { + /// Subcommand to execute. + #[command(subcommand)] + pub command: Command, +} + +/// Available subcommands. +#[derive(Subcommand, Debug, Clone)] +pub enum Command { + /// Start the Zaino indexer service. + Start { + /// Path to the configuration file. Defaults to $XDG_CONFIG_HOME/zaino/zainod.toml + #[arg(short, long, value_name = "FILE")] + config: Option, + }, + /// Generate a configuration file with default values. + GenerateConfig { + /// Output path for the generated config file. Defaults to $XDG_CONFIG_HOME/zaino/zainod.toml + #[arg(short, long, value_name = "FILE")] + output: Option, + }, +} + +impl Command { + /// Generate a configuration file with default values. 
+ pub fn generate_config(output: Option) { + let path = output.unwrap_or_else(default_config_path); + + let content = match crate::config::generate_default_config() { + Ok(content) => content, + Err(e) => { + eprintln!("Error generating config: {}", e); + std::process::exit(1); + } + }; + + // Create parent directories if needed + if let Some(parent) = path.parent() { + if !parent.exists() { + if let Err(e) = std::fs::create_dir_all(parent) { + eprintln!("Error creating directory {}: {}", parent.display(), e); + std::process::exit(1); + } + } + } + + if let Err(e) = std::fs::write(&path, &content) { + eprintln!("Error writing to {}: {}", path.display(), e); + std::process::exit(1); + } + + eprintln!("Generated config file: {}", path.display()); + } +} diff --git a/zainod/src/config.rs b/zainod/src/config.rs index e366d5ade..cc0847b11 100644 --- a/zainod/src/config.rs +++ b/zainod/src/config.rs @@ -1,86 +1,1002 @@ //! Zaino config. +use std::{ + net::{IpAddr, SocketAddr}, + path::PathBuf, +}; + +use serde::{Deserialize, Serialize}; +use tracing::info; +#[cfg(feature = "no_tls_use_unencrypted_traffic")] +use tracing::warn; + use crate::error::IndexerError; +use zaino_common::{ + try_resolve_address, AddressResolution, Network, ServiceConfig, StorageConfig, ValidatorConfig, +}; +use zaino_serve::server::config::{GrpcServerConfig, JsonRpcServerConfig}; +#[allow(deprecated)] +use zaino_state::{BackendType, FetchServiceConfig, StateServiceConfig}; + +/// Header for generated configuration files. +pub const GENERATED_CONFIG_HEADER: &str = r#"# Zaino Configuration +# +# Generated with `zainod generate-config` +# +# Configuration sources are layered (highest priority first): +# 1. Environment variables (prefix: ZAINO_) +# 2. TOML configuration file +# 3. Built-in defaults +# +# For detailed documentation, see: +# https://github.com/zingolabs/zaino + +"#; -/// Config information required for Zaino. 
-#[derive(Debug, Clone, serde::Deserialize)] -pub struct IndexerConfig { - /// Sets the TcpIngestor's status. - pub tcp_active: bool, - /// TcpIngestors listen port - pub listen_port: Option, - /// LightWalletD listen port [DEPRECATED]. - /// Used by zingo-testutils. - pub lightwalletd_port: u16, - /// Full node / validator listen port. - pub zebrad_port: u16, - /// Full node Username. - pub node_user: Option, - /// full node Password. - pub node_password: Option, - /// Maximum requests allowed in the request queue. - pub max_queue_size: u16, - /// Maximum workers allowed in the worker pool - pub max_worker_pool_size: u16, - /// Minimum number of workers held in the workerpool when idle. - pub idle_worker_pool_size: u16, +/// Generate default configuration file content. +/// +/// Returns the full config file content including header and TOML-serialized defaults. +pub fn generate_default_config() -> Result { + let config = ZainodConfig::default(); + + let toml_content = toml::to_string_pretty(&config) + .map_err(|e| IndexerError::ConfigError(format!("Failed to serialize config: {}", e)))?; + + Ok(format!("{}{}", GENERATED_CONFIG_HEADER, toml_content)) } -impl IndexerConfig { - /// Performs checks on config data. +/// Sensitive key suffixes that should not be set via environment variables. +const SENSITIVE_KEY_SUFFIXES: [&str; 5] = ["password", "secret", "token", "cookie", "private_key"]; + +/// Checks if a key is sensitive and should not be set via environment variables. +fn is_sensitive_leaf_key(leaf_key: &str) -> bool { + let key = leaf_key.to_ascii_lowercase(); + SENSITIVE_KEY_SUFFIXES + .iter() + .any(|suffix| key.ends_with(suffix)) +} + +/// Zaino daemon configuration. +/// +/// Field order matters for TOML serialization: simple values must come before tables. 
+#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct ZainodConfig { + // Simple values first (TOML requirement) + /// Backend type for fetching blockchain data. + pub backend: BackendType, + /// Path to Zebra's state database. /// - /// - Checks that at least 1 ingestor is active. - /// - Checks listen port is given is tcp is active. + /// Required when using the `state` backend. + pub zebra_db_path: PathBuf, + /// Network to connect to (Mainnet, Testnet, or Regtest). + pub network: Network, + + // Table sections + /// JSON-RPC server settings. Set to enable Zaino's JSON-RPC interface. + pub json_server_settings: Option, + /// gRPC server settings (listen address, TLS configuration). + pub grpc_settings: GrpcServerConfig, + /// Validator connection settings. + pub validator_settings: ValidatorConfig, + /// Service-level settings (timeout, channel size). + pub service: ServiceConfig, + /// Storage settings (cache and database). + pub storage: StorageConfig, +} + +impl ZainodConfig { + /// Performs checks on config data. pub(crate) fn check_config(&self) -> Result<(), IndexerError> { - if !self.tcp_active { - return Err(IndexerError::ConfigError( - "Cannot start server with no ingestors selected.".to_string(), - )); + // Check TLS settings. 
+ if self.grpc_settings.tls.is_some() { + let tls = self.grpc_settings.tls.as_ref().expect("to be Some"); + + if !std::path::Path::new(&tls.cert_path).exists() { + return Err(IndexerError::ConfigError(format!( + "TLS is enabled, but certificate path {:?} does not exist.", + tls.cert_path + ))); + } + + if !std::path::Path::new(&tls.key_path).exists() { + return Err(IndexerError::ConfigError(format!( + "TLS is enabled, but key path {:?} does not exist.", + tls.key_path + ))); + } + } + + // Check validator cookie authentication settings + if let Some(ref cookie_path) = self.validator_settings.validator_cookie_path { + if !std::path::Path::new(cookie_path).exists() { + return Err(IndexerError::ConfigError(format!( + "Validator cookie authentication is enabled, but cookie path '{:?}' does not exist.", + cookie_path + ))); + } + } + + #[cfg(not(feature = "no_tls_use_unencrypted_traffic"))] + let grpc_addr = + fetch_socket_addr_from_hostname(&self.grpc_settings.listen_address.to_string())?; + + // Validate the validator address using the richer result type that distinguishes + // between format errors (always fail) and DNS lookup failures (can defer for Docker). + let validator_addr_result = + try_resolve_address(&self.validator_settings.validator_jsonrpc_listen_address); + + // Validator address validation: + // - Resolved IPs: must be private (RFC1918/ULA) + // - Hostnames: validated at connection time (supports Docker/K8s service discovery) + // - Cookie auth: determined by validator_cookie_path config, not enforced by address type + match validator_addr_result { + AddressResolution::Resolved(validator_addr) => { + if !is_private_listen_addr(&validator_addr) { + return Err(IndexerError::ConfigError( + "Zaino may only connect to Zebra with private IP addresses.".to_string(), + )); + } + } + AddressResolution::UnresolvedHostname { ref address, .. 
} => { + info!( + "Validator address '{}' cannot be resolved at config time.", + address + ); + } + AddressResolution::InvalidFormat { address, reason } => { + // Invalid address format - always fail immediately. + return Err(IndexerError::ConfigError(format!( + "Invalid validator address '{}': {}", + address, reason + ))); + } + } + + #[cfg(not(feature = "no_tls_use_unencrypted_traffic"))] + { + // Ensure TLS is used when connecting to external addresses. + if !is_private_listen_addr(&grpc_addr) && self.grpc_settings.tls.is_none() { + return Err(IndexerError::ConfigError( + "TLS required when connecting to external addresses.".to_string(), + )); + } + } + + #[cfg(feature = "no_tls_use_unencrypted_traffic")] + { + warn!( + "Zaino built using no_tls_use_unencrypted_traffic feature, proceed with caution." + ); } - if self.tcp_active && self.listen_port.is_none() { - return Err(IndexerError::ConfigError( - "TCP is active but no address provided.".to_string(), - )); + + // Check gRPC and JsonRPC server are not listening on the same address. + if let Some(ref json_settings) = self.json_server_settings { + if json_settings.json_rpc_listen_address == self.grpc_settings.listen_address { + return Err(IndexerError::ConfigError( + "gRPC server and JsonRPC server must listen on different addresses." + .to_string(), + )); + } } + Ok(()) } + + /// Returns the network type currently being used by the server. 
+ pub fn get_network(&self) -> Result { + Ok(self.network.to_zebra_network()) + } } -impl Default for IndexerConfig { +impl Default for ZainodConfig { fn default() -> Self { Self { - tcp_active: true, - listen_port: Some(8080), - lightwalletd_port: 9067, - zebrad_port: 18232, - node_user: Some("xxxxxx".to_string()), - node_password: Some("xxxxxx".to_string()), - max_queue_size: 1024, - max_worker_pool_size: 32, - idle_worker_pool_size: 4, + backend: BackendType::default(), + json_server_settings: None, + grpc_settings: GrpcServerConfig { + listen_address: "127.0.0.1:8137".parse().unwrap(), + tls: None, + }, + validator_settings: ValidatorConfig { + validator_grpc_listen_address: Some("127.0.0.1:18230".to_string()), + validator_jsonrpc_listen_address: "127.0.0.1:18232".to_string(), + validator_cookie_path: None, + validator_user: Some("xxxxxx".to_string()), + validator_password: Some("xxxxxx".to_string()), + }, + service: ServiceConfig::default(), + storage: StorageConfig::default(), + zebra_db_path: default_zebra_db_path(), + network: Network::Testnet, } } } -/// Attempts to load config data from a toml file at the specified path. -pub fn load_config(file_path: &std::path::PathBuf) -> IndexerConfig { - let mut config = IndexerConfig::default(); - - if let Ok(contents) = std::fs::read_to_string(file_path) { - if let Ok(parsed_config) = toml::from_str::(&contents) { - config = IndexerConfig { - tcp_active: parsed_config.tcp_active, - listen_port: parsed_config.listen_port.or(config.listen_port), - lightwalletd_port: parsed_config.lightwalletd_port, - zebrad_port: parsed_config.zebrad_port, - node_user: parsed_config.node_user.or(config.node_user), - node_password: parsed_config.node_password.or(config.node_password), - max_queue_size: parsed_config.max_queue_size, - max_worker_pool_size: parsed_config.max_worker_pool_size, - idle_worker_pool_size: parsed_config.idle_worker_pool_size, - }; +/// Returns the default path for Zaino's ephemeral authentication cookie. 
+pub fn default_ephemeral_cookie_path() -> PathBuf { + zaino_common::xdg::resolve_path_with_xdg_runtime_defaults("zaino/.cookie") +} + +/// Loads the default file path for zebra's local db. +pub fn default_zebra_db_path() -> PathBuf { + zaino_common::xdg::resolve_path_with_xdg_cache_defaults("zebra") +} + +/// Resolves a hostname to a SocketAddr. +fn fetch_socket_addr_from_hostname(address: &str) -> Result { + zaino_common::net::resolve_socket_addr(address) + .map_err(|e| IndexerError::ConfigError(format!("Invalid address '{address}': {e}"))) +} + +/// Validates that the configured `address` is either: +/// - An RFC1918 (private) IPv4 address, or +/// - An IPv6 Unique Local Address (ULA) +pub(crate) fn is_private_listen_addr(addr: &SocketAddr) -> bool { + let ip = addr.ip(); + match ip { + IpAddr::V4(ipv4) => ipv4.is_private() || ipv4.is_loopback(), + IpAddr::V6(ipv6) => ipv6.is_unique_local() || ip.is_loopback(), + } +} + +/// Loads configuration from a TOML file with optional environment variable overrides. +/// +/// Configuration is layered: Defaults → TOML file → Environment variables (prefix: ZAINO_). +/// Sensitive keys (password, secret, token, cookie, private_key) are blocked from env vars. +pub fn load_config(file_path: &std::path::Path) -> Result { + load_config_with_env(file_path, "ZAINO") +} + +/// Loads configuration with a custom environment variable prefix. 
+pub fn load_config_with_env( + file_path: &std::path::Path, + env_prefix: &str, +) -> Result { + // Check for sensitive keys in environment variables before loading + let required_prefix = format!("{}_", env_prefix); + for (key, _) in std::env::vars() { + if let Some(without_prefix) = key.strip_prefix(&required_prefix) { + if let Some(leaf) = without_prefix.split("__").last() { + if is_sensitive_leaf_key(leaf) { + return Err(IndexerError::ConfigError(format!( + "Environment variable '{}' contains sensitive key '{}' - use config file instead", + key, leaf + ))); + } + } } } - config + let mut builder = config::Config::builder() + .set_default("backend", "fetch") + .map_err(|e| IndexerError::ConfigError(e.to_string()))?; + + // Add TOML file source + builder = builder.add_source( + config::File::from(file_path) + .format(config::FileFormat::Toml) + .required(true), + ); + + // Add environment variable source with ZAINO_ prefix and __ separator for nesting + // Note: config-rs lowercases all env var keys after stripping the prefix + builder = builder.add_source( + config::Environment::with_prefix(env_prefix) + .prefix_separator("_") + .separator("__") + .try_parsing(true), + ); + + let settings = builder + .build() + .map_err(|e| IndexerError::ConfigError(format!("Configuration loading failed: {}", e)))?; + + let mut parsed_config: ZainodConfig = settings + .try_deserialize() + .map_err(|e| IndexerError::ConfigError(format!("Configuration parsing failed: {}", e)))?; + + // Handle empty cookie_dir: if json_server_settings exists with empty cookie_dir, set default + if parsed_config + .json_server_settings + .as_ref() + .is_some_and(|json_settings| { + json_settings + .cookie_dir + .as_ref() + .is_some_and(|dir| dir.as_os_str().is_empty()) + }) + { + if let Some(ref mut json_config) = parsed_config.json_server_settings { + json_config.cookie_dir = Some(default_ephemeral_cookie_path()); + } + } + + parsed_config.check_config()?; + info!( + "Successfully loaded and 
validated config. Base TOML file checked: '{}'", + file_path.display() + ); + Ok(parsed_config) +} + +#[allow(deprecated)] +impl TryFrom for StateServiceConfig { + type Error = IndexerError; + + fn try_from(cfg: ZainodConfig) -> Result { + let grpc_listen_address = cfg + .validator_settings + .validator_grpc_listen_address + .as_ref() + .ok_or_else(|| { + IndexerError::ConfigError( + "Missing validator_grpc_listen_address in configuration".to_string(), + ) + })?; + + let validator_grpc_address = + fetch_socket_addr_from_hostname(grpc_listen_address).map_err(|e| { + let msg = match e { + IndexerError::ConfigError(msg) => msg, + other => other.to_string(), + }; + IndexerError::ConfigError(format!( + "Invalid validator_grpc_listen_address '{grpc_listen_address}': {msg}" + )) + })?; + + Ok(StateServiceConfig { + validator_state_config: zebra_state::Config { + cache_dir: cfg.zebra_db_path.clone(), + ephemeral: false, + delete_old_database: true, + debug_stop_at_height: None, + debug_validity_check_interval: None, + should_backup_non_finalized_state: true, + debug_skip_non_finalized_state_backup_task: false, + }, + validator_rpc_address: cfg + .validator_settings + .validator_jsonrpc_listen_address + .clone(), + validator_grpc_address, + validator_cookie_auth: cfg.validator_settings.validator_cookie_path.is_some(), + validator_cookie_path: cfg.validator_settings.validator_cookie_path, + validator_rpc_user: cfg + .validator_settings + .validator_user + .unwrap_or_else(|| "xxxxxx".to_string()), + validator_rpc_password: cfg + .validator_settings + .validator_password + .unwrap_or_else(|| "xxxxxx".to_string()), + service: cfg.service, + storage: cfg.storage, + network: cfg.network, + }) + } +} + +#[allow(deprecated)] +impl TryFrom for FetchServiceConfig { + type Error = IndexerError; + + fn try_from(cfg: ZainodConfig) -> Result { + Ok(FetchServiceConfig { + validator_rpc_address: cfg.validator_settings.validator_jsonrpc_listen_address, + validator_cookie_path: 
cfg.validator_settings.validator_cookie_path, + validator_rpc_user: cfg + .validator_settings + .validator_user + .unwrap_or_else(|| "xxxxxx".to_string()), + validator_rpc_password: cfg + .validator_settings + .validator_password + .unwrap_or_else(|| "xxxxxx".to_string()), + service: cfg.service, + storage: cfg.storage, + network: cfg.network, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::{env, sync::Mutex}; + use tempfile::TempDir; + + const ZAINO_ENV_PREFIX: &str = "ZAINO_"; + static TEST_MUTEX: Mutex<()> = Mutex::new(()); + + /// RAII guard for managing environment variables in tests. + /// Ensures test isolation by clearing ZAINO_* vars before tests + /// and restoring original values after. + struct EnvGuard { + _guard: std::sync::MutexGuard<'static, ()>, + original_vars: Vec<(String, String)>, + } + + impl EnvGuard { + fn new() -> Self { + let guard = TEST_MUTEX.lock().unwrap_or_else(|e| e.into_inner()); + let original_vars: Vec<_> = env::vars() + .filter(|(k, _)| k.starts_with(ZAINO_ENV_PREFIX)) + .collect(); + // Clear all ZAINO_* vars for test isolation + for (key, _) in &original_vars { + env::remove_var(key); + } + Self { + _guard: guard, + original_vars, + } + } + + fn set_var(&self, key: &str, value: &str) { + env::set_var(key, value); + } + } + + impl Drop for EnvGuard { + fn drop(&mut self) { + // Clear test vars + for (k, _) in env::vars().filter(|(k, _)| k.starts_with(ZAINO_ENV_PREFIX)) { + env::remove_var(&k); + } + // Restore originals + for (k, v) in &self.original_vars { + env::set_var(k, v); + } + } + } + + fn create_test_config_file(dir: &TempDir, content: &str, filename: &str) -> PathBuf { + let path = dir.path().join(filename); + std::fs::write(&path, content).unwrap(); + path + } + + #[test] + fn test_deserialize_full_valid_config() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + // Create mock files + let cert_file = temp_dir.path().join("test_cert.pem"); + let key_file = 
temp_dir.path().join("test_key.pem"); + let validator_cookie_file = temp_dir.path().join("validator.cookie"); + let zaino_cookie_dir = temp_dir.path().join("zaino_cookies_dir"); + let zaino_db_dir = temp_dir.path().join("zaino_db_dir"); + let zebra_db_dir = temp_dir.path().join("zebra_db_dir"); + + std::fs::write(&cert_file, "mock cert content").unwrap(); + std::fs::write(&key_file, "mock key content").unwrap(); + std::fs::write(&validator_cookie_file, "mock validator cookie content").unwrap(); + std::fs::create_dir_all(&zaino_cookie_dir).unwrap(); + std::fs::create_dir_all(&zaino_db_dir).unwrap(); + std::fs::create_dir_all(&zebra_db_dir).unwrap(); + + let toml_content = format!( + r#" +backend = "fetch" +zebra_db_path = "{}" +network = "Mainnet" + +[storage.database] +path = "{}" + +[validator_settings] +validator_jsonrpc_listen_address = "192.168.1.10:18232" +validator_cookie_path = "{}" +validator_user = "user" +validator_password = "password" + +[json_server_settings] +json_rpc_listen_address = "127.0.0.1:8000" +cookie_dir = "{}" + +[grpc_settings] +listen_address = "0.0.0.0:9000" + +[grpc_settings.tls] +cert_path = "{}" +key_path = "{}" +"#, + zebra_db_dir.display(), + zaino_db_dir.display(), + validator_cookie_file.display(), + zaino_cookie_dir.display(), + cert_file.display(), + key_file.display(), + ); + + let config_path = create_test_config_file(&temp_dir, &toml_content, "full_config.toml"); + let config = load_config(&config_path).expect("load_config failed"); + + assert_eq!(config.backend, BackendType::Fetch); + assert!(config.json_server_settings.is_some()); + assert_eq!( + config + .json_server_settings + .as_ref() + .unwrap() + .json_rpc_listen_address, + "127.0.0.1:8000".parse().unwrap() + ); + assert_eq!(config.network, Network::Mainnet); + assert_eq!( + config.grpc_settings.listen_address, + "0.0.0.0:9000".parse().unwrap() + ); + assert!(config.grpc_settings.tls.is_some()); + assert_eq!( + config.validator_settings.validator_user, + 
Some("user".to_string()) + ); + assert_eq!( + config.validator_settings.validator_password, + Some("password".to_string()) + ); + } + + #[test] + fn test_deserialize_optional_fields_missing() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +backend = "state" +network = "Testnet" +zebra_db_path = "/opt/zebra/data" + +[storage.database] +path = "/opt/zaino/data" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "optional_missing.toml"); + let config = load_config(&config_path).expect("load_config failed"); + let default_values = ZainodConfig::default(); + + assert_eq!(config.backend, BackendType::State); + assert!(config.json_server_settings.is_none()); + assert_eq!( + config.validator_settings.validator_user, + default_values.validator_settings.validator_user + ); + assert_eq!( + config.storage.cache.capacity, + default_values.storage.cache.capacity + ); + } + + #[test] + fn test_cookie_dir_logic() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + // Scenario 1: auth enabled, cookie_dir empty (should use default ephemeral path) + let toml_content = r#" +backend = "fetch" +network = "Testnet" +zebra_db_path = "/zebra/db" + +[storage.database] +path = "/zaino/db" + +[json_server_settings] +json_rpc_listen_address = "127.0.0.1:8237" +cookie_dir = "" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "s1.toml"); + let config = load_config(&config_path).expect("Config S1 failed"); + assert!(config.json_server_settings.is_some()); + assert!(config + .json_server_settings + .as_ref() + .unwrap() + .cookie_dir + .is_some()); + + // Scenario 2: auth enabled, cookie_dir specified + 
let toml_content2 = r#" +backend = "fetch" +network = "Testnet" +zebra_db_path = "/zebra/db" + +[storage.database] +path = "/zaino/db" + +[json_server_settings] +json_rpc_listen_address = "127.0.0.1:8237" +cookie_dir = "/my/cookie/path" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path2 = create_test_config_file(&temp_dir, toml_content2, "s2.toml"); + let config2 = load_config(&config_path2).expect("Config S2 failed"); + assert_eq!( + config2.json_server_settings.as_ref().unwrap().cookie_dir, + Some(PathBuf::from("/my/cookie/path")) + ); + + // Scenario 3: cookie_dir not specified (should be None) + let toml_content3 = r#" +backend = "fetch" +network = "Testnet" +zebra_db_path = "/zebra/db" + +[storage.database] +path = "/zaino/db" + +[json_server_settings] +json_rpc_listen_address = "127.0.0.1:8237" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path3 = create_test_config_file(&temp_dir, toml_content3, "s3.toml"); + let config3 = load_config(&config_path3).expect("Config S3 failed"); + assert!(config3.json_server_settings.unwrap().cookie_dir.is_none()); + } + + #[test] + fn test_deserialize_empty_string_yields_default() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + // Minimal valid config + let toml_content = r#" +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "empty.toml"); + let config = load_config(&config_path).expect("Empty TOML load failed"); + let default_config = ZainodConfig::default(); + + assert_eq!(config.network, default_config.network); + assert_eq!(config.backend, default_config.backend); + assert_eq!( + 
config.storage.cache.capacity, + default_config.storage.cache.capacity + ); + } + + #[test] + fn test_deserialize_invalid_backend_type() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +backend = "invalid_type" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "invalid_backend.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); + if let Err(IndexerError::ConfigError(msg)) = result { + assert!( + msg.contains("unknown variant") || msg.contains("invalid_type"), + "Unexpected error message: {}", + msg + ); + } + } + + #[test] + fn test_deserialize_invalid_socket_address() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +[json_server_settings] +json_rpc_listen_address = "not-a-valid-address" +cookie_dir = "" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "invalid_socket.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); + } + + #[test] + fn test_env_override_toml_and_defaults() { + let guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +network = "Testnet" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + guard.set_var("ZAINO_NETWORK", "Mainnet"); + guard.set_var( + "ZAINO_JSON_SERVER_SETTINGS__JSON_RPC_LISTEN_ADDRESS", + "127.0.0.1:0", + ); + guard.set_var("ZAINO_JSON_SERVER_SETTINGS__COOKIE_DIR", "/env/cookie/path"); + 
guard.set_var("ZAINO_STORAGE__CACHE__CAPACITY", "12345"); + + let config_path = create_test_config_file(&temp_dir, toml_content, "test_config.toml"); + let config = load_config(&config_path).expect("load_config should succeed"); + + assert_eq!(config.network, Network::Mainnet); + assert_eq!(config.storage.cache.capacity, 12345); + assert!(config.json_server_settings.is_some()); + assert_eq!( + config.json_server_settings.as_ref().unwrap().cookie_dir, + Some(PathBuf::from("/env/cookie/path")) + ); + } + + #[test] + fn test_toml_overrides_defaults() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + // json_server_settings without a listening address is forbidden + let toml_content = r#" +network = "Regtest" + +[json_server_settings] +json_rpc_listen_address = "" +cookie_dir = "" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "test_config.toml"); + assert!(load_config(&config_path).is_err()); + } + + #[test] + fn test_invalid_env_var_type() { + let guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + guard.set_var("ZAINO_STORAGE__CACHE__CAPACITY", "not_a_number"); + + let config_path = create_test_config_file(&temp_dir, toml_content, "test_config.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); + } + + #[test] + fn test_cookie_auth_not_forced_for_non_loopback_ip() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +backend = "fetch" +network = "Testnet" + +[validator_settings] +validator_jsonrpc_listen_address = "192.168.1.10:18232" + 
+[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "no_cookie_auth.toml"); + let config_result = load_config(&config_path); + assert!( + config_result.is_ok(), + "Non-loopback IP without cookie auth should succeed. Error: {:?}", + config_result.err() + ); + + let config = config_result.unwrap(); + assert!(config.validator_settings.validator_cookie_path.is_none()); + } + + #[test] + fn test_public_ip_still_rejected() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +backend = "fetch" +network = "Testnet" + +[validator_settings] +validator_jsonrpc_listen_address = "8.8.8.8:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "public_ip.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); + + if let Err(IndexerError::ConfigError(msg)) = result { + assert!(msg.contains("private IP")); + } + } + + #[test] + fn test_sensitive_env_var_blocked() { + let guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + guard.set_var("ZAINO_VALIDATOR_SETTINGS__VALIDATOR_PASSWORD", "secret123"); + + let config_path = + create_test_config_file(&temp_dir, toml_content, "sensitive_env_test.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); + + if let Err(IndexerError::ConfigError(msg)) = result { + assert!(msg.contains("sensitive key")); + assert!(msg.contains("VALIDATOR_PASSWORD")); + } + } + + #[test] + fn test_sensitive_key_detection() { + assert!(is_sensitive_leaf_key("password")); + 
assert!(is_sensitive_leaf_key("PASSWORD")); + assert!(is_sensitive_leaf_key("validator_password")); + assert!(is_sensitive_leaf_key("VALIDATOR_PASSWORD")); + assert!(is_sensitive_leaf_key("secret")); + assert!(is_sensitive_leaf_key("api_token")); + assert!(is_sensitive_leaf_key("cookie")); + assert!(is_sensitive_leaf_key("private_key")); + + assert!(!is_sensitive_leaf_key("username")); + assert!(!is_sensitive_leaf_key("address")); + assert!(!is_sensitive_leaf_key("network")); + } + + #[test] + fn test_unknown_fields_rejected() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +unknown_field = "value" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "unknown_fields.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); + } + + /// Verifies that `generate_default_config()` produces valid TOML. + /// + /// TOML requires simple values before table sections. If ZainodConfig field + /// order changes incorrectly, serialization fails with "values must be + /// emitted before tables". This test catches that regression. + #[test] + fn test_generate_default_config_produces_valid_toml() { + let content = generate_default_config().expect("should generate config"); + assert!(content.starts_with(GENERATED_CONFIG_HEADER)); + + let toml_part = content.strip_prefix(GENERATED_CONFIG_HEADER).unwrap(); + let parsed: Result = toml::from_str(toml_part); + assert!( + parsed.is_ok(), + "Generated config is not valid TOML: {:?}", + parsed.err() + ); + } + + /// Verifies config survives serialize → deserialize → serialize roundtrip. + /// + /// Catches regressions in custom serde impls (DatabaseSize, Network) and + /// ensures field ordering remains stable. 
If the second serialization differs + /// from the first, something is being lost or transformed during the roundtrip. + #[test] + fn test_config_roundtrip_serialize_deserialize() { + let original = ZainodConfig::default(); + + let toml_str = toml::to_string_pretty(&original).expect("should serialize"); + let roundtripped: ZainodConfig = toml::from_str(&toml_str).expect("should deserialize"); + let toml_str_again = toml::to_string_pretty(&roundtripped).expect("should serialize again"); + + assert_eq!( + toml_str, toml_str_again, + "config roundtrip should be stable" + ); + } } diff --git a/zainod/src/error.rs b/zainod/src/error.rs index ad08383b8..97c8e84cf 100644 --- a/zainod/src/error.rs +++ b/zainod/src/error.rs @@ -1,10 +1,14 @@ //! Hold error types for the Indexer and related functionality. -use zaino_fetch::jsonrpc::error::JsonRpcConnectorError; +use zaino_fetch::jsonrpsee::error::TransportError; use zaino_serve::server::error::ServerError; +#[allow(deprecated)] +use zaino_state::{FetchServiceError, StateServiceError}; + /// Zingo-Indexer errors. #[derive(Debug, thiserror::Error)] +#[allow(deprecated)] pub enum IndexerError { /// Server based errors. #[error("Server error: {0}")] @@ -12,9 +16,15 @@ pub enum IndexerError { /// Configuration errors. #[error("Configuration error: {0}")] ConfigError(String), - /// JSON RPC connector errors. - #[error("JSON RPC connector error: {0}")] - JsonRpcConnectorError(#[from] JsonRpcConnectorError), + /// JSON RPSee connector errors. + #[error("JSON RPSee connector error: {0}")] + TransportError(#[from] TransportError), + /// FetchService errors. + #[error("FetchService error: {0}")] + FetchServiceError(Box), + /// FetchService errors. + #[error("StateService error: {0}")] + StateServiceError(Box), /// HTTP related errors due to invalid URI. #[error("HTTP error: Invalid URI {0}")] HttpError(#[from] http::Error), @@ -24,4 +34,21 @@ pub enum IndexerError { /// Custom indexor errors. 
#[error("Misc indexer error: {0}")] MiscIndexerError(String), + /// Zaino restart signal. + #[error("Restart Zaino")] + Restart, +} + +#[allow(deprecated)] +impl From for IndexerError { + fn from(value: StateServiceError) -> Self { + IndexerError::StateServiceError(Box::new(value)) + } +} + +#[allow(deprecated)] +impl From for IndexerError { + fn from(value: FetchServiceError) -> Self { + IndexerError::FetchServiceError(Box::new(value)) + } } diff --git a/zainod/src/indexer.rs b/zainod/src/indexer.rs index 71169fc9a..cc4154f8b 100644 --- a/zainod/src/indexer.rs +++ b/zainod/src/indexer.rs @@ -1,242 +1,275 @@ -//! Zingo-Indexer implementation. - -use std::{ - net::SocketAddr, - process, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, -}; +//! Zaino : Zingo-Indexer implementation. + +use tokio::time::Instant; +use tracing::info; -use http::Uri; -use zaino_fetch::jsonrpc::connector::test_node_and_return_uri; -use zaino_serve::server::{ - director::{Server, ServerStatus}, - error::ServerError, - AtomicStatus, StatusType, +use zaino_fetch::jsonrpsee::connector::test_node_and_return_url; +use zaino_serve::server::{config::GrpcServerConfig, grpc::TonicServer, jsonrpc::JsonRpcServer}; +#[allow(deprecated)] +use zaino_state::{ + BackendType, FetchService, FetchServiceConfig, IndexerService, LightWalletService, + StateService, StateServiceConfig, StatusType, ZcashIndexer, ZcashService, }; -use crate::{config::IndexerConfig, error::IndexerError}; +use crate::{config::ZainodConfig, error::IndexerError}; -/// Holds the status of the server and all its components. -#[derive(Debug, Clone)] -pub struct IndexerStatus { - indexer_status: AtomicStatus, - server_status: ServerStatus, - // block_cache_status: BlockCacheStatus, +/// Zaino, the Zingo-Indexer. +pub struct Indexer { + /// JsonRPC server. + /// + /// Disabled by default. + json_server: Option, + /// GRPC server. + server: Option, + /// Chain fetch service state process handler.. 
+ service: Option>, } -impl IndexerStatus { - /// Creates a new IndexerStatus. - pub fn new(max_workers: u16) -> Self { - IndexerStatus { - indexer_status: AtomicStatus::new(5), - server_status: ServerStatus::new(max_workers), - } - } +/// Starts Indexer service. +/// +/// Currently only takes an IndexerConfig. +pub async fn start_indexer( + config: ZainodConfig, +) -> Result>, IndexerError> { + startup_message(); + info!("Starting Zaino.."); + spawn_indexer(config).await +} + +/// Spawns a new Indexer server. +pub async fn spawn_indexer( + config: ZainodConfig, +) -> Result>, IndexerError> { + config.check_config()?; + info!("Checking connection with node.."); + let zebrad_uri = test_node_and_return_url( + &config.validator_settings.validator_jsonrpc_listen_address, + config.validator_settings.validator_cookie_path.clone(), + config.validator_settings.validator_user.clone(), + config.validator_settings.validator_password.clone(), + ) + .await?; + + info!( + " - Connected to node using JsonRPSee at address {}.", + zebrad_uri + ); - /// Returns the IndexerStatus. - pub fn load(&self) -> IndexerStatus { - self.indexer_status.load(); - self.server_status.load(); - self.clone() + #[allow(deprecated)] + match config.backend { + BackendType::State => { + let state_config = StateServiceConfig::try_from(config.clone())?; + Indexer::::launch_inner(state_config, config) + .await + .map(|res| res.0) + } + BackendType::Fetch => { + let fetch_config = FetchServiceConfig::try_from(config.clone())?; + Indexer::::launch_inner(fetch_config, config) + .await + .map(|res| res.0) + } } } -/// Zingo-Indexer. -pub struct Indexer { - /// Indexer configuration data. - _config: IndexerConfig, - /// GRPC server. - server: Option, - // /// Internal block cache. - // block_cache: BlockCache, - /// Indexers status. - status: IndexerStatus, - /// Online status of the indexer. - online: Arc, -} +impl Indexer +where + IndexerError: From<::Error>, +{ + /// Spawns a new Indexer server. 
+ // TODO: revise whether returning the subscriber here is the best way to access the service after the indexer is spawned. + pub async fn launch_inner( + service_config: Service::Config, + indexer_config: ZainodConfig, + ) -> Result< + ( + tokio::task::JoinHandle>, + Service::Subscriber, + ), + IndexerError, + > { + let service = IndexerService::::spawn(service_config).await?; + let service_subscriber = service.inner_ref().get_subscriber(); -impl Indexer { - /// Starts Indexer service. - /// - /// Currently only takes an IndexerConfig. - pub async fn start(config: IndexerConfig) -> Result<(), IndexerError> { - let online = Arc::new(AtomicBool::new(true)); - set_ctrlc(online.clone()); - startup_message(); - self::Indexer::start_indexer_service(config, online) - .await? - .await? - } + let json_server = match indexer_config.json_server_settings { + Some(json_server_config) => Some( + JsonRpcServer::spawn(service.inner_ref().get_subscriber(), json_server_config) + .await + .unwrap(), + ), + None => None, + }; - /// Launches an Indexer service. - /// - /// Spawns an indexer service in a new task. - pub async fn start_indexer_service( - config: IndexerConfig, - online: Arc, - ) -> Result>, IndexerError> { - // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. - let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); - println!("Launching Zingdexer!"); - let mut indexer: Indexer = Indexer::new(config, online.clone()).await?; - Ok(tokio::task::spawn(async move { - let server_handle = if let Some(server) = indexer.server.take() { - Some(server.serve().await) - } else { - return Err(IndexerError::MiscIndexerError( - "Server Missing! 
Fatal Error!.".to_string(), - )); - }; - - indexer.status.indexer_status.store(2); + let grpc_server = TonicServer::spawn( + service.inner_ref().get_subscriber(), + GrpcServerConfig { + listen_address: indexer_config.grpc_settings.listen_address, + tls: indexer_config.grpc_settings.tls, + }, + ) + .await + .unwrap(); + + let mut indexer = Self { + json_server, + server: Some(grpc_server), + service: Some(service), + }; + + let mut server_interval = tokio::time::interval(tokio::time::Duration::from_millis(100)); + let mut last_log_time = Instant::now(); + let log_interval = tokio::time::Duration::from_secs(10); + + let serve_task = tokio::task::spawn(async move { loop { - indexer.status.load(); - // indexer.log_status(); + // Log the servers status. + if last_log_time.elapsed() >= log_interval { + indexer.log_status(); + last_log_time = Instant::now(); + } + + // Check for restart signals. + if indexer.check_for_critical_errors() { + indexer.close().await; + return Err(IndexerError::Restart); + } + + // Check for shutdown signals. if indexer.check_for_shutdown() { - indexer.status.indexer_status.store(4); - indexer.shutdown_components(server_handle).await; - indexer.status.indexer_status.store(5); + indexer.close().await; return Ok(()); } - interval.tick().await; + + server_interval.tick().await; } - })) + }); + + Ok((serve_task, service_subscriber.inner())) } - /// Creates a new Indexer. - /// - /// Currently only takes an IndexerConfig. 
- async fn new(config: IndexerConfig, online: Arc) -> Result { - config.check_config()?; - let status = IndexerStatus::new(config.max_worker_pool_size); - let tcp_ingestor_listen_addr: Option = config - .listen_port - .map(|port| SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::LOCALHOST), port)); - let lightwalletd_uri = Uri::builder() - .scheme("http") - .authority(format!("localhost:{}", config.lightwalletd_port)) - .path_and_query("/") - .build()?; - println!("Checking connection with node.."); - let zebrad_uri = test_node_and_return_uri( - &config.zebrad_port, - config.node_user.clone(), - config.node_password.clone(), - ) - .await?; - status.indexer_status.store(0); - let server = Some( - Server::spawn( - config.tcp_active, - tcp_ingestor_listen_addr, - lightwalletd_uri, - zebrad_uri, - config.max_queue_size, - config.max_worker_pool_size, - config.idle_worker_pool_size, - status.server_status.clone(), - online.clone(), - ) - .await?, - ); - println!("Server Ready."); - Ok(Indexer { - _config: config, - server, - status, - online, - }) + /// Checks indexers status and servers internal statuses for either offline of critical error signals. + fn check_for_critical_errors(&self) -> bool { + let status = self.status_int(); + status == 5 || status >= 7 } - /// Checks indexers online status and servers internal status for closure signal. + /// Checks indexers status and servers internal status for closure signal. fn check_for_shutdown(&self) -> bool { - if self.status() >= 4 { - return true; - } - if !self.check_online() { + if self.status_int() == 4 { return true; } false } /// Sets the servers to close gracefully. - pub fn shutdown(&mut self) { - self.status.indexer_status.store(4) - } + async fn close(&mut self) { + if let Some(mut json_server) = self.json_server.take() { + json_server.close().await; + json_server.status.store(StatusType::Offline); + } - /// Sets the server's components to close gracefully. 
- async fn shutdown_components( - &mut self, - server_handle: Option>>, - ) { - if let Some(handle) = server_handle { - self.status.server_status.server_status.store(4); - handle.await.ok(); + if let Some(mut server) = self.server.take() { + server.close().await; + server.status.store(StatusType::Offline); } - } - /// Returns the indexers current status usize. - pub fn status(&self) -> usize { - self.status.indexer_status.load() + if let Some(service) = self.service.take() { + let mut service = service.inner(); + service.close(); + } } - /// Returns the indexers current statustype. - pub fn statustype(&self) -> StatusType { - StatusType::from(self.status()) - } + /// Returns the indexers current status usize, calculates from internal statuses. + fn status_int(&self) -> usize { + let service_status = match &self.service { + Some(service) => service.inner_ref().status(), + None => return 7, + }; - /// Returns the status of the indexer and its parts. - pub fn statuses(&mut self) -> IndexerStatus { - self.status.load(); - self.status.clone() + let json_server_status = self + .json_server + .as_ref() + .map(|json_server| json_server.status()); + + let mut server_status = match &self.server { + Some(server) => server.status(), + None => return 7, + }; + + if let Some(json_status) = json_server_status { + server_status = StatusType::combine(server_status, json_status); + } + + usize::from(StatusType::combine(service_status, server_status)) } - /// Check the online status on the indexer. - fn check_online(&self) -> bool { - self.online.load(Ordering::SeqCst) + /// Returns the current StatusType of the indexer. + pub fn status(&self) -> StatusType { + StatusType::from(self.status_int()) } -} -fn set_ctrlc(online: Arc) { - ctrlc::set_handler(move || { - online.store(false, Ordering::SeqCst); - process::exit(0); - }) - .expect("Error setting Ctrl-C handler"); + /// Logs the indexers status. 
+ pub fn log_status(&self) { + let service_status = match &self.service { + Some(service) => service.inner_ref().status(), + None => StatusType::Offline, + }; + + let json_server_status = match &self.json_server { + Some(json_server) => json_server.status(), + None => StatusType::Offline, + }; + + let grpc_server_status = match &self.server { + Some(server) => server.status(), + None => StatusType::Offline, + }; + + let service_status_symbol = service_status.get_status_symbol(); + let json_server_status_symbol = json_server_status.get_status_symbol(); + let grpc_server_status_symbol = grpc_server_status.get_status_symbol(); + + info!( + "Zaino status check - ChainState Service:{}{} JsonRPC Server:{}{} gRPC Server:{}{}", + service_status_symbol, + service_status, + json_server_status_symbol, + json_server_status, + grpc_server_status_symbol, + grpc_server_status + ); + } } +/// Prints Zaino's startup message. fn startup_message() { let welcome_message = r#" - ░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░▒▒░░░░░ - ░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▓░▒▒▒░░ - ░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▓▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░▒▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▓▓▒▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒██▓▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒██▓▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓███▓██▓▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▒▓▓▓▓▒███▓░▒▓▓████████████████▓▓▒▒▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▓▓▓▓▒▓████▓▓███████████████████▓▒▓▓▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▓▓▓▓▓▒▒▓▓▓▓████████████████████▓▒▓▓▓▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▓▓▓▓▓█████████████████████████▓▒▓▓▓▓▓▒▒▒▒▒ - ▒▒▒▒▒▒▒▓▓▓▒▓█████████████████████████▓▓▓▓▓▓▓▓▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▓▓▓████████████████████████▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▓▒███████████████████████▒▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▓███████████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▓███████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▓██████████▓▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒███▓▒▓▓▓▓▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒ - 
▒▒▒▒▒▒▓████▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ - Thank you for using ZingoLabs Zaino! + ░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▓░▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▓▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▓▓▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒██▓▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒██▓▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓███▓██▓▒▒▒▒▒ + ▒▒▒▒▒▒▒▓▓▓▓▒███▓░▒▓▓████████████████▓▓▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▓▓▓▓▒▓████▓▓███████████████████▓▒▓▓▒▒▒▒ + ▒▒▒▒▒▓▓▓▓▓▒▒▓▓▓▓████████████████████▓▒▓▓▓▒▒▒▒ + ▒▒▒▒▒▓▓▓▓▓█████████████████████████▓▒▓▓▓▓▓▒▒▒ + ▒▒▒▒▓▓▓▒▓█████████████████████████▓▓▓▓▓▓▓▓▒▒▒ + ▒▒▒▒▒▓▓▓████████████████████████▓▓▓▓▓▓▓▓▓▒▒▒▒ + ▒▒▒▒▒▓▒███████████████████████▒▓▓▓▓▓▓▓▓▓▓▒▒▒▒ + ▒▒▒▒▒▒▓███████████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒ + ▒▒▒▒▒▒▓███████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒ + ▒▒▒▒▒▒▓██████████▓▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒ + ▒▒▒▒███▓▒▓▓▓▓▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒ + ▒▒▒▓████▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ + ▒▒▒▒░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ + ▒▒▒▒░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ + Thank you for using ZingoLabs Zaino! - Donate to us at https://free2z.cash/zingolabs. - - Submit any security conserns to us at zingodisclosure@proton.me. ****** Please note Zaino is currently in development and should not be used to run mainnet nodes. ****** "#; - println!("{}", welcome_message); + println!("{welcome_message}"); } diff --git a/zainod/src/lib.rs b/zainod/src/lib.rs index f7fa9bbbe..5b6bb54fb 100644 --- a/zainod/src/lib.rs +++ b/zainod/src/lib.rs @@ -3,6 +3,71 @@ #![warn(missing_docs)] #![forbid(unsafe_code)] +use std::path::PathBuf; + +use tracing::{error, info}; +use tracing_subscriber::EnvFilter; + +use crate::config::load_config; +use crate::error::IndexerError; +use crate::indexer::start_indexer; + +pub mod cli; pub mod config; pub mod error; pub mod indexer; + +/// Run the Zaino indexer. 
+/// +/// Initializes logging and runs the main indexer loop with restart support. +/// Returns an error if config loading or indexer startup fails. +pub async fn run(config_path: PathBuf) -> Result<(), IndexerError> { + init_logging(); + + info!("zainod v{}", env!("CARGO_PKG_VERSION")); + + let config = load_config(&config_path)?; + + loop { + match start_indexer(config.clone()).await { + Ok(joinhandle_result) => { + info!("Zaino Indexer started successfully."); + match joinhandle_result.await { + Ok(indexer_result) => match indexer_result { + Ok(()) => { + info!("Exiting Zaino successfully."); + return Ok(()); + } + Err(IndexerError::Restart) => { + error!("Zaino encountered critical error, restarting."); + continue; + } + Err(e) => { + error!("Exiting Zaino with error: {}", e); + return Err(e); + } + }, + Err(e) => { + error!("Zaino exited early with error: {}", e); + return Err(e.into()); + } + } + } + Err(e) => { + error!("Zaino failed to start with error: {}", e); + return Err(e); + } + } + } +} + +/// Initialize the tracing subscriber for logging. +fn init_logging() { + tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")), + ) + .with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339()) + .with_target(true) + .init(); +} diff --git a/zainod/src/main.rs b/zainod/src/main.rs index 24f2cac5c..843a1d346 100644 --- a/zainod/src/main.rs +++ b/zainod/src/main.rs @@ -1,24 +1,21 @@ -//! Zingo-Indexer daemon +//! Zaino Indexer daemon. 
use clap::Parser; -use std::path::PathBuf; -use zainodlib::{config::load_config, indexer::Indexer}; -#[derive(Parser, Debug)] -#[command(name = "zindexer", about = "A server for Zingo-Indexer")] -struct Args { - /// Path to the configuration file - #[arg(short, long, value_name = "FILE")] - config: Option, -} +use zainodlib::cli::{default_config_path, Cli, Command}; #[tokio::main] async fn main() { - Indexer::start(load_config( - &Args::parse() - .config - .unwrap_or_else(|| PathBuf::from("./zainod/zindexer.toml")), - )) - .await - .unwrap(); + let cli = Cli::parse(); + + match cli.command { + Command::Start { config } => { + let config_path = config.unwrap_or_else(default_config_path); + if let Err(e) = zainodlib::run(config_path).await { + eprintln!("Error: {}", e); + std::process::exit(1); + } + } + Command::GenerateConfig { output } => Command::generate_config(output), + } } diff --git a/zainod/zebrad.toml b/zainod/zebrad.toml index 2baf58fbf..cd94b0537 100644 --- a/zainod/zebrad.toml +++ b/zainod/zebrad.toml @@ -1,3 +1,101 @@ -network.network = "Testnet" -rpc.listen_addr = '127.0.0.1:18232' -rpc.enable_cookie_auth = false +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html +# +# CONFIGURATION SOURCES (in order of precedence, highest to lowest): +# +# 1. 
Environment variables with ZEBRA_ prefix (highest precedence) +# - Format: ZEBRA_SECTION__KEY (double underscore for nested keys) +# - Examples: +# - ZEBRA_NETWORK__NETWORK=Testnet +# - ZEBRA_RPC__LISTEN_ADDR=127.0.0.1:8232 +# - ZEBRA_STATE__CACHE_DIR=/path/to/cache +# - ZEBRA_TRACING__FILTER=debug +# - ZEBRA_METRICS__ENDPOINT_ADDR=0.0.0.0:9999 +# +# 2. Configuration file (TOML format) +# - At the path specified via -c flag, e.g. `zebrad -c myconfig.toml start`, or +# - At the default path in the user's preference directory (platform-dependent, see below) +# +# 3. Hard-coded defaults (lowest precedence) +# +# The user's preference directory and the default path to the `zebrad` config are platform dependent, +# based on `dirs::preference_dir`, see https://docs.rs/dirs/latest/dirs/fn.preference_dir.html : +# +# | Platform | Value | Example | +# | -------- | ------------------------------------- | ---------------------------------------------- | +# | Linux | `$XDG_CONFIG_HOME` or `$HOME/.config` | `/home/alice/.config/zebrad.toml` | +# | macOS | `$HOME/Library/Preferences` | `/Users/Alice/Library/Preferences/zebrad.toml` | +# | Windows | `{FOLDERID_RoamingAppData}` | `C:\Users\Alice\AppData\Local\zebrad.toml` | + +[consensus] +checkpoint_sync = true + +[health] +enforce_on_test_networks = false +min_connected_peers = 1 +ready_max_blocks_behind = 2 +ready_max_tip_age = "5m" + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +internal_miner = false + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", +] +listen_addr = "[::]:8233" +max_connections_per_ip = 1 +network = "Testnet" +peerset_initial_target_size = 25 + +[rpc] +debug_force_finished_sync = false +enable_cookie_auth = false +max_response_body_size = 
52428800 +parallel_cpu_threads = 0 +listen_addr = '127.0.0.1:18232' +indexer_listen_addr = '127.0.0.1:18230' + +[state] +delete_old_database = true +ephemeral = false +should_backup_non_finalized_state = true + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false diff --git a/zainod/zindexer.toml b/zainod/zindexer.toml deleted file mode 100644 index ba7b650bd..000000000 --- a/zainod/zindexer.toml +++ /dev/null @@ -1,28 +0,0 @@ -# Configuration for Zaino - -# Sets the TcpIngestor's status (true or false) -tcp_active = true - -# Optional TcpIngestors listen port (use None or specify a port number) -listen_port = 8137 - -# LightWalletD listen port [DEPRECATED] -lightwalletd_port = 9067 - -# Full node / validator listen port -zebrad_port = 18232 - -# Optional full node Username -node_user = "xxxxxx" - -# Optional full node Password -node_password = "xxxxxx" - -# Maximum requests allowed in the request queue -max_queue_size = 1024 - -# Maximum workers allowed in the worker pool -max_worker_pool_size = 64 - -# Minimum number of workers held in the worker pool when idle -idle_worker_pool_size = 4