diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 29c54c335a4d..c51267e140f9 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,8 +1,8 @@ blank_issues_enabled: true contact_links: - - name: Question + - name: Technical Questions url: https://github.com/libp2p/rust-libp2p/discussions/new?category=q-a - about: Please ask questions in the rust-libp2p GitHub Discussions forum. - - name: Libp2p Discourse Forum + about: Please ask technical questions in the rust-libp2p GitHub Discussions forum. + - name: Community-wide libp2p Discussion url: https://discuss.libp2p.io - about: Discussions and questions related to multiple libp2p implementations. + about: Discussions and questions about the libp2p community. diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 59915a71b6c0..6c43a31d2c11 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -9,10 +9,23 @@ updates: prefix: "deps" rebase-strategy: "disabled" groups: - trust-dns: + hickory-dns: patterns: - - "trust-dns-*" + - "hickory-*" - "async-std-resolver" + opentelemetry: + patterns: + - "opentelemetry*" + - "tracing-opentelemetry" + axum: + patterns: + - "axum" + - "tower" + - "tower-http" + webrtc: + patterns: + - "rcgen" + - "webrtc" - package-ecosystem: "github-actions" directory: "/" schedule: diff --git a/.github/workflows/cache-factory.yml b/.github/workflows/cache-factory.yml index f4ef3cc8591c..8c49b335f1b8 100644 --- a/.github/workflows/cache-factory.yml +++ b/.github/workflows/cache-factory.yml @@ -22,7 +22,7 @@ jobs: - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 with: shared-key: stable-cache diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 429511175165..90635ee53e76 100644 --- a/.github/workflows/ci.yml +++ 
b/.github/workflows/ci.yml @@ -33,12 +33,14 @@ jobs: CRATE: ${{ matrix.crate }} steps: - uses: actions/checkout@v4 + with: + fetch-depth: 0 - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 with: shared-key: stable-cache save-if: false @@ -50,7 +52,7 @@ jobs: run: cargo build --package "$CRATE" --no-default-features - name: Enforce no dependency on meta crate - if: env.CRATE != 'libp2p-server' + if: env.CRATE != 'libp2p-server' && env.CRATE != 'libp2p-perf' run: | cargo metadata --format-version=1 --no-deps | \ jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .dependencies | all(.name != "libp2p")' @@ -59,16 +61,48 @@ jobs: with: tool: tomlq + - name: Extract version from manifest + run: | + CRATE_VERSION=$(cargo metadata --format-version=1 --no-deps | jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .version') + + echo "CRATE_VERSION=$CRATE_VERSION" >> $GITHUB_ENV + - name: Enforce version in `workspace.dependencies` matches latest version if: env.CRATE != 'libp2p' run: | - PACKAGE_VERSION=$(cargo metadata --format-version=1 --no-deps | jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .version') SPECIFIED_VERSION=$(tomlq "workspace.dependencies.$CRATE.version" --file ./Cargo.toml) - echo "Package version: $PACKAGE_VERSION"; + echo "Package version: $CRATE_VERSION"; echo "Specified version: $SPECIFIED_VERSION"; - test "$PACKAGE_VERSION" = "$SPECIFIED_VERSION" + test "$CRATE_VERSION" = "$SPECIFIED_VERSION" || test "=$CRATE_VERSION" = "$SPECIFIED_VERSION" + + - name: Enforce version in CHANGELOG.md matches version in manifest + run: | + MANIFEST_PATH=$(cargo metadata --format-version=1 --no-deps | jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .manifest_path') + DIR_TO_CRATE=$(dirname "$MANIFEST_PATH") + 
VERSION_IN_CHANGELOG=$(awk -F' ' '/^## [0-9]+\.[0-9]+\.[0-9]+/{print $2; exit}' "$DIR_TO_CRATE/CHANGELOG.md") + + echo "Package version: $CRATE_VERSION"; + echo "Changelog version: $VERSION_IN_CHANGELOG"; + + test "$CRATE_VERSION" = "$VERSION_IN_CHANGELOG" + + - name: Ensure manifest and CHANGELOG are properly updated + if: > + github.event_name == 'pull_request' && + !startsWith(github.event.pull_request.title, 'chore') && + !startsWith(github.event.pull_request.title, 'refactor') && + !startsWith(github.event.pull_request.title, 'deps') && + !startsWith(github.event.pull_request.title, 'docs') && + !contains(github.event.pull_request.labels.*.name, 'internal-change') + run: | + git fetch origin master:master + git fetch origin ${{ github.event.pull_request.base.ref }}:${{ github.event.pull_request.base.ref }} + ./scripts/ensure-version-bump-and-changelog.sh + env: + HEAD_SHA: ${{ github.event.pull_request.head.sha }} + PR_BASE: ${{ github.event.pull_request.base.ref }} wasm_tests: name: Run all WASM tests @@ -124,7 +158,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 with: key: ${{ matrix.target }} save-if: ${{ github.ref == 'refs/heads/master' }} @@ -149,7 +183,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -170,7 +204,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 with: key: ${{ 
matrix.features }} save-if: ${{ github.ref == 'refs/heads/master' }} @@ -187,7 +221,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -200,9 +234,8 @@ jobs: fail-fast: false matrix: rust-version: [ - # 1.72.0, # current stable - # beta, - nightly-2023-09-10 + 1.75.0, # current stable + beta, ] steps: - uses: actions/checkout@v4 @@ -214,12 +247,11 @@ jobs: - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 with: save-if: ${{ github.ref == 'refs/heads/master' }} - - name: Run cargo clippy - run: cargo clippy + - run: cargo clippy --all-targets --all-features ipfs-integration-test: name: IPFS Integration tests @@ -231,7 +263,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -250,7 +282,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 with: shared-key: stable-cache save-if: false @@ -263,13 +295,31 @@ jobs: cargo check --manifest-path "$toml"; done + - uses: taiki-e/cache-cargo-install-action@v1 + with: + tool: wasm-pack@0.12.0 + + - name: Build webrtc-browser example + run: | + cd examples/browser-webrtc + wasm-pack build --target web 
--out-dir static + semver: runs-on: ubuntu-latest + env: + # Unset the global `RUSTFLAGS` env to allow warnings. + # cargo-semver-checks intentionally re-locks dependency versions + # before checking, and we shouldn't fail here if a dep has a warning. + # + # More context: + # https://github.com/libp2p/rust-libp2p/pull/4932#issuecomment-1829014527 + # https://github.com/obi1kenobi/cargo-semver-checks/issues/589 + RUSTFLAGS: '' steps: - uses: actions/checkout@v4 - - run: wget -q -O- https://github.com/obi1kenobi/cargo-semver-checks/releases/download/v0.24.0/cargo-semver-checks-x86_64-unknown-linux-gnu.tar.gz | tar -xz -C ~/.cargo/bin + - run: wget -q -O- https://github.com/obi1kenobi/cargo-semver-checks/releases/download/v0.27.0/cargo-semver-checks-x86_64-unknown-linux-gnu.tar.gz | tar -xz -C ~/.cargo/bin shell: bash - - uses: obi1kenobi/cargo-semver-checks-action@e275dda72e250d4df5b564e969e1348d67fefa52 # v2 + - uses: obi1kenobi/cargo-semver-checks-action@48f4ef7da6d907d69d18249e0ba79aa98c61b9db # v2 rustfmt: runs-on: ubuntu-latest @@ -299,8 +349,6 @@ jobs: ALL_FEATURES=$(cargo metadata --format-version=1 --no-deps | jq -r '.packages[] | select(.name == "libp2p") | .features | keys | map(select(. 
!= "full")) | sort | join(" ")') FULL_FEATURE=$(cargo metadata --format-version=1 --no-deps | jq -r '.packages[] | select(.name == "libp2p") | .features["full"] | sort | join(" ")') - test "$ALL_FEATURES = $FULL_FEATURE" - echo "$ALL_FEATURES"; echo "$FULL_FEATURE"; @@ -326,12 +374,12 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 - run: cargo install --version 0.10.0 pb-rs --locked - name: Glob match - uses: tj-actions/glob@v17 + uses: tj-actions/glob@v20 id: glob with: files: | @@ -352,7 +400,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 - run: cargo metadata --locked --format-version=1 > /dev/null cargo-deny: diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 3b39ef7e1d61..b9cd82897c24 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -34,6 +34,11 @@ jobs: with: context: . file: ./misc/server/Dockerfile - push: ${{ ! github.event.pull_request.head.repo.fork }} # Only push image if we have the required permissions, i.e. not running from a fork + push: ${{ ! github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }} # Only push image if we have the required permissions, i.e. not running from a fork + cache-from: ${{ ! github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' && type=s3,mode=max,bucket=libp2p-by-tf-aws-bootstrap,region=us-east-1,prefix=buildCache,name=rust-libp2p-server }} + cache-to: ${{ ! 
github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' && type=s3,mode=max,bucket=libp2p-by-tf-aws-bootstrap,region=us-east-1,prefix=buildCache,name=rust-libp2p-server }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} + env: + AWS_ACCESS_KEY_ID: ${{ vars.TEST_PLANS_BUILD_CACHE_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TEST_PLANS_BUILD_CACHE_KEY }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index bffcc60d2eaa..b2a761fd8c13 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -24,7 +24,7 @@ jobs: echo "" > target/doc/index.html cp -r target/doc/* ./host-docs - name: Upload documentation - uses: actions/upload-pages-artifact@v2.0.0 + uses: actions/upload-pages-artifact@v3.0.1 with: path: "host-docs/" @@ -42,5 +42,5 @@ jobs: steps: - name: Deploy to GitHub Pages id: deployment - uses: actions/deploy-pages@v2 + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/interop-test.yml b/.github/workflows/interop-test.yml index 8fa00f0a8f60..f39508970893 100644 --- a/.github/workflows/interop-test.yml +++ b/.github/workflows/interop-test.yml @@ -10,8 +10,8 @@ concurrency: cancel-in-progress: true jobs: - run-multidim-interop: - name: Run multidimensional interoperability tests + run-transport-interop: + name: Run transport interoperability tests runs-on: ${{ fromJSON(github.repository == 'libp2p/rust-libp2p' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }} strategy: matrix: @@ -29,7 +29,7 @@ jobs: FLAVOUR: ${{ matrix.flavour }} - name: Run ${{ matrix.flavour }} tests - uses: libp2p/test-plans/.github/actions/run-interop-ping-test@master + uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master with: test-filter: ${{ matrix.flavour }}-rust-libp2p-head extra-versions: ${{ github.workspace }}/interop-tests/${{ matrix.flavour }}-ping-version.json @@ -37,3 +37,20 @@ jobs: s3-access-key-id: ${{ 
vars.TEST_PLANS_BUILD_CACHE_KEY_ID }} s3-secret-access-key: ${{ secrets.TEST_PLANS_BUILD_CACHE_KEY }} worker-count: 16 + run-holepunching-interop: + name: Run hole-punch interoperability tests + runs-on: ${{ fromJSON(github.repository == 'libp2p/rust-libp2p' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }} + steps: + - uses: actions/checkout@v4 + - uses: docker/setup-buildx-action@v3 + - name: Build image + run: docker buildx build --load -t rust-libp2p-head . -f hole-punching-tests/Dockerfile + - name: Run tests + uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master + with: + test-filter: rust-libp2p-head + extra-versions: ${{ github.workspace }}/hole-punching-tests/version.json + s3-cache-bucket: libp2p-by-tf-aws-bootstrap + s3-access-key-id: ${{ vars.TEST_PLANS_BUILD_CACHE_KEY_ID }} + s3-secret-access-key: ${{ secrets.TEST_PLANS_BUILD_CACHE_KEY }} + worker-count: 16 diff --git a/CHANGELOG.md b/CHANGELOG.md index f6b32c35f979..3c3466708c9f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,7 +24,6 @@ ## Transport Protocols & Upgrades -- [`libp2p-deflate` CHANGELOG](transports/deflate/CHANGELOG.md) - [`libp2p-dns` CHANGELOG](transports/dns/CHANGELOG.md) - [`libp2p-noise` CHANGELOG](transports/noise/CHANGELOG.md) - [`libp2p-perf` CHANGELOG](transports/perf/CHANGELOG.md) @@ -32,10 +31,11 @@ - [`libp2p-pnet` CHANGELOG](transports/pnet/CHANGELOG.md) - [`libp2p-quic` CHANGELOG](transports/quic/CHANGELOG.md) - [`libp2p-tcp` CHANGELOG](transports/tcp/CHANGELOG.md) +- [`libp2p-tls` CHANGELOG](transports/tls/CHANGELOG.md) - [`libp2p-uds` CHANGELOG](transports/uds/CHANGELOG.md) - [`libp2p-wasm-ext` CHANGELOG](transports/wasm-ext/CHANGELOG.md) - [`libp2p-websocket` CHANGELOG](transports/websocket/CHANGELOG.md) -- [`libp2p-tls` CHANGELOG](transports/tls/CHANGELOG.md) +- [`libp2p-websocket-websys` CHANGELOG](transports/websocket-websys/CHANGELOG.md) ## Multiplexers diff --git a/Cargo.lock b/Cargo.lock index 
7a913d157a54..92929a8bae1a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,15 +17,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "aead" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" -dependencies = [ - "generic-array", -] - [[package]] name = "aead" version = "0.5.2" @@ -36,18 +27,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "aes" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - "cfg-if", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug", -] - [[package]] name = "aes" version = "0.8.3" @@ -55,47 +34,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" dependencies = [ "cfg-if", - "cipher 0.4.4", + "cipher", "cpufeatures", ] [[package]] name = "aes-gcm" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" -dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "cipher 0.3.0", - "ctr 0.7.0", - "ghash 0.4.4", - "subtle", -] - -[[package]] -name = "aes-gcm" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209b47e8954a928e1d72e86eca7000ebb6655fe1436d33eefc2201cad027e237" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ - "aead 0.5.2", - "aes 0.8.3", - "cipher 0.4.4", - "ctr 0.9.2", - "ghash 0.5.0", + "aead", + "aes", + "cipher", + "ctr", + "ghash", "subtle", ] [[package]] name = "ahash" -version = "0.8.3" +version = "0.8.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if", "once_cell", "version_check", + "zerocopy", ] [[package]] @@ -130,16 +96,15 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.3.2" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" +checksum = "4cd2405b3ac1faab2990b74d728624cd9fd115651fcecc7c2d8daf01376275ba" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", - "is-terminal", "utf8parse", ] @@ -164,24 +129,24 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "anstyle-wincon" -version = "1.0.1" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" +checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" dependencies = [ "anstyle", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" [[package]] name = "arbitrary" @@ -269,7 +234,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", - "event-listener", + "event-listener 2.5.3", 
"futures-core", ] @@ -279,11 +244,11 @@ version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" dependencies = [ - "async-lock", + "async-lock 2.7.0", "async-task", "concurrent-queue", "fastrand 1.9.0", - "futures-lite", + "futures-lite 1.13.0", "slab", ] @@ -293,10 +258,10 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" dependencies = [ - "async-lock", + "async-lock 2.7.0", "autocfg", "blocking", - "futures-lite", + "futures-lite 1.13.0", ] [[package]] @@ -307,10 +272,10 @@ checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" dependencies = [ "async-channel", "async-executor", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.7.0", "blocking", - "futures-lite", + "futures-lite 1.13.0", "once_cell", ] @@ -320,27 +285,57 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ - "async-lock", + "async-lock 2.7.0", "autocfg", "cfg-if", "concurrent-queue", - "futures-lite", + "futures-lite 1.13.0", "log", "parking", - "polling", - "rustix 0.37.23", + "polling 2.8.0", + "rustix 0.37.25", "slab", "socket2 0.4.9", "waker-fn", ] +[[package]] +name = "async-io" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f97ab0c5b00a7cdbe5a371b9a782ee7be1316095885c8a4ea1daf490eb0ef65" +dependencies = [ + "async-lock 3.1.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.0.1", + "parking", + "polling 3.3.0", + "rustix 0.38.31", + "slab", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "async-lock" version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ - "event-listener", + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "deb2ab2aa8a746e221ab826c73f48bc6ba41be6763f0855cb249eb6d154cf1d7" +dependencies = [ + "event-listener 3.1.0", + "event-listener-strategy", + "pin-project-lite", ] [[package]] @@ -349,10 +344,10 @@ version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4051e67316bc7eff608fe723df5d32ed639946adcd69e07df41fd42a7b411f1f" dependencies = [ - "async-io", + "async-io 1.13.0", "autocfg", "blocking", - "futures-lite", + "futures-lite 1.13.0", ] [[package]] @@ -361,16 +356,16 @@ version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9" dependencies = [ - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.7.0", "autocfg", "blocking", "cfg-if", - "event-listener", - "futures-lite", - "rustix 0.37.23", + "event-listener 2.5.3", + "futures-lite 1.13.0", + "rustix 0.37.25", "signal-hook", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -382,20 +377,20 @@ dependencies = [ "async-attributes", "async-channel", "async-global-executor", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.7.0", "async-process", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite", + "futures-lite 1.13.0", "gloo-timers", "kv-log-macro", "log", "memchr", "once_cell", - "pin-project-lite 0.2.12", + "pin-project-lite", "pin-utils", "slab", "wasm-bindgen-futures", @@ -403,17 +398,17 @@ dependencies = [ [[package]] name = "async-std-resolver" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0354a68a52265a3bde76005ddd2726624ef8624614f7f58871301de205a58a59" +checksum = 
"3c0ed2b6671c13d2c28756c5a64e04759c1e0b5d3d7ac031f521c3561e21fbcb" dependencies = [ "async-std", "async-trait", "futures-io", "futures-util", + "hickory-resolver", "pin-utils", - "socket2 0.5.4", - "trust-dns-resolver", + "socket2 0.5.5", ] [[package]] @@ -424,26 +419,26 @@ checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-trait" -version = "0.1.73" +version = "0.1.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] [[package]] name = "asynchronous-codec" -version = "0.6.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568" +checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" dependencies = [ "bytes", "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -458,7 +453,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" dependencies = [ - "http", + "http 0.2.9", "log", "url", ] @@ -474,10 +469,11 @@ name = "autonat-example" version = "0.1.0" dependencies = [ "clap", - "env_logger 0.10.0", "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -487,19 +483,48 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", - "axum-core", + "axum-core 0.3.4", "bitflags 1.3.2", "bytes", "futures-util", - "http", - "http-body", - "hyper", + "http 0.2.9", + "http-body 0.4.5", + "hyper 0.14.27", + "itoa", + "matchit", + "memchr", + 
"mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1236b4b292f6c4d6dc34604bb5120d85c3fe1d1aa596bd5cc52ca054d13e7b9e" +dependencies = [ + "async-trait", + "axum-core 0.4.3", + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.1.0", + "hyper-util", "itoa", "matchit", "memchr", "mime", "percent-encoding", - "pin-project-lite 0.2.12", + "pin-project-lite", "rustversion", "serde", "serde_json", @@ -510,6 +535,7 @@ dependencies = [ "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -521,12 +547,33 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http", - "http-body", + "http 0.2.9", + "http-body 0.4.5", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", "mime", + "pin-project-lite", "rustversion", + "sync_wrapper", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -564,9 +611,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.4" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64ct" @@ -576,9 +623,9 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "basic-toml" -version = "0.1.4" +version = 
"0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bfc506e7a2370ec239e1d072507b2a80c833083699d3c6fa176fbb4de8448c6" +checksum = "2db21524cad41c5591204d22d75e1970a2d1f71060214ca931dc7d5afe2c14e5" dependencies = [ "serde", ] @@ -606,9 +653,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "blake2" @@ -653,11 +700,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" dependencies = [ "async-channel", - "async-lock", + "async-lock 2.7.0", "async-task", "atomic-waker", "fastrand 1.9.0", - "futures-lite", + "futures-lite 1.13.0", "log", ] @@ -666,14 +713,12 @@ name = "browser-webrtc-example" version = "0.1.0" dependencies = [ "anyhow", - "axum", - "env_logger 0.10.0", + "axum 0.7.4", "futures", "js-sys", "libp2p", "libp2p-webrtc", "libp2p-webrtc-websys", - "log", "mime_guess", "rand 0.8.5", "rust-embed", @@ -681,9 +726,11 @@ dependencies = [ "tokio-util", "tower", "tower-http", + "tracing", + "tracing-subscriber", + "tracing-wasm", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-logger", "web-sys", ] @@ -714,9 +761,9 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" @@ -739,23 +786,26 @@ version = "0.1.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] name = "cbor4ii" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e8c816014cad3f58c2f0607677e8d2c6f76754dd8e735461a440b27b95199c" +checksum = "59b4c883b9cc4757b061600d39001d4d0232bece4a3174696cf8f58a14db107d" dependencies = [ "serde", ] [[package]] name = "cc" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] [[package]] name = "ccm" @@ -763,9 +813,9 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ae3c82e4355234767756212c570e29833699ab63e6ffd161887314cc5b43847" dependencies = [ - "aead 0.5.2", - "cipher 0.4.4", - "ctr 0.9.2", + "aead", + "cipher", + "ctr", "subtle", ] @@ -777,25 +827,24 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chacha20" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ "cfg-if", - "cipher 0.3.0", + "cipher", "cpufeatures", - "zeroize", ] [[package]] name = "chacha20poly1305" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ - "aead 0.4.3", + "aead", "chacha20", - "cipher 0.3.0", + "cipher", 
"poly1305", "zeroize", ] @@ -805,10 +854,11 @@ name = "chat-example" version = "0.1.0" dependencies = [ "async-trait", - "env_logger 0.10.0", "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -838,15 +888,6 @@ dependencies = [ "half", ] -[[package]] -name = "cipher" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" -dependencies = [ - "generic-array", -] - [[package]] name = "cipher" version = "0.4.4" @@ -855,24 +896,24 @@ checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", + "zeroize", ] [[package]] name = "clap" -version = "4.3.23" +version = "4.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03aef18ddf7d879c15ce20f04826ef8418101c7e528014c3eeea13321047dca3" +checksum = "58e54881c004cec7895b0068a0a954cd5d62da01aef83fa35b1e594497bf5445" dependencies = [ "clap_builder", "clap_derive", - "once_cell", ] [[package]] name = "clap_builder" -version = "4.3.23" +version = "4.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ce6fffb678c9b80a70b6b6de0aad31df727623a70fd9a842c30cd573e2fa98" +checksum = "59cb82d7f531603d2fd1f507441cdd35184fa81beff7bd489570de7f773460bb" dependencies = [ "anstream", "anstyle", @@ -882,21 +923,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.3.12" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" +checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] [[package]] name = "clap_lex" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" +checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" [[package]] name = "colorchoice" @@ -913,7 +954,7 @@ dependencies = [ "bytes", "futures-core", "memchr", - "pin-project-lite 0.2.12", + "pin-project-lite", "tokio", "tokio-util", ] @@ -1003,15 +1044,6 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" -[[package]] -name = "crc32fast" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" -dependencies = [ - "cfg-if", -] - [[package]] name = "criterion" version = "0.5.1" @@ -1130,22 +1162,13 @@ dependencies = [ "subtle", ] -[[package]] -name = "ctr" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481" -dependencies = [ - "cipher 0.3.0", -] - [[package]] name = "ctr" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] @@ -1161,22 +1184,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "4.1.1" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" dependencies = [ "cfg-if", 
"cpufeatures", @@ -1197,14 +1207,14 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" [[package]] name = "data-encoding-macro" @@ -1231,11 +1241,13 @@ name = "dcutr-example" version = "0.1.0" dependencies = [ "clap", - "env_logger 0.10.0", "futures", "futures-timer", "libp2p", "log", + "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -1312,7 +1324,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] [[package]] @@ -1321,9 +1333,10 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "env_logger 0.10.0", "futures", "libp2p", + "tracing", + "tracing-subscriber", ] [[package]] @@ -1358,15 +1371,16 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ - "curve25519-dalek 4.1.1", + "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", "sha2 0.10.8", + "subtle", "zeroize", ] @@ -1415,7 +1429,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] [[package]] @@ -1430,9 +1444,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.10.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" +checksum = 
"4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" dependencies = [ "humantime", "is-terminal", @@ -1449,30 +1463,40 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.1" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ - "errno-dragonfly", "libc", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] -name = "errno-dragonfly" -version = "0.1.2" +name = "event-listener" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d93877bcde0eb80ca09131a08d23f0a5c18a620b01db137dba666d18cd9b30c2" dependencies = [ - "cc", - "libc", + "concurrent-queue", + "parking", + "pin-project-lite", ] [[package]] -name = "event-listener" -version = "2.5.3" +name = "event-listener-strategy" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +checksum = "d96b852f1345da36d551b9473fa1e2b1eb5c5195585c6c018118bc92a8d91160" +dependencies = [ + "event-listener 3.1.0", + "pin-project-lite", +] [[package]] name = "fantoccini" @@ -1484,8 +1508,8 @@ dependencies = [ "cookie", "futures-core", "futures-util", - "http", - "hyper", + "http 0.2.9", + "hyper 0.14.27", "hyper-rustls", "mime", "serde", @@ -1507,9 +1531,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "ff" @@ -1534,23 +1558,14 @@ dependencies = [ "async-std", "clap", "either", - "env_logger 0.10.0", "futures", "libp2p", "serde", + "tracing", + "tracing-subscriber", "void", ] -[[package]] -name = "flate2" -version = "1.0.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - [[package]] name = "fnv" version = "1.0.7" @@ -1574,18 +1589,18 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] [[package]] name = "futures" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -1598,8 +1613,9 @@ dependencies = [ [[package]] name = "futures-bounded" -version = "0.1.0" +version = "0.2.3" dependencies = [ + "futures", "futures-timer", "futures-util", "tokio", @@ -1607,9 +1623,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ 
-1617,15 +1633,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -1635,9 +1651,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" @@ -1650,19 +1666,29 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.12", + "pin-project-lite", "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3831c2651acb5177cbd83943f3d9c8912c5ad03c76afcc0e9511ba568ec5ebb" +dependencies = [ + "futures-core", + "pin-project-lite", +] + [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] [[package]] @@ -1672,20 +1698,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.21.7", + "rustls 0.21.9", ] [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-ticker" @@ -1710,9 +1736,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -1721,7 +1747,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.12", + "pin-project-lite", "pin-utils", "slab", ] @@ -1762,9 +1788,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if", "js-sys", @@ -1773,16 +1799,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "ghash" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" -dependencies = [ - "opaque-debug", - "polyval 0.5.3", -] - 
[[package]] name = "ghash" version = "0.5.0" @@ -1790,7 +1806,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" dependencies = [ "opaque-debug", - "polyval 0.6.1", + "polyval", ] [[package]] @@ -1843,17 +1859,36 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.20" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" +checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", - "http", - "indexmap 1.9.3", + "http 0.2.9", + "indexmap 2.2.1", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d308f63daf4181410c242d34c11f928dcb3aa105852019e043c9d1f4e4368a" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 1.0.0", + "indexmap 2.2.1", "slab", "tokio", "tokio-util", @@ -1874,9 +1909,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash", "allocator-api2", @@ -1913,21 +1948,67 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] -name = "hkdf" -version = "0.12.3" +name = "hickory-proto" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +checksum = "091a6fbccf4860009355e3efc52ff4acf37a63489aad7435372d44ceeb6fbbcf" dependencies = [ - "hmac 0.12.1", + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.4.0", + "ipnet", + "once_cell", + "rand 0.8.5", + "socket2 0.5.5", + "thiserror", + "tinyvec", + "tokio", + "tracing", + "url", ] [[package]] -name = "hmac" -version = "0.8.1" +name = "hickory-resolver" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +checksum = "35b8f021164e6a984c9030023544c57789c51760065cd510572fedcfb04164e8" dependencies = [ - "crypto-mac", + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot", + "rand 0.8.5", + "resolv-conf", + "smallvec", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "hkdf" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" +dependencies = [ + "hmac 0.12.1", +] + +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac", "digest 0.9.0", ] @@ -1951,6 +2032,22 @@ dependencies = [ "hmac 0.8.1", ] +[[package]] +name = "hole-punching-tests" +version = "0.1.0" +dependencies = [ + "anyhow", + "either", + "env_logger 0.10.2", + "futures", + "libp2p", + "redis", + "serde", + "serde_json", + "tokio", + "tracing", +] + [[package]] name = "hostname" version = "0.3.1" @@ -1973,6 +2070,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.4.5" @@ -1980,15 +2088,38 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", - "http", - "pin-project-lite 0.2.12", + "http 0.2.9", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http 1.0.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +dependencies = [ + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "pin-project-lite", ] [[package]] name = "http-range-header" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" +checksum = "3ce4ef31cda248bbdb6e6820603b82dfcd9e833db65a43e997a0ccec777d11fe" [[package]] name = "httparse" @@ -2018,13 +2149,13 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.3.24", + "http 0.2.9", + "http-body 0.4.5", "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.12", + "pin-project-lite", "socket2 0.4.9", "tokio", "tower-service", @@ -2032,19 +2163,50 @@ dependencies = [ "want", ] +[[package]] +name = "hyper" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.0", + "http 1.0.0", + "http-body 1.0.0", + 
"httparse", + "httpdate", + "itoa", + "pin-project-lite", + "tokio", +] + [[package]] name = "hyper-rustls" version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ - "http", - "hyper", + "http 0.2.9", + "hyper 0.14.27", "log", "rustls 0.20.8", "rustls-native-certs", "tokio", - "tokio-rustls 0.23.4", + "tokio-rustls", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper 0.14.27", + "pin-project-lite", + "tokio", + "tokio-io-timeout", ] [[package]] @@ -2054,21 +2216,40 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper", + "hyper 0.14.27", "native-tls", "tokio", "tokio-native-tls", ] +[[package]] +name = "hyper-util" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdea9aac0dbe5a9240d68cfd9501e2db94222c6dc06843e06640b9e07f0fdc67" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "hyper 1.1.0", + "pin-project-lite", + "socket2 0.5.5", + "tokio", + "tracing", +] + [[package]] name = "identify-example" version = "0.1.0" dependencies = [ "async-std", "async-trait", - "env_logger 0.10.0", "futures", "libp2p", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2081,23 +2262,33 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "if-addrs" -version = "0.7.0" +version = 
"0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc0fa01ffc752e9dbc72818cdb072cd028b86be5e09dd04c5a643704fe101a9" +checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a" dependencies = [ "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] name = "if-watch" -version = "3.0.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9465340214b296cd17a0009acdb890d6160010b8adf8f78a00d0d7ab270f79f" +checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" dependencies = [ - "async-io", + "async-io 2.3.1", "core-foundation", "fnv", "futures", @@ -2113,16 +2304,16 @@ dependencies = [ [[package]] name = "igd-next" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e065e90a518ab5fedf79aa1e4b784e10f8e484a834f6bda85c42633a2cb7af" +checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" dependencies = [ "async-trait", "attohttpc", "bytes", "futures", - "http", - "hyper", + "http 0.2.9", + "hyper 0.14.27", "log", "rand 0.8.5", "tokio", @@ -2142,12 +2333,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "433de089bd45971eecf4668ee0ee8f4cec17db4f8bd8f7bc3197a6ce37aa7d9b" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.3", ] [[package]] @@ -2196,18 +2387,18 @@ name = "interop-tests" version = "0.1.0" dependencies = [ "anyhow", - "axum", + "axum 0.7.4", "console_error_panic_hook", "either", - "env_logger 0.10.0", "futures", "futures-timer", "instant", "libp2p", "libp2p-mplex", + "libp2p-noise", + "libp2p-tls", "libp2p-webrtc", "libp2p-webrtc-websys", - "log", "mime_guess", "rand 0.8.5", "redis", @@ -2233,7 +2424,7 @@ checksum = 
"eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ "hermit-abi", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2242,9 +2433,9 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.4", + "socket2 0.5.5", "widestring", - "windows-sys", + "windows-sys 0.48.0", "winreg", ] @@ -2255,10 +2446,12 @@ dependencies = [ "anyhow", "async-trait", "clap", - "env_logger 0.10.0", + "env_logger 0.10.2", "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2267,10 +2460,11 @@ version = "0.1.0" dependencies = [ "async-trait", "either", - "env_logger 0.10.0", "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2286,8 +2480,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.4", - "windows-sys", + "rustix 0.38.31", + "windows-sys 0.48.0", ] [[package]] @@ -2307,9 +2501,9 @@ checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" dependencies = [ "wasm-bindgen", ] @@ -2327,7 +2521,7 @@ dependencies = [ name = "keygen" version = "0.1.0" dependencies = [ - "base64 0.21.4", + "base64 0.21.7", "clap", "libp2p-core", "libp2p-identity", @@ -2353,30 +2547,28 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.149" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libp2p" -version = "0.52.3" +version = "0.53.2" dependencies = [ "async-std", "async-trait", "bytes", "clap", "either", - "env_logger 0.10.0", "futures", "futures-timer", - "getrandom 0.2.10", + "getrandom 0.2.12", "instant", "libp2p-allow-block-list", "libp2p-autonat", "libp2p-connection-limits", "libp2p-core", "libp2p-dcutr", - "libp2p-deflate", "libp2p-dns", "libp2p-floodsub", "libp2p-gossipsub", @@ -2386,6 +2578,7 @@ dependencies = [ "libp2p-mdns", "libp2p-memory-connection-limits", "libp2p-metrics", + "libp2p-mplex", "libp2p-noise", "libp2p-ping", "libp2p-plaintext", @@ -2399,18 +2592,21 @@ dependencies = [ "libp2p-tls", "libp2p-uds", "libp2p-upnp", - "libp2p-wasm-ext", "libp2p-websocket", + "libp2p-websocket-websys", "libp2p-webtransport-websys", "libp2p-yamux", "multiaddr", "pin-project", + "rw-stream-sink", + "thiserror", "tokio", + "tracing-subscriber", ] [[package]] name = "libp2p-allow-block-list" -version = "0.2.0" +version = "0.3.0" dependencies = [ "async-std", "libp2p-core", @@ -2423,11 +2619,11 @@ dependencies = [ [[package]] name = "libp2p-autonat" -version = "0.11.0" +version = "0.12.0" dependencies = [ "async-std", "async-trait", - "env_logger 0.10.0", + "asynchronous-codec", "futures", "futures-timer", "instant", @@ -2436,14 +2632,16 @@ dependencies = [ "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-test", - "log", "quick-protobuf", + "quick-protobuf-codec", "rand 0.8.5", + "tracing", + "tracing-subscriber", ] [[package]] name = "libp2p-connection-limits" -version = "0.2.1" +version = "0.3.1" dependencies = [ "async-std", "libp2p-core", @@ -2460,7 +2658,7 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.40.1" +version = "0.41.2" dependencies = [ "async-std", "either", @@ -2471,7 +2669,6 @@ dependencies = [ "libp2p-identity", "libp2p-mplex", 
"libp2p-noise", - "log", "multiaddr", "multihash", "multistream-select", @@ -2485,20 +2682,21 @@ dependencies = [ "serde", "smallvec", "thiserror", - "unsigned-varint", + "tracing", + "unsigned-varint 0.8.0", "void", ] [[package]] name = "libp2p-dcutr" -version = "0.10.0" +version = "0.11.0" dependencies = [ "async-std", "asynchronous-codec", "clap", "either", - "env_logger 0.10.0", "futures", + "futures-bounded", "futures-timer", "instant", "libp2p-core", @@ -2513,80 +2711,68 @@ dependencies = [ "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", - "log", + "lru", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", "thiserror", + "tracing", + "tracing-subscriber", "void", ] -[[package]] -name = "libp2p-deflate" -version = "0.40.1" -dependencies = [ - "async-std", - "flate2", - "futures", - "futures_ringbuf", - "libp2p-core", - "libp2p-tcp", - "quickcheck-ext", - "rand 0.8.5", -] - [[package]] name = "libp2p-dns" -version = "0.40.1" +version = "0.41.1" dependencies = [ "async-std", "async-std-resolver", "async-trait", - "env_logger 0.10.0", "futures", + "hickory-resolver", "libp2p-core", "libp2p-identity", - "log", "parking_lot", "smallvec", "tokio", - "trust-dns-resolver", + "tracing", + "tracing-subscriber", ] [[package]] name = "libp2p-floodsub" -version = "0.43.0" +version = "0.44.0" dependencies = [ "asynchronous-codec", + "bytes", "cuckoofilter", "fnv", "futures", "libp2p-core", "libp2p-identity", "libp2p-swarm", - "log", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", "smallvec", "thiserror", + "tracing", ] [[package]] name = "libp2p-gossipsub" -version = "0.45.1" +version = "0.46.1" dependencies = [ "async-std", "asynchronous-codec", - "base64 0.21.4", + "base64 0.21.7", "byteorder", "bytes", "either", - "env_logger 0.10.0", "fnv", "futures", "futures-ticker", - "getrandom 0.2.10", + "getrandom 0.2.12", "hex", "hex_fmt", "instant", @@ -2596,7 +2782,6 @@ dependencies = [ "libp2p-swarm", "libp2p-swarm-test", "libp2p-yamux", - "log", 
"prometheus-client", "quick-protobuf", "quick-protobuf-codec", @@ -2606,18 +2791,18 @@ dependencies = [ "serde", "sha2 0.10.8", "smallvec", - "unsigned-varint", + "tracing", + "tracing-subscriber", "void", ] [[package]] name = "libp2p-identify" -version = "0.43.1" +version = "0.44.2" dependencies = [ "async-std", "asynchronous-codec", "either", - "env_logger 0.10.0", "futures", "futures-bounded", "futures-timer", @@ -2625,56 +2810,57 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "libp2p-swarm-test", - "log", "lru", "quick-protobuf", "quick-protobuf-codec", "smallvec", "thiserror", + "tracing", + "tracing-subscriber", "void", ] [[package]] name = "libp2p-identity" -version = "0.2.5" +version = "0.2.8" dependencies = [ "asn1_der", - "base64 0.21.4", + "base64 0.21.7", "bs58", "criterion", "ed25519-dalek", "hex-literal", "hkdf", "libsecp256k1", - "log", "multihash", "p256", "quick-protobuf", "quickcheck-ext", "rand 0.8.5", - "ring", + "ring 0.17.5", "rmp-serde", "sec1", "serde", "serde_json", "sha2 0.10.8", "thiserror", + "tracing", "void", "zeroize", ] [[package]] name = "libp2p-kad" -version = "0.44.6" +version = "0.45.4" dependencies = [ "arrayvec", "async-std", "asynchronous-codec", "bytes", "either", - "env_logger 0.10.0", "fnv", "futures", + "futures-bounded", "futures-timer", "instant", "libp2p-core", @@ -2684,7 +2870,6 @@ dependencies = [ "libp2p-swarm", "libp2p-swarm-test", "libp2p-yamux", - "log", "quick-protobuf", "quick-protobuf-codec", "quickcheck-ext", @@ -2693,20 +2878,21 @@ dependencies = [ "sha2 0.10.8", "smallvec", "thiserror", + "tracing", + "tracing-subscriber", "uint", - "unsigned-varint", "void", ] [[package]] name = "libp2p-mdns" -version = "0.44.0" +version = "0.45.1" dependencies = [ - "async-io", + "async-io 2.3.1", "async-std", "data-encoding", - "env_logger 0.10.0", "futures", + "hickory-proto", "if-watch", "libp2p-core", "libp2p-identity", @@ -2715,18 +2901,18 @@ dependencies = [ "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", 
- "log", "rand 0.8.5", "smallvec", - "socket2 0.5.4", + "socket2 0.5.5", "tokio", - "trust-dns-proto", + "tracing", + "tracing-subscriber", "void", ] [[package]] name = "libp2p-memory-connection-limits" -version = "0.1.0" +version = "0.2.0" dependencies = [ "async-std", "libp2p-core", @@ -2735,17 +2921,18 @@ dependencies = [ "libp2p-swarm", "libp2p-swarm-derive", "libp2p-swarm-test", - "log", "memory-stats", "rand 0.8.5", "sysinfo", + "tracing", "void", ] [[package]] name = "libp2p-metrics" -version = "0.13.1" +version = "0.14.1" dependencies = [ + "futures", "instant", "libp2p-core", "libp2p-dcutr", @@ -2756,32 +2943,32 @@ dependencies = [ "libp2p-ping", "libp2p-relay", "libp2p-swarm", - "once_cell", + "pin-project", "prometheus-client", ] [[package]] name = "libp2p-mplex" -version = "0.40.0" +version = "0.41.0" dependencies = [ "async-std", "asynchronous-codec", "bytes", "criterion", - "env_logger 0.10.0", "futures", "libp2p-core", "libp2p-identity", "libp2p-muxer-test-harness", "libp2p-plaintext", "libp2p-tcp", - "log", "nohash-hasher", "parking_lot", "quickcheck-ext", "rand 0.8.5", "smallvec", - "unsigned-varint", + "tracing", + "tracing-subscriber", + "unsigned-varint 0.8.0", ] [[package]] @@ -2792,21 +2979,20 @@ dependencies = [ "futures-timer", "futures_ringbuf", "libp2p-core", - "log", + "tracing", ] [[package]] name = "libp2p-noise" -version = "0.43.1" +version = "0.44.0" dependencies = [ + "asynchronous-codec", "bytes", - "curve25519-dalek 4.1.1", - "env_logger 0.10.0", + "curve25519-dalek", "futures", "futures_ringbuf", "libp2p-core", "libp2p-identity", - "log", "multiaddr", "multihash", "once_cell", @@ -2817,46 +3003,48 @@ dependencies = [ "snow", "static_assertions", "thiserror", - "x25519-dalek 1.1.1", + "tracing", + "tracing-subscriber", + "x25519-dalek", "zeroize", ] [[package]] name = "libp2p-perf" -version = "0.2.0" +version = "0.3.0" dependencies = [ "anyhow", - "async-trait", "clap", - "env_logger 0.10.0", "futures", + "futures-bounded", + 
"futures-timer", "instant", + "libp2p", "libp2p-core", "libp2p-dns", "libp2p-identity", "libp2p-quic", - "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-test", "libp2p-tcp", "libp2p-tls", "libp2p-yamux", - "log", "rand 0.8.5", "serde", "serde_json", "thiserror", "tokio", + "tracing", + "tracing-subscriber", "void", ] [[package]] name = "libp2p-ping" -version = "0.43.1" +version = "0.44.0" dependencies = [ "async-std", "either", - "env_logger 0.10.0", "futures", "futures-timer", "instant", @@ -2864,33 +3052,34 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "libp2p-swarm-test", - "log", "quickcheck-ext", "rand 0.8.5", + "tracing", + "tracing-subscriber", "void", ] [[package]] name = "libp2p-plaintext" -version = "0.40.1" +version = "0.41.0" dependencies = [ "asynchronous-codec", "bytes", - "env_logger 0.10.0", "futures", "futures_ringbuf", "libp2p-core", "libp2p-identity", - "log", "quick-protobuf", + "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", - "unsigned-varint", + "tracing", + "tracing-subscriber", ] [[package]] name = "libp2p-pnet" -version = "0.23.0" +version = "0.24.0" dependencies = [ "futures", "libp2p-core", @@ -2900,22 +3089,21 @@ dependencies = [ "libp2p-tcp", "libp2p-websocket", "libp2p-yamux", - "log", "pin-project", "quickcheck-ext", "rand 0.8.5", "salsa20", "sha3", "tokio", + "tracing", ] [[package]] name = "libp2p-quic" -version = "0.9.2" +version = "0.10.2" dependencies = [ "async-std", "bytes", - "env_logger 0.10.0", "futures", "futures-timer", "if-watch", @@ -2926,26 +3114,26 @@ dependencies = [ "libp2p-tcp", "libp2p-tls", "libp2p-yamux", - "log", "parking_lot", "quickcheck", "quinn", "rand 0.8.5", - "ring", - "rustls 0.21.7", - "socket2 0.5.4", + "ring 0.16.20", + "rustls 0.21.9", + "socket2 0.5.5", "thiserror", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] name = "libp2p-relay" -version = "0.16.1" +version = "0.17.1" dependencies = [ "asynchronous-codec", "bytes", "either", - "env_logger 0.10.0", 
"futures", "futures-bounded", "futures-timer", @@ -2955,25 +3143,26 @@ dependencies = [ "libp2p-ping", "libp2p-plaintext", "libp2p-swarm", + "libp2p-swarm-test", "libp2p-yamux", - "log", "quick-protobuf", "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", "static_assertions", "thiserror", + "tracing", + "tracing-subscriber", "void", ] [[package]] name = "libp2p-rendezvous" -version = "0.13.0" +version = "0.14.0" dependencies = [ "async-trait", "asynchronous-codec", "bimap", - "env_logger 0.10.0", "futures", "futures-timer", "instant", @@ -2987,24 +3176,27 @@ dependencies = [ "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", - "log", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", "thiserror", "tokio", + "tracing", + "tracing-subscriber", "void", ] [[package]] name = "libp2p-request-response" -version = "0.25.1" +version = "0.26.2" dependencies = [ + "anyhow", "async-std", "async-trait", "cbor4ii", - "env_logger 0.10.0", "futures", + "futures-bounded", + "futures-timer", "futures_ringbuf", "instant", "libp2p-core", @@ -3014,45 +3206,61 @@ dependencies = [ "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", - "log", "rand 0.8.5", "serde", "serde_json", "smallvec", + "tracing", + "tracing-subscriber", "void", ] [[package]] name = "libp2p-server" -version = "0.12.3" +version = "0.12.6" dependencies = [ - "base64 0.21.4", + "base64 0.21.7", "clap", - "env_logger 0.10.0", "futures", "futures-timer", - "hyper", + "hyper 0.14.27", "libp2p", - "log", "prometheus-client", "serde", "serde_derive", "serde_json", "tokio", + "tracing", + "tracing-subscriber", "zeroize", ] +[[package]] +name = "libp2p-stream" +version = "0.1.0-alpha" +dependencies = [ + "futures", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "libp2p-swarm-test", + "rand 0.8.5", + "tokio", + "tracing", + "tracing-subscriber", + "void", +] + [[package]] name = "libp2p-swarm" -version = "0.43.5" +version = "0.44.2" dependencies = [ "async-std", "either", - "env_logger 0.10.0", "fnv", 
"futures", "futures-timer", - "getrandom 0.2.10", + "getrandom 0.2.12", "instant", "libp2p-core", "libp2p-identify", @@ -3063,13 +3271,15 @@ dependencies = [ "libp2p-swarm-derive", "libp2p-swarm-test", "libp2p-yamux", - "log", + "lru", "multistream-select", "once_cell", "quickcheck-ext", "rand 0.8.5", "smallvec", "tokio", + "tracing", + "tracing-subscriber", "trybuild", "void", "wasm-bindgen-futures", @@ -3077,18 +3287,17 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.33.0" +version = "0.34.3" dependencies = [ "heck", - "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] [[package]] name = "libp2p-swarm-test" -version = "0.2.0" +version = "0.3.0" dependencies = [ "async-trait", "futures", @@ -3099,31 +3308,31 @@ dependencies = [ "libp2p-swarm", "libp2p-tcp", "libp2p-yamux", - "log", "rand 0.8.5", + "tracing", ] [[package]] name = "libp2p-tcp" -version = "0.40.0" +version = "0.41.0" dependencies = [ - "async-io", + "async-io 2.3.1", "async-std", - "env_logger 0.10.0", "futures", "futures-timer", "if-watch", "libc", "libp2p-core", "libp2p-identity", - "log", - "socket2 0.5.4", + "socket2 0.5.5", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] name = "libp2p-tls" -version = "0.2.1" +version = "0.3.0" dependencies = [ "futures", "futures-rustls", @@ -3133,9 +3342,9 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "libp2p-yamux", - "rcgen 0.10.0", - "ring", - "rustls 0.21.7", + "rcgen", + "ring 0.16.20", + "rustls 0.21.9", "rustls-webpki", "thiserror", "tokio", @@ -3145,49 +3354,36 @@ dependencies = [ [[package]] name = "libp2p-uds" -version = "0.39.0" +version = "0.40.0" dependencies = [ "async-std", "futures", "libp2p-core", - "log", "tempfile", "tokio", + "tracing", ] [[package]] name = "libp2p-upnp" -version = "0.1.1" +version = "0.2.1" dependencies = [ "futures", "futures-timer", "igd-next", "libp2p-core", "libp2p-swarm", - "log", "tokio", + "tracing", "void", ] -[[package]] -name = 
"libp2p-wasm-ext" -version = "0.40.0" -dependencies = [ - "futures", - "js-sys", - "libp2p-core", - "send_wrapper 0.6.0", - "wasm-bindgen", - "wasm-bindgen-futures", -] - [[package]] name = "libp2p-webrtc" -version = "0.6.1-alpha" +version = "0.7.1-alpha" dependencies = [ "async-trait", "bytes", - "env_logger 0.10.0", "futures", "futures-timer", "hex", @@ -3196,23 +3392,24 @@ dependencies = [ "libp2p-identity", "libp2p-noise", "libp2p-webrtc-utils", - "log", "multihash", "quickcheck", "rand 0.8.5", - "rcgen 0.11.1", + "rcgen", "serde", "stun", "thiserror", "tinytemplate", "tokio", "tokio-util", + "tracing", + "tracing-subscriber", "webrtc", ] [[package]] name = "libp2p-webrtc-utils" -version = "0.1.0" +version = "0.2.0" dependencies = [ "asynchronous-codec", "bytes", @@ -3222,7 +3419,6 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-noise", - "log", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", @@ -3230,30 +3426,24 @@ dependencies = [ "sha2 0.10.8", "thiserror", "tinytemplate", - "unsigned-varint", + "tracing", ] [[package]] name = "libp2p-webrtc-websys" -version = "0.1.0-alpha" +version = "0.3.0-alpha" dependencies = [ "bytes", "futures", - "futures-timer", - "getrandom 0.2.10", + "getrandom 0.2.12", "hex", - "hex-literal", "js-sys", "libp2p-core", "libp2p-identity", - "libp2p-noise", - "libp2p-ping", - "libp2p-swarm", "libp2p-webrtc-utils", - "log", "send_wrapper 0.6.0", - "serde", "thiserror", + "tracing", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -3261,7 +3451,7 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.42.1" +version = "0.43.0" dependencies = [ "async-std", "either", @@ -3271,31 +3461,50 @@ dependencies = [ "libp2p-dns", "libp2p-identity", "libp2p-tcp", - "log", "parking_lot", - "quicksink", - "rcgen 0.10.0", + "pin-project-lite", + "rcgen", "rw-stream-sink", "soketto", + "tracing", "url", "webpki-roots", ] +[[package]] +name = "libp2p-websocket-websys" +version = "0.3.1" +dependencies = [ + 
"bytes", + "futures", + "js-sys", + "libp2p-core", + "libp2p-identity", + "libp2p-noise", + "libp2p-yamux", + "parking_lot", + "send_wrapper 0.6.0", + "thiserror", + "tracing", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "libp2p-webtransport-websys" -version = "0.1.0" +version = "0.2.0" dependencies = [ "futures", "js-sys", "libp2p-core", "libp2p-identity", "libp2p-noise", - "log", "multiaddr", "multibase", "multihash", "send_wrapper 0.6.0", "thiserror", + "tracing", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -3303,15 +3512,17 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.44.1" +version = "0.45.1" dependencies = [ "async-std", + "either", "futures", "libp2p-core", "libp2p-muxer-test-harness", - "log", "thiserror", - "yamux", + "tracing", + "yamux 0.12.1", + "yamux 0.13.1", ] [[package]] @@ -3376,9 +3587,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.3" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" [[package]] name = "lock_api" @@ -3401,11 +3612,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efa59af2ddfad1854ae27d75009d538d0998b4b2fd47083e743ac1a10e46c60" +checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7" dependencies = [ - "hashbrown 0.14.0", + "hashbrown 0.14.3", ] [[package]] @@ -3485,13 +3696,17 @@ dependencies = [ name = "metrics-example" version = "0.1.0" dependencies = [ - "env_logger 0.10.0", "futures", - "hyper", + "hyper 0.14.27", "libp2p", - "log", + "opentelemetry", + "opentelemetry-otlp", + "opentelemetry_api", "prometheus-client", "tokio", + "tracing", + "tracing-opentelemetry", + 
"tracing-subscriber", ] [[package]] @@ -3527,20 +3742,20 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "multiaddr" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92a651988b3ed3ad1bc8c87d016bb92f6f395b84ed1db9b926b32b1fc5a2c8b5" +checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" dependencies = [ "arrayref", "byteorder", @@ -3551,7 +3766,7 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint", + "unsigned-varint 0.7.2", "url", ] @@ -3577,7 +3792,7 @@ dependencies = [ "quickcheck", "rand 0.8.5", "serde", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -3586,16 +3801,16 @@ version = "0.13.0" dependencies = [ "async-std", "bytes", - "env_logger 0.10.0", "futures", "futures_ringbuf", - "log", "pin-project", "quickcheck-ext", "rand 0.8.5", "rw-stream-sink", "smallvec", - "unsigned-varint", + "tracing", + "tracing-subscriber", + "unsigned-varint 0.8.0", ] [[package]] @@ -3675,7 +3890,7 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" dependencies = [ - "async-io", + "async-io 1.13.0", "bytes", "futures", "libc", @@ -3765,9 +3980,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = 
"da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", ] @@ -3802,9 +4017,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" @@ -3820,11 +4035,11 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.55" +version = "0.10.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" +checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.1", "cfg-if", "foreign-types", "libc", @@ -3841,7 +4056,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] [[package]] @@ -3852,9 +4067,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.90" +version = "0.9.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" +checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" dependencies = [ "cc", "libc", @@ -3862,6 +4077,104 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "opentelemetry" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9591d937bc0e6d2feb6f71a559540ab300ea49955229c347a517a28d27784c54" +dependencies = [ + "opentelemetry_api", + "opentelemetry_sdk", +] + +[[package]] +name = "opentelemetry-otlp" +version = "0.13.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e5e5a5c4135864099f3faafbe939eb4d7f9b80ebf68a8448da961b32a7c1275" +dependencies = [ + "async-trait", + "futures-core", + "http 0.2.9", + "opentelemetry-proto", + "opentelemetry-semantic-conventions", + "opentelemetry_api", + "opentelemetry_sdk", + "prost", + "thiserror", + "tokio", + "tonic", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e3f814aa9f8c905d0ee4bde026afd3b2577a97c10e1699912e3e44f0c4cbeb" +dependencies = [ + "opentelemetry_api", + "opentelemetry_sdk", + "prost", + "tonic", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73c9f9340ad135068800e7f1b24e9e09ed9e7143f5bf8518ded3d3ec69789269" +dependencies = [ + "opentelemetry", +] + +[[package]] +name = "opentelemetry_api" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a81f725323db1b1206ca3da8bb19874bbd3f57c3bcd59471bfb04525b265b9b" +dependencies = [ + "futures-channel", + "futures-util", + "indexmap 1.9.3", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", + "urlencoding", +] + +[[package]] +name = "opentelemetry_sdk" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa8e705a0612d48139799fcbaba0d4a90f06277153e43dd2bdc16c6f0edd8026" +dependencies = [ + "async-trait", + "crossbeam-channel", + "futures-channel", + "futures-executor", + "futures-util", + "once_cell", + "opentelemetry_api", + "ordered-float", + "percent-encoding", + "rand 0.8.5", + "regex", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", +] + +[[package]] +name = "ordered-float" +version = "3.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1e1c390732d15f1d48471625cd92d154e66db2c56645e29a9cd26f4699f72dc" 
+dependencies = [ + "num-traits", +] + [[package]] name = "overload" version = "0.1.1" @@ -3918,7 +4231,7 @@ dependencies = [ "libc", "redox_syscall 0.3.5", "smallvec", - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -3927,32 +4240,13 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" -[[package]] -name = "pem" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" -dependencies = [ - "base64 0.13.1", -] - -[[package]] -name = "pem" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" -dependencies = [ - "base64 0.21.4", - "serde", -] - [[package]] name = "pem" version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923" dependencies = [ - "base64 0.21.4", + "base64 0.21.7", "serde", ] @@ -3967,41 +4261,35 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] [[package]] name = "pin-project-lite" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" - -[[package]] -name = "pin-project-lite" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -4013,10 +4301,11 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" name = "ping-example" version = "0.1.0" dependencies = [ - "env_logger 0.10.0", "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -4081,31 +4370,33 @@ dependencies = [ "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.12", - "windows-sys", + "pin-project-lite", + "windows-sys 0.48.0", ] [[package]] -name = "poly1305" -version = "0.7.2" +name = "polling" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" +checksum = "e53b6af1f60f36f8c2ac2aad5459d75a5a9b4be1e8cdd40264f315d78193e531" dependencies = [ - "cpufeatures", - "opaque-debug", - "universal-hash 0.4.0", + "cfg-if", + "concurrent-queue", + "pin-project-lite", + "rustix 0.38.31", + "tracing", + "windows-sys 0.48.0", ] [[package]] -name = "polyval" -version = "0.5.3" +name = "poly1305" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" +checksum = 
"8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ - "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash 0.4.0", + "universal-hash", ] [[package]] @@ -4117,7 +4408,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash 0.5.1", + "universal-hash", ] [[package]] @@ -4159,31 +4450,20 @@ dependencies = [ "version_check", ] -[[package]] -name = "proc-macro-warning" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.38", -] - [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus-client" -version = "0.21.2" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" +checksum = "6f87c10af16e0af74010d2a123d202e8363c04db5acfa91d8747f64a8524da3a" dependencies = [ "dtoa", "itoa", @@ -4193,10 +4473,33 @@ dependencies = [ [[package]] name = "prometheus-client-derive-encode" -version = "0.4.1" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.49", +] + +[[package]] +name = "prost" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b6a5217beb0ad503ee7fa752d451c905113d70721b937126158f3106a48cc1" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ + "bytes", + "prost-derive", +] + 
+[[package]] +name = "prost-derive" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" +dependencies = [ + "anyhow", + "itertools", "proc-macro2", "quote", "syn 1.0.109", @@ -4219,13 +4522,16 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.2.0" +version = "0.3.1" dependencies = [ "asynchronous-codec", "bytes", + "criterion", + "futures", "quick-protobuf", + "quickcheck-ext", "thiserror", - "unsigned-varint", + "unsigned-varint 0.8.0", ] [[package]] @@ -4247,32 +4553,21 @@ dependencies = [ "quickcheck", ] -[[package]] -name = "quicksink" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" -dependencies = [ - "futures-core", - "futures-sink", - "pin-project-lite 0.1.12", -] - [[package]] name = "quinn" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" dependencies = [ - "async-io", + "async-io 1.13.0", "async-std", "bytes", "futures-io", - "pin-project-lite 0.2.12", + "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.21.7", + "rustls 0.21.9", "thiserror", "tokio", "tracing", @@ -4286,9 +4581,9 @@ checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" dependencies = [ "bytes", "rand 0.8.5", - "ring", + "ring 0.16.20", "rustc-hash", - "rustls 0.21.7", + "rustls 0.21.9", "slab", "thiserror", "tinyvec", @@ -4303,16 +4598,16 @@ checksum = "6df19e284d93757a9fb91d63672f7741b129246a669db09d1c0063071debc0c0" dependencies = [ "bytes", "libc", - "socket2 0.5.4", + "socket2 0.5.5", "tracing", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -4376,7 +4671,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.12", ] [[package]] @@ -4412,24 +4707,12 @@ dependencies = [ [[package]] name = "rcgen" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" -dependencies = [ - "pem 1.1.1", - "ring", - "time", - "yasna", -] - -[[package]] -name = "rcgen" -version = "0.11.1" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4954fbc00dcd4d8282c987710e50ba513d351400dbdd00e803a05172a90d8976" +checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" dependencies = [ - "pem 2.0.1", - "ring", + "pem", + "ring 0.16.20", "time", "x509-parser", "yasna", @@ -4447,7 +4730,7 @@ dependencies = [ "futures-util", "itoa", "percent-encoding", - "pin-project-lite 0.2.12", + "pin-project-lite", "ryu", "tokio", "tokio-util", @@ -4478,21 +4761,21 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.12", "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.9.6" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick 1.0.2", "memchr", - "regex-automata 0.3.9", - "regex-syntax 0.7.5", + "regex-automata 0.4.4", + 
"regex-syntax 0.8.2", ] [[package]] @@ -4506,13 +4789,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.9" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9" +checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a" dependencies = [ "aho-corasick 1.0.2", "memchr", - "regex-syntax 0.7.5", + "regex-syntax 0.8.2", ] [[package]] @@ -4523,9 +4806,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.5" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "relay-server-example" @@ -4534,9 +4817,10 @@ dependencies = [ "async-std", "async-trait", "clap", - "env_logger 0.10.0", "futures", "libp2p", + "tracing", + "tracing-subscriber", ] [[package]] @@ -4545,28 +4829,28 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "env_logger 0.10.0", "futures", "libp2p", - "log", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" dependencies = [ - "base64 0.21.4", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", + "h2 0.3.24", + "http 0.2.9", + "http-body 0.4.5", + "hyper 0.14.27", "hyper-tls", "ipnet", "js-sys", @@ -4575,10 +4859,12 @@ dependencies = [ "native-tls", "once_cell", "percent-encoding", - "pin-project-lite 0.2.12", + "pin-project-lite", + "rustls-pemfile", 
"serde", "serde_json", "serde_urlencoded", + "sync_wrapper", "system-configuration", "tokio", "tokio-native-tls", @@ -4619,12 +4905,26 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", - "untrusted", + "spin 0.5.2", + "untrusted 0.7.1", "web-sys", "winapi", ] +[[package]] +name = "ring" +version = "0.17.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" +dependencies = [ + "cc", + "getrandom 0.2.12", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.48.0", +] + [[package]] name = "ringbuf" version = "0.3.3" @@ -4698,9 +4998,9 @@ dependencies = [ [[package]] name = "rust-embed" -version = "8.0.0" +version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e7d90385b59f0a6bf3d3b757f3ca4ece2048265d70db20a2016043d4509a40" +checksum = "a82c0bbc10308ed323529fd3c1dce8badda635aa319a5ff0e6466f33b8101e3f" dependencies = [ "rust-embed-impl", "rust-embed-utils", @@ -4709,23 +5009,23 @@ dependencies = [ [[package]] name = "rust-embed-impl" -version = "8.0.0" +version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3d8c6fd84090ae348e63a84336b112b5c3918b3bf0493a581f7bd8ee623c29" +checksum = "6227c01b1783cdfee1bcf844eb44594cd16ec71c35305bf1c9fb5aade2735e16" dependencies = [ "proc-macro2", "quote", "rust-embed-utils", "shellexpand", - "syn 2.0.38", + "syn 2.0.49", "walkdir", ] [[package]] name = "rust-embed-utils" -version = "8.0.0" +version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "873feff8cb7bf86fdf0a71bb21c95159f4e4a37dd7a4bd1855a940909b583ada" +checksum = "8cb0a25bfbb2d4b4402179c2cf030387d9990857ce08a32592c6238db9fa8665" dependencies = [ "globset", "sha2 0.10.8", @@ -4764,29 +5064,29 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.23" +version = "0.37.25" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" +checksum = "d4eb579851244c2c03e7c24f501c3432bed80b8f720af1d6e5b0e0f01555a035" dependencies = [ "bitflags 1.3.2", "errno", "io-lifetimes", "libc", "linux-raw-sys 0.3.8", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "rustix" -version = "0.38.4" +version = "0.38.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "errno", "libc", - "linux-raw-sys 0.4.3", - "windows-sys", + "linux-raw-sys 0.4.12", + "windows-sys 0.52.0", ] [[package]] @@ -4796,19 +5096,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", - "ring", + "ring 0.16.20", "sct", "webpki", ] [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" dependencies = [ "log", - "ring", + "ring 0.17.5", "rustls-webpki", "sct", ] @@ -4831,17 +5131,17 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.4", + "base64 0.21.7", ] [[package]] name = "rustls-webpki" -version = "0.101.4" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.5", + "untrusted 0.9.0", ] 
[[package]] @@ -4872,7 +5172,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] @@ -4890,7 +5190,7 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4911,8 +5211,8 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -4987,31 +5287,31 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.188" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.2.1", "itoa", "ryu", "serde", @@ -5035,7 +5335,7 @@ checksum = 
"8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] [[package]] @@ -5119,9 +5419,9 @@ dependencies = [ [[package]] name = "shellexpand" -version = "2.1.2" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ccc8076840c4da029af4f87e4e8daeb0fca6b87bbb02e10cb60b791450e11e4" +checksum = "da03fa3b94cc19e3ebfc88c4229c49d8f08cdbd1228870a45f0ffdf84988e14b" dependencies = [ "dirs", ] @@ -5166,9 +5466,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" +checksum = "2593d31f82ead8df961d8bd23a64c2ccf2eb5dd34b0a34bfb4dd54011c72009e" [[package]] name = "smol" @@ -5179,12 +5479,12 @@ dependencies = [ "async-channel", "async-executor", "async-fs", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.7.0", "async-net", "async-process", "blocking", - "futures-lite", + "futures-lite 1.13.0", ] [[package]] @@ -5198,16 +5498,16 @@ dependencies = [ [[package]] name = "snow" -version = "0.9.3" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" +checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" dependencies = [ - "aes-gcm 0.9.2", + "aes-gcm", "blake2", "chacha20poly1305", - "curve25519-dalek 4.1.1", + "curve25519-dalek", "rand_core 0.6.4", - "ring", + "ring 0.17.5", "rustc_version", "sha2 0.10.8", "subtle", @@ -5225,12 +5525,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +checksum = 
"7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -5254,6 +5554,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + [[package]] name = "spki" version = "0.7.2" @@ -5270,6 +5576,20 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "stream-example" +version = "0.1.0" +dependencies = [ + "anyhow", + "futures", + "libp2p", + "libp2p-stream", + "rand 0.8.5", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "stringmatch" version = "0.4.0" @@ -5291,12 +5611,12 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7beb1624a3ea34778d58d30e2b8606b4d29fe65e87c4d50b87ed30afd5c3830c" dependencies = [ - "base64 0.21.4", + "base64 0.21.7", "crc", "lazy_static", "md-5", "rand 0.8.5", - "ring", + "ring 0.16.20", "subtle", "thiserror", "tokio", @@ -5332,9 +5652,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.38" +version = "2.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "915aea9e586f80826ee59f8453c1101f9d1c4b3964cd2460185ee8e299ada496" dependencies = [ "proc-macro2", "quote", @@ -5361,9 +5681,9 @@ dependencies = [ [[package]] name = "sysinfo" -version = "0.29.10" +version = "0.29.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a18d114d420ada3a891e6bc8e96a2023402203296a47cdd65083377dad18ba5" +checksum = 
"cd727fc423c2060f6c92d9534cef765c65a6ed3f428a03d7def74a8c4348e666" dependencies = [ "cfg-if", "core-foundation-sys", @@ -5397,15 +5717,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.0" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" dependencies = [ "cfg-if", - "fastrand 2.0.0", - "redox_syscall 0.3.5", - "rustix 0.38.4", - "windows-sys", + "fastrand 2.0.1", + "rustix 0.38.31", + "windows-sys 0.52.0", ] [[package]] @@ -5424,11 +5743,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf0fe180d5f1f7dd32bb5f1a8d19231bb63dc9bbb1985e1dbb6f07163b6a8578" dependencies = [ "async-trait", - "base64 0.21.4", + "base64 0.21.7", "cookie", "fantoccini", "futures", - "http", + "http 0.2.9", "indexmap 1.9.3", "log", "parking_lot", @@ -5457,22 +5776,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.49" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" +checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.49" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" +checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] [[package]] @@ -5539,9 +5858,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.32.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", @@ -5549,22 +5868,32 @@ dependencies = [ "mio", "num_cpus", "parking_lot", - "pin-project-lite 0.2.12", + "pin-project-lite", "signal-hook-registry", - "socket2 0.5.4", + "socket2 0.5.5", "tokio-macros", - "windows-sys", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", ] [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] [[package]] @@ -5589,27 +5918,56 @@ dependencies = [ ] [[package]] -name = "tokio-rustls" -version = "0.24.1" +name = "tokio-stream" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ - "rustls 0.21.7", + "futures-core", + "pin-project-lite", "tokio", ] [[package]] name = "tokio-util" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.12", + "pin-project-lite", + "tokio", + "tracing", +] + +[[package]] +name = "tonic" 
+version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" +dependencies = [ + "async-trait", + "axum 0.6.20", + "base64 0.21.7", + "bytes", + "futures-core", + "futures-util", + "h2 0.3.24", + "http 0.2.9", + "http-body 0.4.5", + "hyper 0.14.27", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", "tracing", ] @@ -5621,9 +5979,13 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", + "indexmap 1.9.3", "pin-project", - "pin-project-lite 0.2.12", + "pin-project-lite", + "rand 0.8.5", + "slab", "tokio", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -5631,22 +5993,22 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.4" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +checksum = "0da193277a4e2c33e59e09b5861580c33dd0a637c3883d0fa74ba40c0374af2e" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "bytes", - "futures-core", "futures-util", - "http", - "http-body", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", "http-range-header", "httpdate", "mime", "mime_guess", "percent-encoding", - "pin-project-lite 0.2.12", + "pin-project-lite", "tokio", "tokio-util", "tower-layer", @@ -5668,33 +6030,32 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if", "log", - "pin-project-lite 0.2.12", + "pin-project-lite", 
"tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", @@ -5712,79 +6073,59 @@ dependencies = [ ] [[package]] -name = "tracing-subscriber" -version = "0.3.17" +name = "tracing-log" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ - "matchers", - "nu-ansi-term", + "log", "once_cell", - "regex", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", "tracing-core", - "tracing-log", ] [[package]] -name = "trust-dns-proto" -version = "0.23.0" +name = "tracing-opentelemetry" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dc775440033cb114085f6f2437682b194fa7546466024b1037e82a48a052a69" +checksum = "75327c6b667828ddc28f5e3f169036cb793c3f588d83bf0f262a7f062ffed3c8" dependencies = [ - "async-trait", - "bytes", - "cfg-if", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "h2", - "http", - "idna", - "ipnet", "once_cell", - "rand 0.8.5", - "rustls 0.21.7", - "rustls-pemfile", - "rustls-webpki", + "opentelemetry", + "opentelemetry_sdk", "smallvec", - "socket2 0.5.4", - 
"thiserror", - "tinyvec", - "tokio", - "tokio-rustls 0.24.1", "tracing", - "url", - "webpki-roots", + "tracing-core", + "tracing-log 0.1.3", + "tracing-subscriber", ] [[package]] -name = "trust-dns-resolver" -version = "0.23.0" +name = "tracing-subscriber" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff7aed33ef3e8bf2c9966fccdfed93f93d46f432282ea875cd66faabc6ef2f" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lru-cache", + "matchers", + "nu-ansi-term", "once_cell", - "parking_lot", - "rand 0.8.5", - "resolv-conf", - "rustls 0.21.7", + "regex", + "sharded-slab", "smallvec", - "thiserror", - "tokio", - "tokio-rustls 0.24.1", + "thread_local", "tracing", - "trust-dns-proto", - "webpki-roots", + "tracing-core", + "tracing-log 0.2.0", +] + +[[package]] +name = "tracing-wasm" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4575c663a174420fa2d78f4108ff68f65bf2fbb7dd89f33749b6e826b3626e07" +dependencies = [ + "tracing", + "tracing-subscriber", + "wasm-bindgen", ] [[package]] @@ -5795,9 +6136,9 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "trybuild" -version = "1.0.85" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196a58260a906cedb9bf6d8034b6379d0c11f552416960452f267402ceeddff1" +checksum = "9a9d3ba662913483d6722303f619e75ea10b7855b0f8e0d72799cf8621bb488f" dependencies = [ "basic-toml", "glob", @@ -5815,12 +6156,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58f4fcb97da0426e8146fe0e9b78cc13120161087256198701d12d9df77f7701" dependencies = [ "async-trait", - "base64 0.21.4", + "base64 0.21.7", "futures", "log", "md-5", "rand 0.8.5", - "ring", + "ring 0.16.20", "stun", "thiserror", "tokio", @@ -5887,16 +6228,6 @@ version = "0.2.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" -[[package]] -name = "universal-hash" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "universal-hash" version = "0.5.1" @@ -5912,6 +6243,12 @@ name = "unsigned-varint" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" + +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" dependencies = [ "asynchronous-codec", "bytes", @@ -5923,6 +6260,12 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "upnp-example" version = "0.1.0" @@ -5930,19 +6273,26 @@ dependencies = [ "futures", "libp2p", "tokio", + "tracing-subscriber", ] [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna", + "idna 0.5.0", "percent-encoding", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf8parse" version = "0.2.1" @@ -5955,7 +6305,7 @@ version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.12", ] [[package]] @@ -6036,9 +6386,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -6046,24 +6396,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" dependencies = [ "cfg-if", "js-sys", @@ -6073,9 +6423,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" dependencies = [ 
"quote", "wasm-bindgen-macro-support", @@ -6083,28 +6433,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" [[package]] name = "wasm-bindgen-test" -version = "0.3.37" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e6e302a7ea94f83a6d09e78e7dc7d9ca7b186bc2829c24a22d0753efd680671" +checksum = "143ddeb4f833e2ed0d252e618986e18bfc7b0e52f2d28d77d05b2f045dd8eb61" dependencies = [ "console_error_panic_hook", "js-sys", @@ -6116,12 +6466,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.37" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecb993dd8c836930ed130e020e77d9b2e65dd0fbab1b67c790b0f5d80b11a575" +checksum = "a5211b7550606857312bba1d978a8ec75692eae187becc5e680444fffc5e6f89" dependencies = [ "proc-macro2", "quote", + "syn 2.0.49", ] [[package]] @@ -6137,9 +6488,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "58cd2333b6e0be7a39605f0e255892fd7418a682d8da8fe042fe25128794d2ed" dependencies = [ "js-sys", "wasm-bindgen", @@ -6154,7 +6505,7 @@ 
dependencies = [ "base64 0.13.1", "bytes", "cookie", - "http", + "http 0.2.9", "log", "serde", "serde_derive", @@ -6170,8 +6521,8 @@ version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07ecc0cd7cac091bf682ec5efa18b1cff79d617b84181f38b3951dbe135f607f" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -6194,14 +6545,14 @@ dependencies = [ "interceptor", "lazy_static", "log", - "pem 3.0.2", + "pem", "rand 0.8.5", - "rcgen 0.11.1", + "rcgen", "regex", - "ring", + "ring 0.16.20", "rtcp", "rtp", - "rustls 0.21.7", + "rustls 0.21.9", "sdp", "serde", "serde_json", @@ -6244,8 +6595,8 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32b140b953f986e97828aa33ec6318186b05d862bee689efbc57af04a243e832" dependencies = [ - "aes 0.8.3", - "aes-gcm 0.10.2", + "aes", + "aes-gcm", "async-trait", "bincode", "byteorder", @@ -6257,12 +6608,12 @@ dependencies = [ "log", "p256", "p384", - "pem 3.0.2", + "pem", "rand 0.8.5", "rand_core 0.6.4", - "rcgen 0.11.1", - "ring", - "rustls 0.21.7", + "rcgen", + "ring 0.16.20", + "rustls 0.21.9", "sec1", "serde", "sha1", @@ -6271,7 +6622,7 @@ dependencies = [ "thiserror", "tokio", "webrtc-util", - "x25519-dalek 2.0.0", + "x25519-dalek", "x509-parser", ] @@ -6306,7 +6657,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62bebbd40e7f8b630a0f1a74783dbfff1edfc0ccaae891c4689891156a8c4d8c" dependencies = [ "log", - "socket2 0.5.4", + "socket2 0.5.5", "thiserror", "tokio", "webrtc-util", @@ -6348,12 +6699,12 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1db1f36c1c81e4b1e531c0b9678ba0c93809e196ce62122d87259bb71c03b9f" dependencies = [ - "aead 0.5.2", - "aes 0.8.3", - "aes-gcm 0.10.2", + "aead", + "aes", + "aes-gcm", "byteorder", "bytes", - "ctr 0.9.2", + "ctr", "hmac 0.12.1", "log", "rtcp", @@ -6391,7 +6742,7 @@ name = 
"webtransport-tests" version = "0.1.0" dependencies = [ "futures", - "getrandom 0.2.10", + "getrandom 0.2.12", "libp2p-core", "libp2p-identity", "libp2p-noise", @@ -6443,15 +6794,21 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.34.0" +version = "0.51.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" +dependencies = [ + "windows-core", + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-core" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45296b64204227616fdbf2614cefa4c236b98ee64dfaaaa435207ed99fe7829f" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" dependencies = [ - "windows_aarch64_msvc 0.34.0", - "windows_i686_gnu 0.34.0", - "windows_i686_msvc 0.34.0", - "windows_x86_64_gnu 0.34.0", - "windows_x86_64_msvc 0.34.0", + "windows-targets 0.48.5", ] [[package]] @@ -6460,95 +6817,131 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] 
[[package]] name = "windows-targets" -version = "0.48.1" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" [[package]] name = "windows_aarch64_msvc" -version = "0.34.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" [[package]] name = "windows_i686_gnu" -version = "0.34.0" +version = "0.48.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" [[package]] name = "windows_i686_msvc" -version = "0.34.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" [[package]] name = "windows_x86_64_gnu" -version = "0.34.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = 
"windows_x86_64_gnullvm" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" [[package]] name = "windows_x86_64_msvc" -version = "0.34.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winreg" @@ -6557,27 +6950,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ "cfg-if", - "windows-sys", -] - -[[package]] -name = "x25519-dalek" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a0c105152107e3b96f6a00a65e86ce82d9b125230e1c4302940eca58ff71f4f" -dependencies = [ - "curve25519-dalek 3.2.0", - "rand_core 0.5.1", - "zeroize", + "windows-sys 0.48.0", ] [[package]] name = "x25519-dalek" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" dependencies = [ - "curve25519-dalek 4.1.1", + "curve25519-dalek", "rand_core 0.6.4", "serde", "zeroize", @@ -6595,7 +6977,7 @@ dependencies = [ "lazy_static", "nom", "oid-registry", - "ring", + "ring 0.16.20", "rusticata-macros", "thiserror", "time", @@ -6618,11 +7000,27 @@ 
dependencies = [ [[package]] name = "yamux" -version = "0.12.0" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed0164ae619f2dc144909a9f082187ebb5893693d8c0196e8085283ccd4b776" +dependencies = [ + "futures", + "log", + "nohash-hasher", + "parking_lot", + "pin-project", + "rand 0.8.5", + "static_assertions", +] + +[[package]] +name = "yamux" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0329ef377816896f014435162bb3711ea7a07729c23d0960e6f8048b21b8fe91" +checksum = "ad1d0148b89300047e72994bee99ecdabd15a9166a7b70c8b8c37c314dcc9002" dependencies = [ "futures", + "instant", "log", "nohash-hasher", "parking_lot", @@ -6640,11 +7038,31 @@ dependencies = [ "time", ] +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.49", +] + [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] @@ -6657,5 +7075,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.49", ] diff --git a/Cargo.toml b/Cargo.toml index 55129a8ea965..3d7097f9a88b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,9 @@ members = [ "examples/ping", "examples/relay-server", "examples/rendezvous", + 
"examples/stream", "examples/upnp", + "hole-punching-tests", "identity", "interop-tests", "misc/allow-block-list", @@ -44,11 +46,11 @@ members = [ "protocols/relay", "protocols/rendezvous", "protocols/request-response", + "protocols/stream", "protocols/upnp", - "swarm", "swarm-derive", "swarm-test", - "transports/deflate", + "swarm", "transports/dns", "transports/noise", "transports/plaintext", @@ -57,9 +59,9 @@ members = [ "transports/tcp", "transports/tls", "transports/uds", - "transports/wasm-ext", - "transports/webrtc", "transports/webrtc-websys", + "transports/webrtc", + "transports/websocket-websys", "transports/websocket", "transports/webtransport-websys", "wasm-tests/webtransport-tests", @@ -67,62 +69,65 @@ members = [ resolver = "2" [workspace.package] -rust-version = "1.65.0" +rust-version = "1.73.0" [workspace.dependencies] -futures-bounded = { version = "0.1.0", path = "misc/futures-bounded" } -libp2p = { version = "0.52.3", path = "libp2p" } -libp2p-allow-block-list = { version = "0.2.0", path = "misc/allow-block-list" } -libp2p-autonat = { version = "0.11.0", path = "protocols/autonat" } -libp2p-connection-limits = { version = "0.2.1", path = "misc/connection-limits" } -libp2p-core = { version = "0.40.1", path = "core" } -libp2p-dcutr = { version = "0.10.0", path = "protocols/dcutr" } -libp2p-deflate = { version = "0.40.1", path = "transports/deflate" } -libp2p-dns = { version = "0.40.1", path = "transports/dns" } -libp2p-floodsub = { version = "0.43.0", path = "protocols/floodsub" } -libp2p-gossipsub = { version = "0.45.1", path = "protocols/gossipsub" } -libp2p-identify = { version = "0.43.1", path = "protocols/identify" } -libp2p-identity = { version = "0.2.5" } -libp2p-kad = { version = "0.44.6", path = "protocols/kad" } -libp2p-mdns = { version = "0.44.0", path = "protocols/mdns" } -libp2p-memory-connection-limits = { version = "0.1.0", path = "misc/memory-connection-limits" } -libp2p-metrics = { version = "0.13.1", path = "misc/metrics" } 
-libp2p-mplex = { version = "0.40.0", path = "muxers/mplex" } +asynchronous-codec = { version = "0.7.0" } +futures-bounded = { version = "0.2.3", path = "misc/futures-bounded" } +libp2p = { version = "0.53.2", path = "libp2p" } +libp2p-allow-block-list = { version = "0.3.0", path = "misc/allow-block-list" } +libp2p-autonat = { version = "0.12.0", path = "protocols/autonat" } +libp2p-connection-limits = { version = "0.3.1", path = "misc/connection-limits" } +libp2p-core = { version = "0.41.2", path = "core" } +libp2p-dcutr = { version = "0.11.0", path = "protocols/dcutr" } +libp2p-dns = { version = "0.41.1", path = "transports/dns" } +libp2p-floodsub = { version = "0.44.0", path = "protocols/floodsub" } +libp2p-gossipsub = { version = "0.46.1", path = "protocols/gossipsub" } +libp2p-identify = { version = "0.44.2", path = "protocols/identify" } +libp2p-identity = { version = "0.2.8" } +libp2p-kad = { version = "0.45.4", path = "protocols/kad" } +libp2p-mdns = { version = "0.45.1", path = "protocols/mdns" } +libp2p-memory-connection-limits = { version = "0.2.0", path = "misc/memory-connection-limits" } +libp2p-metrics = { version = "0.14.1", path = "misc/metrics" } +libp2p-mplex = { version = "0.41.0", path = "muxers/mplex" } libp2p-muxer-test-harness = { path = "muxers/test-harness" } -libp2p-noise = { version = "0.43.1", path = "transports/noise" } -libp2p-perf = { version = "0.2.0", path = "protocols/perf" } -libp2p-ping = { version = "0.43.1", path = "protocols/ping" } -libp2p-plaintext = { version = "0.40.1", path = "transports/plaintext" } -libp2p-pnet = { version = "0.23.0", path = "transports/pnet" } -libp2p-quic = { version = "0.9.2", path = "transports/quic" } -libp2p-relay = { version = "0.16.1", path = "protocols/relay" } -libp2p-rendezvous = { version = "0.13.0", path = "protocols/rendezvous" } -libp2p-upnp = { version = "0.1.1", path = "protocols/upnp" } -libp2p-request-response = { version = "0.25.1", path = "protocols/request-response" } 
-libp2p-server = { version = "0.12.3", path = "misc/server" } -libp2p-swarm = { version = "0.43.5", path = "swarm" } -libp2p-swarm-derive = { version = "0.33.0", path = "swarm-derive" } -libp2p-swarm-test = { version = "0.2.0", path = "swarm-test" } -libp2p-tcp = { version = "0.40.0", path = "transports/tcp" } -libp2p-tls = { version = "0.2.1", path = "transports/tls" } -libp2p-uds = { version = "0.39.0", path = "transports/uds" } -libp2p-wasm-ext = { version = "0.40.0", path = "transports/wasm-ext" } -libp2p-webrtc = { version = "0.6.1-alpha", path = "transports/webrtc" } -libp2p-webrtc-utils = { version = "0.1.0", path = "misc/webrtc-utils" } -libp2p-webrtc-websys = { version = "0.1.0-alpha", path = "transports/webrtc-websys" } -libp2p-websocket = { version = "0.42.1", path = "transports/websocket" } -libp2p-webtransport-websys = { version = "0.1.0", path = "transports/webtransport-websys" } -libp2p-yamux = { version = "0.44.1", path = "muxers/yamux" } +libp2p-noise = { version = "0.44.0", path = "transports/noise" } +libp2p-perf = { version = "0.3.0", path = "protocols/perf" } +libp2p-ping = { version = "0.44.0", path = "protocols/ping" } +libp2p-plaintext = { version = "0.41.0", path = "transports/plaintext" } +libp2p-pnet = { version = "0.24.0", path = "transports/pnet" } +libp2p-quic = { version = "0.10.2", path = "transports/quic" } +libp2p-relay = { version = "0.17.1", path = "protocols/relay" } +libp2p-rendezvous = { version = "0.14.0", path = "protocols/rendezvous" } +libp2p-request-response = { version = "0.26.2", path = "protocols/request-response" } +libp2p-server = { version = "0.12.6", path = "misc/server" } +libp2p-stream = { version = "0.1.0-alpha", path = "protocols/stream" } +libp2p-swarm = { version = "0.44.2", path = "swarm" } +libp2p-swarm-derive = { version = "=0.34.3", path = "swarm-derive" } # `libp2p-swarm-derive` may not be compatible with different `libp2p-swarm` non-breaking releases. E.g. 
`libp2p-swarm` might introduce a new enum variant `FromSwarm` (which is `#[non-exhaustive]`) in a non-breaking release. Older versions of `libp2p-swarm-derive` would not forward this enum variant within the `NetworkBehaviour` hierarchy. Thus the version pinning is required. +libp2p-swarm-test = { version = "0.3.0", path = "swarm-test" } +libp2p-tcp = { version = "0.41.0", path = "transports/tcp" } +libp2p-tls = { version = "0.3.0", path = "transports/tls" } +libp2p-uds = { version = "0.40.0", path = "transports/uds" } +libp2p-upnp = { version = "0.2.1", path = "protocols/upnp" } +libp2p-webrtc = { version = "0.7.1-alpha", path = "transports/webrtc" } +libp2p-webrtc-utils = { version = "0.2.0", path = "misc/webrtc-utils" } +libp2p-webrtc-websys = { version = "0.3.0-alpha", path = "transports/webrtc-websys" } +libp2p-websocket = { version = "0.43.0", path = "transports/websocket" } +libp2p-websocket-websys = { version = "0.3.1", path = "transports/websocket-websys" } +libp2p-webtransport-websys = { version = "0.2.0", path = "transports/webtransport-websys" } +libp2p-yamux = { version = "0.45.1", path = "muxers/yamux" } +multiaddr = "0.18.1" +multihash = "0.19.1" multistream-select = { version = "0.13.0", path = "misc/multistream-select" } -quick-protobuf-codec = { version = "0.2.0", path = "misc/quick-protobuf-codec" } +prometheus-client = "0.22.1" +quick-protobuf-codec = { version = "0.3.1", path = "misc/quick-protobuf-codec" } quickcheck = { package = "quickcheck-ext", path = "misc/quickcheck-ext" } rw-stream-sink = { version = "0.4.0", path = "misc/rw-stream-sink" } -multiaddr = "0.18.0" -multihash = "0.19.1" +unsigned-varint = { version = "0.8.0" } [patch.crates-io] -# Patch away `libp2p-idnentity` in our dependency tree with the workspace version. +# Patch away `libp2p-identity` in our dependency tree with the workspace version. # `libp2p-identity` is a leaf dependency and used within `rust-multiaddr` which is **not** part of the workspace. 
# As a result, we cannot just reference the workspace version in our crates because the types would mismatch with what # we import via `rust-multiaddr`. @@ -134,3 +139,9 @@ rust.unreachable_pub = "warn" clippy.used_underscore_binding = "warn" clippy.pedantic = "allow" clippy.type_complexity = "allow" +clippy.unnecessary_wraps = "warn" +clippy.manual_let_else = "warn" +clippy.dbg_macro = "warn" + +[workspace.metadata.release] +pre-release-hook = ["/bin/sh", '-c', '/bin/sh $WORKSPACE_ROOT/scripts/add-changelog-header.sh'] # Nested use of shell to expand variables. diff --git a/README.md b/README.md index 6bd528fc6800..48fa976635ae 100644 --- a/README.md +++ b/README.md @@ -16,9 +16,8 @@ This repository is the central place for Rust development of the [libp2p](https: many protocols in this repository. - For **security related issues** please [file a private security vulnerability - report](https://github.com/libp2p/rust-libp2p/security/advisories/new) - or reach out to [security@libp2p.io](mailto:security@libp2p.io). Please do not - file a public issue on GitHub. + report](https://github.com/libp2p/rust-libp2p/security/advisories/new) . Please do not file a + public issue on GitHub. - To **report bugs, suggest improvements or request new features** please open a GitHub issue on this repository. @@ -42,7 +41,7 @@ The main components of this repository are structured as follows: * `transports/`: Implementations of transport protocols (e.g. TCP) and protocol upgrades (e.g. for authenticated encryption, compression, ...) based on the `libp2p-core` `Transport` - API . + API. * `muxers/`: Implementations of the `StreamMuxer` interface of `libp2p-core`, e.g. (sub)stream multiplexing protocols on top of (typically TCP) connections. @@ -91,8 +90,8 @@ Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). - [Forest](https://github.com/ChainSafe/forest) - An implementation of Filecoin written in Rust. 
- [fuel-core](https://github.com/FuelLabs/fuel-core) - A Rust implementation of the Fuel protocol. - [HotShot](https://github.com/EspressoSystems/HotShot) - Decentralized sequencer in Rust developed by [Espresso Systems](https://www.espressosys.com/). -- [ipfs-embed](https://github.com/ipfs-rust/ipfs-embed) - A small embeddable ipfs implementation -used and maintained by [Actyx](https://www.actyx.com). +- [ipfs-embed](https://github.com/ipfs-rust/ipfs-embed) - A small embeddable ipfs implementation used and maintained by [Actyx](https://www.actyx.com). +- [Homestar](https://github.com/ipvm-wg/homestar) - An InterPlanetary Virtual Machine (IPVM) implementation used and maintained by Fission. - [beetle](https://github.com/n0-computer/beetle) - Next-generation implementation of IPFS for Cloud & Mobile platforms. - [Lighthouse](https://github.com/sigp/lighthouse) - Ethereum consensus client in Rust. - [Locutus](https://github.com/freenet/locutus) - Global, observable, decentralized key-value store. @@ -103,4 +102,4 @@ used and maintained by [Actyx](https://www.actyx.com). - [Subspace](https://github.com/subspace/subspace) - Subspace Network reference implementation - [Substrate](https://github.com/paritytech/substrate) - Framework for blockchain innovation, used by [Polkadot](https://www.parity.io/technologies/polkadot/). -- [Taple](https://github.com/opencanarias/taple-core) - Sustainable DLT for asset and process traceability by [OpenCanarias](https://www.opencanarias.com/en/). \ No newline at end of file +- [Taple](https://github.com/opencanarias/taple-core) - Sustainable DLT for asset and process traceability by [OpenCanarias](https://www.opencanarias.com/en/). diff --git a/SECURITY.md b/SECURITY.md index 0e5a3f2e55f9..f3ae83405e85 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -7,5 +7,3 @@ By default we provide security patches for the latest released version only. On ## Reporting a Vulnerability Please do not file a public issue on GitHub. 
Instead, please [file a private security vulnerability report](https://github.com/libp2p/rust-libp2p/security/advisories/new). - -If you need further assistance, please reach out to [security@libp2p.io](mailto:security@libp2p.io). diff --git a/clippy.toml b/clippy.toml index f66cc0ac2dac..fd38ead02022 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1,3 +1,4 @@ disallowed-methods = [ { path = "futures::channel::mpsc::unbounded", reason = "does not enforce backpressure" }, ] +avoid-breaking-exported-api = false diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index ea7bc2bd6a06..a7cd7fd46b45 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,3 +1,23 @@ +## 0.41.2 + +- Implement `std::fmt::Display` on `ListenerId`. + See [PR 4936](https://github.com/libp2p/rust-libp2p/pull/4936). + +## 0.41.1 + +- Implement `{In,Out}boundConnectionUpgrade` for `SelectUpgrade`. + See [PR 4812](https://github.com/libp2p/rust-libp2p/pull/4812). + +## 0.41.0 + +- Remove blanket-impl of `{In,Out}boundUpgrade` for `{In,Out}boundConnectionUpgrade`. + See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). +- Remove deprecated functions from `ListenerId`. + See [PR 4736](https://github.com/libp2p/rust-libp2p/pull/4736). +- Remove `upgrade::transfer` module. + See [issue 4011](https://github.com/libp2p/rust-libp2p/issues/4011) for details. + See [PR 4788](https://github.com/libp2p/rust-libp2p/pull/4788). + ## 0.40.1 - Implement `Debug` for `StreamMuxerEvent`. 
diff --git a/core/Cargo.toml b/core/Cargo.toml index 15288a48d1cb..619cd357744d 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-core" edition = "2021" rust-version = { workspace = true } description = "Core traits and structs of libp2p" -version = "0.40.1" +version = "0.41.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,24 +13,24 @@ categories = ["network-programming", "asynchronous"] [dependencies] either = "1.9" fnv = "1.0" -futures = { version = "0.3.28", features = ["executor", "thread-pool"] } +futures = { version = "0.3.30", features = ["executor", "thread-pool"] } futures-timer = "3" instant = "0.1.12" libp2p-identity = { workspace = true, features = ["peerid", "ed25519"] } -log = "0.4" multiaddr = { workspace = true } multihash = { workspace = true } multistream-select = { workspace = true } -once_cell = "1.18.0" +once_cell = "1.19.0" parking_lot = "0.12.0" -pin-project = "1.1.3" +pin-project = "1.1.4" quick-protobuf = "0.8" rand = "0.8" rw-stream-sink = { workspace = true } serde = { version = "1", optional = true, features = ["derive"] } -smallvec = "1.11.1" +smallvec = "1.12.0" thiserror = "1.0" -unsigned-varint = "0.7" +tracing = "0.1.37" +unsigned-varint = { workspace = true } void = "1" [dev-dependencies] @@ -39,7 +39,7 @@ libp2p-mplex = { path = "../muxers/mplex" } # Using `path` he libp2p-noise = { path = "../transports/noise" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. 
multihash = { workspace = true, features = ["arb"] } quickcheck = { workspace = true } -libp2p-identity = { workspace = true, features = ["ed25519"] } +libp2p-identity = { workspace = true, features = ["ed25519", "rand"] } [features] serde = ["multihash/serde-codec", "dep:serde", "libp2p-identity/serde"] diff --git a/core/src/transport.rs b/core/src/transport.rs index 9aeddb72ad08..22e7a0532fa9 100644 --- a/core/src/transport.rs +++ b/core/src/transport.rs @@ -250,22 +250,15 @@ pub trait Transport { pub struct ListenerId(usize); impl ListenerId { - #[deprecated(note = "Renamed to ` ListenerId::next`.")] - #[allow(clippy::new_without_default)] - /// Creates a new `ListenerId`. - pub fn new() -> Self { - ListenerId::next() - } - /// Creates a new `ListenerId`. pub fn next() -> Self { ListenerId(NEXT_LISTENER_ID.fetch_add(1, Ordering::SeqCst)) } +} - #[deprecated(note = "Use ` ListenerId::next` instead.")] - #[allow(clippy::should_implement_trait)] - pub fn default() -> Self { - Self::next() +impl std::fmt::Display for ListenerId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) } } @@ -414,16 +407,16 @@ impl TransportEvent { /// Returns `None` if the event is not actually an incoming connection, /// otherwise the upgrade and the remote address. pub fn into_incoming(self) -> Option<(TUpgr, Multiaddr)> { - if let TransportEvent::Incoming { + let TransportEvent::Incoming { upgrade, send_back_addr, .. } = self - { - Some((upgrade, send_back_addr)) - } else { - None - } + else { + return None; + }; + + Some((upgrade, send_back_addr)) } /// Returns `true` if this is a [`TransportEvent::NewAddress`]. 
diff --git a/core/src/transport/choice.rs b/core/src/transport/choice.rs index 8d3bfdecb794..aa3acfc32310 100644 --- a/core/src/transport/choice.rs +++ b/core/src/transport/choice.rs @@ -22,7 +22,6 @@ use crate::either::EitherFuture; use crate::transport::{ListenerId, Transport, TransportError, TransportEvent}; use either::Either; use futures::future; -use log::{debug, trace}; use multiaddr::Multiaddr; use std::{pin::Pin, task::Context, task::Poll}; @@ -52,16 +51,16 @@ where id: ListenerId, addr: Multiaddr, ) -> Result<(), TransportError> { - trace!( - "Attempting to listen on {} using {}", - addr, + tracing::trace!( + address=%addr, + "Attempting to listen on address using {}", std::any::type_name::() ); let addr = match self.0.listen_on(id, addr) { Err(TransportError::MultiaddrNotSupported(addr)) => { - debug!( - "Failed to listen on {} using {}", - addr, + tracing::debug!( + address=%addr, + "Failed to listen on address using {}", std::any::type_name::() ); addr @@ -69,16 +68,16 @@ where res => return res.map_err(|err| err.map(Either::Left)), }; - trace!( - "Attempting to listen on {} using {}", - addr, + tracing::trace!( + address=%addr, + "Attempting to listen on address using {}", std::any::type_name::() ); let addr = match self.1.listen_on(id, addr) { Err(TransportError::MultiaddrNotSupported(addr)) => { - debug!( - "Failed to listen on {} using {}", - addr, + tracing::debug!( + address=%addr, + "Failed to listen on address using {}", std::any::type_name::() ); addr @@ -94,17 +93,17 @@ where } fn dial(&mut self, addr: Multiaddr) -> Result> { - trace!( - "Attempting to dial {} using {}", - addr, + tracing::trace!( + address=%addr, + "Attempting to dial address using {}", std::any::type_name::() ); let addr = match self.0.dial(addr) { Ok(connec) => return Ok(EitherFuture::First(connec)), Err(TransportError::MultiaddrNotSupported(addr)) => { - debug!( - "Failed to dial {} using {}", - addr, + tracing::debug!( + address=%addr, + "Failed to dial address using 
{}", std::any::type_name::() ); addr @@ -114,17 +113,17 @@ where } }; - trace!( - "Attempting to dial {} using {}", - addr, + tracing::trace!( + address=%addr, + "Attempting to dial address using {}", std::any::type_name::() ); let addr = match self.1.dial(addr) { Ok(connec) => return Ok(EitherFuture::Second(connec)), Err(TransportError::MultiaddrNotSupported(addr)) => { - debug!( - "Failed to dial {} using {}", - addr, + tracing::debug!( + address=%addr, + "Failed to dial address using {}", std::any::type_name::() ); addr diff --git a/core/src/transport/global_only.rs b/core/src/transport/global_only.rs index 4f1fe8ab7947..0671b0e99845 100644 --- a/core/src/transport/global_only.rs +++ b/core/src/transport/global_only.rs @@ -22,7 +22,6 @@ use crate::{ multiaddr::{Multiaddr, Protocol}, transport::{ListenerId, TransportError, TransportEvent}, }; -use log::debug; use std::{ pin::Pin, task::{Context, Poll}, @@ -292,20 +291,20 @@ impl crate::Transport for Transport { match addr.iter().next() { Some(Protocol::Ip4(a)) => { if !ipv4_global::is_global(a) { - debug!("Not dialing non global IP address {:?}.", a); + tracing::debug!(ip=%a, "Not dialing non global IP address"); return Err(TransportError::MultiaddrNotSupported(addr)); } self.inner.dial(addr) } Some(Protocol::Ip6(a)) => { if !ipv6_global::is_global(a) { - debug!("Not dialing non global IP address {:?}.", a); + tracing::debug!(ip=%a, "Not dialing non global IP address"); return Err(TransportError::MultiaddrNotSupported(addr)); } self.inner.dial(addr) } _ => { - debug!("Not dialing unsupported Multiaddress {:?}.", addr); + tracing::debug!(address=%addr, "Not dialing unsupported Multiaddress"); Err(TransportError::MultiaddrNotSupported(addr)) } } @@ -318,20 +317,20 @@ impl crate::Transport for Transport { match addr.iter().next() { Some(Protocol::Ip4(a)) => { if !ipv4_global::is_global(a) { - debug!("Not dialing non global IP address {:?}.", a); + tracing::debug!(ip=?a, "Not dialing non global IP address"); return 
Err(TransportError::MultiaddrNotSupported(addr)); } self.inner.dial_as_listener(addr) } Some(Protocol::Ip6(a)) => { if !ipv6_global::is_global(a) { - debug!("Not dialing non global IP address {:?}.", a); + tracing::debug!(ip=?a, "Not dialing non global IP address"); return Err(TransportError::MultiaddrNotSupported(addr)); } self.inner.dial_as_listener(addr) } _ => { - debug!("Not dialing unsupported Multiaddress {:?}.", addr); + tracing::debug!(address=%addr, "Not dialing unsupported Multiaddress"); Err(TransportError::MultiaddrNotSupported(addr)) } } diff --git a/core/src/transport/memory.rs b/core/src/transport/memory.rs index 4c30ee9b65d1..bf88215dd43b 100644 --- a/core/src/transport/memory.rs +++ b/core/src/transport/memory.rs @@ -62,9 +62,8 @@ impl Hub { port } else { loop { - let port = match NonZeroU64::new(rand::random()) { - Some(p) => p, - None => continue, + let Some(port) = NonZeroU64::new(rand::random()) else { + continue; }; if !hub.contains_key(&port) { break port; @@ -184,16 +183,12 @@ impl Transport for MemoryTransport { id: ListenerId, addr: Multiaddr, ) -> Result<(), TransportError> { - let port = if let Ok(port) = parse_memory_addr(&addr) { - port - } else { - return Err(TransportError::MultiaddrNotSupported(addr)); - }; + let port = + parse_memory_addr(&addr).map_err(|_| TransportError::MultiaddrNotSupported(addr))?; - let (rx, port) = match HUB.register_port(port) { - Some((rx, port)) => (rx, port), - None => return Err(TransportError::Other(MemoryTransportError::Unreachable)), - }; + let (rx, port) = HUB + .register_port(port) + .ok_or(TransportError::Other(MemoryTransportError::Unreachable))?; let listener = Listener { id, diff --git a/core/src/upgrade.rs b/core/src/upgrade.rs index 7db1853b56ca..69561fbebd8e 100644 --- a/core/src/upgrade.rs +++ b/core/src/upgrade.rs @@ -64,7 +64,6 @@ mod error; mod pending; mod ready; mod select; -mod transfer; pub(crate) use apply::{ apply, apply_inbound, apply_outbound, InboundUpgradeApply, 
OutboundUpgradeApply, @@ -73,11 +72,7 @@ pub(crate) use error::UpgradeError; use futures::future::Future; pub use self::{ - denied::DeniedUpgrade, - pending::PendingUpgrade, - ready::ReadyUpgrade, - select::SelectUpgrade, - transfer::{read_length_prefixed, read_varint, write_length_prefixed, write_varint}, + denied::DeniedUpgrade, pending::PendingUpgrade, ready::ReadyUpgrade, select::SelectUpgrade, }; pub use crate::Negotiated; pub use multistream_select::{NegotiatedComplete, NegotiationError, ProtocolError, Version}; @@ -157,31 +152,3 @@ pub trait OutboundConnectionUpgrade: UpgradeInfo { /// The `info` is the identifier of the protocol, as produced by `protocol_info`. fn upgrade_outbound(self, socket: T, info: Self::Info) -> Self::Future; } - -// Blanket implementation for InboundConnectionUpgrade based on InboundUpgrade for backwards compatibility -impl InboundConnectionUpgrade for U -where - U: InboundUpgrade, -{ - type Output = >::Output; - type Error = >::Error; - type Future = >::Future; - - fn upgrade_inbound(self, socket: T, info: Self::Info) -> Self::Future { - self.upgrade_inbound(socket, info) - } -} - -// Blanket implementation for OutboundConnectionUpgrade based on OutboundUpgrade for backwards compatibility -impl OutboundConnectionUpgrade for U -where - U: OutboundUpgrade, -{ - type Output = >::Output; - type Error = >::Error; - type Future = >::Future; - - fn upgrade_outbound(self, socket: T, info: Self::Info) -> Self::Future { - self.upgrade_outbound(socket, info) - } -} diff --git a/core/src/upgrade/apply.rs b/core/src/upgrade/apply.rs index aefce686f013..15cb0348cf35 100644 --- a/core/src/upgrade/apply.rs +++ b/core/src/upgrade/apply.rs @@ -21,7 +21,6 @@ use crate::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeError}; use crate::{connection::ConnectedPoint, Negotiated}; use futures::{future::Either, prelude::*}; -use log::debug; use multistream_select::{self, DialerSelectFuture, ListenerSelectFuture}; use std::{mem, 
pin::Pin, task::Context, task::Poll}; @@ -141,11 +140,11 @@ where return Poll::Pending; } Poll::Ready(Ok(x)) => { - log::trace!("Upgraded inbound stream to {name}"); + tracing::trace!(upgrade=%name, "Upgraded inbound stream"); return Poll::Ready(Ok(x)); } Poll::Ready(Err(e)) => { - debug!("Failed to upgrade inbound stream to {name}"); + tracing::debug!(upgrade=%name, "Failed to upgrade inbound stream"); return Poll::Ready(Err(UpgradeError::Apply(e))); } } @@ -223,11 +222,11 @@ where return Poll::Pending; } Poll::Ready(Ok(x)) => { - log::trace!("Upgraded outbound stream to {name}",); + tracing::trace!(upgrade=%name, "Upgraded outbound stream"); return Poll::Ready(Ok(x)); } Poll::Ready(Err(e)) => { - debug!("Failed to upgrade outbound stream to {name}",); + tracing::debug!(upgrade=%name, "Failed to upgrade outbound stream",); return Poll::Ready(Err(UpgradeError::Apply(e))); } } diff --git a/core/src/upgrade/select.rs b/core/src/upgrade/select.rs index 19b8b7a93f7d..037045a2f29a 100644 --- a/core/src/upgrade/select.rs +++ b/core/src/upgrade/select.rs @@ -19,7 +19,10 @@ // DEALINGS IN THE SOFTWARE. 
use crate::either::EitherFuture; -use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use crate::upgrade::{ + InboundConnectionUpgrade, InboundUpgrade, OutboundConnectionUpgrade, OutboundUpgrade, + UpgradeInfo, +}; use either::Either; use futures::future; use std::iter::{Chain, Map}; @@ -84,6 +87,23 @@ where } } +impl InboundConnectionUpgrade for SelectUpgrade +where + A: InboundConnectionUpgrade, + B: InboundConnectionUpgrade, +{ + type Output = future::Either; + type Error = Either; + type Future = EitherFuture; + + fn upgrade_inbound(self, sock: C, info: Self::Info) -> Self::Future { + match info { + Either::Left(info) => EitherFuture::First(self.0.upgrade_inbound(sock, info)), + Either::Right(info) => EitherFuture::Second(self.1.upgrade_inbound(sock, info)), + } + } +} + impl OutboundUpgrade for SelectUpgrade where A: OutboundUpgrade, @@ -100,3 +120,20 @@ where } } } + +impl OutboundConnectionUpgrade for SelectUpgrade +where + A: OutboundConnectionUpgrade, + B: OutboundConnectionUpgrade, +{ + type Output = future::Either; + type Error = Either; + type Future = EitherFuture; + + fn upgrade_outbound(self, sock: C, info: Self::Info) -> Self::Future { + match info { + Either::Left(info) => EitherFuture::First(self.0.upgrade_outbound(sock, info)), + Either::Right(info) => EitherFuture::Second(self.1.upgrade_outbound(sock, info)), + } + } +} diff --git a/core/src/upgrade/transfer.rs b/core/src/upgrade/transfer.rs deleted file mode 100644 index 93aeb987c8a9..000000000000 --- a/core/src/upgrade/transfer.rs +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Contains some helper futures for creating upgrades. - -use futures::prelude::*; -use std::io; - -// TODO: these methods could be on an Ext trait to AsyncWrite - -/// Writes a message to the given socket with a length prefix appended to it. Also flushes the socket. -/// -/// > **Note**: Prepends a variable-length prefix indicate the length of the message. This is -/// > compatible with what [`read_length_prefixed`] expects. -pub async fn write_length_prefixed( - socket: &mut (impl AsyncWrite + Unpin), - data: impl AsRef<[u8]>, -) -> Result<(), io::Error> { - write_varint(socket, data.as_ref().len()).await?; - socket.write_all(data.as_ref()).await?; - socket.flush().await?; - - Ok(()) -} - -/// Writes a variable-length integer to the `socket`. -/// -/// > **Note**: Does **NOT** flush the socket. 
-pub async fn write_varint( - socket: &mut (impl AsyncWrite + Unpin), - len: usize, -) -> Result<(), io::Error> { - let mut len_data = unsigned_varint::encode::usize_buffer(); - let encoded_len = unsigned_varint::encode::usize(len, &mut len_data).len(); - socket.write_all(&len_data[..encoded_len]).await?; - - Ok(()) -} - -/// Reads a variable-length integer from the `socket`. -/// -/// As a special exception, if the `socket` is empty and EOFs right at the beginning, then we -/// return `Ok(0)`. -/// -/// > **Note**: This function reads bytes one by one from the `socket`. It is therefore encouraged -/// > to use some sort of buffering mechanism. -pub async fn read_varint(socket: &mut (impl AsyncRead + Unpin)) -> Result { - let mut buffer = unsigned_varint::encode::usize_buffer(); - let mut buffer_len = 0; - - loop { - match socket.read(&mut buffer[buffer_len..buffer_len + 1]).await? { - 0 => { - // Reaching EOF before finishing to read the length is an error, unless the EOF is - // at the very beginning of the substream, in which case we assume that the data is - // empty. - if buffer_len == 0 { - return Ok(0); - } else { - return Err(io::ErrorKind::UnexpectedEof.into()); - } - } - n => debug_assert_eq!(n, 1), - } - - buffer_len += 1; - - match unsigned_varint::decode::usize(&buffer[..buffer_len]) { - Ok((len, _)) => return Ok(len), - Err(unsigned_varint::decode::Error::Overflow) => { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - "overflow in variable-length integer", - )); - } - // TODO: why do we have a `__Nonexhaustive` variant in the error? I don't know how to process it - // Err(unsigned_varint::decode::Error::Insufficient) => {} - Err(_) => {} - } - } -} - -/// Reads a length-prefixed message from the given socket. -/// -/// The `max_size` parameter is the maximum size in bytes of the message that we accept. This is -/// necessary in order to avoid DoS attacks where the remote sends us a message of several -/// gigabytes. 
-/// -/// > **Note**: Assumes that a variable-length prefix indicates the length of the message. This is -/// > compatible with what [`write_length_prefixed`] does. -pub async fn read_length_prefixed( - socket: &mut (impl AsyncRead + Unpin), - max_size: usize, -) -> io::Result> { - let len = read_varint(socket).await?; - if len > max_size { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - format!("Received data size ({len} bytes) exceeds maximum ({max_size} bytes)"), - )); - } - - let mut buf = vec![0; len]; - socket.read_exact(&mut buf).await?; - - Ok(buf) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn write_length_prefixed_works() { - let data = (0..rand::random::() % 10_000) - .map(|_| rand::random::()) - .collect::>(); - let mut out = vec![0; 10_000]; - - futures::executor::block_on(async { - let mut socket = futures::io::Cursor::new(&mut out[..]); - - write_length_prefixed(&mut socket, &data).await.unwrap(); - socket.close().await.unwrap(); - }); - - let (out_len, out_data) = unsigned_varint::decode::usize(&out).unwrap(); - assert_eq!(out_len, data.len()); - assert_eq!(&out_data[..out_len], &data[..]); - } - - // TODO: rewrite these tests - /* - #[test] - fn read_one_works() { - let original_data = (0..rand::random::() % 10_000) - .map(|_| rand::random::()) - .collect::>(); - - let mut len_buf = unsigned_varint::encode::usize_buffer(); - let len_buf = unsigned_varint::encode::usize(original_data.len(), &mut len_buf); - - let mut in_buffer = len_buf.to_vec(); - in_buffer.extend_from_slice(&original_data); - - let future = read_one_then(Cursor::new(in_buffer), 10_000, (), move |out, ()| -> Result<_, ReadOneError> { - assert_eq!(out, original_data); - Ok(()) - }); - - futures::executor::block_on(future).unwrap(); - } - - #[test] - fn read_one_zero_len() { - let future = read_one_then(Cursor::new(vec![0]), 10_000, (), move |out, ()| -> Result<_, ReadOneError> { - assert!(out.is_empty()); - Ok(()) - }); - - 
futures::executor::block_on(future).unwrap(); - } - - #[test] - fn read_checks_length() { - let mut len_buf = unsigned_varint::encode::u64_buffer(); - let len_buf = unsigned_varint::encode::u64(5_000, &mut len_buf); - - let mut in_buffer = len_buf.to_vec(); - in_buffer.extend((0..5000).map(|_| 0)); - - let future = read_one_then(Cursor::new(in_buffer), 100, (), move |_, ()| -> Result<_, ReadOneError> { - Ok(()) - }); - - match futures::executor::block_on(future) { - Err(ReadOneError::TooLarge { .. }) => (), - _ => panic!(), - } - } - - #[test] - fn read_one_accepts_empty() { - let future = read_one_then(Cursor::new([]), 10_000, (), move |out, ()| -> Result<_, ReadOneError> { - assert!(out.is_empty()); - Ok(()) - }); - - futures::executor::block_on(future).unwrap(); - } - - #[test] - fn read_one_eof_before_len() { - let future = read_one_then(Cursor::new([0x80]), 10_000, (), move |_, ()| -> Result<(), ReadOneError> { - unreachable!() - }); - - match futures::executor::block_on(future) { - Err(ReadOneError::Io(ref err)) if err.kind() == io::ErrorKind::UnexpectedEof => (), - _ => panic!() - } - }*/ -} diff --git a/core/tests/transport_upgrade.rs b/core/tests/transport_upgrade.rs index 193ee73cbc86..a88720516185 100644 --- a/core/tests/transport_upgrade.rs +++ b/core/tests/transport_upgrade.rs @@ -20,7 +20,9 @@ use futures::prelude::*; use libp2p_core::transport::{ListenerId, MemoryTransport, Transport}; -use libp2p_core::upgrade::{self, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_core::upgrade::{ + self, InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo, +}; use libp2p_identity as identity; use libp2p_mplex::MplexConfig; use libp2p_noise as noise; @@ -40,7 +42,7 @@ impl UpgradeInfo for HelloUpgrade { } } -impl InboundUpgrade for HelloUpgrade +impl InboundConnectionUpgrade for HelloUpgrade where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { @@ -58,7 +60,7 @@ where } } -impl OutboundUpgrade for HelloUpgrade +impl 
OutboundConnectionUpgrade for HelloUpgrade where C: AsyncWrite + AsyncRead + Send + Unpin + 'static, { @@ -108,11 +110,11 @@ fn upgrade_pipeline() { let server = async move { loop { - let (upgrade, _send_back_addr) = - match listener_transport.select_next_some().await.into_incoming() { - Some(u) => u, - None => continue, - }; + let Some((upgrade, _send_back_addr)) = + listener_transport.select_next_some().await.into_incoming() + else { + continue; + }; let (peer, _mplex) = upgrade.await.unwrap(); assert_eq!(peer, dialer_id); } diff --git a/docs/coding-guidelines.md b/docs/coding-guidelines.md index aef8dd6986aa..bacbfe9509e4 100644 --- a/docs/coding-guidelines.md +++ b/docs/coding-guidelines.md @@ -28,7 +28,7 @@ Below is a set of coding guidelines followed across the rust-libp2p code base. ## Hierarchical State Machines -If you sqint, rust-libp2p is just a big hierarchy of [state +If you squint, rust-libp2p is just a big hierarchy of [state machines](https://en.wikipedia.org/wiki/Finite-state_machine) where parents pass events down to their children and children pass events up to their parents. @@ -167,7 +167,7 @@ impl Stream for SomeStateMachine { } ``` -This priotization provides: +This prioritization provides: - Low memory footprint as local queues (here `events_to_return_to_parent`) stay small. - Low latency as accepted local work is not stuck in queues. - DOS defense as a remote does not control the size of the local queue, nor starves local work with its remote work. @@ -195,7 +195,7 @@ through a side-channel. ### Local queues As for channels shared across potentially concurrent actors (e.g. future tasks -or OS threads), the same applies for queues owned by a single actor only. E.g. +or OS threads), the same applies to queues owned by a single actor only. E.g. reading events from a socket into a `Vec` without some mechanism bounding the size of that `Vec` again can lead to unbounded memory growth and high latencies. 
@@ -241,7 +241,7 @@ shows a speed up when running it concurrently. ## Use `async/await` for sequential execution only Using `async/await` for sequential execution makes things significantly simpler. -Though unfortunately using `async/await` does not allow accesing methods on the +Though unfortunately using `async/await` does not allow accessing methods on the object being `await`ed unless paired with some synchronization mechanism like an `Arc>`. @@ -308,7 +308,7 @@ response and a previous request. For example, if a user requests two new connect peer, they should be able to match each new connection to the corresponding previous connection request without having to guess. -When accepting a **command** that eventually results in a response through an event require that +When accepting a **command** that eventually results in a response through an event requires that command to contain a unique ID, which is later on contained in the asynchronous response event. One such example is the `Swarm` accepting a `ToSwarm::Dial` from the `NetworkBehaviour`. diff --git a/docs/maintainer-handbook.md b/docs/maintainer-handbook.md index 8ec6e6e8c6ad..6d36f6fe77c0 100644 --- a/docs/maintainer-handbook.md +++ b/docs/maintainer-handbook.md @@ -26,6 +26,14 @@ Once a PR fulfills all merge requirements (approvals, passing CI, etc), applying In case of a trivial code change, maintainers may choose to apply the `trivial` label. This will have mergify approve your PR, thus fulfilling all requirements to automatically queue a PR for merging. +## Changelog entries + +Our CI checks that each crate which is modified gets a changelog entry. +Whilst this is a good default safety-wise, it creates a lot of false-positives for changes that are internal and don't need a changelog entry. + +For PRs that in the categories `chore`, `deps`, `refactor` and `docs`, this check is disabled automatically. 
+Any other PR needs to explicitly disable this check if desired by applying the `internal-change` label. + ## Dependencies We version our `Cargo.lock` file for better visibility into which dependencies are required for a functional build. diff --git a/docs/release.md b/docs/release.md index 5b4d32aedafc..50b7b0605c7d 100644 --- a/docs/release.md +++ b/docs/release.md @@ -17,43 +17,34 @@ Non-breaking changes are typically merged very quickly and often released as pat Every crate that we publish on `crates.io` has a `CHANGELOG.md` file. Substantial PRs should add an entry to each crate they modify. -The next unreleased version is tagged with ` - unreleased`, for example: `0.17.0 - unreleased`. +We have a CI check[^1] that enforces adding a changelog entry if you modify code in a particular crate. +In case the current version is already released (we also check that in CI), you'll have to add a new header at the top. +For example, the top-listed version might be `0.17.3` but it is already released. +In that case, add a new heading `## 0.17.4` with your changelog entry in case it is a non-breaking change. -In case there isn't a version with an ` - unreleased` postfix yet, add one for the next version. -The next version number depends on the impact of your change (breaking vs non-breaking, see above). - -If you are making a non-breaking change, please also bump the version number: - -- in the `Cargo.toml` manifest of the respective crate -- in the `[workspace.dependencies]` section of the workspace `Cargo.toml` manifest - -For breaking changes, a changelog entry itself is sufficient. -Bumping the version in the `Cargo.toml` file would lead to many merge conflicts once we decide to merge them. -Hence, we are going to bump those versions once we work through the milestone that collects the breaking changes. +The version in the crate's `Cargo.toml` and the top-most version in the `CHANGELOG.md` file always have to be in sync. 
+Additionally, we also enforce that all crates always depend on the latest version of other workspace-crates through workspace inheritance. +As a consequence, you'll also have to bump the version in `[workspace.dependencies]` in the workspace `Cargo.toml` manifest. ## Releasing one or more crates +The above changelog-management strategy means `master` is always in a state where we can make a release. + ### Prerequisites - [cargo release](https://github.com/crate-ci/cargo-release/) ### Steps -1. Remove the ` - unreleased` tag for each crate to be released in the respective `CHANGELOG.md`. - Create a pull request with the changes against the rust-libp2p `master` branch. - -2. Once merged, run the two commands below on the (squash-) merged commit on the `master` branch. +1. Run the two commands below on the (squash-) merged commit on the `master` branch. 1. `cargo release publish --execute` 2. `cargo release tag --sign-tag --execute` -3. Confirm that `cargo release` tagged the commit correctly via `git push - $YOUR_ORIGIN --tag --dry-run` and then push the new tags via `git push - $YOUR_ORIGIN --tag`. Make sure not to push unrelated git tags. - - Note that dropping the `--no-push` flag on `cargo release` might as well do - the trick. +2. Confirm that `cargo release` tagged the commit correctly via `git push $YOUR_ORIGIN --tag --dry-run` + Push the new tags via `git push $YOUR_ORIGIN --tag`. + Make sure not to push unrelated git tags. ## Patch release @@ -65,15 +56,17 @@ Hence, we are going to bump those versions once we work through the milestone th ## Dealing with alphas -Unfortunately, `cargo` has a rather uninutitive behaviour when it comes to dealing with pre-releases like `0.1.0-alpha`. +Unfortunately, `cargo` has a rather unintuitive behaviour when it comes to dealing with pre-releases like `0.1.0-alpha`. 
See this internals thread for some context: https://internals.rust-lang.org/t/changing-cargo-semver-compatibility-for-pre-releases In short, cargo will automatically update from `0.1.0-alpha.1` to `0.1.0-alpha.2` UNLESS you pin the version directly with `=0.1.0-alpha.1`. However, from a semver perspective, changes between pre-releases can be breaking. -To avoid accidential breaking changes for our users, we employ the following convention for alpha releases: +To avoid accidental breaking changes for our users, we employ the following convention for alpha releases: - For a breaking change in a crate with an alpha release, bump the "minor" version but retain the "alpha" tag. Example: `0.1.0-alpha` to `0.2.0-alpha`. - For a non-breaking change in a crate with an alpha release, bump or append number to the "alpha" tag. Example: `0.1.0-alpha` to `0.1.0-alpha.1`. + +[^1]: See [ci.yml](../.github/workflows/ci.yml) and look for "Ensure manifest and CHANGELOG are properly updated". diff --git a/examples/README.md b/examples/README.md index 28e085587b7d..0a3e55aed39a 100644 --- a/examples/README.md +++ b/examples/README.md @@ -20,6 +20,6 @@ A set of examples showcasing how to use rust-libp2p. - [IPFS Private](./ipfs-private) Implementation using the gossipsub, ping and identify protocols to implement the ipfs private swarms feature. -- [Ping](./ping) Small `ping` clone, sending a ping to a peer, expecting a pong as a response. See [tutorial](../src/tutorials/ping.rs) for a step-by-step guide building the example. +- [Ping](./ping) Small `ping` clone, sending a ping to a peer, expecting a pong as a response. See [tutorial](../libp2p/src/tutorials/ping.rs) for a step-by-step guide building the example. - [Rendezvous](./rendezvous) Rendezvous Protocol. See [specs](https://github.com/libp2p/specs/blob/master/rendezvous/README.md). 
diff --git a/examples/autonat/Cargo.toml b/examples/autonat/Cargo.toml index 332f77f04a7f..642e621c2575 100644 --- a/examples/autonat/Cargo.toml +++ b/examples/autonat/Cargo.toml @@ -5,12 +5,16 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] -tokio = { version = "1.32", features = ["full"] } -clap = { version = "4.3.23", features = ["derive"] } -env_logger = "0.10.0" -futures = "0.3.28" +tokio = { version = "1.36", features = ["full"] } +clap = { version = "4.4.16", features = ["derive"] } +futures = "0.3.30" libp2p = { path = "../../libp2p", features = ["tokio", "tcp", "noise", "yamux", "autonat", "identify", "macros"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/autonat/src/bin/autonat_client.rs b/examples/autonat/src/bin/autonat_client.rs index eeb39ec52dea..3fb25aa62221 100644 --- a/examples/autonat/src/bin/autonat_client.rs +++ b/examples/autonat/src/bin/autonat_client.rs @@ -21,14 +21,15 @@ #![doc = include_str!("../../README.md")] use clap::Parser; -use futures::prelude::*; +use futures::StreamExt; use libp2p::core::multiaddr::Protocol; -use libp2p::core::{upgrade::Version, Multiaddr, Transport}; -use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}; +use libp2p::core::Multiaddr; +use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; use libp2p::{autonat, identify, identity, noise, tcp, yamux, PeerId}; use std::error::Error; use std::net::Ipv4Addr; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p autonat")] @@ -45,22 +46,23 @@ struct Opt { #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = 
PeerId::from(local_key.public()); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|key| Behaviour::new(key.public()))? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); - let transport = tcp::tokio::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key)?) - .multiplex(yamux::Config::default()) - .boxed(); - - let behaviour = Behaviour::new(local_key.public()); - - let mut swarm = SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id).build(); swarm.listen_on( Multiaddr::empty() .with(Protocol::Ip4(Ipv4Addr::UNSPECIFIED)) diff --git a/examples/autonat/src/bin/autonat_server.rs b/examples/autonat/src/bin/autonat_server.rs index 065708f745f5..44a53f0d17fa 100644 --- a/examples/autonat/src/bin/autonat_server.rs +++ b/examples/autonat/src/bin/autonat_server.rs @@ -21,12 +21,14 @@ #![doc = include_str!("../../README.md")] use clap::Parser; -use futures::prelude::*; -use libp2p::core::{multiaddr::Protocol, upgrade::Version, Multiaddr, Transport}; -use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}; -use libp2p::{autonat, identify, identity, noise, tcp, yamux, PeerId}; +use futures::StreamExt; +use libp2p::core::{multiaddr::Protocol, Multiaddr}; +use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; +use libp2p::{autonat, identify, identity, noise, tcp, yamux}; use std::error::Error; use std::net::Ipv4Addr; +use std::time::Duration; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p autonat")] @@ -37,22 +39,23 @@ struct Opt { #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id 
= PeerId::from(local_key.public()); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|key| Behaviour::new(key.public()))? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); - let transport = tcp::tokio::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key)?) - .multiplex(yamux::Config::default()) - .boxed(); - - let behaviour = Behaviour::new(local_key.public()); - - let mut swarm = SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id).build(); swarm.listen_on( Multiaddr::empty() .with(Protocol::Ip4(Ipv4Addr::UNSPECIFIED)) diff --git a/examples/browser-webrtc/Cargo.toml b/examples/browser-webrtc/Cargo.toml index f6b47d4ded9f..79068cdca877 100644 --- a/examples/browser-webrtc/Cargo.toml +++ b/examples/browser-webrtc/Cargo.toml @@ -9,34 +9,37 @@ repository = "https://github.com/libp2p/rust-libp2p" rust-version = { workspace = true } version = "0.1.0" +[package.metadata.release] +release = false + [lib] crate-type = ["cdylib"] [dependencies] -anyhow = "1.0.72" -env_logger = "0.10" -futures = "0.3.28" -log = "0.4" +anyhow = "1.0.79" +futures = "0.3.30" rand = "0.8" +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -axum = "0.6.19" -libp2p = { path = "../../libp2p", features = [ "ed25519", "macros", "ping", "wasm-bindgen", "tokio"] } +axum = "0.7.4" +libp2p = { path = "../../libp2p", features = [ "ed25519", "macros", "ping", "tokio"] } libp2p-webrtc = { workspace = true, features = ["tokio"] } -rust-embed = { version = "8.0.0", features = ["include-exclude", "interpolate-folder-path"] } -tokio = { version = "1.29", features = ["macros", "net", "rt", "signal"] } +rust-embed = { version = "8.2.0", features = ["include-exclude", 
"interpolate-folder-path"] } +tokio = { version = "1.36", features = ["macros", "net", "rt", "signal"] } tokio-util = { version = "0.7", features = ["compat"] } tower = "0.4" -tower-http = { version = "0.4.0", features = ["cors"] } +tower-http = { version = "0.5.1", features = ["cors"] } mime_guess = "2.0.4" [target.'cfg(target_arch = "wasm32")'.dependencies] -js-sys = "0.3.64" +js-sys = "0.3.67" libp2p = { path = "../../libp2p", features = [ "ed25519", "macros", "ping", "wasm-bindgen"] } libp2p-webrtc-websys = { workspace = true } -wasm-bindgen = "0.2.84" -wasm-bindgen-futures = "0.4.37" -wasm-logger = { version = "0.2.0" } +tracing-wasm = "0.2.1" +wasm-bindgen = "0.2.90" +wasm-bindgen-futures = "0.4.41" web-sys = { version = "0.3", features = ['Document', 'Element', 'HtmlElement', 'Node', 'Response', 'Window'] } [lints] diff --git a/examples/browser-webrtc/README.md b/examples/browser-webrtc/README.md index d44cf8799054..eec2c9c0494c 100644 --- a/examples/browser-webrtc/README.md +++ b/examples/browser-webrtc/README.md @@ -5,6 +5,8 @@ It uses [wasm-pack](https://rustwasm.github.io/docs/wasm-pack/) to build the pro ## Running the example +Ensure you have `wasm-pack` [installed](https://rustwasm.github.io/wasm-pack/). + 1. 
Build the client library: ```shell wasm-pack build --target web --out-dir static diff --git a/examples/browser-webrtc/src/lib.rs b/examples/browser-webrtc/src/lib.rs index 1a9856dadcc5..9499ccbd1583 100644 --- a/examples/browser-webrtc/src/lib.rs +++ b/examples/browser-webrtc/src/lib.rs @@ -3,67 +3,73 @@ use futures::StreamExt; use js_sys::Date; use libp2p::core::Multiaddr; -use libp2p::identity::{Keypair, PeerId}; use libp2p::ping; -use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}; -use std::convert::From; +use libp2p::swarm::SwarmEvent; +use libp2p_webrtc_websys as webrtc_websys; use std::io; +use std::time::Duration; use wasm_bindgen::prelude::*; use web_sys::{Document, HtmlElement}; #[wasm_bindgen] pub async fn run(libp2p_endpoint: String) -> Result<(), JsError> { - wasm_logger::init(wasm_logger::Config::default()); + tracing_wasm::set_as_global_default(); + + let ping_duration = Duration::from_secs(30); let body = Body::from_current_window()?; - body.append_p("Let's ping the WebRTC Server!")?; - - let local_key = Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - let mut swarm = SwarmBuilder::with_wasm_executor( - libp2p_webrtc_websys::Transport::new(libp2p_webrtc_websys::Config::new(&local_key)).boxed(), - Behaviour { - ping: ping::Behaviour::new(ping::Config::new()), - keep_alive: keep_alive::Behaviour, - }, - local_peer_id, - ) - .build(); - - log::info!("Initialize swarm with identity: {local_peer_id}"); + body.append_p(&format!( + "Let's ping the rust-libp2p server over WebRTC for {:?}:", + ping_duration + ))?; + + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_wasm_bindgen() + .with_other_transport(|key| { + webrtc_websys::Transport::new(webrtc_websys::Config::new(&key)) + })? + .with_behaviour(|_| ping::Behaviour::new(ping::Config::new()))? 
+ .with_swarm_config(|c| c.with_idle_connection_timeout(ping_duration)) + .build(); let addr = libp2p_endpoint.parse::()?; - log::info!("Dialing {addr}"); + tracing::info!("Dialing {addr}"); swarm.dial(addr)?; loop { match swarm.next().await.unwrap() { - SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { result: Err(e), .. })) => { - log::error!("Ping failed: {:?}", e); + SwarmEvent::Behaviour(ping::Event { result: Err(e), .. }) => { + tracing::error!("Ping failed: {:?}", e); break; } - SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { + SwarmEvent::Behaviour(ping::Event { peer, result: Ok(rtt), .. - })) => { - log::info!("Ping successful: RTT: {rtt:?}, from {peer}"); + }) => { + tracing::info!("Ping successful: RTT: {rtt:?}, from {peer}"); body.append_p(&format!("RTT: {rtt:?} at {}", Date::new_0().to_string()))?; } - evt => log::info!("Swarm event: {:?}", evt), + SwarmEvent::ConnectionClosed { + cause: Some(cause), .. + } => { + tracing::info!("Swarm event: {:?}", cause); + + if let libp2p::swarm::ConnectionError::KeepAliveTimeout = cause { + body.append_p("All done with pinging! 
")?; + + break; + } + body.append_p(&format!("Connection closed due to: {:?}", cause))?; + } + evt => tracing::info!("Swarm event: {:?}", evt), } } Ok(()) } -#[derive(NetworkBehaviour)] -struct Behaviour { - ping: ping::Behaviour, - keep_alive: keep_alive::Behaviour, -} - /// Convenience wrapper around the current document body struct Body { body: HtmlElement, diff --git a/examples/browser-webrtc/src/main.rs b/examples/browser-webrtc/src/main.rs index 8a4034a436ee..7f06b0d0d998 100644 --- a/examples/browser-webrtc/src/main.rs +++ b/examples/browser-webrtc/src/main.rs @@ -10,37 +10,39 @@ use futures::StreamExt; use libp2p::{ core::muxing::StreamMuxerBox, core::Transport, - identity, multiaddr::{Multiaddr, Protocol}, ping, - swarm::{SwarmBuilder, SwarmEvent}, + swarm::SwarmEvent, }; use libp2p_webrtc as webrtc; use rand::thread_rng; use std::net::{Ipv4Addr, SocketAddr}; use std::time::Duration; +use tokio::net::TcpListener; use tower_http::cors::{Any, CorsLayer}; #[tokio::main] async fn main() -> anyhow::Result<()> { - env_logger::builder() - .parse_filters("browser_webrtc_example=debug,libp2p_webrtc=info,libp2p_ping=debug") - .parse_default_env() - .init(); - - let id_keys = identity::Keypair::generate_ed25519(); - let local_peer_id = id_keys.public().to_peer_id(); - let transport = webrtc::tokio::Transport::new( - id_keys, - webrtc::tokio::Certificate::generate(&mut thread_rng())?, - ) - .map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn))) - .boxed(); - - let mut swarm = - SwarmBuilder::with_tokio_executor(transport, ping::Behaviour::default(), local_peer_id) - .idle_connection_timeout(Duration::from_secs(30)) // Allows us to observe the pings. 
- .build(); + let _ = tracing_subscriber::fmt() + .with_env_filter("browser_webrtc_example=debug,libp2p_webrtc=info,libp2p_ping=debug") + .try_init(); + + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_other_transport(|id_keys| { + Ok(webrtc::tokio::Transport::new( + id_keys.clone(), + webrtc::tokio::Certificate::generate(&mut thread_rng())?, + ) + .map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn)))) + })? + .with_behaviour(|_| ping::Behaviour::default())? + .with_swarm_config(|cfg| { + cfg.with_idle_connection_timeout( + Duration::from_secs(u64::MAX), // Allows us to observe the pings. + ) + }) + .build(); let address_webrtc = Multiaddr::from(Ipv4Addr::UNSPECIFIED) .with(Protocol::Udp(0)) @@ -54,11 +56,13 @@ async fn main() -> anyhow::Result<()> { .iter() .any(|e| e == Protocol::Ip4(Ipv4Addr::LOCALHOST)) { - log::debug!("Ignoring localhost address to make sure the example works in Firefox"); + tracing::debug!( + "Ignoring localhost address to make sure the example works in Firefox" + ); continue; } - log::info!("Listening on: {address}"); + tracing::info!(%address, "Listening"); break address; } @@ -72,7 +76,7 @@ async fn main() -> anyhow::Result<()> { loop { tokio::select! { swarm_event = swarm.next() => { - log::trace!("Swarm Event: {:?}", swarm_event) + tracing::trace!(?swarm_event) }, _ = tokio::signal::ctrl_c() => { break; @@ -89,9 +93,8 @@ struct StaticFiles; /// Serve the Multiaddr we are listening on and the host files. 
pub(crate) async fn serve(libp2p_transport: Multiaddr) { - let listen_addr = match libp2p_transport.iter().next() { - Some(Protocol::Ip4(addr)) => addr, - _ => panic!("Expected 1st protocol to be IP4"), + let Some(Protocol::Ip4(listen_addr)) = libp2p_transport.iter().next() else { + panic!("Expected 1st protocol to be IP4") }; let server = Router::new() @@ -108,12 +111,14 @@ pub(crate) async fn serve(libp2p_transport: Multiaddr) { let addr = SocketAddr::new(listen_addr.into(), 8080); - log::info!("Serving client files at http://{addr}"); + tracing::info!(url=%format!("http://{addr}"), "Serving client files at url"); - axum::Server::bind(&addr) - .serve(server.into_make_service()) - .await - .unwrap(); + axum::serve( + TcpListener::bind((listen_addr, 8080)).await.unwrap(), + server.into_make_service(), + ) + .await + .unwrap(); } #[derive(Clone)] @@ -139,7 +144,7 @@ async fn get_index( /// Serves the static files generated by `wasm-pack`. async fn get_static_file(Path(path): Path) -> Result { - log::debug!("Serving static file: {path}"); + tracing::debug!(file_path=%path, "Serving static file"); let content = StaticFiles::get(&path).ok_or(StatusCode::NOT_FOUND)?.data; let content_type = mime_guess::from_path(path) diff --git a/examples/chat/Cargo.toml b/examples/chat/Cargo.toml index cee9e553e279..a16c930e5b3a 100644 --- a/examples/chat/Cargo.toml +++ b/examples/chat/Cargo.toml @@ -5,12 +5,16 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] -tokio = { version = "1.32", features = ["full"] } +tokio = { version = "1.36", features = ["full"] } async-trait = "0.1" -env_logger = "0.10.0" -futures = "0.3.28" +futures = "0.3.30" libp2p = { path = "../../libp2p", features = [ "tokio", "gossipsub", "mdns", "noise", "macros", "tcp", "yamux", "quic"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/chat/src/main.rs 
b/examples/chat/src/main.rs index 312ca2d30870..c785d301c2fe 100644 --- a/examples/chat/src/main.rs +++ b/examples/chat/src/main.rs @@ -20,19 +20,14 @@ #![doc = include_str!("../README.md")] -use futures::{future::Either, stream::StreamExt}; -use libp2p::{ - core::{muxing::StreamMuxerBox, transport::OrTransport, upgrade}, - gossipsub, identity, mdns, noise, quic, - swarm::NetworkBehaviour, - swarm::{SwarmBuilder, SwarmEvent}, - tcp, yamux, PeerId, Transport, -}; +use futures::stream::StreamExt; +use libp2p::{gossipsub, mdns, noise, swarm::NetworkBehaviour, swarm::SwarmEvent, tcp, yamux}; use std::collections::hash_map::DefaultHasher; use std::error::Error; use std::hash::{Hash, Hasher}; use std::time::Duration; use tokio::{io, io::AsyncBufReadExt, select}; +use tracing_subscriber::EnvFilter; // We create a custom network behaviour that combines Gossipsub and Mdns. #[derive(NetworkBehaviour)] @@ -43,58 +38,51 @@ struct MyBehaviour { #[tokio::main] async fn main() -> Result<(), Box> { - // Create a random PeerId - env_logger::init(); - let id_keys = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(id_keys.public()); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); - // Set up an encrypted DNS-enabled TCP Transport over the yamux protocol. 
- let tcp_transport = tcp::tokio::Transport::new(tcp::Config::default().nodelay(true)) - .upgrade(upgrade::Version::V1Lazy) - .authenticate(noise::Config::new(&id_keys).expect("signing libp2p-noise static keypair")) - .multiplex(yamux::Config::default()) - .timeout(std::time::Duration::from_secs(20)) - .boxed(); - let quic_transport = quic::tokio::Transport::new(quic::Config::new(&id_keys)); - let transport = OrTransport::new(quic_transport, tcp_transport) - .map(|either_output, _| match either_output { - Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - }) - .boxed(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_quic() + .with_behaviour(|key| { + // To content-address message, we can take the hash of message and use it as an ID. + let message_id_fn = |message: &gossipsub::Message| { + let mut s = DefaultHasher::new(); + message.data.hash(&mut s); + gossipsub::MessageId::from(s.finish().to_string()) + }; - // To content-address message, we can take the hash of message and use it as an ID. - let message_id_fn = |message: &gossipsub::Message| { - let mut s = DefaultHasher::new(); - message.data.hash(&mut s); - gossipsub::MessageId::from(s.finish().to_string()) - }; + // Set a custom gossipsub configuration + let gossipsub_config = gossipsub::ConfigBuilder::default() + .heartbeat_interval(Duration::from_secs(10)) // This is set to aid debugging by not cluttering the log space + .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message signing) + .message_id_fn(message_id_fn) // content-address messages. No two messages of the same content will be propagated. 
+ .build() + .map_err(|msg| io::Error::new(io::ErrorKind::Other, msg))?; // Temporary hack because `build` does not return a proper `std::error::Error`. - // Set a custom gossipsub configuration - let gossipsub_config = gossipsub::ConfigBuilder::default() - .heartbeat_interval(Duration::from_secs(10)) // This is set to aid debugging by not cluttering the log space - .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message signing) - .message_id_fn(message_id_fn) // content-address messages. No two messages of the same content will be propagated. - .build() - .expect("Valid config"); + // build a gossipsub network behaviour + let gossipsub = gossipsub::Behaviour::new( + gossipsub::MessageAuthenticity::Signed(key.clone()), + gossipsub_config, + )?; + + let mdns = + mdns::tokio::Behaviour::new(mdns::Config::default(), key.public().to_peer_id())?; + Ok(MyBehaviour { gossipsub, mdns }) + })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); - // build a gossipsub network behaviour - let mut gossipsub = gossipsub::Behaviour::new( - gossipsub::MessageAuthenticity::Signed(id_keys), - gossipsub_config, - ) - .expect("Correct configuration"); // Create a Gossipsub topic let topic = gossipsub::IdentTopic::new("test-net"); // subscribes to our topic - gossipsub.subscribe(&topic)?; - - // Create a Swarm to manage peers and events - let mut swarm = { - let mdns = mdns::tokio::Behaviour::new(mdns::Config::default(), local_peer_id)?; - let behaviour = MyBehaviour { gossipsub, mdns }; - SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id).build() - }; + swarm.behaviour_mut().gossipsub.subscribe(&topic)?; // Read full lines from stdin let mut stdin = io::BufReader::new(io::stdin()).lines(); diff --git a/examples/dcutr/Cargo.toml b/examples/dcutr/Cargo.toml index 49e1ada2e968..8443e9a59c7a 100644 --- a/examples/dcutr/Cargo.toml +++ 
b/examples/dcutr/Cargo.toml @@ -5,13 +5,18 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] -clap = { version = "4.3.23", features = ["derive"] } -env_logger = "0.10.0" -futures = "0.3.28" +clap = { version = "4.4.16", features = ["derive"] } +futures = "0.3.30" futures-timer = "3.0" -libp2p = { path = "../../libp2p", features = [ "async-std", "dns", "dcutr", "identify", "macros", "noise", "ping", "quic", "relay", "rendezvous", "tcp", "tokio", "yamux"] } +libp2p = { path = "../../libp2p", features = [ "dns", "dcutr", "identify", "macros", "noise", "ping", "quic", "relay", "rendezvous", "tcp", "tokio", "yamux"] } log = "0.4" +tokio = { version = "1.36", features = ["macros", "net", "rt", "signal"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/dcutr/src/main.rs b/examples/dcutr/src/main.rs index 099867df7444..51df670f8a70 100644 --- a/examples/dcutr/src/main.rs +++ b/examples/dcutr/src/main.rs @@ -21,25 +21,16 @@ #![doc = include_str!("../README.md")] use clap::Parser; -use futures::{ - executor::{block_on, ThreadPool}, - future::{Either, FutureExt}, - stream::StreamExt, -}; +use futures::{executor::block_on, future::FutureExt, stream::StreamExt}; use libp2p::{ - core::{ - multiaddr::{Multiaddr, Protocol}, - muxing::StreamMuxerBox, - transport::Transport, - upgrade, - }, - dcutr, dns, identify, identity, noise, ping, quic, relay, - swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, + core::multiaddr::{Multiaddr, Protocol}, + dcutr, identify, identity, noise, ping, relay, + swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, PeerId, }; -use log::info; -use std::error::Error; use std::str::FromStr; +use std::{error::Error, time::Duration}; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p DCUtR client")] @@ -78,37 +69,14 @@ impl FromStr for Mode { } } -fn main() -> Result<(), Box> 
{ - env_logger::init(); +#[tokio::main] +async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opts = Opts::parse(); - let local_key = generate_ed25519(opts.secret_key_seed); - let local_peer_id = PeerId::from(local_key.public()); - - let (relay_transport, client) = relay::client::new(local_peer_id); - - let transport = { - let relay_tcp_quic_transport = relay_transport - .or_transport(tcp::async_io::Transport::new( - tcp::Config::default().port_reuse(true), - )) - .upgrade(upgrade::Version::V1) - .authenticate(noise::Config::new(&local_key).unwrap()) - .multiplex(yamux::Config::default()) - .or_transport(quic::async_std::Transport::new(quic::Config::new( - &local_key, - ))); - - block_on(dns::async_std::Transport::system(relay_tcp_quic_transport)) - .unwrap() - .map(|either_output, _| match either_output { - Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - }) - .boxed() - }; - #[derive(NetworkBehaviour)] struct Behaviour { relay_client: relay::client::Behaviour, @@ -117,21 +85,28 @@ fn main() -> Result<(), Box> { dcutr: dcutr::Behaviour, } - let behaviour = Behaviour { - relay_client: client, - ping: ping::Behaviour::new(ping::Config::new()), - identify: identify::Behaviour::new(identify::Config::new( - "/TODO/0.0.1".to_string(), - local_key.public(), - )), - dcutr: dcutr::Behaviour::new(local_peer_id), - }; - - let mut swarm = match ThreadPool::new() { - Ok(tp) => SwarmBuilder::with_executor(transport, behaviour, local_peer_id, tp), - Err(_) => SwarmBuilder::without_executor(transport, behaviour, local_peer_id), - } - .build(); + let mut swarm = + libp2p::SwarmBuilder::with_existing_identity(generate_ed25519(opts.secret_key_seed)) + .with_tokio() + .with_tcp( + tcp::Config::default().port_reuse(true).nodelay(true), + noise::Config::new, + yamux::Config::default, + )? 
+ .with_quic() + .with_dns()? + .with_relay_client(noise::Config::new, yamux::Config::default)? + .with_behaviour(|keypair, relay_behaviour| Behaviour { + relay_client: relay_behaviour, + ping: ping::Behaviour::new(ping::Config::new()), + identify: identify::Behaviour::new(identify::Config::new( + "/TODO/0.0.1".to_string(), + keypair.public(), + )), + dcutr: dcutr::Behaviour::new(keypair.public().to_peer_id()), + })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); swarm .listen_on("/ip4/0.0.0.0/udp/0/quic-v1".parse().unwrap()) @@ -148,7 +123,7 @@ fn main() -> Result<(), Box> { event = swarm.next() => { match event.unwrap() { SwarmEvent::NewListenAddr { address, .. } => { - info!("Listening on {:?}", address); + tracing::info!(%address, "Listening on address"); } event => panic!("{event:?}"), } @@ -177,15 +152,14 @@ fn main() -> Result<(), Box> { SwarmEvent::Behaviour(BehaviourEvent::Identify(identify::Event::Sent { .. })) => { - info!("Told relay its public address."); + tracing::info!("Told relay its public address"); told_relay_observed_addr = true; } SwarmEvent::Behaviour(BehaviourEvent::Identify(identify::Event::Received { info: identify::Info { observed_addr, .. }, .. })) => { - info!("Relay told us our public address: {:?}", observed_addr); - swarm.add_external_address(observed_addr); + tracing::info!(address=%observed_addr, "Relay told us our observed address"); learned_observed_addr = true; } event => panic!("{event:?}"), @@ -218,31 +192,31 @@ fn main() -> Result<(), Box> { loop { match swarm.next().await.unwrap() { SwarmEvent::NewListenAddr { address, .. } => { - info!("Listening on {:?}", address); + tracing::info!(%address, "Listening on address"); } SwarmEvent::Behaviour(BehaviourEvent::RelayClient( relay::client::Event::ReservationReqAccepted { .. 
}, )) => { assert!(opts.mode == Mode::Listen); - info!("Relay accepted our reservation request."); + tracing::info!("Relay accepted our reservation request"); } SwarmEvent::Behaviour(BehaviourEvent::RelayClient(event)) => { - info!("{:?}", event) + tracing::info!(?event) } SwarmEvent::Behaviour(BehaviourEvent::Dcutr(event)) => { - info!("{:?}", event) + tracing::info!(?event) } SwarmEvent::Behaviour(BehaviourEvent::Identify(event)) => { - info!("{:?}", event) + tracing::info!(?event) } SwarmEvent::Behaviour(BehaviourEvent::Ping(_)) => {} SwarmEvent::ConnectionEstablished { peer_id, endpoint, .. } => { - info!("Established connection to {:?} via {:?}", peer_id, endpoint); + tracing::info!(peer=%peer_id, ?endpoint, "Established new connection"); } SwarmEvent::OutgoingConnectionError { peer_id, error, .. } => { - info!("Outgoing connection error to {:?}: {:?}", peer_id, error); + tracing::info!(peer=?peer_id, "Outgoing connection failed: {error}"); } _ => {} } diff --git a/examples/distributed-key-value-store/Cargo.toml b/examples/distributed-key-value-store/Cargo.toml index aa9a875be5ad..a7efe3c06978 100644 --- a/examples/distributed-key-value-store/Cargo.toml +++ b/examples/distributed-key-value-store/Cargo.toml @@ -5,12 +5,16 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10" -futures = "0.3.28" -libp2p = { path = "../../libp2p", features = [ "async-std", "dns", "kad", "mdns", "noise", "macros", "tcp", "websocket", "yamux"] } +futures = "0.3.30" +libp2p = { path = "../../libp2p", features = [ "async-std", "dns", "kad", "mdns", "noise", "macros", "tcp", "yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/distributed-key-value-store/src/main.rs b/examples/distributed-key-value-store/src/main.rs index 
cd9857b1482e..404333f3d203 100644 --- a/examples/distributed-key-value-store/src/main.rs +++ b/examples/distributed-key-value-store/src/main.rs @@ -22,30 +22,23 @@ use async_std::io; use futures::{prelude::*, select}; -use libp2p::core::upgrade::Version; use libp2p::kad; -use libp2p::kad::record::store::MemoryStore; +use libp2p::kad::store::MemoryStore; use libp2p::kad::Mode; use libp2p::{ - identity, mdns, noise, - swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, yamux, PeerId, Transport, + mdns, noise, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, }; use std::error::Error; +use std::time::Duration; +use tracing_subscriber::EnvFilter; #[async_std::main] async fn main() -> Result<(), Box> { - env_logger::init(); - - // Create a random key for ourselves. - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - - let transport = tcp::async_io::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key)?) - .multiplex(yamux::Config::default()) - .boxed(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); // We create a custom network behaviour that combines Kademlia and mDNS. #[derive(NetworkBehaviour)] @@ -54,15 +47,27 @@ async fn main() -> Result<(), Box> { mdns: mdns::async_io::Behaviour, } - // Create a swarm to manage peers and events. - let mut swarm = { - // Create a Kademlia behaviour. - let store = MemoryStore::new(local_peer_id); - let kademlia = kad::Behaviour::new(local_peer_id, store); - let mdns = mdns::async_io::Behaviour::new(mdns::Config::default(), local_peer_id)?; - let behaviour = Behaviour { kademlia, mdns }; - SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build() - }; + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_async_std() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? 
+ .with_behaviour(|key| { + Ok(Behaviour { + kademlia: kad::Behaviour::new( + key.public().to_peer_id(), + MemoryStore::new(key.public().to_peer_id()), + ), + mdns: mdns::async_io::Behaviour::new( + mdns::Config::default(), + key.public().to_peer_id(), + )?, + }) + })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); swarm.behaviour_mut().kademlia.set_mode(Some(Mode::Server)); @@ -148,7 +153,7 @@ fn handle_input_line(kademlia: &mut kad::Behaviour, line: String) { Some("GET") => { let key = { match args.next() { - Some(key) => kad::record::Key::new(&key), + Some(key) => kad::RecordKey::new(&key), None => { eprintln!("Expected key"); return; @@ -160,7 +165,7 @@ fn handle_input_line(kademlia: &mut kad::Behaviour, line: String) { Some("GET_PROVIDERS") => { let key = { match args.next() { - Some(key) => kad::record::Key::new(&key), + Some(key) => kad::RecordKey::new(&key), None => { eprintln!("Expected key"); return; @@ -172,7 +177,7 @@ fn handle_input_line(kademlia: &mut kad::Behaviour, line: String) { Some("PUT") => { let key = { match args.next() { - Some(key) => kad::record::Key::new(&key), + Some(key) => kad::RecordKey::new(&key), None => { eprintln!("Expected key"); return; @@ -201,7 +206,7 @@ fn handle_input_line(kademlia: &mut kad::Behaviour, line: String) { Some("PUT_PROVIDER") => { let key = { match args.next() { - Some(key) => kad::record::Key::new(&key), + Some(key) => kad::RecordKey::new(&key), None => { eprintln!("Expected key"); return; diff --git a/examples/file-sharing/Cargo.toml b/examples/file-sharing/Cargo.toml index 90462790c115..e38039a11bbb 100644 --- a/examples/file-sharing/Cargo.toml +++ b/examples/file-sharing/Cargo.toml @@ -5,14 +5,18 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] serde = { version = "1.0", features = ["derive"] } async-std = { version = "1.12", features = ["attributes"] } -clap = { version = "4.3.23", features = 
["derive"] } +clap = { version = "4.4.16", features = ["derive"] } either = "1.9" -env_logger = "0.10" -futures = "0.3.28" +futures = "0.3.30" libp2p = { path = "../../libp2p", features = [ "async-std", "cbor", "dns", "kad", "noise", "macros", "request-response", "tcp", "websocket", "yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } void = "1.0.2" [lints] diff --git a/examples/file-sharing/src/main.rs b/examples/file-sharing/src/main.rs index 4b6d368fc472..a834ee0600ec 100644 --- a/examples/file-sharing/src/main.rs +++ b/examples/file-sharing/src/main.rs @@ -31,10 +31,13 @@ use libp2p::{core::Multiaddr, multiaddr::Protocol}; use std::error::Error; use std::io::Write; use std::path::PathBuf; +use tracing_subscriber::EnvFilter; #[async_std::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); @@ -59,9 +62,8 @@ async fn main() -> Result<(), Box> { // In case the user provided an address of a peer on the CLI, dial it. 
if let Some(addr) = opt.peer { - let peer_id = match addr.iter().last() { - Some(Protocol::P2p(peer_id)) => peer_id, - _ => return Err("Expect peer multiaddr to contain peer ID.".into()), + let Some(Protocol::P2p(peer_id)) = addr.iter().last() else { + return Err("Expect peer multiaddr to contain peer ID.".into()); }; network_client .dial(peer_id, addr) diff --git a/examples/file-sharing/src/network.rs b/examples/file-sharing/src/network.rs index f13e72f0000c..59625fc39ea4 100644 --- a/examples/file-sharing/src/network.rs +++ b/examples/file-sharing/src/network.rs @@ -1,5 +1,3 @@ -use async_std::io; -use either::Either; use futures::channel::{mpsc, oneshot}; use futures::prelude::*; @@ -8,16 +6,16 @@ use libp2p::{ identity, kad, multiaddr::Protocol, noise, - request_response::{self, ProtocolSupport, RequestId, ResponseChannel}, - swarm::{NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}, - tcp, yamux, PeerId, Transport, + request_response::{self, OutboundRequestId, ProtocolSupport, ResponseChannel}, + swarm::{NetworkBehaviour, Swarm, SwarmEvent}, + tcp, yamux, PeerId, }; -use libp2p::core::upgrade::Version; use libp2p::StreamProtocol; use serde::{Deserialize, Serialize}; use std::collections::{hash_map, HashMap, HashSet}; use std::error::Error; +use std::time::Duration; /// Creates the network components, namely: /// @@ -41,18 +39,18 @@ pub(crate) async fn new( }; let peer_id = id_keys.public().to_peer_id(); - let transport = tcp::async_io::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&id_keys)?) - .multiplex(yamux::Config::default()) - .boxed(); - - // Build the Swarm, connecting the lower layer transport logic with the - // higher layer network behaviour logic. 
- let mut swarm = SwarmBuilder::with_async_std_executor( - transport, - Behaviour { - kademlia: kad::Behaviour::new(peer_id, kad::record::store::MemoryStore::new(peer_id)), + let mut swarm = libp2p::SwarmBuilder::with_existing_identity(id_keys) + .with_async_std() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|key| Behaviour { + kademlia: kad::Behaviour::new( + peer_id, + kad::store::MemoryStore::new(key.public().to_peer_id()), + ), request_response: request_response::cbor::Behaviour::new( [( StreamProtocol::new("/file-exchange/1"), @@ -60,10 +58,9 @@ pub(crate) async fn new( )], request_response::Config::default(), ), - }, - peer_id, - ) - .build(); + })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); swarm .behaviour_mut() @@ -178,7 +175,7 @@ pub(crate) struct EventLoop { pending_start_providing: HashMap>, pending_get_providers: HashMap>>, pending_request_file: - HashMap, Box>>>, + HashMap, Box>>>, } impl EventLoop { @@ -211,10 +208,7 @@ impl EventLoop { } } - async fn handle_event( - &mut self, - event: SwarmEvent>, - ) { + async fn handle_event(&mut self, event: SwarmEvent) { match event { SwarmEvent::Behaviour(BehaviourEvent::Kademlia( kad::Event::OutboundQueryProgressed { @@ -408,7 +402,7 @@ impl EventLoop { #[derive(NetworkBehaviour)] struct Behaviour { request_response: request_response::cbor::Behaviour, - kademlia: kad::Behaviour, + kademlia: kad::Behaviour, } #[derive(Debug)] diff --git a/examples/identify/Cargo.toml b/examples/identify/Cargo.toml index cc30a0c614ca..2dcc780ac224 100644 --- a/examples/identify/Cargo.toml +++ b/examples/identify/Cargo.toml @@ -5,12 +5,16 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10" -futures = "0.3.28" +futures = "0.3.30" libp2p = { path = 
"../../libp2p", features = ["async-std", "dns", "dcutr", "identify", "macros", "noise", "ping", "relay", "rendezvous", "tcp", "tokio","yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/identify/src/main.rs b/examples/identify/src/main.rs index dc98fb582955..916317a5a438 100644 --- a/examples/identify/src/main.rs +++ b/examples/identify/src/main.rs @@ -20,35 +20,32 @@ #![doc = include_str!("../README.md")] -use futures::prelude::*; -use libp2p::{ - core::{multiaddr::Multiaddr, upgrade::Version}, - identify, identity, noise, - swarm::{SwarmBuilder, SwarmEvent}, - tcp, yamux, PeerId, Transport, -}; -use std::error::Error; +use futures::StreamExt; +use libp2p::{core::multiaddr::Multiaddr, identify, noise, swarm::SwarmEvent, tcp, yamux}; +use std::{error::Error, time::Duration}; +use tracing_subscriber::EnvFilter; #[async_std::main] async fn main() -> Result<(), Box> { - env_logger::init(); - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); - let transport = tcp::async_io::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key).unwrap()) - .multiplex(yamux::Config::default()) - .boxed(); - - // Create a identify network behaviour. - let behaviour = identify::Behaviour::new(identify::Config::new( - "/ipfs/id/1.0.0".to_string(), - local_key.public(), - )); - - let mut swarm = - SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_async_std() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|key| { + identify::Behaviour::new(identify::Config::new( + "/ipfs/id/1.0.0".to_string(), + key.public(), + )) + })? 
+ .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); // Tell the swarm to listen on all interfaces and a random, OS-assigned // port. diff --git a/examples/ipfs-kad/Cargo.toml b/examples/ipfs-kad/Cargo.toml index ae524e3857b6..e82031ce64f3 100644 --- a/examples/ipfs-kad/Cargo.toml +++ b/examples/ipfs-kad/Cargo.toml @@ -5,14 +5,19 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] -tokio = { version = "1.12", features = ["rt-multi-thread", "macros"] } +tokio = { version = "1.36", features = ["rt-multi-thread", "macros"] } async-trait = "0.1" -clap = { version = "4.3.23", features = ["derive"] } +clap = { version = "4.4.16", features = ["derive"] } env_logger = "0.10" -futures = "0.3.28" -anyhow = "1.0.75" -libp2p = { path = "../../libp2p", features = [ "tokio", "dns", "kad", "noise", "tcp", "websocket", "yamux", "rsa"] } +futures = "0.3.30" +anyhow = "1.0.79" +libp2p = { path = "../../libp2p", features = [ "tokio", "dns", "kad", "noise", "tcp", "yamux", "rsa"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/ipfs-kad/src/main.rs b/examples/ipfs-kad/src/main.rs index f912bd0565af..95921d6fa359 100644 --- a/examples/ipfs-kad/src/main.rs +++ b/examples/ipfs-kad/src/main.rs @@ -27,12 +27,9 @@ use std::time::{Duration, Instant}; use anyhow::{bail, Result}; use clap::Parser; use futures::StreamExt; -use libp2p::{ - bytes::BufMut, - identity, kad, - swarm::{SwarmBuilder, SwarmEvent}, - tokio_development_transport, PeerId, -}; +use libp2p::swarm::{StreamProtocol, SwarmEvent}; +use libp2p::{bytes::BufMut, identity, kad, noise, tcp, yamux, PeerId}; +use tracing_subscriber::EnvFilter; const BOOTNODES: [&str; 4] = [ "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", @@ -41,34 +38,43 @@ const BOOTNODES: [&str; 4] = [ "QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", ]; +const 
IPFS_PROTO_NAME: StreamProtocol = StreamProtocol::new("/ipfs/kad/1.0.0"); + #[tokio::main] async fn main() -> Result<()> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); // Create a random key for ourselves. let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - - // Set up a an encrypted DNS-enabled TCP Transport over the yamux protocol - let transport = tokio_development_transport(local_key.clone())?; - - // Create a swarm to manage peers and events. - let mut swarm = { - // Create a Kademlia behaviour. - let mut cfg = kad::Config::default(); - cfg.set_query_timeout(Duration::from_secs(5 * 60)); - let store = kad::store::MemoryStore::new(local_peer_id); - let mut behaviour = kad::Behaviour::with_config(local_peer_id, store, cfg); - - // Add the bootnodes to the local routing table. `libp2p-dns` built - // into the `transport` resolves the `dnsaddr` when Kademlia tries - // to dial these nodes. - for peer in &BOOTNODES { - behaviour.add_address(&peer.parse()?, "/dnsaddr/bootstrap.libp2p.io".parse()?); - } - SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id).build() - }; + let mut swarm = libp2p::SwarmBuilder::with_existing_identity(local_key.clone()) + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_dns()? + .with_behaviour(|key| { + // Create a Kademlia behaviour. + let mut cfg = kad::Config::new(IPFS_PROTO_NAME); + cfg.set_query_timeout(Duration::from_secs(5 * 60)); + let store = kad::store::MemoryStore::new(key.public().to_peer_id()); + kad::Behaviour::with_config(key.public().to_peer_id(), store, cfg) + })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(); + + // Add the bootnodes to the local routing table. 
`libp2p-dns` built + // into the `transport` resolves the `dnsaddr` when Kademlia tries + // to dial these nodes. + for peer in &BOOTNODES { + swarm + .behaviour_mut() + .add_address(&peer.parse()?, "/dnsaddr/bootstrap.libp2p.io".parse()?); + } let cli_opt = Opt::parse(); @@ -83,11 +89,11 @@ async fn main() -> Result<()> { let mut pk_record_key = vec![]; pk_record_key.put_slice("/pk/".as_bytes()); - pk_record_key.put_slice(local_peer_id.to_bytes().as_slice()); + pk_record_key.put_slice(swarm.local_peer_id().to_bytes().as_slice()); let mut pk_record = kad::Record::new(pk_record_key, local_key.public().encode_protobuf()); - pk_record.publisher = Some(local_peer_id); + pk_record.publisher = Some(*swarm.local_peer_id()); pk_record.expires = Some(Instant::now().add(Duration::from_secs(60))); swarm diff --git a/examples/ipfs-private/Cargo.toml b/examples/ipfs-private/Cargo.toml index 4bf501886dbf..daa68cd8f5a1 100644 --- a/examples/ipfs-private/Cargo.toml +++ b/examples/ipfs-private/Cargo.toml @@ -5,13 +5,17 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] -tokio = { version = "1.32", features = ["rt-multi-thread", "macros", "io-std"] } +tokio = { version = "1.36", features = ["rt-multi-thread", "macros", "io-std"] } async-trait = "0.1" either = "1.9" -env_logger = "0.10" -futures = "0.3.28" +futures = "0.3.30" libp2p = { path = "../../libp2p", features = [ "tokio", "gossipsub", "dns", "identify", "kad", "macros", "noise", "ping", "pnet", "tcp", "websocket", "yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/ipfs-private/src/main.rs b/examples/ipfs-private/src/main.rs index fe83e891cfc6..a57bfd465e0f 100644 --- a/examples/ipfs-private/src/main.rs +++ b/examples/ipfs-private/src/main.rs @@ -23,39 +23,17 @@ use either::Either; use futures::prelude::*; use libp2p::{ - core::{muxing::StreamMuxerBox, transport, 
transport::upgrade::Version}, - gossipsub, identify, identity, + core::transport::upgrade::Version, + gossipsub, identify, multiaddr::Protocol, noise, ping, pnet::{PnetConfig, PreSharedKey}, - swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, yamux, Multiaddr, PeerId, Transport, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, Multiaddr, Transport, }; use std::{env, error::Error, fs, path::Path, str::FromStr, time::Duration}; use tokio::{io, io::AsyncBufReadExt, select}; - -/// Builds the transport that serves as a common ground for all connections. -pub fn build_transport( - key_pair: identity::Keypair, - psk: Option, -) -> transport::Boxed<(PeerId, StreamMuxerBox)> { - let noise_config = noise::Config::new(&key_pair).unwrap(); - let yamux_config = yamux::Config::default(); - - let base_transport = tcp::tokio::Transport::new(tcp::Config::default().nodelay(true)); - let maybe_encrypted = match psk { - Some(psk) => Either::Left( - base_transport.and_then(move |socket, _| PnetConfig::new(psk).handshake(socket)), - ), - None => Either::Right(base_transport), - }; - maybe_encrypted - .upgrade(Version::V1Lazy) - .authenticate(noise_config) - .multiplex(yamux_config) - .timeout(Duration::from_secs(20)) - .boxed() -} +use tracing_subscriber::EnvFilter; /// Get the current ipfs repo path, either from the IPFS_PATH environment variable or /// from the default $HOME/.ipfs @@ -110,7 +88,9 @@ fn parse_legacy_multiaddr(text: &str) -> Result> { #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let ipfs_path = get_ipfs_path(); println!("using IPFS_PATH {ipfs_path:?}"); @@ -118,76 +98,68 @@ async fn main() -> Result<(), Box> { .map(|text| PreSharedKey::from_str(&text)) .transpose()?; - // Create a random PeerId - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - println!("using 
random peer id: {local_peer_id:?}"); if let Some(psk) = psk { println!("using swarm key with fingerprint: {}", psk.fingerprint()); } - // Set up a an encrypted DNS-enabled TCP Transport over and Yamux protocol - let transport = build_transport(local_key.clone(), psk); - // Create a Gosspipsub topic let gossipsub_topic = gossipsub::IdentTopic::new("chat"); // We create a custom network behaviour that combines gossipsub, ping and identify. #[derive(NetworkBehaviour)] - #[behaviour(to_swarm = "MyBehaviourEvent")] struct MyBehaviour { gossipsub: gossipsub::Behaviour, identify: identify::Behaviour, ping: ping::Behaviour, } - enum MyBehaviourEvent { - Gossipsub(gossipsub::Event), - Identify(identify::Event), - Ping(ping::Event), - } - - impl From for MyBehaviourEvent { - fn from(event: gossipsub::Event) -> Self { - MyBehaviourEvent::Gossipsub(event) - } - } - - impl From for MyBehaviourEvent { - fn from(event: identify::Event) -> Self { - MyBehaviourEvent::Identify(event) - } - } - - impl From for MyBehaviourEvent { - fn from(event: ping::Event) -> Self { - MyBehaviourEvent::Ping(event) - } - } - - // Create a Swarm to manage peers and events - let mut swarm = { - let gossipsub_config = gossipsub::ConfigBuilder::default() - .max_transmit_size(262144) - .build() - .expect("valid config"); - let mut behaviour = MyBehaviour { - gossipsub: gossipsub::Behaviour::new( - gossipsub::MessageAuthenticity::Signed(local_key.clone()), - gossipsub_config, - ) - .expect("Valid configuration"), - identify: identify::Behaviour::new(identify::Config::new( - "/ipfs/0.1.0".into(), - local_key.public(), - )), - ping: ping::Behaviour::new(ping::Config::new()), - }; - - println!("Subscribing to {gossipsub_topic:?}"); - behaviour.gossipsub.subscribe(&gossipsub_topic).unwrap(); - SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id).build() - }; + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_other_transport(|key| { + let noise_config = 
noise::Config::new(key).unwrap(); + let yamux_config = yamux::Config::default(); + + let base_transport = tcp::tokio::Transport::new(tcp::Config::default().nodelay(true)); + let maybe_encrypted = match psk { + Some(psk) => Either::Left( + base_transport + .and_then(move |socket, _| PnetConfig::new(psk).handshake(socket)), + ), + None => Either::Right(base_transport), + }; + maybe_encrypted + .upgrade(Version::V1Lazy) + .authenticate(noise_config) + .multiplex(yamux_config) + })? + .with_dns()? + .with_behaviour(|key| { + let gossipsub_config = gossipsub::ConfigBuilder::default() + .max_transmit_size(262144) + .build() + .map_err(|msg| io::Error::new(io::ErrorKind::Other, msg))?; // Temporary hack because `build` does not return a proper `std::error::Error`. + Ok(MyBehaviour { + gossipsub: gossipsub::Behaviour::new( + gossipsub::MessageAuthenticity::Signed(key.clone()), + gossipsub_config, + ) + .expect("Valid configuration"), + identify: identify::Behaviour::new(identify::Config::new( + "/ipfs/0.1.0".into(), + key.public(), + )), + ping: ping::Behaviour::new(ping::Config::new()), + }) + })? 
+ .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); + + println!("Subscribing to {gossipsub_topic:?}"); + swarm + .behaviour_mut() + .gossipsub + .subscribe(&gossipsub_topic) + .unwrap(); // Reach out to other nodes if specified for to_dial in std::env::args().skip(1) { diff --git a/examples/metrics/Cargo.toml b/examples/metrics/Cargo.toml index 8cf7e1fc4068..39412d29aeaf 100644 --- a/examples/metrics/Cargo.toml +++ b/examples/metrics/Cargo.toml @@ -5,14 +5,21 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] -env_logger = "0.10.0" -futures = "0.3.27" +futures = "0.3.30" hyper = { version = "0.14", features = ["server", "tcp", "http1"] } -libp2p = { path = "../../libp2p", features = ["async-std", "metrics", "ping", "noise", "identify", "tcp", "yamux", "macros"] } -log = "0.4.20" -tokio = { version = "1", features = ["rt-multi-thread"] } -prometheus-client = "0.21.2" +libp2p = { path = "../../libp2p", features = ["tokio", "metrics", "ping", "noise", "identify", "tcp", "yamux", "macros"] } +opentelemetry = { version = "0.20.0", features = ["rt-tokio", "metrics"] } +opentelemetry-otlp = { version = "0.13.0", features = ["metrics"]} +opentelemetry_api = "0.20.0" +prometheus-client = { workspace = true } +tokio = { version = "1", features = ["full"] } +tracing = "0.1.37" +tracing-opentelemetry = "0.21.0" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/metrics/README.md b/examples/metrics/README.md index fc73cbd74106..160536985f15 100644 --- a/examples/metrics/README.md +++ b/examples/metrics/README.md @@ -1,6 +1,6 @@ ## Description -The example showcases how to run a p2p network with **libp2p** and collect metrics using `libp2p-metrics`. +The example showcases how to run a p2p network with **libp2p** and collect metrics using `libp2p-metrics` as well as span data via `opentelemetry`. 
It sets up multiple nodes in the network and measures various metrics, such as `libp2p_ping`, to evaluate the network's performance. ## Usage @@ -34,6 +34,45 @@ To run the example, follow these steps: After executing the command, you should see a long list of metrics printed to the terminal. Make sure to check the `libp2p_ping` metrics, which should have a value greater than zero (`>0`). +## Opentelemetry + +To see the span data collected as part of the `Swarm`s activity, start up an opentelemetry collector: + +```sh +docker compose up +``` + +Then, configure tracing to output spans: + +```shell +export RUST_LOG=info,[ConnectionHandler::poll]=trace,[NetworkBehaviour::poll]=trace +``` + +Next, (re)-start the two example for it to connect to the OTEL collector. +Finally, open the Jaeger UI in a browser and explore the spans: http://localhost:16686. + +### Filtering spans + +For a precise documentation, please see the following documentation in tracing: . + +`rust-libp2p` consistently applies spans to the following functions: + +- `ConnectionHandler::poll` implementations +- `NetworkBehaviour::poll` implementations + +The above spans are all called exactly that: `ConnectionHandler::poll` and `NetworkBehaviour::poll`. +You can activate _all_ of them by setting: + +``` +RUST_LOG=[ConnectionHandler::poll]=trace +``` + +If you just wanted to see the spans of the `libp2p_ping` crate, you can filter like this: + +``` +RUST_LOG=libp2p_ping[ConnectionHandler::poll]=trace +``` + ## Conclusion This example demonstrates how to utilize the `libp2p-metrics` crate to collect and analyze metrics in a libp2p network. 
diff --git a/examples/metrics/docker-compose.yml b/examples/metrics/docker-compose.yml new file mode 100644 index 000000000000..06d8d5becfeb --- /dev/null +++ b/examples/metrics/docker-compose.yml @@ -0,0 +1,23 @@ +version: "2" +services: + # Jaeger + jaeger-all-in-one: + image: jaegertracing/all-in-one:latest + restart: always + ports: + - "16686:16686" + - "14268" + - "14250" + + # Collector + otel-collector: + image: otel/opentelemetry-collector:0.88.0 + restart: always + command: ["--config=/etc/otel-collector-config.yaml"] + volumes: + - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml + ports: + - "13133:13133" # health_check extension + - "4317:4317" # OTLP gRPC receiver + depends_on: + - jaeger-all-in-one diff --git a/examples/metrics/otel-collector-config.yaml b/examples/metrics/otel-collector-config.yaml new file mode 100644 index 000000000000..8755848cd6e9 --- /dev/null +++ b/examples/metrics/otel-collector-config.yaml @@ -0,0 +1,25 @@ +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + +exporters: + debug: + otlp: + endpoint: jaeger-all-in-one:4317 + tls: + insecure: true + +processors: + batch: + +service: + telemetry: + logs: + level: "debug" + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [debug, otlp] diff --git a/examples/metrics/src/http_service.rs b/examples/metrics/src/http_service.rs index 46cb7aacb842..8c77d724ea30 100644 --- a/examples/metrics/src/http_service.rs +++ b/examples/metrics/src/http_service.rs @@ -21,7 +21,6 @@ use hyper::http::StatusCode; use hyper::service::Service; use hyper::{Body, Method, Request, Response, Server}; -use log::{error, info}; use prometheus_client::encoding::text::encode; use prometheus_client::registry::Registry; use std::future::Future; @@ -33,18 +32,14 @@ const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;v pub(crate) async fn metrics_server(registry: Registry) -> Result<(), std::io::Error> { // Serve on localhost. 
- let addr = ([127, 0, 0, 1], 8080).into(); - - // Use the tokio runtime to run the hyper server. - let rt = tokio::runtime::Runtime::new()?; - rt.block_on(async { - let server = Server::bind(&addr).serve(MakeMetricService::new(registry)); - info!("Metrics server on http://{}/metrics", server.local_addr()); - if let Err(e) = server.await { - error!("server error: {}", e); - } - Ok(()) - }) + let addr = ([127, 0, 0, 1], 0).into(); + + let server = Server::bind(&addr).serve(MakeMetricService::new(registry)); + tracing::info!(metrics_server=%format!("http://{}/metrics", server.local_addr())); + if let Err(e) = server.await { + tracing::error!("server error: {}", e); + } + Ok(()) } pub(crate) struct MetricService { diff --git a/examples/metrics/src/main.rs b/examples/metrics/src/main.rs index b28abaee9418..3ab6815cb32d 100644 --- a/examples/metrics/src/main.rs +++ b/examples/metrics/src/main.rs @@ -20,71 +20,90 @@ #![doc = include_str!("../README.md")] -use env_logger::Env; -use futures::executor::block_on; -use futures::stream::StreamExt; -use libp2p::core::{upgrade::Version, Multiaddr, Transport}; -use libp2p::identity::PeerId; +use futures::StreamExt; +use libp2p::core::Multiaddr; use libp2p::metrics::{Metrics, Recorder}; -use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}; +use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; use libp2p::{identify, identity, noise, ping, tcp, yamux}; -use log::info; +use opentelemetry::sdk; +use opentelemetry_api::KeyValue; use prometheus_client::registry::Registry; use std::error::Error; -use std::thread; use std::time::Duration; +use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::util::SubscriberInitExt; +use tracing_subscriber::{EnvFilter, Layer}; mod http_service; -fn main() -> Result<(), Box> { - env_logger::Builder::from_env(Env::default().default_filter_or("info")).init(); +#[tokio::main] +async fn main() -> Result<(), Box> { + setup_tracing()?; - let local_key = 
identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - let local_pub_key = local_key.public(); + let mut metric_registry = Registry::default(); - let mut swarm = SwarmBuilder::without_executor( - tcp::async_io::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key)?) - .multiplex(yamux::Config::default()) - .boxed(), - Behaviour::new(local_pub_key), - local_peer_id, - ) - .idle_connection_timeout(Duration::from_secs(60)) - .build(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_bandwidth_metrics(&mut metric_registry) + .with_behaviour(|key| Behaviour::new(key.public()))? + .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))) + .build(); swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; if let Some(addr) = std::env::args().nth(1) { let remote: Multiaddr = addr.parse()?; swarm.dial(remote)?; - info!("Dialed {}", addr) + tracing::info!(address=%addr, "Dialed address") } - let mut metric_registry = Registry::default(); let metrics = Metrics::new(&mut metric_registry); - thread::spawn(move || block_on(http_service::metrics_server(metric_registry))); + tokio::spawn(http_service::metrics_server(metric_registry)); - block_on(async { - loop { - match swarm.select_next_some().await { - SwarmEvent::Behaviour(BehaviourEvent::Ping(ping_event)) => { - info!("{:?}", ping_event); - metrics.record(&ping_event); - } - SwarmEvent::Behaviour(BehaviourEvent::Identify(identify_event)) => { - info!("{:?}", identify_event); - metrics.record(&identify_event); - } - swarm_event => { - info!("{:?}", swarm_event); - metrics.record(&swarm_event); - } + loop { + match swarm.select_next_some().await { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping_event)) => { + tracing::info!(?ping_event); + metrics.record(&ping_event); + } + 
SwarmEvent::Behaviour(BehaviourEvent::Identify(identify_event)) => { + tracing::info!(?identify_event); + metrics.record(&identify_event); + } + swarm_event => { + tracing::info!(?swarm_event); + metrics.record(&swarm_event); } } - }); + } +} + +fn setup_tracing() -> Result<(), Box> { + let tracer = opentelemetry_otlp::new_pipeline() + .tracing() + .with_exporter(opentelemetry_otlp::new_exporter().tonic()) + .with_trace_config( + sdk::trace::Config::default().with_resource(sdk::Resource::new(vec![KeyValue::new( + "service.name", + "libp2p", + )])), + ) + .install_batch(opentelemetry::runtime::Tokio)?; + + tracing_subscriber::registry() + .with(tracing_subscriber::fmt::layer().with_filter(EnvFilter::from_default_env())) + .with( + tracing_opentelemetry::layer() + .with_tracer(tracer) + .with_filter(EnvFilter::from_default_env()), + ) + .try_init()?; + Ok(()) } diff --git a/examples/ping/Cargo.toml b/examples/ping/Cargo.toml index b39df505b4ac..db47e4e2d8eb 100644 --- a/examples/ping/Cargo.toml +++ b/examples/ping/Cargo.toml @@ -5,11 +5,15 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] -env_logger = "0.10.0" -futures = "0.3.28" +futures = "0.3.30" libp2p = { path = "../../libp2p", features = ["noise", "ping", "tcp", "tokio", "yamux"] } -tokio = { version = "1.32.0", features = ["full"] } +tokio = { version = "1.36.0", features = ["full"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/ping/src/main.rs b/examples/ping/src/main.rs index 25939a132c1b..911b0384f899 100644 --- a/examples/ping/src/main.rs +++ b/examples/ping/src/main.rs @@ -21,31 +21,26 @@ #![doc = include_str!("../README.md")] use futures::prelude::*; -use libp2p::core::upgrade::Version; -use libp2p::{ - identity, noise, ping, - swarm::{SwarmBuilder, SwarmEvent}, - tcp, yamux, Multiaddr, PeerId, Transport, -}; -use std::error::Error; -use 
std::time::Duration; +use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr}; +use std::{error::Error, time::Duration}; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); - let transport = tcp::tokio::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key)?) - .multiplex(yamux::Config::default()) - .boxed(); - - let mut swarm = - SwarmBuilder::with_tokio_executor(transport, ping::Behaviour::default(), local_peer_id) - .idle_connection_timeout(Duration::from_secs(60)) // For illustrative purposes, keep idle connections alive for a minute so we can observe a few pings. - .build(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|_| ping::Behaviour::default())? + .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))) + .build(); // Tell the swarm to listen on all interfaces and a random, OS-assigned // port. 
diff --git a/examples/relay-server/Cargo.toml b/examples/relay-server/Cargo.toml index 39d899f5573d..65c7c7070877 100644 --- a/examples/relay-server/Cargo.toml +++ b/examples/relay-server/Cargo.toml @@ -5,13 +5,17 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] -clap = { version = "4.3.23", features = ["derive"] } +clap = { version = "4.4.16", features = ["derive"] } async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10.0" -futures = "0.3.28" +futures = "0.3.30" libp2p = { path = "../../libp2p", features = [ "async-std", "noise", "macros", "ping", "tcp", "identify", "yamux", "relay", "quic"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/relay-server/src/main.rs b/examples/relay-server/src/main.rs index ab87615d74e5..bf5817454f8e 100644 --- a/examples/relay-server/src/main.rs +++ b/examples/relay-server/src/main.rs @@ -22,61 +22,46 @@ #![doc = include_str!("../README.md")] use clap::Parser; +use futures::executor::block_on; use futures::stream::StreamExt; -use futures::{executor::block_on, future::Either}; use libp2p::{ core::multiaddr::Protocol, - core::muxing::StreamMuxerBox, - core::upgrade, - core::{Multiaddr, Transport}, - identify, identity, - identity::PeerId, - noise, ping, quic, relay, - swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, + core::Multiaddr, + identify, identity, noise, ping, relay, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, }; use std::error::Error; use std::net::{Ipv4Addr, Ipv6Addr}; +use tracing_subscriber::EnvFilter; fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); - println!("opt: {opt:?}"); // Create a static known PeerId based on given secret let local_key: identity::Keypair = 
generate_ed25519(opt.secret_key_seed); - let local_peer_id = PeerId::from(local_key.public()); - let tcp_transport = tcp::async_io::Transport::default(); - - let tcp_transport = tcp_transport - .upgrade(upgrade::Version::V1Lazy) - .authenticate( - noise::Config::new(&local_key).expect("Signing libp2p-noise static DH keypair failed."), - ) - .multiplex(libp2p::yamux::Config::default()); - - let quic_transport = quic::async_std::Transport::new(quic::Config::new(&local_key)); - - let transport = quic_transport - .or_transport(tcp_transport) - .map(|either_output, _| match either_output { - Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - }) - .boxed(); - - let behaviour = Behaviour { - relay: relay::Behaviour::new(local_peer_id, Default::default()), - ping: ping::Behaviour::new(ping::Config::new()), - identify: identify::Behaviour::new(identify::Config::new( - "/TODO/0.0.1".to_string(), - local_key.public(), - )), - }; - - let mut swarm = SwarmBuilder::without_executor(transport, behaviour, local_peer_id).build(); + let mut swarm = libp2p::SwarmBuilder::with_existing_identity(local_key) + .with_async_std() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_quic() + .with_behaviour(|key| Behaviour { + relay: relay::Behaviour::new(key.public().to_peer_id(), Default::default()), + ping: ping::Behaviour::new(ping::Config::new()), + identify: identify::Behaviour::new(identify::Config::new( + "/TODO/0.0.1".to_string(), + key.public(), + )), + })? 
+ .build(); // Listen on all interfaces let listen_addr_tcp = Multiaddr::empty() diff --git a/examples/rendezvous/Cargo.toml b/examples/rendezvous/Cargo.toml index e98f465bc3bd..edd5b8031a4e 100644 --- a/examples/rendezvous/Cargo.toml +++ b/examples/rendezvous/Cargo.toml @@ -5,14 +5,17 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10.0" -futures = "0.3.28" +futures = "0.3.30" libp2p = { path = "../../libp2p", features = [ "async-std", "identify", "macros", "noise", "ping", "rendezvous", "tcp", "tokio", "yamux"] } -log = "0.4" -tokio = { version = "1.32", features = ["rt-multi-thread", "macros", "time"] } +tokio = { version = "1.36", features = ["rt-multi-thread", "macros", "time"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/rendezvous/src/bin/rzv-discover.rs b/examples/rendezvous/src/bin/rzv-discover.rs index ac45afae8408..edd3d10a0ce0 100644 --- a/examples/rendezvous/src/bin/rzv-discover.rs +++ b/examples/rendezvous/src/bin/rzv-discover.rs @@ -20,41 +20,41 @@ use futures::StreamExt; use libp2p::{ - core::transport::upgrade::Version, - identity, multiaddr::Protocol, noise, ping, rendezvous, - swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, yamux, Multiaddr, PeerId, Transport, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, Multiaddr, }; +use std::error::Error; use std::time::Duration; +use tracing_subscriber::EnvFilter; const NAMESPACE: &str = "rendezvous"; #[tokio::main] -async fn main() { - env_logger::init(); +async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); - let key_pair = identity::Keypair::generate_ed25519(); let rendezvous_point_address = "/ip4/127.0.0.1/tcp/62649".parse::().unwrap(); let 
rendezvous_point = "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN" .parse() .unwrap(); - let mut swarm = SwarmBuilder::with_tokio_executor( - tcp::tokio::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&key_pair).unwrap()) - .multiplex(yamux::Config::default()) - .boxed(), - MyBehaviour { - rendezvous: rendezvous::client::Behaviour::new(key_pair.clone()), + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|key| MyBehaviour { + rendezvous: rendezvous::client::Behaviour::new(key.clone()), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), - }, - PeerId::from(key_pair.public()), - ) - .idle_connection_timeout(Duration::from_secs(5)) - .build(); + })? + .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5))) + .build(); swarm.dial(rendezvous_point_address.clone()).unwrap(); @@ -65,7 +65,7 @@ async fn main() { tokio::select! { event = swarm.select_next_some() => match event { SwarmEvent::ConnectionEstablished { peer_id, .. } if peer_id == rendezvous_point => { - log::info!( + tracing::info!( "Connected to rendezvous point, discovering nodes in '{}' namespace ...", NAMESPACE ); @@ -87,7 +87,7 @@ async fn main() { for registration in registrations { for address in registration.record.addresses() { let peer = registration.record.peer_id(); - log::info!("Discovered peer {} at {}", peer, address); + tracing::info!(%peer, %address, "Discovered peer"); let p2p_suffix = Protocol::P2p(peer); let address_with_p2p = @@ -106,10 +106,10 @@ async fn main() { result: Ok(rtt), .. 
})) if peer != rendezvous_point => { - log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) + tracing::info!(%peer, "Ping is {}ms", rtt.as_millis()) } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } }, _ = discover_tick.tick(), if cookie.is_some() => diff --git a/examples/rendezvous/src/bin/rzv-identify.rs b/examples/rendezvous/src/bin/rzv-identify.rs index 95ed7a5ccd8a..1d5455928298 100644 --- a/examples/rendezvous/src/bin/rzv-identify.rs +++ b/examples/rendezvous/src/bin/rzv-identify.rs @@ -20,41 +20,43 @@ use futures::StreamExt; use libp2p::{ - core::transport::upgrade::Version, - identify, identity, noise, ping, rendezvous, - swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, yamux, Multiaddr, PeerId, Transport, + identify, noise, ping, rendezvous, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, Multiaddr, }; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); - let key_pair = identity::Keypair::generate_ed25519(); let rendezvous_point_address = "/ip4/127.0.0.1/tcp/62649".parse::().unwrap(); let rendezvous_point = "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN" .parse() .unwrap(); - let mut swarm = SwarmBuilder::with_tokio_executor( - tcp::tokio::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&key_pair).unwrap()) - .multiplex(yamux::Config::default()) - .boxed(), - MyBehaviour { + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + ) + .unwrap() + .with_behaviour(|key| MyBehaviour { identify: identify::Behaviour::new(identify::Config::new( "rendezvous-example/1.0.0".to_string(), - key_pair.public(), + key.public(), )), - rendezvous: 
rendezvous::client::Behaviour::new(key_pair.clone()), + rendezvous: rendezvous::client::Behaviour::new(key.clone()), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), - }, - PeerId::from(key_pair.public()), - ) - .idle_connection_timeout(Duration::from_secs(5)) - .build(); + }) + .unwrap() + .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5))) + .build(); let _ = swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()); @@ -63,14 +65,14 @@ async fn main() { while let Some(event) = swarm.next().await { match event { SwarmEvent::NewListenAddr { address, .. } => { - log::info!("Listening on {}", address); + tracing::info!("Listening on {}", address); } SwarmEvent::ConnectionClosed { peer_id, cause: Some(error), .. } if peer_id == rendezvous_point => { - log::error!("Lost connection to rendezvous point {}", error); + tracing::error!("Lost connection to rendezvous point {}", error); } // once `/identify` did its job, we know our external address and can register SwarmEvent::Behaviour(MyBehaviourEvent::Identify(identify::Event::Received { @@ -81,7 +83,7 @@ async fn main() { rendezvous_point, None, ) { - log::error!("Failed to register: {error}"); + tracing::error!("Failed to register: {error}"); return; } } @@ -92,7 +94,7 @@ async fn main() { rendezvous_node, }, )) => { - log::info!( + tracing::info!( "Registered for namespace '{}' at rendezvous point {} for the next {} seconds", namespace, rendezvous_node, @@ -106,7 +108,7 @@ async fn main() { error, }, )) => { - log::error!( + tracing::error!( "Failed to register: rendezvous_node={}, namespace={}, error_code={:?}", rendezvous_node, namespace, @@ -119,10 +121,10 @@ async fn main() { result: Ok(rtt), .. 
})) if peer != rendezvous_point => { - log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) + tracing::info!("Ping to {} is {}ms", peer, rtt.as_millis()) } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } } } diff --git a/examples/rendezvous/src/bin/rzv-register.rs b/examples/rendezvous/src/bin/rzv-register.rs index 51acfee2a71b..bd848238d4a1 100644 --- a/examples/rendezvous/src/bin/rzv-register.rs +++ b/examples/rendezvous/src/bin/rzv-register.rs @@ -20,37 +20,39 @@ use futures::StreamExt; use libp2p::{ - core::transport::upgrade::Version, - identity, noise, ping, rendezvous, - swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, yamux, Multiaddr, PeerId, Transport, + noise, ping, rendezvous, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, Multiaddr, }; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); - let key_pair = identity::Keypair::generate_ed25519(); let rendezvous_point_address = "/ip4/127.0.0.1/tcp/62649".parse::().unwrap(); let rendezvous_point = "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN" .parse() .unwrap(); - let mut swarm = SwarmBuilder::with_tokio_executor( - tcp::tokio::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&key_pair).unwrap()) - .multiplex(yamux::Config::default()) - .boxed(), - MyBehaviour { - rendezvous: rendezvous::client::Behaviour::new(key_pair.clone()), + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + ) + .unwrap() + .with_behaviour(|key| MyBehaviour { + rendezvous: rendezvous::client::Behaviour::new(key.clone()), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), - }, - PeerId::from(key_pair.public()), - ) - 
.idle_connection_timeout(Duration::from_secs(5)) - .build(); + }) + .unwrap() + .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5))) + .build(); // In production the external address should be the publicly facing IP address of the rendezvous point. // This address is recorded in the registration entry by the rendezvous point. @@ -62,14 +64,14 @@ async fn main() { while let Some(event) = swarm.next().await { match event { SwarmEvent::NewListenAddr { address, .. } => { - log::info!("Listening on {}", address); + tracing::info!("Listening on {}", address); } SwarmEvent::ConnectionClosed { peer_id, cause: Some(error), .. } if peer_id == rendezvous_point => { - log::error!("Lost connection to rendezvous point {}", error); + tracing::error!("Lost connection to rendezvous point {}", error); } SwarmEvent::ConnectionEstablished { peer_id, .. } if peer_id == rendezvous_point => { if let Err(error) = swarm.behaviour_mut().rendezvous.register( @@ -77,10 +79,10 @@ async fn main() { rendezvous_point, None, ) { - log::error!("Failed to register: {error}"); + tracing::error!("Failed to register: {error}"); return; } - log::info!("Connection established with rendezvous point {}", peer_id); + tracing::info!("Connection established with rendezvous point {}", peer_id); } // once `/identify` did its job, we know our external address and can register SwarmEvent::Behaviour(MyBehaviourEvent::Rendezvous( @@ -90,7 +92,7 @@ async fn main() { rendezvous_node, }, )) => { - log::info!( + tracing::info!( "Registered for namespace '{}' at rendezvous point {} for the next {} seconds", namespace, rendezvous_node, @@ -104,7 +106,7 @@ async fn main() { error, }, )) => { - log::error!( + tracing::error!( "Failed to register: rendezvous_node={}, namespace={}, error_code={:?}", rendezvous_node, namespace, @@ -117,10 +119,10 @@ async fn main() { result: Ok(rtt), .. 
})) if peer != rendezvous_point => { - log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) + tracing::info!("Ping to {} is {}ms", peer, rtt.as_millis()) } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } } } diff --git a/examples/rendezvous/src/main.rs b/examples/rendezvous/src/main.rs index a3ed3c0fce5c..a15bc1ca2d37 100644 --- a/examples/rendezvous/src/main.rs +++ b/examples/rendezvous/src/main.rs @@ -22,52 +22,56 @@ use futures::StreamExt; use libp2p::{ - core::transport::upgrade::Version, - identify, identity, noise, ping, rendezvous, - swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, yamux, PeerId, Transport, + identify, noise, ping, rendezvous, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, }; +use std::error::Error; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::main] -async fn main() { - env_logger::init(); +async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); - let key_pair = identity::Keypair::generate_ed25519(); + // Results in PeerID 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN which is + // used as the rendezvous point by the other peer examples. + let keypair = libp2p::identity::Keypair::ed25519_from_bytes([0; 32]).unwrap(); - let mut swarm = SwarmBuilder::with_tokio_executor( - tcp::tokio::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&key_pair).unwrap()) - .multiplex(yamux::Config::default()) - .boxed(), - MyBehaviour { + let mut swarm = libp2p::SwarmBuilder::with_existing_identity(keypair) + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? 
+ .with_behaviour(|key| MyBehaviour { identify: identify::Behaviour::new(identify::Config::new( "rendezvous-example/1.0.0".to_string(), - key_pair.public(), + key.public(), )), rendezvous: rendezvous::server::Behaviour::new(rendezvous::server::Config::default()), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), - }, - PeerId::from(key_pair.public()), - ) - .idle_connection_timeout(Duration::from_secs(5)) - .build(); + })? + .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5))) + .build(); let _ = swarm.listen_on("/ip4/0.0.0.0/tcp/62649".parse().unwrap()); while let Some(event) = swarm.next().await { match event { SwarmEvent::ConnectionEstablished { peer_id, .. } => { - log::info!("Connected to {}", peer_id); + tracing::info!("Connected to {}", peer_id); } SwarmEvent::ConnectionClosed { peer_id, .. } => { - log::info!("Disconnected from {}", peer_id); + tracing::info!("Disconnected from {}", peer_id); } SwarmEvent::Behaviour(MyBehaviourEvent::Rendezvous( rendezvous::server::Event::PeerRegistered { peer, registration }, )) => { - log::info!( + tracing::info!( "Peer {} registered for namespace '{}'", peer, registration.namespace @@ -79,17 +83,19 @@ async fn main() { registrations, }, )) => { - log::info!( + tracing::info!( "Served peer {} with {} registrations", enquirer, registrations.len() ); } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } } } + + Ok(()) } #[derive(NetworkBehaviour)] diff --git a/examples/stream/Cargo.toml b/examples/stream/Cargo.toml new file mode 100644 index 000000000000..37f84e6ed62d --- /dev/null +++ b/examples/stream/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "stream-example" +version = "0.1.0" +edition = "2021" +publish = false +license = "MIT" + +[package.metadata.release] +release = false + +[dependencies] +anyhow = "1" +futures = "0.3.29" +libp2p = { path = "../../libp2p", features = [ "tokio", "quic"] } +libp2p-stream 
= { path = "../../protocols/stream", version = "0.1.0-alpha" } +rand = "0.8" +tokio = { version = "1.36", features = ["full"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/examples/stream/README.md b/examples/stream/README.md new file mode 100644 index 000000000000..8437a5ea21e7 --- /dev/null +++ b/examples/stream/README.md @@ -0,0 +1,35 @@ +## Description + +This example shows the usage of the `stream::Behaviour`. +As a counter-part to the `request_response::Behaviour`, the `stream::Behaviour` allows users to write stream-oriented protocols whilst having minimal interaction with the `Swarm`. + +In this showcase, we implement an echo protocol: All incoming data is echoed back to the dialer, until the stream is closed. + +## Usage + +To run the example, follow these steps: + +1. Start an instance of the example in one terminal: + + ```sh + cargo run --bin stream-example + ``` + + Observe printed listen address. + +2. Start another instance in a new terminal, providing the listen address of the first one. + + ```sh + cargo run --bin stream-example --
+ ``` + +3. Both terminals should now continuosly print messages. + +## Conclusion + +The `stream::Behaviour` is an "escape-hatch" from the way typical rust-libp2p protocols are written. +It is suitable for several scenarios including: + +- prototyping of new protocols +- experimentation with rust-libp2p +- integration in `async/await`-heavy applications \ No newline at end of file diff --git a/examples/stream/src/main.rs b/examples/stream/src/main.rs new file mode 100644 index 000000000000..872ab8c3b980 --- /dev/null +++ b/examples/stream/src/main.rs @@ -0,0 +1,154 @@ +use std::{io, time::Duration}; + +use anyhow::{Context, Result}; +use futures::{AsyncReadExt, AsyncWriteExt, StreamExt}; +use libp2p::{multiaddr::Protocol, Multiaddr, PeerId, Stream, StreamProtocol}; +use libp2p_stream as stream; +use rand::RngCore; +use tracing::level_filters::LevelFilter; +use tracing_subscriber::EnvFilter; + +const ECHO_PROTOCOL: StreamProtocol = StreamProtocol::new("/echo"); + +#[tokio::main] +async fn main() -> Result<()> { + tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::builder() + .with_default_directive(LevelFilter::INFO.into()) + .from_env()?, + ) + .init(); + + let maybe_address = std::env::args() + .nth(1) + .map(|arg| arg.parse::()) + .transpose() + .context("Failed to parse argument as `Multiaddr`")?; + + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_quic() + .with_behaviour(|_| stream::Behaviour::new())? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(10))) + .build(); + + swarm.listen_on("/ip4/127.0.0.1/udp/0/quic-v1".parse()?)?; + + let mut incoming_streams = swarm + .behaviour() + .new_control() + .accept(ECHO_PROTOCOL) + .unwrap(); + + // Deal with incoming streams. + // Spawning a dedicated task is just one way of doing this. + // libp2p doesn't care how you handle incoming streams but you _must_ handle them somehow. 
+ // To mitigate DoS attacks, libp2p will internally drop incoming streams if your application cannot keep up processing them. + tokio::spawn(async move { + // This loop handles incoming streams _sequentially_ but that doesn't have to be the case. + // You can also spawn a dedicated task per stream if you want to. + // Be aware that this breaks backpressure though as spawning new tasks is equivalent to an unbounded buffer. + // Each task needs memory meaning an aggressive remote peer may force you OOM this way. + + while let Some((peer, stream)) = incoming_streams.next().await { + match echo(stream).await { + Ok(n) => { + tracing::info!(%peer, "Echoed {n} bytes!"); + } + Err(e) => { + tracing::warn!(%peer, "Echo failed: {e}"); + continue; + } + }; + } + }); + + // In this demo application, the dialing peer initiates the protocol. + if let Some(address) = maybe_address { + let Some(Protocol::P2p(peer_id)) = address.iter().last() else { + anyhow::bail!("Provided address does not end in `/p2p`"); + }; + + swarm.dial(address)?; + + tokio::spawn(connection_handler(peer_id, swarm.behaviour().new_control())); + } + + // Poll the swarm to make progress. + loop { + let event = swarm.next().await.expect("never terminates"); + + match event { + libp2p::swarm::SwarmEvent::NewListenAddr { address, .. } => { + let listen_address = address.with_p2p(*swarm.local_peer_id()).unwrap(); + tracing::info!(%listen_address); + } + event => tracing::trace!(?event), + } + } +} + +/// A very simple, `async fn`-based connection handler for our custom echo protocol. +async fn connection_handler(peer: PeerId, mut control: stream::Control) { + loop { + tokio::time::sleep(Duration::from_secs(1)).await; // Wait a second between echos. + + let stream = match control.open_stream(peer, ECHO_PROTOCOL).await { + Ok(stream) => stream, + Err(error @ stream::OpenStreamError::UnsupportedProtocol(_)) => { + tracing::info!(%peer, %error); + return; + } + Err(error) => { + // Other errors may be temporary. 
+ // In production, something like an exponential backoff / circuit-breaker may be more appropriate. + tracing::debug!(%peer, %error); + continue; + } + }; + + if let Err(e) = send(stream).await { + tracing::warn!(%peer, "Echo protocol failed: {e}"); + continue; + } + + tracing::info!(%peer, "Echo complete!") + } +} + +async fn echo(mut stream: Stream) -> io::Result { + let mut total = 0; + + let mut buf = [0u8; 100]; + + loop { + let read = stream.read(&mut buf).await?; + if read == 0 { + return Ok(total); + } + + total += read; + stream.write_all(&buf[..read]).await?; + } +} + +async fn send(mut stream: Stream) -> io::Result<()> { + let num_bytes = rand::random::() % 1000; + + let mut bytes = vec![0; num_bytes]; + rand::thread_rng().fill_bytes(&mut bytes); + + stream.write_all(&bytes).await?; + + let mut buf = vec![0; num_bytes]; + stream.read_exact(&mut buf).await?; + + if bytes != buf { + return Err(io::Error::new(io::ErrorKind::Other, "incorrect echo")); + } + + stream.close().await?; + + Ok(()) +} diff --git a/examples/upnp/Cargo.toml b/examples/upnp/Cargo.toml index 74e88bf82bd6..db9825c8742b 100644 --- a/examples/upnp/Cargo.toml +++ b/examples/upnp/Cargo.toml @@ -5,10 +5,14 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] tokio = { version = "1", features = ["rt-multi-thread", "macros"] } -futures = "0.3.28" -libp2p = { path = "../../libp2p", features = ["tokio", "dns", "macros", "noise", "ping", "tcp", "websocket", "yamux", "upnp"] } +futures = "0.3.30" +libp2p = { path = "../../libp2p", features = ["tokio", "dns", "macros", "noise", "ping", "tcp", "yamux", "upnp"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/upnp/src/main.rs b/examples/upnp/src/main.rs index b4350dc82adc..fd0764990d13 100644 --- a/examples/upnp/src/main.rs +++ b/examples/upnp/src/main.rs @@ -21,32 +21,25 @@ #![doc = include_str!("../README.md")] use 
futures::prelude::*; -use libp2p::core::upgrade::Version; -use libp2p::{ - identity, noise, - swarm::{SwarmBuilder, SwarmEvent}, - tcp, upnp, yamux, Multiaddr, PeerId, Transport, -}; +use libp2p::{noise, swarm::SwarmEvent, upnp, yamux, Multiaddr}; use std::error::Error; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() -> Result<(), Box> { - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - println!("Local peer id: {local_peer_id:?}"); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); - let transport = tcp::tokio::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key)?) - .multiplex(yamux::Config::default()) - .boxed(); - - let mut swarm = SwarmBuilder::with_tokio_executor( - transport, - upnp::tokio::Behaviour::default(), - local_peer_id, - ) - .build(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|_| upnp::tokio::Behaviour::default())? + .build(); // Tell the swarm to listen on all interfaces and a random, OS-assigned // port. 
diff --git a/hole-punching-tests/Cargo.toml b/hole-punching-tests/Cargo.toml new file mode 100644 index 000000000000..9e6fb15965d3 --- /dev/null +++ b/hole-punching-tests/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "hole-punching-tests" +version = "0.1.0" +edition = "2021" +publish = false +license = "MIT" + +[dependencies] +anyhow = "1" +env_logger = "0.10.2" +futures = "0.3.30" +libp2p = { path = "../libp2p", features = ["tokio", "dcutr", "identify", "macros", "noise", "ping", "relay", "tcp", "yamux", "quic"] } +tracing = "0.1.37" +redis = { version = "0.23.0", default-features = false, features = ["tokio-comp"] } +tokio = { version = "1.36.0", features = ["full"] } +serde = { version = "1.0.196", features = ["derive"] } +serde_json = "1.0.113" +either = "1.9.0" diff --git a/hole-punching-tests/Dockerfile b/hole-punching-tests/Dockerfile new file mode 100644 index 000000000000..864f058799e4 --- /dev/null +++ b/hole-punching-tests/Dockerfile @@ -0,0 +1,19 @@ +# syntax=docker/dockerfile:1.5-labs +FROM rust:1.73.0 as builder + +# Run with access to the target cache to speed up builds +WORKDIR /workspace +ADD . . + +# Build the relay as a statically-linked binary. Unfortunately, we must specify the `--target` explicitly. See https://msfjarvis.dev/posts/building-static-rust-binaries-for-linux/. 
+RUN --mount=type=cache,target=./target \ + --mount=type=cache,target=/usr/local/cargo/registry \ + RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --package hole-punching-tests --target $(rustc -vV | grep host | awk '{print $2}') + +RUN --mount=type=cache,target=./target \ + mv ./target/$(rustc -vV | grep host | awk '{print $2}')/release/hole-punching-tests /usr/local/bin/hole-punching-tests + +FROM alpine:3 +COPY --from=builder /usr/local/bin/hole-punching-tests /usr/bin/hole-punch-client +RUN --mount=type=cache,target=/var/cache/apk apk add bind-tools jq curl tcpdump iproute2-tc +ENV RUST_BACKTRACE=1 diff --git a/hole-punching-tests/src/main.rs b/hole-punching-tests/src/main.rs new file mode 100644 index 000000000000..4f81cd654806 --- /dev/null +++ b/hole-punching-tests/src/main.rs @@ -0,0 +1,369 @@ +// Copyright 2023 Protocol Labs. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +use anyhow::{Context, Result}; +use either::Either; +use futures::stream::StreamExt; +use libp2p::core::transport::ListenerId; +use libp2p::swarm::dial_opts::DialOpts; +use libp2p::swarm::ConnectionId; +use libp2p::{ + core::multiaddr::{Multiaddr, Protocol}, + dcutr, identify, noise, ping, relay, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, Swarm, +}; +use redis::AsyncCommands; +use std::collections::HashMap; +use std::net::{IpAddr, Ipv4Addr}; +use std::str::FromStr; +use std::time::Duration; +use std::{fmt, io}; + +/// The redis key we push the relay's TCP listen address to. +const RELAY_TCP_ADDRESS: &str = "RELAY_TCP_ADDRESS"; +/// The redis key we push the relay's QUIC listen address to. +const RELAY_QUIC_ADDRESS: &str = "RELAY_QUIC_ADDRESS"; +/// The redis key we push the listen client's PeerId to. +const LISTEN_CLIENT_PEER_ID: &str = "LISTEN_CLIENT_PEER_ID"; + +#[tokio::main] +async fn main() -> Result<()> { + env_logger::builder() + .parse_filters("debug,netlink_proto=warn,rustls=warn,multistream_select=warn,libp2p_core::transport::choice=off,libp2p_swarm::connection=warn,libp2p_quic=trace") + .parse_default_env() + .init(); + + let mode = get_env("MODE")?; + let transport = get_env("TRANSPORT")?; + + let mut redis = RedisClient::new("redis", 6379).await?; + + let relay_addr = match transport { + TransportProtocol::Tcp => redis.pop::(RELAY_TCP_ADDRESS).await?, + TransportProtocol::Quic => redis.pop::(RELAY_QUIC_ADDRESS).await?, + }; + + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::new().port_reuse(true).nodelay(true), + noise::Config::new, + yamux::Config::default, + )? + .with_quic() + .with_relay_client(noise::Config::new, yamux::Config::default)? 
+ .with_behaviour(|key, relay_client| { + Ok(Behaviour { + relay_client, + identify: identify::Behaviour::new(identify::Config::new( + "/hole-punch-tests/1".to_owned(), + key.public(), + )), + dcutr: dcutr::Behaviour::new(key.public().to_peer_id()), + ping: ping::Behaviour::new( + ping::Config::default().with_interval(Duration::from_secs(1)), + ), + }) + })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); + + client_listen_on_transport(&mut swarm, transport).await?; + let id = client_setup(&mut swarm, &mut redis, relay_addr.clone(), mode).await?; + + let mut hole_punched_peer_connection = None; + + loop { + match ( + swarm.next().await.unwrap(), + hole_punched_peer_connection, + id, + ) { + ( + SwarmEvent::Behaviour(BehaviourEvent::RelayClient( + relay::client::Event::ReservationReqAccepted { .. }, + )), + _, + _, + ) => { + tracing::info!("Relay accepted our reservation request."); + + redis + .push(LISTEN_CLIENT_PEER_ID, swarm.local_peer_id()) + .await?; + } + ( + SwarmEvent::Behaviour(BehaviourEvent::Dcutr(dcutr::Event { + remote_peer_id, + result: Ok(connection_id), + })), + _, + _, + ) => { + tracing::info!("Successfully hole-punched to {remote_peer_id}"); + + hole_punched_peer_connection = Some(connection_id); + } + ( + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { + connection, + result: Ok(rtt), + .. + })), + Some(hole_punched_connection), + _, + ) if mode == Mode::Dial && connection == hole_punched_connection => { + println!("{}", serde_json::to_string(&Report::new(rtt))?); + + return Ok(()); + } + ( + SwarmEvent::Behaviour(BehaviourEvent::Dcutr(dcutr::Event { + remote_peer_id, + result: Err(error), + .. + })), + _, + _, + ) => { + tracing::info!("Failed to hole-punched to {remote_peer_id}"); + return Err(anyhow::Error::new(error)); + } + ( + SwarmEvent::ListenerClosed { + listener_id, + reason: Err(e), + .. 
+ }, + _, + Either::Left(reservation), + ) if listener_id == reservation => { + anyhow::bail!("Reservation on relay failed: {e}"); + } + ( + SwarmEvent::OutgoingConnectionError { + connection_id, + error, + .. + }, + _, + Either::Right(circuit), + ) if connection_id == circuit => { + anyhow::bail!("Circuit request relay failed: {error}"); + } + _ => {} + } + } +} + +#[derive(serde::Serialize)] +struct Report { + rtt_to_holepunched_peer_millis: u128, +} + +impl Report { + fn new(rtt: Duration) -> Self { + Self { + rtt_to_holepunched_peer_millis: rtt.as_millis(), + } + } +} + +fn get_env(key: &'static str) -> Result +where + T: FromStr, + T::Err: std::error::Error + Send + Sync + 'static, +{ + let val = std::env::var(key) + .with_context(|| format!("Missing env var `{key}`"))? + .parse() + .with_context(|| format!("Failed to parse `{key}`)"))?; + + Ok(val) +} + +async fn client_listen_on_transport( + swarm: &mut Swarm, + transport: TransportProtocol, +) -> Result<()> { + let listen_addr = match transport { + TransportProtocol::Tcp => tcp_addr(Ipv4Addr::UNSPECIFIED.into()), + TransportProtocol::Quic => quic_addr(Ipv4Addr::UNSPECIFIED.into()), + }; + let expected_listener_id = swarm + .listen_on(listen_addr) + .context("Failed to listen on address")?; + + let mut listen_addresses = 0; + + // We should have at least two listen addresses, one for localhost and the actual interface. 
+ while listen_addresses < 2 { + if let SwarmEvent::NewListenAddr { + listener_id, + address, + } = swarm.next().await.unwrap() + { + if listener_id == expected_listener_id { + listen_addresses += 1; + } + + tracing::info!("Listening on {address}"); + } + } + Ok(()) +} + +async fn client_setup( + swarm: &mut Swarm, + redis: &mut RedisClient, + relay_addr: Multiaddr, + mode: Mode, +) -> Result> { + let either = match mode { + Mode::Listen => { + let id = swarm.listen_on(relay_addr.with(Protocol::P2pCircuit))?; + + Either::Left(id) + } + Mode::Dial => { + let remote_peer_id = redis.pop(LISTEN_CLIENT_PEER_ID).await?; + + let opts = DialOpts::from( + relay_addr + .with(Protocol::P2pCircuit) + .with(Protocol::P2p(remote_peer_id)), + ); + let id = opts.connection_id(); + + swarm.dial(opts)?; + + Either::Right(id) + } + }; + + Ok(either) +} + +fn tcp_addr(addr: IpAddr) -> Multiaddr { + Multiaddr::empty().with(addr.into()).with(Protocol::Tcp(0)) +} + +fn quic_addr(addr: IpAddr) -> Multiaddr { + Multiaddr::empty() + .with(addr.into()) + .with(Protocol::Udp(0)) + .with(Protocol::QuicV1) +} + +struct RedisClient { + inner: redis::aio::Connection, +} + +impl RedisClient { + async fn new(host: &str, port: u16) -> Result { + let client = redis::Client::open(format!("redis://{host}:{port}/")) + .context("Bad redis server URL")?; + let connection = client + .get_async_connection() + .await + .context("Failed to connect to redis server")?; + + Ok(Self { inner: connection }) + } + + async fn push(&mut self, key: &str, value: impl ToString) -> Result<()> { + let value = value.to_string(); + + tracing::debug!("Pushing {key}={value} to redis"); + + self.inner.rpush(key, value).await?; + + Ok(()) + } + + async fn pop(&mut self, key: &str) -> Result + where + V: FromStr + fmt::Display, + V::Err: std::error::Error + Send + Sync + 'static, + { + tracing::debug!("Fetching {key} from redis"); + + let value = self + .inner + .blpop::<_, HashMap>(key, 0) + .await? 
+ .remove(key) + .with_context(|| format!("Failed to get value for {key} from redis"))? + .parse()?; + + tracing::debug!("{key}={value}"); + + Ok(value) + } +} + +#[derive(Clone, Copy, Debug, PartialEq)] +enum TransportProtocol { + Tcp, + Quic, +} + +impl FromStr for TransportProtocol { + type Err = io::Error; + fn from_str(mode: &str) -> Result { + match mode { + "tcp" => Ok(TransportProtocol::Tcp), + "quic" => Ok(TransportProtocol::Quic), + _ => Err(io::Error::new( + io::ErrorKind::Other, + "Expected either 'tcp' or 'quic'", + )), + } + } +} + +#[derive(Clone, Copy, Debug, PartialEq)] +enum Mode { + Dial, + Listen, +} + +impl FromStr for Mode { + type Err = io::Error; + fn from_str(mode: &str) -> Result { + match mode { + "dial" => Ok(Mode::Dial), + "listen" => Ok(Mode::Listen), + _ => Err(io::Error::new( + io::ErrorKind::Other, + "Expected either 'dial' or 'listen'", + )), + } + } +} + +#[derive(NetworkBehaviour)] +struct Behaviour { + relay_client: relay::client::Behaviour, + identify: identify::Behaviour, + dcutr: dcutr::Behaviour, + ping: ping::Behaviour, +} diff --git a/hole-punching-tests/version.json b/hole-punching-tests/version.json new file mode 100644 index 000000000000..f5db52d1c2dd --- /dev/null +++ b/hole-punching-tests/version.json @@ -0,0 +1,8 @@ +{ + "id": "rust-libp2p-head", + "containerImageID": "rust-libp2p-head", + "transports": [ + "tcp", + "quic" + ] +} diff --git a/identity/CHANGELOG.md b/identity/CHANGELOG.md index 27873e88e016..004943ce1950 100644 --- a/identity/CHANGELOG.md +++ b/identity/CHANGELOG.md @@ -1,3 +1,19 @@ +## 0.2.8 + +- Bump `ring` to `0.17.5. + See [PR 4779](https://github.com/libp2p/rust-libp2p/pull/4779). + +## 0.2.7 + +- Add `rand` feature to gate methods requiring a random number generator, enabling use in restricted environments (e.g. smartcontracts). + This feature is not enabled by default. + See [PR 4349](https://github.com/libp2p/rust-libp2p/pull/4349). 
+ +## 0.2.6 + +- Make `PeerId::to_bytes` and `PeerId::to_base58` take `self` by value to follow Rust convention of `Copy` types. + See [PR 4653](https://github.com/libp2p/rust-libp2p/pull/4653). + ## 0.2.5 - Fix usage of HKDF within `Keypair::derive_secret`. diff --git a/identity/Cargo.toml b/identity/Cargo.toml index 69f8bf55f02a..920b9a990e4f 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "libp2p-identity" -version = "0.2.5" +version = "0.2.8" edition = "2021" description = "Data structures and algorithms for identifying peers in libp2p." -rust-version = { workspace = true } +rust-version = "1.73.0" # MUST NOT inherit from workspace because we don't want to publish breaking changes to `libp2p-identity`. license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking", "cryptography"] @@ -14,10 +14,10 @@ categories = ["cryptography"] [dependencies] asn1_der = { version = "0.7.6", optional = true } bs58 = { version = "0.5.0", optional = true } -ed25519-dalek = { version = "2.0", optional = true, features = ["rand_core"] } -hkdf = { version = "0.12.3", optional = true } +ed25519-dalek = { version = "2.1", optional = true } +hkdf = { version = "0.12.4", optional = true } libsecp256k1 = { version = "0.7.0", optional = true } -log = "0.4" +tracing = "0.1.37" multihash = { version = "0.19.1", optional = true } p256 = { version = "0.13", default-features = false, features = [ "ecdsa", "std", "pem"], optional = true } quick-protobuf = "0.8.1" @@ -27,21 +27,22 @@ serde = { version = "1", optional = true, features = ["derive"] } sha2 = { version = "0.10.8", optional = true } thiserror = { version = "1.0", optional = true } void = { version = "1.0", optional = true } -zeroize = { version = "1.6", optional = true } +zeroize = { version = "1.7", optional = true } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -ring = { version = "0.16.9", features = [ "alloc", "std"], 
default-features = false, optional = true } +ring = { version = "0.17.5", features = [ "alloc", "std"], default-features = false, optional = true } [features] -secp256k1 = ["dep:libsecp256k1", "dep:asn1_der", "dep:rand", "dep:sha2", "dep:hkdf", "dep:zeroize"] -ecdsa = ["dep:p256", "dep:rand", "dep:void", "dep:zeroize", "dep:sec1", "dep:sha2", "dep:hkdf"] +secp256k1 = ["dep:libsecp256k1", "dep:asn1_der", "dep:sha2", "dep:hkdf", "dep:zeroize"] +ecdsa = ["dep:p256", "dep:void", "dep:zeroize", "dep:sec1", "dep:sha2", "dep:hkdf"] rsa = ["dep:ring", "dep:asn1_der", "dep:rand", "dep:zeroize"] -ed25519 = ["dep:ed25519-dalek", "dep:rand", "dep:zeroize", "dep:sha2", "dep:hkdf"] -peerid = ["dep:multihash", "dep:bs58", "dep:rand", "dep:thiserror", "dep:sha2", "dep:hkdf" ] +ed25519 = ["dep:ed25519-dalek", "dep:zeroize", "dep:sha2", "dep:hkdf"] +peerid = ["dep:multihash", "dep:bs58", "dep:thiserror", "dep:sha2", "dep:hkdf"] +rand = ["dep:rand", "ed25519-dalek?/rand_core"] [dev-dependencies] quickcheck = { workspace = true } -base64 = "0.21.4" +base64 = "0.21.7" serde_json = "1.0" rmp-serde = "1.1" criterion = "0.5" diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index 21970f2ffdca..2f1a286d46d3 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -44,6 +44,7 @@ pub struct Keypair { impl Keypair { /// Generate a new random ECDSA keypair. + #[cfg(feature = "rand")] pub fn generate() -> Keypair { Keypair::from(SecretKey::generate()) } @@ -150,9 +151,8 @@ pub struct PublicKey(VerifyingKey); impl PublicKey { /// Verify an ECDSA signature on a message using the public key. 
pub fn verify(&self, msg: &[u8], sig: &[u8]) -> bool { - let sig = match Signature::from_der(sig) { - Ok(sig) => sig, - Err(_) => return false, + let Ok(sig) = Signature::from_der(sig) else { + return false; }; self.0.verify(msg, &sig).is_ok() } @@ -265,6 +265,7 @@ mod tests { use super::*; #[test] + #[cfg(feature = "rand")] fn sign_verify() { let pair = Keypair::generate(); let pk = pair.public(); diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs index 8b6b9e0d1e0b..529a4dddea1c 100644 --- a/identity/src/ed25519.rs +++ b/identity/src/ed25519.rs @@ -34,6 +34,7 @@ pub struct Keypair(ed25519::SigningKey); impl Keypair { /// Generate a new random Ed25519 keypair. + #[cfg(feature = "rand")] pub fn generate() -> Keypair { Keypair::from(SecretKey::generate()) } @@ -181,6 +182,7 @@ impl fmt::Debug for SecretKey { impl SecretKey { /// Generate a new Ed25519 secret key. + #[cfg(feature = "rand")] pub fn generate() -> SecretKey { let signing = ed25519::SigningKey::generate(&mut rand::rngs::OsRng); SecretKey(signing.to_bytes()) @@ -213,6 +215,7 @@ mod tests { } #[test] + #[cfg(feature = "rand")] fn ed25519_keypair_encode_decode() { fn prop() -> bool { let kp1 = Keypair::generate(); @@ -224,6 +227,7 @@ mod tests { } #[test] + #[cfg(feature = "rand")] fn ed25519_keypair_from_secret() { fn prop() -> bool { let kp1 = Keypair::generate(); @@ -235,6 +239,7 @@ mod tests { } #[test] + #[cfg(feature = "rand")] fn ed25519_signature() { let kp = Keypair::generate(); let pk = kp.public(); diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 198296fa4fad..bdfb68c0091c 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -102,7 +102,7 @@ enum KeyPairInner { impl Keypair { /// Generate a new Ed25519 keypair. 
- #[cfg(feature = "ed25519")] + #[cfg(all(feature = "ed25519", feature = "rand"))] pub fn generate_ed25519() -> Keypair { Keypair { keypair: KeyPairInner::Ed25519(ed25519::Keypair::generate()), @@ -110,7 +110,7 @@ impl Keypair { } /// Generate a new Secp256k1 keypair. - #[cfg(feature = "secp256k1")] + #[cfg(all(feature = "secp256k1", feature = "rand"))] pub fn generate_secp256k1() -> Keypair { Keypair { keypair: KeyPairInner::Secp256k1(secp256k1::Keypair::generate()), @@ -118,7 +118,7 @@ impl Keypair { } /// Generate a new ECDSA keypair. - #[cfg(feature = "ecdsa")] + #[cfg(all(feature = "ecdsa", feature = "rand"))] pub fn generate_ecdsa() -> Keypair { Keypair { keypair: KeyPairInner::Ecdsa(ecdsa::Keypair::generate()), @@ -352,7 +352,6 @@ impl Keypair { /// ``` /// # fn main() { /// # use libp2p_identity as identity; - /// /// let key = identity::Keypair::generate_ed25519(); /// /// let new_key = key.derive_secret(b"my encryption key").expect("can derive secret for ed25519"); @@ -673,7 +672,7 @@ impl TryFrom for PublicKey { )?), #[cfg(not(feature = "ed25519"))] proto::KeyType::Ed25519 => { - log::debug!("support for ed25519 was disabled at compile-time"); + tracing::debug!("support for ed25519 was disabled at compile-time"); Err(DecodingError::missing_feature("ed25519")) } #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] @@ -686,7 +685,7 @@ impl TryFrom for PublicKey { } #[cfg(any(not(feature = "rsa"), target_arch = "wasm32"))] proto::KeyType::RSA => { - log::debug!("support for RSA was disabled at compile-time"); + tracing::debug!("support for RSA was disabled at compile-time"); Err(DecodingError::missing_feature("rsa")) } #[cfg(feature = "secp256k1")] @@ -696,7 +695,7 @@ impl TryFrom for PublicKey { })?), #[cfg(not(feature = "secp256k1"))] proto::KeyType::Secp256k1 => { - log::debug!("support for secp256k1 was disabled at compile-time"); + tracing::debug!("support for secp256k1 was disabled at compile-time"); 
Err(DecodingError::missing_feature("secp256k1")) } #[cfg(feature = "ecdsa")] @@ -707,7 +706,7 @@ impl TryFrom for PublicKey { )?), #[cfg(not(feature = "ecdsa"))] proto::KeyType::ECDSA => { - log::debug!("support for ECDSA was disabled at compile-time"); + tracing::debug!("support for ECDSA was disabled at compile-time"); Err(DecodingError::missing_feature("ecdsa")) } } @@ -926,7 +925,7 @@ mod tests { } #[test] - #[cfg(feature = "ed25519")] + #[cfg(all(feature = "ed25519", feature = "rand"))] fn test_publickey_from_ed25519_public_key() { let pubkey = Keypair::generate_ed25519().public(); let ed25519_pubkey = pubkey @@ -941,7 +940,7 @@ mod tests { } #[test] - #[cfg(feature = "secp256k1")] + #[cfg(all(feature = "secp256k1", feature = "rand"))] fn test_publickey_from_secp256k1_public_key() { let pubkey = Keypair::generate_secp256k1().public(); let secp256k1_pubkey = pubkey @@ -955,7 +954,7 @@ mod tests { } #[test] - #[cfg(feature = "ecdsa")] + #[cfg(all(feature = "ecdsa", feature = "rand"))] fn test_publickey_from_ecdsa_public_key() { let pubkey = Keypair::generate_ecdsa().public(); let ecdsa_pubkey = pubkey.clone().try_into_ecdsa().expect("A ecdsa keypair"); diff --git a/identity/src/lib.rs b/identity/src/lib.rs index 9a6c42374f66..c78e68d1652a 100644 --- a/identity/src/lib.rs +++ b/identity/src/lib.rs @@ -114,8 +114,9 @@ pub use keypair::{Keypair, PublicKey}; #[cfg(feature = "peerid")] pub use peer_id::{ParseError, PeerId}; -#[derive(Debug, PartialEq, Eq)] /// The type of key a `KeyPair` is holding. +#[derive(Debug, PartialEq, Eq)] +#[allow(clippy::upper_case_acronyms)] pub enum KeyType { Ed25519, RSA, diff --git a/identity/src/peer_id.rs b/identity/src/peer_id.rs index 60ded4ad37c1..1d85fe66ffad 100644 --- a/identity/src/peer_id.rs +++ b/identity/src/peer_id.rs @@ -18,6 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+#[cfg(feature = "rand")] use rand::Rng; use sha2::Digest as _; use std::{convert::TryFrom, fmt, str::FromStr}; @@ -101,6 +102,7 @@ impl PeerId { /// Generates a random peer ID from a cryptographically secure PRNG. /// /// This is useful for randomly walking on a DHT, or for testing purposes. + #[cfg(feature = "rand")] pub fn random() -> PeerId { let peer_id = rand::thread_rng().gen::<[u8; 32]>(); PeerId { @@ -109,12 +111,12 @@ impl PeerId { } /// Returns a raw bytes representation of this `PeerId`. - pub fn to_bytes(&self) -> Vec { + pub fn to_bytes(self) -> Vec { self.multihash.to_bytes() } /// Returns a base-58 encoded string of this `PeerId`. - pub fn to_base58(&self) -> String { + pub fn to_base58(self) -> String { bs58::encode(self.to_bytes()).into_string() } } @@ -247,7 +249,7 @@ mod tests { use super::*; #[test] - #[cfg(feature = "ed25519")] + #[cfg(all(feature = "ed25519", feature = "rand"))] fn peer_id_into_bytes_then_from_bytes() { let peer_id = crate::Keypair::generate_ed25519().public().to_peer_id(); let second = PeerId::from_bytes(&peer_id.to_bytes()).unwrap(); @@ -255,7 +257,7 @@ mod tests { } #[test] - #[cfg(feature = "ed25519")] + #[cfg(all(feature = "ed25519", feature = "rand"))] fn peer_id_to_base58_then_back() { let peer_id = crate::Keypair::generate_ed25519().public().to_peer_id(); let second: PeerId = peer_id.to_base58().parse().unwrap(); @@ -263,6 +265,7 @@ mod tests { } #[test] + #[cfg(feature = "rand")] fn random_peer_id_is_valid() { for _ in 0..5000 { let peer_id = PeerId::random(); diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index f14b19750231..cbfe3c1b919b 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -71,7 +71,7 @@ impl Keypair { /// Sign a message with this keypair. 
pub fn sign(&self, data: &[u8]) -> Result, SigningError> { - let mut signature = vec![0; self.0.public_modulus_len()]; + let mut signature = vec![0; self.0.public().modulus_len()]; let rng = SystemRandom::new(); match self.0.sign(&RSA_PKCS1_SHA256, &rng, data, &mut signature) { Ok(()) => Ok(signature), diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index 94b9b9177876..5e1fda2933b7 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -38,6 +38,7 @@ pub struct Keypair { impl Keypair { /// Generate a new sec256k1 `Keypair`. + #[cfg(feature = "rand")] pub fn generate() -> Keypair { Keypair::from(SecretKey::generate()) } @@ -88,6 +89,7 @@ impl fmt::Debug for SecretKey { impl SecretKey { /// Generate a new random Secp256k1 secret key. + #[cfg(feature = "rand")] pub fn generate() -> SecretKey { SecretKey(libsecp256k1::SecretKey::random(&mut rand::thread_rng())) } @@ -226,6 +228,7 @@ mod tests { use super::*; #[test] + #[cfg(feature = "rand")] fn secp256k1_secret_from_bytes() { let sk1 = SecretKey::generate(); let mut sk_bytes = [0; 32]; diff --git a/interop-tests/Cargo.toml b/interop-tests/Cargo.toml index ac046da69cdf..81c17e193036 100644 --- a/interop-tests/Cargo.toml +++ b/interop-tests/Cargo.toml @@ -5,37 +5,43 @@ version = "0.1.0" publish = false license = "MIT" +[package.metadata.release] +release = false + [lib] crate-type = ["cdylib", "rlib"] [dependencies] anyhow = "1" either = "1.9.0" -env_logger = "0.10.0" -futures = "0.3.28" -log = "0.4" -serde = { version = "1", features = ["derive"] } +futures = "0.3.30" rand = "0.8.5" +serde = { version = "1", features = ["derive"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -axum = "0.6" +axum = "0.7" libp2p = { path = "../libp2p", features = [ "ping", "noise", "tls", "rsa", "macros", "websocket", "tokio", "yamux", "tcp", "dns", "identify", "quic"] } -libp2p-webrtc = { workspace = 
true, features = ["tokio"] } libp2p-mplex = { path = "../muxers/mplex" } +libp2p-noise = { workspace = true } +libp2p-tls = { workspace = true } +libp2p-webrtc = { workspace = true, features = ["tokio"] } mime_guess = "2.0" redis = { version = "0.23.3", default-features = false, features = [ "tokio-comp", ] } -rust-embed = "8.0" +rust-embed = "8.2" serde_json = "1" thirtyfour = "=0.32.0-rc.8" # https://github.com/stevepryde/thirtyfour/issues/169 -tokio = { version = "1.32.0", features = ["full"] } -tower-http = { version = "0.4", features = ["cors", "fs", "trace"] } -tracing = "0.1" +tokio = { version = "1.36.0", features = ["full"] } +tower-http = { version = "0.5", features = ["cors", "fs", "trace"] } +tracing = "0.1.37" tracing-subscriber = { version = "0.3", features = ["env-filter"] } [target.'cfg(target_arch = "wasm32")'.dependencies] -libp2p = { path = "../libp2p", features = [ "ping", "macros", "webtransport-websys", "wasm-bindgen", "identify"] } +libp2p = { path = "../libp2p", features = [ "ping", "macros", "webtransport-websys", "wasm-bindgen", "identify", "websocket-websys", "yamux", "noise"] } +libp2p-mplex = { path = "../muxers/mplex" } libp2p-webrtc-websys = { workspace = true } wasm-bindgen = { version = "0.2" } wasm-bindgen-futures = { version = "0.4" } diff --git a/interop-tests/Dockerfile.chromium b/interop-tests/Dockerfile.chromium index ab720c4d3175..5ec46e313aa8 100644 --- a/interop-tests/Dockerfile.chromium +++ b/interop-tests/Dockerfile.chromium @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1.5-labs -FROM rust:1.67.0 as chef +FROM rust:1.73.0 as chef RUN rustup target add wasm32-unknown-unknown RUN wget -q -O- https://github.com/rustwasm/wasm-pack/releases/download/v0.12.1/wasm-pack-v0.12.1-x86_64-unknown-linux-musl.tar.gz | tar -zx -C /usr/local/bin --strip-components 1 --wildcards "wasm-pack-*/wasm-pack" RUN wget -q -O- https://github.com/WebAssembly/binaryen/releases/download/version_115/binaryen-version_115-x86_64-linux.tar.gz | tar -zx -C 
/usr/local/bin --strip-components 2 --wildcards "binaryen-version_*/bin/wasm-opt" @@ -14,13 +14,13 @@ FROM chef AS builder COPY --from=planner /app/recipe.json recipe.json # Build dependencies - this is the caching Docker layer! RUN cargo chef cook --release --package interop-tests --target wasm32-unknown-unknown --recipe-path recipe.json -RUN cargo chef cook --release --package interop-tests --bin wasm_ping --recipe-path recipe.json +RUN RUSTFLAGS='-C target-feature=+crt-static' cargo chef cook --release --package interop-tests --target x86_64-unknown-linux-gnu --bin wasm_ping --recipe-path recipe.json # Build application COPY . . RUN wasm-pack build --target web interop-tests -RUN cargo build --release --package interop-tests --bin wasm_ping +RUN RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --package interop-tests --target x86_64-unknown-linux-gnu --bin wasm_ping FROM selenium/standalone-chrome:115.0 -COPY --from=builder /app/target/release/wasm_ping /usr/local/bin/testplan +COPY --from=builder /app/target/x86_64-unknown-linux-gnu/release/wasm_ping /usr/local/bin/testplan ENV RUST_BACKTRACE=1 ENTRYPOINT ["testplan"] diff --git a/interop-tests/Dockerfile.native b/interop-tests/Dockerfile.native index df5eb9a1240c..91e6cf8893e4 100644 --- a/interop-tests/Dockerfile.native +++ b/interop-tests/Dockerfile.native @@ -1,6 +1,5 @@ # syntax=docker/dockerfile:1.5-labs -FROM rust:1.67.0 as chef -RUN wget -q -O- https://github.com/LukeMathWalker/cargo-chef/releases/download/v0.1.62/cargo-chef-x86_64-unknown-linux-gnu.tar.gz | tar -zx -C /usr/local/bin +FROM lukemathwalker/cargo-chef:0.1.62-rust-1.73.0 as chef WORKDIR /app FROM chef AS planner @@ -10,12 +9,13 @@ RUN cargo chef prepare --recipe-path recipe.json FROM chef AS builder COPY --from=planner /app/recipe.json recipe.json # Build dependencies - this is the caching Docker layer! 
-RUN cargo chef cook --release --package interop-tests --bin native_ping --recipe-path recipe.json +RUN RUSTFLAGS='-C target-feature=+crt-static' cargo chef cook --release --package interop-tests --target $(rustc -vV | grep host | awk '{print $2}') --bin native_ping --recipe-path recipe.json # Build application COPY . . -RUN cargo build --release --package interop-tests --bin native_ping +RUN RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --package interop-tests --target $(rustc -vV | grep host | awk '{print $2}') --bin native_ping +RUN cp /app/target/$(rustc -vV | grep host | awk '{print $2}')/release/native_ping /usr/local/bin/testplan -FROM gcr.io/distroless/cc -COPY --from=builder /app/target/release/native_ping /usr/local/bin/testplan +FROM scratch +COPY --from=builder /usr/local/bin/testplan /usr/local/bin/testplan ENV RUST_BACKTRACE=1 ENTRYPOINT ["testplan"] diff --git a/interop-tests/README.md b/interop-tests/README.md index bab98df7987c..c2805ddf7072 100644 --- a/interop-tests/README.md +++ b/interop-tests/README.md @@ -8,9 +8,11 @@ You can run this test locally by having a local Redis instance and by having another peer that this test can dial or listen for. For example to test that we can dial/listen for ourselves we can do the following: -1. Start redis (needed by the tests): `docker run --rm -p 6379:6379 redis:7-alpine`. -2. In one terminal run the dialer: `redis_addr=localhost:6379 ip="0.0.0.0" transport=quic-v1 security=quic muxer=quic is_dialer="true" cargo run --bin ping` -3. In another terminal, run the listener: `redis_addr=localhost:6379 ip="0.0.0.0" transport=quic-v1 security=quic muxer=quic is_dialer="false" cargo run --bin native_ping` +1. Start redis (needed by the tests): `docker run --rm -p 6379:6379 redis:7-alpine` +2. In one terminal run the dialer: `RUST_LOG=debug redis_addr=localhost:6379 ip="0.0.0.0" transport=tcp security=noise muxer=yamux is_dialer="true" cargo run --bin native_ping` +3. 
In another terminal, run the listener: `RUST_LOG=debug redis_addr=localhost:6379 ip="0.0.0.0" transport=tcp security=noise muxer=yamux is_dialer="false" cargo run --bin native_ping` + +If testing `transport=quic-v1`, then remove `security` and `muxer` variables from command line, because QUIC protocol comes with its own encryption and multiplexing. To test the interop with other versions do something similar, except replace one of these nodes with the other version's interop test. @@ -30,9 +32,9 @@ Firefox is not yet supported as it doesn't support all required features yet To run the webrtc-direct test, you'll need the `chromedriver` in your `$PATH`, compatible with your Chrome browser. 1. Start redis: `docker run --rm -p 6379:6379 redis:7-alpine`. -1. Build the wasm package: `wasm-pack build --target web` -1. With the webrtc-direct listener `RUST_LOG=debug,webrtc=off,webrtc_sctp=off redis_addr="127.0.0.1:6379" ip="0.0.0.0" transport=webrtc-direct is_dialer="false" cargo run --bin native_ping` -1. Run the webrtc-direct dialer: `RUST_LOG=debug,hyper=off redis_addr="127.0.0.1:6379" ip="0.0.0.0" transport=webrtc-direct is_dialer=true cargo run --bin wasm_ping` +2. Build the wasm package: `wasm-pack build --target web` +3. With the webrtc-direct listener `RUST_LOG=debug,webrtc=off,webrtc_sctp=off redis_addr="127.0.0.1:6379" ip="0.0.0.0" transport=webrtc-direct is_dialer="false" cargo run --bin native_ping` +4. Run the webrtc-direct dialer: `RUST_LOG=debug,hyper=off redis_addr="127.0.0.1:6379" ip="0.0.0.0" transport=webrtc-direct is_dialer=true cargo run --bin wasm_ping` # Running all interop tests locally with Compose @@ -41,8 +43,8 @@ To run this test against all released libp2p versions you'll need to have the the following (from the root directory of this repository): 1. Build the image: `docker build -t rust-libp2p-head . -f interop-tests/Dockerfile`. -1. 
Build the images for all released versions in `libp2p/test-plans`: `(cd /libp2p/test-plans/multidim-interop/ && make)`. -1. Run the test: +2. Build the images for all released versions in `libp2p/test-plans`: `(cd /libp2p/test-plans/multidim-interop/ && make)`. +3. Run the test: ``` RUST_LIBP2P="$PWD"; (cd /libp2p/test-plans/multidim-interop/ && npm run test -- --extra-version=$RUST_LIBP2P/interop-tests/ping-version.json --name-filter="rust-libp2p-head") ``` diff --git a/interop-tests/chromium-ping-version.json b/interop-tests/chromium-ping-version.json index ae5c6e10eddd..6ee0a0756d4a 100644 --- a/interop-tests/chromium-ping-version.json +++ b/interop-tests/chromium-ping-version.json @@ -3,8 +3,9 @@ "containerImageID": "chromium-rust-libp2p-head", "transports": [ { "name": "webtransport", "onlyDial": true }, - { "name": "webrtc-direct", "onlyDial": true } + { "name": "webrtc-direct", "onlyDial": true }, + { "name": "ws", "onlyDial": true } ], - "secureChannels": [], - "muxers": [] + "secureChannels": ["noise"], + "muxers": ["mplex", "yamux"] } diff --git a/interop-tests/src/arch.rs b/interop-tests/src/arch.rs index b30c3ad1e31f..52000f90a86f 100644 --- a/interop-tests/src/arch.rs +++ b/interop-tests/src/arch.rs @@ -1,133 +1,166 @@ -use libp2p::core::muxing::StreamMuxerBox; -use libp2p::core::transport::Boxed; -use libp2p::PeerId; - // Native re-exports #[cfg(not(target_arch = "wasm32"))] -pub(crate) use native::{build_transport, init_logger, sleep, swarm_builder, Instant, RedisClient}; +pub(crate) use native::{build_swarm, init_logger, sleep, Instant, RedisClient}; // Wasm re-exports #[cfg(target_arch = "wasm32")] -pub(crate) use wasm::{build_transport, init_logger, sleep, swarm_builder, Instant, RedisClient}; - -type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; +pub(crate) use wasm::{build_swarm, init_logger, sleep, Instant, RedisClient}; #[cfg(not(target_arch = "wasm32"))] pub(crate) mod native { use std::time::Duration; use anyhow::{bail, Context, Result}; 
- use either::Either; - use env_logger::{Env, Target}; use futures::future::BoxFuture; use futures::FutureExt; - use libp2p::core::muxing::StreamMuxerBox; - use libp2p::core::upgrade::Version; use libp2p::identity::Keypair; - use libp2p::swarm::{NetworkBehaviour, SwarmBuilder}; - use libp2p::websocket::WsConfig; - use libp2p::{noise, quic, tcp, tls, yamux, PeerId, Transport as _}; + use libp2p::swarm::{NetworkBehaviour, Swarm}; + use libp2p::{noise, tcp, tls, yamux}; use libp2p_mplex as mplex; use libp2p_webrtc as webrtc; use redis::AsyncCommands; + use tracing_subscriber::EnvFilter; - use crate::{from_env, Muxer, SecProtocol, Transport}; - - use super::BoxedTransport; + use crate::{Muxer, SecProtocol, Transport}; pub(crate) type Instant = std::time::Instant; pub(crate) fn init_logger() { - env_logger::Builder::from_env(Env::default().default_filter_or("info")) - .target(Target::Stdout) - .init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); } pub(crate) fn sleep(duration: Duration) -> BoxFuture<'static, ()> { tokio::time::sleep(duration).boxed() } - fn muxer_protocol_from_env() -> Result> { - Ok(match from_env("muxer")? 
{ - Muxer::Yamux => Either::Left(yamux::Config::default()), - Muxer::Mplex => Either::Right(mplex::MplexConfig::new()), - }) - } - - pub(crate) fn build_transport( - local_key: Keypair, + pub(crate) async fn build_swarm( ip: &str, transport: Transport, - ) -> Result<(BoxedTransport, String)> { - let (transport, addr) = match (transport, from_env::("security")) { - (Transport::QuicV1, _) => ( - quic::tokio::Transport::new(quic::Config::new(&local_key)) - .map(|(p, c), _| (p, StreamMuxerBox::new(c))) - .boxed(), + sec_protocol: Option, + muxer: Option, + behaviour_constructor: impl FnOnce(&Keypair) -> B, + ) -> Result<(Swarm, String)> { + let (swarm, addr) = match (transport, sec_protocol, muxer) { + (Transport::QuicV1, None, None) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_quic() + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), format!("/ip4/{ip}/udp/0/quic-v1"), ), - (Transport::Tcp, Ok(SecProtocol::Tls)) => ( - tcp::tokio::Transport::new(tcp::Config::new()) - .upgrade(Version::V1Lazy) - .authenticate(tls::Config::new(&local_key).context("failed to initialise tls")?) - .multiplex(muxer_protocol_from_env()?) - .timeout(Duration::from_secs(5)) - .boxed(), + (Transport::Tcp, Some(SecProtocol::Tls), Some(Muxer::Mplex)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + tls::Config::new, + mplex::MplexConfig::default, + )? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), format!("/ip4/{ip}/tcp/0"), ), - (Transport::Tcp, Ok(SecProtocol::Noise)) => ( - tcp::tokio::Transport::new(tcp::Config::new()) - .upgrade(Version::V1Lazy) - .authenticate( - noise::Config::new(&local_key).context("failed to intialise noise")?, - ) - .multiplex(muxer_protocol_from_env()?) 
- .timeout(Duration::from_secs(5)) - .boxed(), + (Transport::Tcp, Some(SecProtocol::Tls), Some(Muxer::Yamux)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + tls::Config::new, + yamux::Config::default, + )? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), format!("/ip4/{ip}/tcp/0"), ), - (Transport::Ws, Ok(SecProtocol::Tls)) => ( - WsConfig::new(tcp::tokio::Transport::new(tcp::Config::new())) - .upgrade(Version::V1Lazy) - .authenticate(tls::Config::new(&local_key).context("failed to initialise tls")?) - .multiplex(muxer_protocol_from_env()?) - .timeout(Duration::from_secs(5)) - .boxed(), + (Transport::Tcp, Some(SecProtocol::Noise), Some(Muxer::Mplex)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + mplex::MplexConfig::default, + )? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0"), + ), + (Transport::Tcp, Some(SecProtocol::Noise), Some(Muxer::Yamux)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0"), + ), + (Transport::Ws, Some(SecProtocol::Tls), Some(Muxer::Mplex)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_websocket(tls::Config::new, mplex::MplexConfig::default) + .await? + .with_behaviour(behaviour_constructor)? 
+ .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0/ws"), + ), + (Transport::Ws, Some(SecProtocol::Tls), Some(Muxer::Yamux)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_websocket(tls::Config::new, yamux::Config::default) + .await? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0/ws"), + ), + (Transport::Ws, Some(SecProtocol::Noise), Some(Muxer::Mplex)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_websocket(noise::Config::new, mplex::MplexConfig::default) + .await? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), format!("/ip4/{ip}/tcp/0/ws"), ), - (Transport::Ws, Ok(SecProtocol::Noise)) => ( - WsConfig::new(tcp::tokio::Transport::new(tcp::Config::new())) - .upgrade(Version::V1Lazy) - .authenticate( - noise::Config::new(&local_key).context("failed to intialise noise")?, - ) - .multiplex(muxer_protocol_from_env()?) - .timeout(Duration::from_secs(5)) - .boxed(), + (Transport::Ws, Some(SecProtocol::Noise), Some(Muxer::Yamux)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_websocket(noise::Config::new, yamux::Config::default) + .await? + .with_behaviour(behaviour_constructor)? 
+ .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), format!("/ip4/{ip}/tcp/0/ws"), ), - (Transport::WebRtcDirect, _) => ( - webrtc::tokio::Transport::new( - local_key, - webrtc::tokio::Certificate::generate(&mut rand::thread_rng())?, - ) - .map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn))) - .boxed(), + (Transport::WebRtcDirect, None, None) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_other_transport(|key| { + Ok(webrtc::tokio::Transport::new( + key.clone(), + webrtc::tokio::Certificate::generate(&mut rand::thread_rng())?, + )) + })? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), format!("/ip4/{ip}/udp/0/webrtc-direct"), ), - (Transport::Tcp, Err(_)) => bail!("Missing security protocol for TCP transport"), - (Transport::Ws, Err(_)) => bail!("Missing security protocol for Websocket transport"), - (Transport::Webtransport, _) => bail!("Webtransport can only be used with wasm"), + (t, s, m) => bail!("Unsupported combination: {t:?} {s:?} {m:?}"), }; - Ok((transport, addr)) - } - - pub(crate) fn swarm_builder( - transport: BoxedTransport, - behaviour: TBehaviour, - peer_id: PeerId, - ) -> SwarmBuilder { - SwarmBuilder::with_tokio_executor(transport, behaviour, peer_id) + Ok((swarm, addr)) } pub(crate) struct RedisClient(redis::Client); @@ -154,17 +187,17 @@ pub(crate) mod native { #[cfg(target_arch = "wasm32")] pub(crate) mod wasm { - use anyhow::{bail, Result}; + use anyhow::{bail, Context, Result}; use futures::future::{BoxFuture, FutureExt}; + use libp2p::core::upgrade::Version; use libp2p::identity::Keypair; - use libp2p::swarm::{NetworkBehaviour, SwarmBuilder}; - use libp2p::PeerId; - use libp2p_webrtc_websys as webrtc; + use libp2p::swarm::{NetworkBehaviour, Swarm}; + use libp2p::{noise, websocket_websys, webtransport_websys, yamux, Transport as _}; + use libp2p_mplex as mplex; + use 
libp2p_webrtc_websys as webrtc_websys; use std::time::Duration; - use crate::{BlpopRequest, Transport}; - - use super::BoxedTransport; + use crate::{BlpopRequest, Muxer, SecProtocol, Transport}; pub(crate) type Instant = instant::Instant; @@ -177,33 +210,74 @@ pub(crate) mod wasm { futures_timer::Delay::new(duration).boxed() } - pub(crate) fn build_transport( - local_key: Keypair, + pub(crate) async fn build_swarm( ip: &str, transport: Transport, - ) -> Result<(BoxedTransport, String)> { - match transport { - Transport::Webtransport => Ok(( - libp2p::webtransport_websys::Transport::new( - libp2p::webtransport_websys::Config::new(&local_key), - ) - .boxed(), + sec_protocol: Option, + muxer: Option, + behaviour_constructor: impl FnOnce(&Keypair) -> B, + ) -> Result<(Swarm, String)> { + Ok(match (transport, sec_protocol, muxer) { + (Transport::Webtransport, None, None) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_wasm_bindgen() + .with_other_transport(|local_key| { + webtransport_websys::Transport::new(webtransport_websys::Config::new( + &local_key, + )) + })? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), format!("/ip4/{ip}/udp/0/quic/webtransport"), - )), - Transport::WebRtcDirect => Ok(( - webrtc::Transport::new(webrtc::Config::new(&local_key)).boxed(), + ), + (Transport::Ws, Some(SecProtocol::Noise), Some(Muxer::Mplex)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_wasm_bindgen() + .with_other_transport(|local_key| { + Ok(websocket_websys::Transport::default() + .upgrade(Version::V1Lazy) + .authenticate( + noise::Config::new(&local_key) + .context("failed to initialise noise")?, + ) + .multiplex(mplex::MplexConfig::new())) + })? + .with_behaviour(behaviour_constructor)? 
+ .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0/wss"), + ), + (Transport::Ws, Some(SecProtocol::Noise), Some(Muxer::Yamux)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_wasm_bindgen() + .with_other_transport(|local_key| { + Ok(websocket_websys::Transport::default() + .upgrade(Version::V1Lazy) + .authenticate( + noise::Config::new(&local_key) + .context("failed to initialise noise")?, + ) + .multiplex(yamux::Config::default())) + })? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0/wss"), + ), + (Transport::WebRtcDirect, None, None) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_wasm_bindgen() + .with_other_transport(|local_key| { + webrtc_websys::Transport::new(webrtc_websys::Config::new(&local_key)) + })? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), format!("/ip4/{ip}/udp/0/webrtc-direct"), - )), - _ => bail!("Only webtransport and webrtc-direct are supported with wasm"), - } - } - - pub(crate) fn swarm_builder( - transport: BoxedTransport, - behaviour: TBehaviour, - peer_id: PeerId, - ) -> SwarmBuilder { - SwarmBuilder::with_wasm_executor(transport, behaviour, peer_id) + ), + (t, s, m) => bail!("Unsupported combination: {t:?} {s:?} {m:?}"), + }) } pub(crate) struct RedisClient(String); diff --git a/interop-tests/src/bin/config/mod.rs b/interop-tests/src/bin/config/mod.rs index 82747e82802b..dff297ef412e 100644 --- a/interop-tests/src/bin/config/mod.rs +++ b/interop-tests/src/bin/config/mod.rs @@ -5,6 +5,8 @@ use anyhow::{Context, Result}; #[derive(Debug, Clone)] pub(crate) struct Config { pub(crate) transport: String, + pub(crate) sec_protocol: Option, + pub(crate) muxer: Option, pub(crate) ip: String, pub(crate) is_dialer: bool, pub(crate) test_timeout: u64, @@ -26,8 
+28,13 @@ impl Config { .map(|addr| format!("redis://{addr}")) .unwrap_or_else(|_| "redis://redis:6379".into()); + let sec_protocol = env::var("security").ok(); + let muxer = env::var("muxer").ok(); + Ok(Self { transport, + sec_protocol, + muxer, ip, is_dialer, test_timeout, diff --git a/interop-tests/src/bin/native_ping.rs b/interop-tests/src/bin/native_ping.rs index 88905803d26d..2fb6ce12e29d 100644 --- a/interop-tests/src/bin/native_ping.rs +++ b/interop-tests/src/bin/native_ping.rs @@ -12,6 +12,8 @@ async fn main() -> Result<()> { config.is_dialer, config.test_timeout, &config.redis_addr, + config.sec_protocol, + config.muxer, ) .await?; diff --git a/interop-tests/src/bin/wasm_ping.rs b/interop-tests/src/bin/wasm_ping.rs index b3a918192261..e1bb2ea49fb5 100644 --- a/interop-tests/src/bin/wasm_ping.rs +++ b/interop-tests/src/bin/wasm_ping.rs @@ -1,9 +1,10 @@ #![allow(non_upper_case_globals)] + +use std::future::IntoFuture; use std::process::Stdio; use std::time::Duration; use anyhow::{bail, Context, Result}; -use axum::body; use axum::http::{header, Uri}; use axum::response::{Html, IntoResponse, Response}; use axum::routing::get; @@ -11,11 +12,11 @@ use axum::{extract::State, http::StatusCode, routing::post, Json, Router}; use redis::{AsyncCommands, Client}; use thirtyfour::prelude::*; use tokio::io::{AsyncBufReadExt, BufReader}; +use tokio::net::TcpListener; use tokio::process::Child; use tokio::sync::mpsc; use tower_http::cors::CorsLayer; use tower_http::trace::TraceLayer; -use tracing::{error, warn}; use tracing_subscriber::{fmt, prelude::*, EnvFilter}; use interop_tests::{BlpopRequest, Report}; @@ -77,7 +78,7 @@ async fn main() -> Result<()> { .with_state(state); // Run the service in background - tokio::spawn(axum::Server::bind(&BIND_ADDR.parse()?).serve(app.into_make_service())); + tokio::spawn(axum::serve(TcpListener::bind(BIND_ADDR).await?, app).into_future()); // Start executing the test in a browser let (mut chrome, driver) = open_in_browser().await?; 
@@ -144,16 +145,17 @@ async fn redis_blpop( ) -> Result>, StatusCode> { let client = state.0.redis_client; let mut conn = client.get_async_connection().await.map_err(|e| { - warn!("Failed to connect to redis: {e}"); + tracing::warn!("Failed to connect to redis: {e}"); StatusCode::INTERNAL_SERVER_ERROR })?; let res = conn .blpop(&request.key, request.timeout as usize) .await .map_err(|e| { - warn!( - "Failed to get list elem {} within timeout {}: {e}", - request.key, request.timeout + tracing::warn!( + key=%request.key, + timeout=%request.timeout, + "Failed to get list elem key within timeout: {e}" ); StatusCode::INTERNAL_SERVER_ERROR })?; @@ -167,7 +169,7 @@ async fn post_results( request: Json>, ) -> Result<(), StatusCode> { state.0.results_tx.send(request.0).await.map_err(|_| { - error!("Failed to send results"); + tracing::error!("Failed to send results"); StatusCode::INTERNAL_SERVER_ERROR }) } @@ -179,8 +181,18 @@ async fn serve_index_html(state: State) -> Result @@ -200,7 +212,9 @@ async fn serve_index_html(state: State) -> Result @@ -217,7 +231,7 @@ async fn serve_wasm_pkg(uri: Uri) -> Result { let mime = mime_guess::from_path(&path).first_or_octet_stream(); Ok(Response::builder() .header(header::CONTENT_TYPE, mime.as_ref()) - .body(body::boxed(body::Full::from(content.data))) + .body(content.data.into()) .unwrap()) } else { Err(StatusCode::NOT_FOUND) diff --git a/interop-tests/src/lib.rs b/interop-tests/src/lib.rs index 40c06b57810e..0154bec51a4e 100644 --- a/interop-tests/src/lib.rs +++ b/interop-tests/src/lib.rs @@ -3,14 +3,15 @@ use std::time::Duration; use anyhow::{bail, Context, Result}; use futures::{FutureExt, StreamExt}; +use libp2p::identity::Keypair; use libp2p::swarm::SwarmEvent; -use libp2p::{identify, identity, ping, swarm::NetworkBehaviour, Multiaddr, PeerId}; +use libp2p::{identify, ping, swarm::NetworkBehaviour, Multiaddr}; #[cfg(target_arch = "wasm32")] use wasm_bindgen::prelude::*; mod arch; -use arch::{build_transport, init_logger, 
swarm_builder, Instant, RedisClient}; +use arch::{build_swarm, init_logger, Instant, RedisClient}; pub async fn run_test( transport: &str, @@ -18,42 +19,45 @@ pub async fn run_test( is_dialer: bool, test_timeout_seconds: u64, redis_addr: &str, + sec_protocol: Option, + muxer: Option, ) -> Result { init_logger(); let test_timeout = Duration::from_secs(test_timeout_seconds); let transport = transport.parse().context("Couldn't parse transport")?; + let sec_protocol = sec_protocol + .map(|sec_protocol| { + sec_protocol + .parse() + .context("Couldn't parse security protocol") + }) + .transpose()?; + let muxer = muxer + .map(|sec_protocol| { + sec_protocol + .parse() + .context("Couldn't parse muxer protocol") + }) + .transpose()?; - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); let redis_client = RedisClient::new(redis_addr).context("Could not connect to redis")?; // Build the transport from the passed ENV var. - let (boxed_transport, local_addr) = build_transport(local_key.clone(), ip, transport)?; - let mut swarm = swarm_builder( - boxed_transport, - Behaviour { - ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(10))), - // Need to include identify until https://github.com/status-im/nim-libp2p/issues/924 is resolved. - identify: identify::Behaviour::new(identify::Config::new( - "/interop-tests".to_owned(), - local_key.public(), - )), - }, - local_peer_id, - ) - .idle_connection_timeout(Duration::from_secs(5)) - .build(); - - log::info!("Running ping test: {}", swarm.local_peer_id()); + let (mut swarm, local_addr) = + build_swarm(ip, transport, sec_protocol, muxer, build_behaviour).await?; - let mut maybe_id = None; + tracing::info!(local_peer=%swarm.local_peer_id(), "Running ping test"); // See https://github.com/libp2p/rust-libp2p/issues/4071. 
#[cfg(not(target_arch = "wasm32"))] - if transport == Transport::WebRtcDirect { - maybe_id = Some(swarm.listen_on(local_addr.parse()?)?); - } + let maybe_id = if transport == Transport::WebRtcDirect { + Some(swarm.listen_on(local_addr.parse()?)?) + } else { + None + }; + #[cfg(target_arch = "wasm32")] + let maybe_id = None; // Run a ping interop test. Based on `is_dialer`, either dial the address // retrieved via `listenAddr` key over the redis connection. Or wait to be pinged and have @@ -70,7 +74,7 @@ pub async fn run_test( let handshake_start = Instant::now(); swarm.dial(other.parse::()?)?; - log::info!("Test instance, dialing multiaddress on: {}.", other); + tracing::info!(listener=%other, "Test instance, dialing multiaddress"); let rtt = loop { if let Some(SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { @@ -78,7 +82,7 @@ pub async fn run_test( .. }))) = swarm.next().await { - log::info!("Ping successful: {rtt:?}"); + tracing::info!(?rtt, "Ping successful"); break rtt.as_micros() as f32 / 1000.; } }; @@ -97,9 +101,9 @@ pub async fn run_test( Some(id) => id, }; - log::info!( - "Test instance, listening for incoming connections on: {:?}.", - local_addr + tracing::info!( + address=%local_addr, + "Test instance, listening for incoming connections on address" ); loop { @@ -112,8 +116,8 @@ pub async fn run_test( continue; } if listener_id == id { - let ma = format!("{address}/p2p/{local_peer_id}"); - redis_client.rpush("listenerAddr", ma).await?; + let ma = format!("{address}/p2p/{}", swarm.local_peer_id()); + redis_client.rpush("listenerAddr", ma.clone()).await?; break; } } @@ -123,7 +127,9 @@ pub async fn run_test( futures::future::select( async move { loop { - swarm.next().await; + let event = swarm.next().await.unwrap(); + + tracing::debug!("{event:?}"); } } .boxed(), @@ -145,9 +151,20 @@ pub async fn run_test_wasm( is_dialer: bool, test_timeout_secs: u64, base_url: &str, + sec_protocol: Option, + muxer: Option, ) -> Result<(), JsValue> { - let result = 
run_test(transport, ip, is_dialer, test_timeout_secs, base_url).await; - log::info!("Sending test result: {result:?}"); + let result = run_test( + transport, + ip, + is_dialer, + test_timeout_secs, + base_url, + sec_protocol, + muxer, + ) + .await; + tracing::info!(?result, "Sending test result"); reqwest::Client::new() .post(&format!("http://{}/results", base_url)) .json(&result.map_err(|e| e.to_string())) @@ -240,18 +257,18 @@ impl FromStr for SecProtocol { } #[derive(NetworkBehaviour)] -struct Behaviour { +pub(crate) struct Behaviour { ping: ping::Behaviour, identify: identify::Behaviour, } -/// Helper function to get a ENV variable into an test parameter like `Transport`. -pub fn from_env(env_var: &str) -> Result -where - T: FromStr, -{ - std::env::var(env_var) - .with_context(|| format!("{env_var} environment variable is not set"))? - .parse() - .map_err(Into::into) +pub(crate) fn build_behaviour(key: &Keypair) -> Behaviour { + Behaviour { + ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), + // Need to include identify until https://github.com/status-im/nim-libp2p/issues/924 is resolved. + identify: identify::Behaviour::new(identify::Config::new( + "/interop-tests".to_owned(), + key.public(), + )), + } } diff --git a/libp2p/CHANGELOG.md b/libp2p/CHANGELOG.md index 9ae0d1c28aac..80b32c35643b 100644 --- a/libp2p/CHANGELOG.md +++ b/libp2p/CHANGELOG.md @@ -1,3 +1,50 @@ +## 0.53.2 + +- Allow `SwarmBuilder::with_bandwidth_metrics` after `SwarmBuilder::with_websocket`. + See [PR 4937](https://github.com/libp2p/rust-libp2p/pull/4937). + +## 0.53.1 + +- Allow `SwarmBuilder::with_quic_config` to be called without `with_tcp` first. + See [PR 4821](https://github.com/libp2p/rust-libp2p/pull/4821). +- Introduce `SwarmBuilder::with_dns_config`. + See [PR 4808](https://github.com/libp2p/rust-libp2p/pull/4808). + +## 0.53.0 + +- Raise MSRV to 1.73. + See [PR 4692](https://github.com/libp2p/rust-libp2p/pull/4692). 
+- Remove deprecated `libp2p-wasm-ext`. + Users should use `libp2p-websocket-websys` instead. + See [PR 4694](https://github.com/libp2p/rust-libp2p/pull/4694). +- Remove deprecated `libp2p-deflate`. + See [issue 4522](https://github.com/libp2p/rust-libp2p/issues/4522) for details. + See [PR 4729](https://github.com/libp2p/rust-libp2p/pull/4729). +- Remove deprecated `development_transport`. + Use `libp2p::SwarmBuilder` instead. + See [PR 4732](https://github.com/libp2p/rust-libp2p/pull/4732). +- Introduce `SwarmBuilder::with_bandwidth_metrics` exposing Prometheus bandwidth metrics per transport protocol stack and direction (in-/ outbound). + Deprecate `Transport::with_bandwidth_logging` and `SwarmBuilder::with_bandwidth_logging` in favor of the new `SwarmBuilder::with_bandwidth_metrics`. + See [PR 4727](https://github.com/libp2p/rust-libp2p/pull/4727). + +## 0.52.4 + +- Introduce `libp2p::websocket_websys` module behind `websocket-websys` feature flag. + This supersedes the existing `libp2p::wasm_ext` module which is now deprecated. + See [PR 3679]. + +- Introduce a new `libp2p::SwarmBuilder` in favor of the now deprecated `libp2p::swarm::SwarmBuilder`. + See `libp2p::SwarmBuilder` docs on how to use the new builder. + Also see [PR 4120]. + +- Update `libp2p-identity` version to 0.2.6. + Under the hood, we feature-flagged `libp2p-identity`'s `rand` dependency but it is enabled by default when using `libp2p`. + See [PR 4349]. + +[PR 3679]: https://github.com/libp2p/rust-libp2p/pull/3679 +[PR 4120]: https://github.com/libp2p/rust-libp2p/pull/4120 +[PR 4349]: https://github.com/libp2p/rust-libp2p/pull/4349 + ## 0.52.3 - Add `libp2p-quic` stable release. @@ -64,8 +111,8 @@ ## 0.51.3 - Deprecate the `mplex` feature. -The recommended baseline stream multiplexer is `yamux`. -See [PR 3689]. + The recommended baseline stream multiplexer is `yamux`. + See [PR 3689]. 
[PR 3689]: https://github.com/libp2p/rust-libp2p/pull/3689 diff --git a/libp2p/Cargo.toml b/libp2p/Cargo.toml index 1264a0505618..9dc9667be105 100644 --- a/libp2p/Cargo.toml +++ b/libp2p/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p" edition = "2021" rust-version = { workspace = true } description = "Peer-to-peer networking library" -version = "0.52.3" +version = "0.53.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -16,7 +16,6 @@ full = [ "autonat", "cbor", "dcutr", - "deflate", "dns", "ecdsa", "ed25519", @@ -45,8 +44,7 @@ full = [ "tokio", "uds", "wasm-bindgen", - "wasm-ext", - "wasm-ext-websocket", + "websocket-websys", "websocket", "webtransport-websys", "yamux", @@ -57,7 +55,6 @@ async-std = [ "libp2p-swarm/async-std", "libp2p-mdns?/async-io", "libp2p-tcp?/as autonat = ["dep:libp2p-autonat"] cbor = ["libp2p-request-response?/cbor"] dcutr = ["dep:libp2p-dcutr", "libp2p-metrics?/dcutr"] -deflate = ["dep:libp2p-deflate"] dns = ["dep:libp2p-dns"] ecdsa = ["libp2p-identity/ecdsa"] ed25519 = ["libp2p-identity/ed25519"] @@ -86,8 +83,7 @@ tls = ["dep:libp2p-tls"] tokio = [ "libp2p-swarm/tokio", "libp2p-mdns?/tokio", "libp2p-tcp?/tokio", "libp2p-dns?/tokio", "libp2p-quic?/tokio", "libp2p-upnp?/tokio"] uds = ["dep:libp2p-uds"] wasm-bindgen = [ "futures-timer/wasm-bindgen", "instant/wasm-bindgen", "getrandom/js", "libp2p-swarm/wasm-bindgen", "libp2p-gossipsub?/wasm-bindgen",] -wasm-ext = ["dep:libp2p-wasm-ext"] -wasm-ext-websocket = ["wasm-ext", "libp2p-wasm-ext?/websocket"] +websocket-websys = ["dep:libp2p-websocket-websys"] websocket = ["dep:libp2p-websocket"] webtransport-websys = ["dep:libp2p-webtransport-websys"] yamux = ["dep:libp2p-yamux"] @@ -95,10 +91,13 @@ upnp = ["dep:libp2p-upnp"] [dependencies] bytes = "1" +either = "1.9.0" futures = "0.3.26" futures-timer = "3.0.2" # Explicit dependency to be used in `wasm-bindgen` feature getrandom = "0.2.3" # Explicit dependency to be used in `wasm-bindgen` feature 
instant = "0.1.12" # Explicit dependency to be used in `wasm-bindgen` feature +# TODO feature flag? +rw-stream-sink = { workspace = true } libp2p-allow-block-list = { workspace = true } libp2p-autonat = { workspace = true, optional = true } @@ -108,7 +107,7 @@ libp2p-dcutr = { workspace = true, optional = true } libp2p-floodsub = { workspace = true, optional = true } libp2p-gossipsub = { workspace = true, optional = true } libp2p-identify = { workspace = true, optional = true } -libp2p-identity = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } libp2p-kad = { workspace = true, optional = true } libp2p-metrics = { workspace = true, optional = true } libp2p-noise = { workspace = true, optional = true } @@ -119,15 +118,14 @@ libp2p-relay = { workspace = true, optional = true } libp2p-rendezvous = { workspace = true, optional = true } libp2p-request-response = { workspace = true, optional = true } libp2p-swarm = { workspace = true } -libp2p-wasm-ext = { workspace = true, optional = true } +libp2p-websocket-websys = { workspace = true, optional = true } libp2p-webtransport-websys = { workspace = true, optional = true } libp2p-yamux = { workspace = true, optional = true } - multiaddr = { workspace = true } pin-project = "1.0.0" +thiserror = "1.0" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -libp2p-deflate = { workspace = true, optional = true } libp2p-dns = { workspace = true, optional = true } libp2p-mdns = { workspace = true, optional = true } libp2p-memory-connection-limits = { workspace = true, optional = true } @@ -141,13 +139,13 @@ libp2p-websocket = { workspace = true, optional = true } [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } async-trait = "0.1" -either = "1.8.0" -env_logger = "0.10.0" clap = { version = "4.1.6", features = ["derive"] } tokio = { version = "1.15", features = [ "io-util", "io-std", "macros", "rt", "rt-multi-thread"] } +libp2p-mplex = { workspace = true } 
libp2p-noise = { workspace = true } libp2p-tcp = { workspace = true, features = ["tokio"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/libp2p/src/bandwidth.rs b/libp2p/src/bandwidth.rs index dc696ce07e2c..b84cbb7e27b1 100644 --- a/libp2p/src/bandwidth.rs +++ b/libp2p/src/bandwidth.rs @@ -18,6 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +#![allow(deprecated)] + use crate::core::muxing::{StreamMuxer, StreamMuxerEvent}; use futures::{ @@ -101,6 +103,9 @@ where } /// Allows obtaining the average bandwidth of the streams. +#[deprecated( + note = "Use `libp2p::SwarmBuilder::with_bandwidth_metrics` or `libp2p_metrics::BandwidthTransport` instead." +)] pub struct BandwidthSinks { inbound: AtomicU64, outbound: AtomicU64, diff --git a/libp2p/src/builder.rs b/libp2p/src/builder.rs new file mode 100644 index 000000000000..c96c20d470a3 --- /dev/null +++ b/libp2p/src/builder.rs @@ -0,0 +1,603 @@ +use std::marker::PhantomData; + +mod phase; +mod select_muxer; +mod select_security; + +/// Build a [`Swarm`](libp2p_swarm::Swarm) by combining an identity, a set of +/// [`Transport`](libp2p_core::Transport)s and a +/// [`NetworkBehaviour`](libp2p_swarm::NetworkBehaviour). 
+/// +/// ``` +/// # use libp2p::{swarm::NetworkBehaviour, SwarmBuilder}; +/// # use libp2p::core::transport::dummy::DummyTransport; +/// # use libp2p::core::muxing::StreamMuxerBox; +/// # use libp2p::identity::PeerId; +/// # use std::error::Error; +/// # +/// # #[cfg(all( +/// # not(target_arch = "wasm32"), +/// # feature = "tokio", +/// # feature = "tcp", +/// # feature = "tls", +/// # feature = "noise", +/// # feature = "quic", +/// # feature = "dns", +/// # feature = "relay", +/// # feature = "websocket", +/// # ))] +/// # async fn build_swarm() -> Result<(), Box> { +/// # #[derive(NetworkBehaviour)] +/// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] +/// # struct MyBehaviour { +/// # relay: libp2p_relay::client::Behaviour, +/// # } +/// +/// let swarm = SwarmBuilder::with_new_identity() +/// .with_tokio() +/// .with_tcp( +/// Default::default(), +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// )? +/// .with_quic() +/// .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new())? +/// .with_dns()? +/// .with_websocket( +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// ) +/// .await? +/// .with_relay_client( +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// )? +/// .with_behaviour(|_key, relay| MyBehaviour { relay })? +/// .with_swarm_config(|cfg| { +/// // Edit cfg here. 
+/// cfg +/// }) +/// .build(); +/// # +/// # Ok(()) +/// # } +/// ``` +pub struct SwarmBuilder { + keypair: libp2p_identity::Keypair, + phantom: PhantomData, + phase: Phase, +} + +#[cfg(test)] +mod tests { + use crate::SwarmBuilder; + use libp2p_core::{muxing::StreamMuxerBox, transport::dummy::DummyTransport}; + use libp2p_identity::PeerId; + use libp2p_swarm::NetworkBehaviour; + + #[test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + ))] + fn tcp() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + libp2p_tls::Config::new, + libp2p_yamux::Config::default, + ) + .unwrap() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all( + feature = "async-std", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + ))] + fn async_std_tcp() { + let _ = SwarmBuilder::with_new_identity() + .with_async_std() + .with_tcp( + Default::default(), + libp2p_tls::Config::new, + libp2p_yamux::Config::default, + ) + .unwrap() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all(feature = "tokio", feature = "quic"))] + fn quic() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_quic() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all(feature = "async-std", feature = "quic"))] + fn async_std_quic() { + let _ = SwarmBuilder::with_new_identity() + .with_async_std() + .with_quic() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all(feature = "tokio", feature = "quic"))] + fn quic_config() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_quic_config(|config| config) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all(feature = "async-std", feature 
= "quic"))] + fn async_std_quic_config() { + let _ = SwarmBuilder::with_new_identity() + .with_async_std() + .with_quic_config(|config| config) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all(feature = "tokio", feature = "tcp", feature = "tls", feature = "yamux"))] + fn tcp_yamux_mplex() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + libp2p_tls::Config::new, + ( + libp2p_yamux::Config::default, + libp2p_mplex::MplexConfig::default, + ), + ) + .unwrap() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux" + ))] + fn tcp_tls_noise() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + ( + libp2p_yamux::Config::default, + libp2p_mplex::MplexConfig::default, + ), + ) + .unwrap() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature = "quic" + ))] + fn tcp_quic() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_quic() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all( + feature = "async-std", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature = "quic" + ))] + fn async_std_tcp_quic() { + let _ = SwarmBuilder::with_new_identity() + .with_async_std() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_quic() 
+ .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature = "quic" + ))] + fn tcp_quic_config() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_quic_config(|config| config) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all( + feature = "async-std", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature = "quic" + ))] + fn async_std_tcp_quic_config() { + let _ = SwarmBuilder::with_new_identity() + .with_async_std() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_quic_config(|config| config) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature = "relay" + ))] + fn tcp_relay() { + #[derive(libp2p_swarm::NetworkBehaviour)] + #[behaviour(prelude = "libp2p_swarm::derive_prelude")] + struct Behaviour { + dummy: libp2p_swarm::dummy::Behaviour, + relay: libp2p_relay::client::Behaviour, + } + + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + libp2p_tls::Config::new, + libp2p_yamux::Config::default, + ) + .unwrap() + .with_relay_client(libp2p_tls::Config::new, libp2p_yamux::Config::default) + .unwrap() + .with_behaviour(|_, relay| Behaviour { + dummy: libp2p_swarm::dummy::Behaviour, + relay, + }) + .unwrap() + .build(); + } + + #[tokio::test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature 
= "dns" + ))] + async fn tcp_dns() { + SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_dns() + .unwrap() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[tokio::test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "noise", + feature = "yamux", + feature = "dns" + ))] + async fn tcp_dns_config() { + SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_dns_config( + libp2p_dns::ResolverConfig::default(), + libp2p_dns::ResolverOpts::default(), + ) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[tokio::test] + #[cfg(all(feature = "tokio", feature = "quic", feature = "dns"))] + async fn quic_dns_config() { + SwarmBuilder::with_new_identity() + .with_tokio() + .with_quic() + .with_dns_config( + libp2p_dns::ResolverConfig::default(), + libp2p_dns::ResolverOpts::default(), + ) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[tokio::test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "noise", + feature = "yamux", + feature = "quic", + feature = "dns" + ))] + async fn tcp_quic_dns_config() { + SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_quic() + .with_dns_config( + libp2p_dns::ResolverConfig::default(), + libp2p_dns::ResolverOpts::default(), + ) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[tokio::test] + #[cfg(all( + feature = "async-std", + feature = "tcp", + feature = "noise", + feature = "yamux", + feature = "quic", + feature = "dns" + 
))] + async fn async_std_tcp_quic_dns_config() { + SwarmBuilder::with_new_identity() + .with_async_std() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_quic() + .with_dns_config( + libp2p_dns::ResolverConfig::default(), + libp2p_dns::ResolverOpts::default(), + ) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + /// Showcases how to provide custom transports unknown to the libp2p crate, e.g. WebRTC. + #[test] + #[cfg(feature = "tokio")] + fn other_transport() -> Result<(), Box> { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + // Closure can either return a Transport directly. + .with_other_transport(|_| DummyTransport::<(PeerId, StreamMuxerBox)>::new())? + // Or a Result containing a Transport. + .with_other_transport(|_| { + if true { + Ok(DummyTransport::<(PeerId, StreamMuxerBox)>::new()) + } else { + Err(Box::from("test")) + } + })? + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + + Ok(()) + } + + #[tokio::test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature = "dns", + feature = "websocket", + ))] + async fn tcp_websocket() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_websocket( + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .await + .unwrap() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[tokio::test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature = "quic", + feature = "dns", + feature = "relay", + feature = "websocket", + feature = "metrics", + ))] + async fn all() { + 
#[derive(NetworkBehaviour)] + #[behaviour(prelude = "libp2p_swarm::derive_prelude")] + struct MyBehaviour { + relay: libp2p_relay::client::Behaviour, + } + + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + libp2p_tls::Config::new, + libp2p_yamux::Config::default, + ) + .unwrap() + .with_quic() + .with_dns() + .unwrap() + .with_websocket(libp2p_tls::Config::new, libp2p_yamux::Config::default) + .await + .unwrap() + .with_relay_client(libp2p_tls::Config::new, libp2p_yamux::Config::default) + .unwrap() + .with_bandwidth_metrics(&mut libp2p_metrics::Registry::default()) + .with_behaviour(|_key, relay| MyBehaviour { relay }) + .unwrap() + .build(); + } + + #[test] + #[cfg(all(feature = "tokio", feature = "tcp", feature = "tls", feature = "yamux"))] + fn tcp_bandwidth_metrics() -> Result<(), Box> { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + libp2p_tls::Config::new, + libp2p_yamux::Config::default, + )? + .with_bandwidth_metrics(&mut libp2p_metrics::Registry::default()) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + + Ok(()) + } + + #[test] + #[cfg(all(feature = "tokio", feature = "quic"))] + fn quic_bandwidth_metrics() -> Result<(), Box> { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_quic() + .with_bandwidth_metrics(&mut libp2p_metrics::Registry::default()) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + + Ok(()) + } + + #[test] + #[cfg(feature = "tokio")] + fn other_transport_bandwidth_metrics() -> Result<(), Box> { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_other_transport(|_| DummyTransport::<(PeerId, StreamMuxerBox)>::new())? 
+ .with_bandwidth_metrics(&mut libp2p_metrics::Registry::default()) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + + Ok(()) + } +} diff --git a/libp2p/src/builder/phase.rs b/libp2p/src/builder/phase.rs new file mode 100644 index 000000000000..c9679a467674 --- /dev/null +++ b/libp2p/src/builder/phase.rs @@ -0,0 +1,139 @@ +#![allow(unused_imports)] + +mod bandwidth_logging; +mod bandwidth_metrics; +mod behaviour; +mod build; +mod dns; +mod identity; +mod other_transport; +mod provider; +mod quic; +mod relay; +mod swarm; +mod tcp; +mod websocket; + +use bandwidth_logging::*; +use bandwidth_metrics::*; +use behaviour::*; +use build::*; +use dns::*; +use other_transport::*; +use provider::*; +use quic::*; +use relay::*; +use swarm::*; +use tcp::*; +use websocket::*; + +use super::select_muxer::SelectMuxerUpgrade; +use super::select_security::SelectSecurityUpgrade; +use super::SwarmBuilder; + +use libp2p_core::{muxing::StreamMuxerBox, Transport}; +use libp2p_identity::Keypair; + +#[allow(unreachable_pub)] +pub trait IntoSecurityUpgrade { + type Upgrade; + type Error; + + fn into_security_upgrade(self, keypair: &Keypair) -> Result; +} + +impl IntoSecurityUpgrade for F +where + F: for<'a> FnOnce(&'a Keypair) -> Result, +{ + type Upgrade = T; + type Error = E; + + fn into_security_upgrade(self, keypair: &Keypair) -> Result { + (self)(keypair) + } +} + +impl IntoSecurityUpgrade for (F1, F2) +where + F1: IntoSecurityUpgrade, + F2: IntoSecurityUpgrade, +{ + type Upgrade = SelectSecurityUpgrade; + type Error = either::Either; + + fn into_security_upgrade(self, keypair: &Keypair) -> Result { + let (f1, f2) = self; + + let u1 = f1 + .into_security_upgrade(keypair) + .map_err(either::Either::Left)?; + let u2 = f2 + .into_security_upgrade(keypair) + .map_err(either::Either::Right)?; + + Ok(SelectSecurityUpgrade::new(u1, u2)) + } +} + +#[allow(unreachable_pub)] +pub trait IntoMultiplexerUpgrade { + type Upgrade; + + fn into_multiplexer_upgrade(self) 
-> Self::Upgrade; +} + +impl IntoMultiplexerUpgrade for F +where + F: FnOnce() -> U, +{ + type Upgrade = U; + + fn into_multiplexer_upgrade(self) -> Self::Upgrade { + (self)() + } +} + +impl IntoMultiplexerUpgrade for (U1, U2) +where + U1: IntoMultiplexerUpgrade, + U2: IntoMultiplexerUpgrade, +{ + type Upgrade = SelectMuxerUpgrade; + + fn into_multiplexer_upgrade(self) -> Self::Upgrade { + let (f1, f2) = self; + + let u1 = f1.into_multiplexer_upgrade(); + let u2 = f2.into_multiplexer_upgrade(); + + SelectMuxerUpgrade::new(u1, u2) + } +} + +pub trait AuthenticatedMultiplexedTransport: + Transport< + Error = Self::E, + Dial = Self::D, + ListenerUpgrade = Self::U, + Output = (libp2p_identity::PeerId, StreamMuxerBox), + > + Send + + Unpin + + 'static +{ + type E: Send + Sync + 'static; + type D: Send; + type U: Send; +} + +impl AuthenticatedMultiplexedTransport for T +where + T: Transport + Send + Unpin + 'static, + ::Error: Send + Sync + 'static, + ::Dial: Send, + ::ListenerUpgrade: Send, +{ + type E = T::Error; + type D = T::Dial; + type U = T::ListenerUpgrade; +} diff --git a/libp2p/src/builder/phase/bandwidth_logging.rs b/libp2p/src/builder/phase/bandwidth_logging.rs new file mode 100644 index 000000000000..cee9498fcaa0 --- /dev/null +++ b/libp2p/src/builder/phase/bandwidth_logging.rs @@ -0,0 +1,88 @@ +use super::*; +#[allow(deprecated)] +use crate::bandwidth::BandwidthSinks; +use crate::transport_ext::TransportExt; +use crate::SwarmBuilder; +use std::marker::PhantomData; +use std::sync::Arc; + +pub struct BandwidthLoggingPhase { + pub(crate) relay_behaviour: R, + pub(crate) transport: T, +} + +impl + SwarmBuilder> +{ + #[allow(deprecated)] + #[deprecated(note = "Use `with_bandwidth_metrics` instead.")] + pub fn with_bandwidth_logging( + self, + ) -> ( + SwarmBuilder>, + Arc, + ) { + let (transport, sinks) = self.phase.transport.with_bandwidth_logging(); + ( + SwarmBuilder { + phase: BandwidthMetricsPhase { + relay_behaviour: self.phase.relay_behaviour, + 
transport, + }, + keypair: self.keypair, + phantom: PhantomData, + }, + sinks, + ) + } + + pub fn without_bandwidth_logging(self) -> SwarmBuilder> { + SwarmBuilder { + phase: BandwidthMetricsPhase { + relay_behaviour: self.phase.relay_behaviour, + transport: self.phase.transport, + }, + keypair: self.keypair, + phantom: PhantomData, + } + } +} + +// Shortcuts +#[cfg(feature = "metrics")] +impl + SwarmBuilder> +{ + pub fn with_bandwidth_metrics( + self, + registry: &mut libp2p_metrics::Registry, + ) -> SwarmBuilder> { + self.without_bandwidth_logging() + .with_bandwidth_metrics(registry) + } +} +#[cfg(feature = "relay")] +impl + SwarmBuilder> +{ + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair, libp2p_relay::client::Behaviour) -> R, + ) -> Result>, R::Error> { + self.without_bandwidth_logging() + .without_bandwidth_metrics() + .with_behaviour(constructor) + } +} +impl + SwarmBuilder> +{ + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result>, R::Error> { + self.without_bandwidth_logging() + .without_bandwidth_metrics() + .with_behaviour(constructor) + } +} diff --git a/libp2p/src/builder/phase/bandwidth_metrics.rs b/libp2p/src/builder/phase/bandwidth_metrics.rs new file mode 100644 index 000000000000..52daa731ddd1 --- /dev/null +++ b/libp2p/src/builder/phase/bandwidth_metrics.rs @@ -0,0 +1,69 @@ +use super::*; +#[allow(deprecated)] +use crate::bandwidth::BandwidthSinks; +use crate::transport_ext::TransportExt; +use crate::SwarmBuilder; +use std::marker::PhantomData; +use std::sync::Arc; + +pub struct BandwidthMetricsPhase { + pub(crate) relay_behaviour: R, + pub(crate) transport: T, +} + +#[cfg(feature = "metrics")] +impl + SwarmBuilder> +{ + pub fn with_bandwidth_metrics( + self, + registry: &mut libp2p_metrics::Registry, + ) -> SwarmBuilder> { + SwarmBuilder { + phase: BehaviourPhase { + relay_behaviour: self.phase.relay_behaviour, + transport: 
libp2p_metrics::BandwidthTransport::new(self.phase.transport, registry) + .map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn))), + }, + keypair: self.keypair, + phantom: PhantomData, + } + } +} + +impl SwarmBuilder> { + pub fn without_bandwidth_metrics(self) -> SwarmBuilder> { + SwarmBuilder { + phase: BehaviourPhase { + relay_behaviour: self.phase.relay_behaviour, + transport: self.phase.transport, + }, + keypair: self.keypair, + phantom: PhantomData, + } + } +} + +// Shortcuts +#[cfg(feature = "relay")] +impl + SwarmBuilder> +{ + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair, libp2p_relay::client::Behaviour) -> R, + ) -> Result>, R::Error> { + self.without_bandwidth_metrics().with_behaviour(constructor) + } +} + +impl + SwarmBuilder> +{ + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result>, R::Error> { + self.without_bandwidth_metrics().with_behaviour(constructor) + } +} diff --git a/libp2p/src/builder/phase/behaviour.rs b/libp2p/src/builder/phase/behaviour.rs new file mode 100644 index 000000000000..939db935c808 --- /dev/null +++ b/libp2p/src/builder/phase/behaviour.rs @@ -0,0 +1,90 @@ +use super::*; +use crate::SwarmBuilder; +use libp2p_swarm::NetworkBehaviour; +use std::convert::Infallible; +use std::marker::PhantomData; + +pub struct BehaviourPhase { + pub(crate) relay_behaviour: R, + pub(crate) transport: T, +} + +#[cfg(feature = "relay")] +impl SwarmBuilder> { + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair, libp2p_relay::client::Behaviour) -> R, + ) -> Result>, R::Error> { + Ok(SwarmBuilder { + phase: SwarmPhase { + behaviour: constructor(&self.keypair, self.phase.relay_behaviour) + .try_into_behaviour()?, + transport: self.phase.transport, + }, + keypair: self.keypair, + phantom: PhantomData, + }) + } +} + +impl SwarmBuilder> { + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) 
-> R, + ) -> Result>, R::Error> { + // Discard `NoRelayBehaviour`. + let _ = self.phase.relay_behaviour; + + Ok(SwarmBuilder { + phase: SwarmPhase { + behaviour: constructor(&self.keypair).try_into_behaviour()?, + transport: self.phase.transport, + }, + keypair: self.keypair, + phantom: PhantomData, + }) + } +} + +pub trait TryIntoBehaviour: private::Sealed { + type Error; + + fn try_into_behaviour(self) -> Result; +} + +impl TryIntoBehaviour for B +where + B: NetworkBehaviour, +{ + type Error = Infallible; + + fn try_into_behaviour(self) -> Result { + Ok(self) + } +} + +impl TryIntoBehaviour for Result> +where + B: NetworkBehaviour, +{ + type Error = BehaviourError; + + fn try_into_behaviour(self) -> Result { + self.map_err(BehaviourError) + } +} + +mod private { + pub trait Sealed {} +} + +impl private::Sealed for B {} + +impl private::Sealed + for Result> +{ +} + +#[derive(Debug, thiserror::Error)] +#[error("failed to build behaviour: {0}")] +pub struct BehaviourError(Box); diff --git a/libp2p/src/builder/phase/build.rs b/libp2p/src/builder/phase/build.rs new file mode 100644 index 000000000000..80a83994eebf --- /dev/null +++ b/libp2p/src/builder/phase/build.rs @@ -0,0 +1,31 @@ +#[allow(unused_imports)] +use super::*; + +use crate::SwarmBuilder; +use libp2p_core::Transport; +use libp2p_swarm::Swarm; + +pub struct BuildPhase { + pub(crate) behaviour: B, + pub(crate) transport: T, + pub(crate) swarm_config: libp2p_swarm::Config, +} + +const CONNECTION_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); + +impl + SwarmBuilder> +{ + pub fn build(self) -> Swarm { + Swarm::new( + libp2p_core::transport::timeout::TransportTimeout::new( + self.phase.transport, + CONNECTION_TIMEOUT, + ) + .boxed(), + self.phase.behaviour, + self.keypair.public().to_peer_id(), + self.phase.swarm_config, + ) + } +} diff --git a/libp2p/src/builder/phase/dns.rs b/libp2p/src/builder/phase/dns.rs new file mode 100644 index 000000000000..135f6c57b192 --- /dev/null +++ 
b/libp2p/src/builder/phase/dns.rs @@ -0,0 +1,117 @@ +use super::*; +use crate::SwarmBuilder; +use std::marker::PhantomData; + +pub struct DnsPhase { + pub(crate) transport: T, +} + +#[cfg(all(not(target_arch = "wasm32"), feature = "async-std", feature = "dns"))] +impl SwarmBuilder> { + // TODO: Remove `async` + pub async fn with_dns( + self, + ) -> Result< + SwarmBuilder< + super::provider::AsyncStd, + WebsocketPhase, + >, + std::io::Error, + > { + Ok(SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: WebsocketPhase { + transport: libp2p_dns::async_std::Transport::system2(self.phase.transport)?, + }, + }) + } +} + +#[cfg(all(not(target_arch = "wasm32"), feature = "tokio", feature = "dns"))] +impl SwarmBuilder> { + pub fn with_dns( + self, + ) -> Result< + SwarmBuilder< + super::provider::Tokio, + WebsocketPhase, + >, + std::io::Error, + > { + Ok(SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: WebsocketPhase { + transport: libp2p_dns::tokio::Transport::system(self.phase.transport)?, + }, + }) + } +} + +#[cfg(all(not(target_arch = "wasm32"), feature = "async-std", feature = "dns"))] +impl SwarmBuilder> { + pub fn with_dns_config( + self, + cfg: libp2p_dns::ResolverConfig, + opts: libp2p_dns::ResolverOpts, + ) -> SwarmBuilder< + super::provider::AsyncStd, + WebsocketPhase, + > { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: WebsocketPhase { + transport: libp2p_dns::async_std::Transport::custom2( + self.phase.transport, + cfg, + opts, + ), + }, + } + } +} + +#[cfg(all(not(target_arch = "wasm32"), feature = "tokio", feature = "dns"))] +impl SwarmBuilder> { + pub fn with_dns_config( + self, + cfg: libp2p_dns::ResolverConfig, + opts: libp2p_dns::ResolverOpts, + ) -> SwarmBuilder> + { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: WebsocketPhase { + transport: libp2p_dns::tokio::Transport::custom(self.phase.transport, cfg, opts), + }, + } + } +} + +impl SwarmBuilder> { + 
pub(crate) fn without_dns(self) -> SwarmBuilder> { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: WebsocketPhase { + transport: self.phase.transport, + }, + } + } +} + +// Shortcuts +impl SwarmBuilder> { + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result>, R::Error> { + self.without_dns() + .without_websocket() + .without_relay() + .with_behaviour(constructor) + } +} diff --git a/libp2p/src/builder/phase/identity.rs b/libp2p/src/builder/phase/identity.rs new file mode 100644 index 000000000000..ceb86819dc77 --- /dev/null +++ b/libp2p/src/builder/phase/identity.rs @@ -0,0 +1,21 @@ +use super::*; +use crate::SwarmBuilder; +use std::marker::PhantomData; + +pub struct IdentityPhase {} + +impl SwarmBuilder { + pub fn with_new_identity() -> SwarmBuilder { + SwarmBuilder::with_existing_identity(libp2p_identity::Keypair::generate_ed25519()) + } + + pub fn with_existing_identity( + keypair: libp2p_identity::Keypair, + ) -> SwarmBuilder { + SwarmBuilder { + keypair, + phantom: PhantomData, + phase: ProviderPhase {}, + } + } +} diff --git a/libp2p/src/builder/phase/other_transport.rs b/libp2p/src/builder/phase/other_transport.rs new file mode 100644 index 000000000000..b0d56cd92d27 --- /dev/null +++ b/libp2p/src/builder/phase/other_transport.rs @@ -0,0 +1,269 @@ +use std::convert::Infallible; +use std::marker::PhantomData; +use std::sync::Arc; + +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::Transport; +#[cfg(feature = "relay")] +use libp2p_core::{Negotiated, UpgradeInfo}; +#[cfg(feature = "relay")] +use libp2p_identity::PeerId; + +#[allow(deprecated)] +use crate::bandwidth::BandwidthSinks; +use crate::SwarmBuilder; + +use super::*; + +pub struct OtherTransportPhase { + pub(crate) transport: T, +} + +impl + SwarmBuilder> +{ + pub fn with_other_transport< + Muxer: libp2p_core::muxing::StreamMuxer + Send + 'static, + OtherTransport: Transport 
+ Send + Unpin + 'static, + R: TryIntoTransport, + >( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result< + SwarmBuilder>, + R::Error, + > + where + ::Error: Send + Sync + 'static, + ::Dial: Send, + ::ListenerUpgrade: Send, + ::Substream: Send, + ::Error: Send + Sync, + { + Ok(SwarmBuilder { + phase: OtherTransportPhase { + transport: self + .phase + .transport + .or_transport( + constructor(&self.keypair) + .try_into_transport()? + .map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn))), + ) + .map(|either, _| either.into_inner()), + }, + keypair: self.keypair, + phantom: PhantomData, + }) + } + + pub(crate) fn without_any_other_transports(self) -> SwarmBuilder> { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: DnsPhase { + transport: self.phase.transport, + }, + } + } +} + +// Shortcuts +#[cfg(all(not(target_arch = "wasm32"), feature = "async-std", feature = "dns"))] +impl + SwarmBuilder> +{ + pub async fn with_dns( + self, + ) -> Result< + SwarmBuilder< + super::provider::AsyncStd, + WebsocketPhase, + >, + std::io::Error, + > { + self.without_any_other_transports().with_dns().await + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "tokio", feature = "dns"))] +impl + SwarmBuilder> +{ + pub fn with_dns( + self, + ) -> Result< + SwarmBuilder< + super::provider::Tokio, + WebsocketPhase, + >, + std::io::Error, + > { + self.without_any_other_transports().with_dns() + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "async-std", feature = "dns"))] +impl + SwarmBuilder> +{ + pub fn with_dns_config( + self, + cfg: libp2p_dns::ResolverConfig, + opts: libp2p_dns::ResolverOpts, + ) -> SwarmBuilder< + super::provider::AsyncStd, + WebsocketPhase, + > { + self.without_any_other_transports() + .with_dns_config(cfg, opts) + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "tokio", feature = "dns"))] +impl + SwarmBuilder> +{ + pub fn with_dns_config( + self, + cfg: libp2p_dns::ResolverConfig, + 
opts: libp2p_dns::ResolverOpts, + ) -> SwarmBuilder> + { + self.without_any_other_transports() + .with_dns_config(cfg, opts) + } +} +#[cfg(feature = "relay")] +impl + SwarmBuilder> +{ + /// See [`SwarmBuilder::with_relay_client`]. + pub fn with_relay_client( + self, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder< + Provider, + BandwidthLoggingPhase, + >, + SecUpgrade::Error, + > where + + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: libp2p_core::muxing::StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + { + self.without_any_other_transports() + .without_dns() + .without_websocket() + .with_relay_client(security_upgrade, multiplexer_upgrade) + } +} +impl + SwarmBuilder> +{ + #[allow(deprecated)] + #[deprecated(note = "Use `with_bandwidth_metrics` instead.")] + pub fn with_bandwidth_logging( + self, + ) -> ( + SwarmBuilder< + Provider, + BandwidthMetricsPhase, + >, + Arc, + ) { + #[allow(deprecated)] + self.without_any_other_transports() + 
.without_dns() + .without_websocket() + .without_relay() + .with_bandwidth_logging() + } +} +#[cfg(feature = "metrics")] +impl + SwarmBuilder> +{ + pub fn with_bandwidth_metrics( + self, + registry: &mut libp2p_metrics::Registry, + ) -> SwarmBuilder< + Provider, + BehaviourPhase, + > { + self.without_any_other_transports() + .without_dns() + .without_websocket() + .without_relay() + .without_bandwidth_logging() + .with_bandwidth_metrics(registry) + } +} +impl + SwarmBuilder> +{ + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result>, R::Error> { + self.without_any_other_transports() + .without_dns() + .without_websocket() + .without_relay() + .without_bandwidth_logging() + .with_behaviour(constructor) + } +} + +pub trait TryIntoTransport: private::Sealed { + type Error; + + fn try_into_transport(self) -> Result; +} + +impl TryIntoTransport for T { + type Error = Infallible; + + fn try_into_transport(self) -> Result { + Ok(self) + } +} + +impl TryIntoTransport for Result> { + type Error = TransportError; + + fn try_into_transport(self) -> Result { + self.map_err(TransportError) + } +} + +mod private { + pub trait Sealed {} +} + +impl private::Sealed for T {} + +impl private::Sealed + for Result> +{ +} + +#[derive(Debug, thiserror::Error)] +#[error("failed to build transport: {0}")] +pub struct TransportError(Box); diff --git a/libp2p/src/builder/phase/provider.rs b/libp2p/src/builder/phase/provider.rs new file mode 100644 index 000000000000..32321442689c --- /dev/null +++ b/libp2p/src/builder/phase/provider.rs @@ -0,0 +1,46 @@ +#[allow(unused_imports)] +use super::*; + +use crate::SwarmBuilder; + +pub struct ProviderPhase {} + +impl SwarmBuilder { + #[cfg(all(not(target_arch = "wasm32"), feature = "async-std"))] + pub fn with_async_std(self) -> SwarmBuilder { + SwarmBuilder { + keypair: self.keypair, + phantom: std::marker::PhantomData, + phase: TcpPhase {}, + } + } + + #[cfg(all(not(target_arch = "wasm32"), 
feature = "tokio"))] + pub fn with_tokio(self) -> SwarmBuilder { + SwarmBuilder { + keypair: self.keypair, + phantom: std::marker::PhantomData, + phase: TcpPhase {}, + } + } + + #[cfg(feature = "wasm-bindgen")] + pub fn with_wasm_bindgen(self) -> SwarmBuilder { + SwarmBuilder { + keypair: self.keypair, + phantom: std::marker::PhantomData, + phase: TcpPhase {}, + } + } +} + +pub enum NoProviderSpecified {} + +#[cfg(all(not(target_arch = "wasm32"), feature = "async-std"))] +pub enum AsyncStd {} + +#[cfg(all(not(target_arch = "wasm32"), feature = "tokio"))] +pub enum Tokio {} + +#[cfg(feature = "wasm-bindgen")] +pub enum WasmBindgen {} diff --git a/libp2p/src/builder/phase/quic.rs b/libp2p/src/builder/phase/quic.rs new file mode 100644 index 000000000000..885b16e2e03a --- /dev/null +++ b/libp2p/src/builder/phase/quic.rs @@ -0,0 +1,316 @@ +use super::*; +use crate::SwarmBuilder; +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +use libp2p_core::muxing::StreamMuxer; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +#[cfg(any( + feature = "relay", + all(not(target_arch = "wasm32"), feature = "websocket") +))] +use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo}; +use std::{marker::PhantomData, sync::Arc}; + +pub struct QuicPhase { + pub(crate) transport: T, +} + +macro_rules! 
impl_quic_builder { + ($providerKebabCase:literal, $providerPascalCase:ty, $quic:ident) => { + #[cfg(all(not(target_arch = "wasm32"), feature = "quic", feature = $providerKebabCase))] + impl SwarmBuilder<$providerPascalCase, QuicPhase> { + pub fn with_quic( + self, + ) -> SwarmBuilder< + $providerPascalCase, + OtherTransportPhase, + > { + self.with_quic_config(std::convert::identity) + } + + pub fn with_quic_config( + self, + constructor: impl FnOnce(libp2p_quic::Config) -> libp2p_quic::Config, + ) -> SwarmBuilder< + $providerPascalCase, + OtherTransportPhase, + > { + SwarmBuilder { + phase: OtherTransportPhase { + transport: self + .phase + .transport + .or_transport( + libp2p_quic::$quic::Transport::new(constructor( + libp2p_quic::Config::new(&self.keypair), + )) + .map(|(peer_id, muxer), _| { + (peer_id, libp2p_core::muxing::StreamMuxerBox::new(muxer)) + }), + ) + .map(|either, _| either.into_inner()), + }, + keypair: self.keypair, + phantom: PhantomData, + } + } + } + }; +} + +impl_quic_builder!("async-std", AsyncStd, async_std); +impl_quic_builder!("tokio", super::provider::Tokio, tokio); + +impl SwarmBuilder> { + pub(crate) fn without_quic(self) -> SwarmBuilder> { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: OtherTransportPhase { + transport: self.phase.transport, + }, + } + } +} + +// Shortcuts +impl SwarmBuilder> { + /// See [`SwarmBuilder::with_relay_client`]. 
+ #[cfg(feature = "relay")] + pub fn with_relay_client( + self, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder< + Provider, + BandwidthLoggingPhase, + >, + SecUpgrade::Error, + > where + + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: libp2p_core::muxing::StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + { + self.without_quic() + .without_any_other_transports() + .without_dns() + .without_websocket() + .with_relay_client(security_upgrade, multiplexer_upgrade) + } + + pub fn with_other_transport< + Muxer: libp2p_core::muxing::StreamMuxer + Send + 'static, + OtherTransport: Transport + Send + Unpin + 'static, + R: TryIntoTransport, + >( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result< + SwarmBuilder>, + R::Error, + > + where + ::Error: Send + Sync + 'static, + ::Dial: Send, + ::ListenerUpgrade: Send, + ::Substream: Send, + ::Error: Send + Sync, + { + 
self.without_quic().with_other_transport(constructor) + } + + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result>, R::Error> { + self.without_quic() + .without_any_other_transports() + .without_dns() + .without_websocket() + .without_relay() + .with_behaviour(constructor) + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "async-std", feature = "dns"))] +impl SwarmBuilder> { + pub async fn with_dns( + self, + ) -> Result< + SwarmBuilder< + super::provider::AsyncStd, + WebsocketPhase, + >, + std::io::Error, + > { + self.without_quic() + .without_any_other_transports() + .with_dns() + .await + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "tokio", feature = "dns"))] +impl SwarmBuilder> { + pub fn with_dns( + self, + ) -> Result< + SwarmBuilder< + super::provider::Tokio, + WebsocketPhase, + >, + std::io::Error, + > { + self.without_quic() + .without_any_other_transports() + .with_dns() + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "async-std", feature = "dns"))] +impl SwarmBuilder> { + pub fn with_dns_config( + self, + cfg: libp2p_dns::ResolverConfig, + opts: libp2p_dns::ResolverOpts, + ) -> SwarmBuilder< + super::provider::AsyncStd, + WebsocketPhase, + > { + self.without_quic() + .without_any_other_transports() + .with_dns_config(cfg, opts) + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "tokio", feature = "dns"))] +impl SwarmBuilder> { + pub fn with_dns_config( + self, + cfg: libp2p_dns::ResolverConfig, + opts: libp2p_dns::ResolverOpts, + ) -> SwarmBuilder> + { + self.without_quic() + .without_any_other_transports() + .with_dns_config(cfg, opts) + } +} + +macro_rules! impl_quic_phase_with_websocket { + ($providerKebabCase:literal, $providerPascalCase:ty, $websocketStream:ty) => { + #[cfg(all(feature = $providerKebabCase, not(target_arch = "wasm32"), feature = "websocket"))] + impl SwarmBuilder<$providerPascalCase, QuicPhase> { + /// See [`SwarmBuilder::with_websocket`]. 
+ pub async fn with_websocket < + SecUpgrade, + SecStream, + SecError, + MuxUpgrade, + MuxStream, + MuxError, + > ( + self, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder< + $providerPascalCase, + RelayPhase, + >, + super::websocket::WebsocketError, + > + where + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade<$websocketStream>, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + { + self.without_quic() + .without_any_other_transports() + .without_dns() + .with_websocket(security_upgrade, multiplexer_upgrade) + .await + } + } + } +} +impl_quic_phase_with_websocket!( + "async-std", + super::provider::AsyncStd, + rw_stream_sink::RwStreamSink< + libp2p_websocket::BytesConnection, + > +); +impl_quic_phase_with_websocket!( + "tokio", + super::provider::Tokio, + rw_stream_sink::RwStreamSink> +); +impl SwarmBuilder> { + #[allow(deprecated)] + #[deprecated(note = "Use `with_bandwidth_metrics` instead.")] + pub fn 
with_bandwidth_logging( + self, + ) -> ( + SwarmBuilder< + Provider, + BandwidthMetricsPhase, + >, + Arc, + ) { + #[allow(deprecated)] + self.without_quic() + .without_any_other_transports() + .without_dns() + .without_websocket() + .without_relay() + .with_bandwidth_logging() + } +} +#[cfg(feature = "metrics")] +impl SwarmBuilder> { + pub fn with_bandwidth_metrics( + self, + registry: &mut libp2p_metrics::Registry, + ) -> SwarmBuilder< + Provider, + BehaviourPhase, + > { + self.without_quic() + .without_any_other_transports() + .without_dns() + .without_websocket() + .without_relay() + .without_bandwidth_logging() + .with_bandwidth_metrics(registry) + } +} diff --git a/libp2p/src/builder/phase/relay.rs b/libp2p/src/builder/phase/relay.rs new file mode 100644 index 000000000000..f8305f9d2460 --- /dev/null +++ b/libp2p/src/builder/phase/relay.rs @@ -0,0 +1,143 @@ +use std::marker::PhantomData; + +#[cfg(feature = "relay")] +use libp2p_core::muxing::StreamMuxerBox; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +#[cfg(feature = "relay")] +use libp2p_core::Transport; +#[cfg(any(feature = "relay", feature = "websocket"))] +use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, StreamMuxer, UpgradeInfo}; +#[cfg(feature = "relay")] +use libp2p_identity::PeerId; + +use crate::SwarmBuilder; + +use super::*; + +pub struct RelayPhase { + pub(crate) transport: T, +} + +#[cfg(feature = "relay")] +impl SwarmBuilder> { + /// Adds a relay client transport. + /// + /// Note that both `security_upgrade` and `multiplexer_upgrade` take function pointers, + /// i.e. they take the function themselves (without the invocation via `()`), not the + /// result of the function invocation. See example below. 
+ /// + /// ``` rust + /// # use libp2p::SwarmBuilder; + /// # use std::error::Error; + /// # async fn build_swarm() -> Result<(), Box> { + /// let swarm = SwarmBuilder::with_new_identity() + /// .with_tokio() + /// .with_tcp( + /// Default::default(), + /// (libp2p_tls::Config::new, libp2p_noise::Config::new), + /// libp2p_yamux::Config::default, + /// )? + /// .with_relay_client( + /// (libp2p_tls::Config::new, libp2p_noise::Config::new), + /// libp2p_yamux::Config::default, + /// )? + /// # ; + /// # Ok(()) + /// # } + /// ``` + pub fn with_relay_client( + self, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder< + Provider, + BandwidthLoggingPhase, + >, + SecUpgrade::Error, + > where + + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + { + let (relay_transport, relay_behaviour) = + libp2p_relay::client::new(self.keypair.public().to_peer_id()); + let relay_transport = relay_transport + 
.upgrade(libp2p_core::upgrade::Version::V1Lazy) + .authenticate(security_upgrade.into_security_upgrade(&self.keypair)?) + .multiplex(multiplexer_upgrade.into_multiplexer_upgrade()) + .map(|(p, c), _| (p, StreamMuxerBox::new(c))); + + Ok(SwarmBuilder { + phase: BandwidthLoggingPhase { + relay_behaviour, + transport: relay_transport + .or_transport(self.phase.transport) + .map(|either, _| either.into_inner()), + }, + keypair: self.keypair, + phantom: PhantomData, + }) + } +} + +pub struct NoRelayBehaviour; + +impl SwarmBuilder> { + pub(crate) fn without_relay( + self, + ) -> SwarmBuilder> { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: BandwidthLoggingPhase { + transport: self.phase.transport, + relay_behaviour: NoRelayBehaviour, + }, + } + } +} + +// Shortcuts +#[cfg(feature = "metrics")] +impl SwarmBuilder> { + pub fn with_bandwidth_metrics( + self, + registry: &mut libp2p_metrics::Registry, + ) -> SwarmBuilder< + Provider, + BehaviourPhase, + > { + self.without_relay() + .without_bandwidth_logging() + .with_bandwidth_metrics(registry) + } +} +impl SwarmBuilder> { + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result>, R::Error> { + self.without_relay() + .without_bandwidth_logging() + .without_bandwidth_metrics() + .with_behaviour(constructor) + } +} diff --git a/libp2p/src/builder/phase/swarm.rs b/libp2p/src/builder/phase/swarm.rs new file mode 100644 index 000000000000..ee456ced9271 --- /dev/null +++ b/libp2p/src/builder/phase/swarm.rs @@ -0,0 +1,60 @@ +#[allow(unused_imports)] +use super::*; + +#[allow(dead_code)] +pub struct SwarmPhase { + pub(crate) behaviour: B, + pub(crate) transport: T, +} + +macro_rules! 
impl_with_swarm_config { + ($providerKebabCase:literal, $providerPascalCase:ty, $config:expr) => { + #[cfg(feature = $providerKebabCase)] + impl SwarmBuilder<$providerPascalCase, SwarmPhase> { + pub fn with_swarm_config( + self, + constructor: impl FnOnce(libp2p_swarm::Config) -> libp2p_swarm::Config, + ) -> SwarmBuilder<$providerPascalCase, BuildPhase> { + SwarmBuilder { + phase: BuildPhase { + behaviour: self.phase.behaviour, + transport: self.phase.transport, + swarm_config: constructor($config), + }, + keypair: self.keypair, + phantom: std::marker::PhantomData, + } + } + + // Shortcuts + pub fn build(self) -> libp2p_swarm::Swarm + where + B: libp2p_swarm::NetworkBehaviour, + T: AuthenticatedMultiplexedTransport, + { + self.with_swarm_config(std::convert::identity).build() + } + } + }; +} + +#[cfg(not(target_arch = "wasm32"))] +impl_with_swarm_config!( + "async-std", + super::provider::AsyncStd, + libp2p_swarm::Config::with_async_std_executor() +); + +#[cfg(not(target_arch = "wasm32"))] +impl_with_swarm_config!( + "tokio", + super::provider::Tokio, + libp2p_swarm::Config::with_tokio_executor() +); + +#[cfg(target_arch = "wasm32")] +impl_with_swarm_config!( + "wasm-bindgen", + super::provider::WasmBindgen, + libp2p_swarm::Config::with_wasm_executor() +); diff --git a/libp2p/src/builder/phase/tcp.rs b/libp2p/src/builder/phase/tcp.rs new file mode 100644 index 000000000000..4b7cf29b3d29 --- /dev/null +++ b/libp2p/src/builder/phase/tcp.rs @@ -0,0 +1,251 @@ +use super::*; +use crate::SwarmBuilder; +#[cfg(all( + not(target_arch = "wasm32"), + any(feature = "tcp", feature = "websocket") +))] +use libp2p_core::muxing::{StreamMuxer, StreamMuxerBox}; +#[cfg(all(feature = "websocket", not(target_arch = "wasm32")))] +use libp2p_core::Transport; +#[cfg(all( + not(target_arch = "wasm32"), + any(feature = "tcp", feature = "websocket") +))] +use libp2p_core::{ + upgrade::InboundConnectionUpgrade, upgrade::OutboundConnectionUpgrade, Negotiated, UpgradeInfo, +}; +use 
std::marker::PhantomData; + +pub struct TcpPhase {} + +macro_rules! impl_tcp_builder { + ($providerKebabCase:literal, $providerPascalCase:ty, $path:ident) => { + #[cfg(all( + not(target_arch = "wasm32"), + feature = "tcp", + feature = $providerKebabCase, + ))] + impl SwarmBuilder<$providerPascalCase, TcpPhase> { + /// Adds a TCP based transport. + /// + /// Note that both `security_upgrade` and `multiplexer_upgrade` take function pointers, + /// i.e. they take the function themselves (without the invocation via `()`), not the + /// result of the function invocation. See example below. + /// + /// ``` rust + /// # use libp2p::SwarmBuilder; + /// # use std::error::Error; + /// # async fn build_swarm() -> Result<(), Box> { + /// let swarm = SwarmBuilder::with_new_identity() + /// .with_tokio() + /// .with_tcp( + /// Default::default(), + /// (libp2p_tls::Config::new, libp2p_noise::Config::new), + /// libp2p_yamux::Config::default, + /// )? + /// # ; + /// # Ok(()) + /// # } + /// ``` + pub fn with_tcp( + self, + tcp_config: libp2p_tcp::Config, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder<$providerPascalCase, QuicPhase>, + SecUpgrade::Error, + > + where + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, 
Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + { + Ok(SwarmBuilder { + phase: QuicPhase { + transport: libp2p_tcp::$path::Transport::new(tcp_config) + .upgrade(libp2p_core::upgrade::Version::V1Lazy) + .authenticate( + security_upgrade.into_security_upgrade(&self.keypair)?, + ) + .multiplex(multiplexer_upgrade.into_multiplexer_upgrade()) + .map(|(p, c), _| (p, StreamMuxerBox::new(c))), + }, + keypair: self.keypair, + phantom: PhantomData, + }) + } + } + }; +} + +impl_tcp_builder!("async-std", super::provider::AsyncStd, async_io); +impl_tcp_builder!("tokio", super::provider::Tokio, tokio); + +impl SwarmBuilder { + pub(crate) fn without_tcp( + self, + ) -> SwarmBuilder> { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: QuicPhase { + transport: libp2p_core::transport::dummy::DummyTransport::new(), + }, + } + } +} + +// Shortcuts +#[cfg(all(not(target_arch = "wasm32"), feature = "quic", feature = "async-std"))] +impl SwarmBuilder { + pub fn with_quic( + self, + ) -> SwarmBuilder< + super::provider::AsyncStd, + OtherTransportPhase, + > { + self.without_tcp().with_quic() + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "quic", feature = "tokio"))] +impl SwarmBuilder { + pub fn with_quic( + self, + ) -> SwarmBuilder< + super::provider::Tokio, + OtherTransportPhase, + > { + self.without_tcp().with_quic() + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "quic", feature = "async-std"))] +impl SwarmBuilder { + pub fn with_quic_config( + self, + constructor: impl FnOnce(libp2p_quic::Config) -> libp2p_quic::Config, + ) -> SwarmBuilder< + super::provider::AsyncStd, + OtherTransportPhase, + > { + self.without_tcp().with_quic_config(constructor) 
+ } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "quic", feature = "tokio"))] +impl SwarmBuilder { + pub fn with_quic_config( + self, + constructor: impl FnOnce(libp2p_quic::Config) -> libp2p_quic::Config, + ) -> SwarmBuilder< + super::provider::Tokio, + OtherTransportPhase, + > { + self.without_tcp().with_quic_config(constructor) + } +} +impl SwarmBuilder { + pub fn with_other_transport< + Muxer: libp2p_core::muxing::StreamMuxer + Send + 'static, + OtherTransport: Transport + Send + Unpin + 'static, + R: TryIntoTransport, + >( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result< + SwarmBuilder>, + R::Error, + > + where + ::Error: Send + Sync + 'static, + ::Dial: Send, + ::ListenerUpgrade: Send, + ::Substream: Send, + ::Error: Send + Sync, + { + self.without_tcp() + .without_quic() + .with_other_transport(constructor) + } +} +macro_rules! impl_tcp_phase_with_websocket { + ($providerKebabCase:literal, $providerPascalCase:ty, $websocketStream:ty) => { + #[cfg(all(feature = $providerKebabCase, not(target_arch = "wasm32"), feature = "websocket"))] + impl SwarmBuilder<$providerPascalCase, TcpPhase> { + /// See [`SwarmBuilder::with_websocket`]. 
+ pub async fn with_websocket < + SecUpgrade, + SecStream, + SecError, + MuxUpgrade, + MuxStream, + MuxError, + > ( + self, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder< + $providerPascalCase, + RelayPhase, + >, + WebsocketError, + > + where + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade<$websocketStream>, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + { + self.without_tcp() + .without_quic() + .without_any_other_transports() + .without_dns() + .with_websocket(security_upgrade, multiplexer_upgrade) + .await + } + } + } +} +impl_tcp_phase_with_websocket!( + "async-std", + super::provider::AsyncStd, + rw_stream_sink::RwStreamSink< + libp2p_websocket::BytesConnection, + > +); +impl_tcp_phase_with_websocket!( + "tokio", + super::provider::Tokio, + rw_stream_sink::RwStreamSink> +); diff --git a/libp2p/src/builder/phase/websocket.rs b/libp2p/src/builder/phase/websocket.rs new file mode 100644 index 
000000000000..68a85bb77b7d --- /dev/null +++ b/libp2p/src/builder/phase/websocket.rs @@ -0,0 +1,229 @@ +use super::*; +use crate::SwarmBuilder; +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +use libp2p_core::muxing::{StreamMuxer, StreamMuxerBox}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +use libp2p_core::Transport; +#[cfg(any( + all(not(target_arch = "wasm32"), feature = "websocket"), + feature = "relay" +))] +use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo}; +#[cfg(any( + all(not(target_arch = "wasm32"), feature = "websocket"), + feature = "relay" +))] +use libp2p_identity::PeerId; +use std::marker::PhantomData; + +pub struct WebsocketPhase { + pub(crate) transport: T, +} + +macro_rules! impl_websocket_builder { + ($providerKebabCase:literal, $providerPascalCase:ty, $dnsTcp:expr, $websocketStream:ty) => { + /// Adds a websocket client transport. + /// + /// Note that both `security_upgrade` and `multiplexer_upgrade` take function pointers, + /// i.e. they take the function themselves (without the invocation via `()`), not the + /// result of the function invocation. See example below. + /// + /// ``` rust + /// # use libp2p::SwarmBuilder; + /// # use std::error::Error; + /// # async fn build_swarm() -> Result<(), Box> { + /// let swarm = SwarmBuilder::with_new_identity() + /// .with_tokio() + /// .with_websocket( + /// (libp2p_tls::Config::new, libp2p_noise::Config::new), + /// libp2p_yamux::Config::default, + /// ) + /// .await? 
+ /// # ; + /// # Ok(()) + /// # } + /// ``` + #[cfg(all(not(target_arch = "wasm32"), feature = $providerKebabCase, feature = "websocket"))] + impl SwarmBuilder<$providerPascalCase, WebsocketPhase> { + pub async fn with_websocket< + SecUpgrade, + SecStream, + SecError, + MuxUpgrade, + MuxStream, + MuxError, + >( + self, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder< + $providerPascalCase, + RelayPhase, + >, + WebsocketError, + > + + where + T: AuthenticatedMultiplexedTransport, + + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade<$websocketStream>, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + { + let security_upgrade = security_upgrade.into_security_upgrade(&self.keypair) + .map_err(WebsocketErrorInner::SecurityUpgrade)?; + let websocket_transport = libp2p_websocket::WsConfig::new( + $dnsTcp.await.map_err(WebsocketErrorInner::Dns)?, + ) + .upgrade(libp2p_core::upgrade::Version::V1Lazy) + .authenticate(security_upgrade) + 
.multiplex(multiplexer_upgrade.into_multiplexer_upgrade()) + .map(|(p, c), _| (p, StreamMuxerBox::new(c))); + + Ok(SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: RelayPhase { + transport: websocket_transport + .or_transport(self.phase.transport) + .map(|either, _| either.into_inner()), + }, + }) + } + } + }; +} + +impl_websocket_builder!( + "async-std", + super::provider::AsyncStd, + libp2p_dns::async_std::Transport::system(libp2p_tcp::async_io::Transport::new( + libp2p_tcp::Config::default(), + )), + rw_stream_sink::RwStreamSink< + libp2p_websocket::BytesConnection, + > +); +impl_websocket_builder!( + "tokio", + super::provider::Tokio, + // Note this is an unnecessary await for Tokio Websocket (i.e. tokio dns) in order to be consistent + // with above AsyncStd construction. + futures::future::ready(libp2p_dns::tokio::Transport::system( + libp2p_tcp::tokio::Transport::new(libp2p_tcp::Config::default()) + )), + rw_stream_sink::RwStreamSink> +); + +impl SwarmBuilder> { + pub(crate) fn without_websocket(self) -> SwarmBuilder> { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: RelayPhase { + transport: self.phase.transport, + }, + } + } +} + +// Shortcuts +#[cfg(feature = "relay")] +impl SwarmBuilder> { + /// See [`SwarmBuilder::with_relay_client`]. 
+ pub fn with_relay_client( + self, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder< + Provider, + BandwidthLoggingPhase, + >, + SecUpgrade::Error, + > where + + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: libp2p_core::muxing::StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + { + self.without_websocket() + .with_relay_client(security_upgrade, multiplexer_upgrade) + } +} +#[cfg(feature = "metrics")] +impl SwarmBuilder> { + pub fn with_bandwidth_metrics( + self, + registry: &mut libp2p_metrics::Registry, + ) -> SwarmBuilder< + Provider, + BehaviourPhase, + > { + self.without_websocket() + .without_relay() + .without_bandwidth_logging() + .with_bandwidth_metrics(registry) + } +} +impl SwarmBuilder> { + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result>, R::Error> { + self.without_websocket() + .without_relay() + .without_bandwidth_logging() + .with_behaviour(constructor) + } +} + 
+#[derive(Debug, thiserror::Error)] +#[error(transparent)] +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +pub struct WebsocketError(#[from] WebsocketErrorInner); + +#[derive(Debug, thiserror::Error)] +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +enum WebsocketErrorInner { + #[error("SecurityUpgrade")] + SecurityUpgrade(Sec), + #[cfg(feature = "dns")] + #[error("Dns")] + Dns(#[from] std::io::Error), +} diff --git a/libp2p/src/builder/select_muxer.rs b/libp2p/src/builder/select_muxer.rs new file mode 100644 index 000000000000..c93ba9d99913 --- /dev/null +++ b/libp2p/src/builder/select_muxer.rs @@ -0,0 +1,98 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +#![allow(unreachable_pub)] + +use either::Either; +use futures::future; +use libp2p_core::either::EitherFuture; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::UpgradeInfo; +use std::iter::{Chain, Map}; + +#[derive(Debug, Clone)] +pub struct SelectMuxerUpgrade(A, B); + +impl SelectMuxerUpgrade { + pub fn new(a: A, b: B) -> Self { + SelectMuxerUpgrade(a, b) + } +} + +impl UpgradeInfo for SelectMuxerUpgrade +where + A: UpgradeInfo, + B: UpgradeInfo, +{ + type Info = Either; + type InfoIter = Chain< + Map<::IntoIter, fn(A::Info) -> Self::Info>, + Map<::IntoIter, fn(B::Info) -> Self::Info>, + >; + + fn protocol_info(&self) -> Self::InfoIter { + let a = self + .0 + .protocol_info() + .into_iter() + .map(Either::Left as fn(A::Info) -> _); + let b = self + .1 + .protocol_info() + .into_iter() + .map(Either::Right as fn(B::Info) -> _); + + a.chain(b) + } +} + +impl InboundConnectionUpgrade for SelectMuxerUpgrade +where + A: InboundConnectionUpgrade, + B: InboundConnectionUpgrade, +{ + type Output = future::Either; + type Error = Either; + type Future = EitherFuture; + + fn upgrade_inbound(self, sock: C, info: Self::Info) -> Self::Future { + match info { + Either::Left(info) => EitherFuture::First(self.0.upgrade_inbound(sock, info)), + Either::Right(info) => EitherFuture::Second(self.1.upgrade_inbound(sock, info)), + } + } +} + +impl OutboundConnectionUpgrade for SelectMuxerUpgrade +where + A: OutboundConnectionUpgrade, + B: OutboundConnectionUpgrade, +{ + type Output = future::Either; + type Error = Either; + type Future = EitherFuture; + + fn upgrade_outbound(self, sock: C, info: Self::Info) -> Self::Future { + match info { + Either::Left(info) => EitherFuture::First(self.0.upgrade_outbound(sock, info)), + Either::Right(info) => EitherFuture::Second(self.1.upgrade_outbound(sock, info)), + } + } +} diff --git a/libp2p/src/builder/select_security.rs b/libp2p/src/builder/select_security.rs new file mode 100644 index 
000000000000..d6c7f8c172f0 --- /dev/null +++ b/libp2p/src/builder/select_security.rs @@ -0,0 +1,115 @@ +// Copyright 2023 Protocol Labs. +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +#![allow(unreachable_pub)] + +use either::Either; +use futures::future::MapOk; +use futures::{future, TryFutureExt}; +use libp2p_core::either::EitherFuture; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; +use libp2p_identity::PeerId; +use std::iter::{Chain, Map}; + +/// Upgrade that combines two upgrades into one. Supports all the protocols supported by either +/// sub-upgrade. +/// +/// The protocols supported by the first element have a higher priority. +#[derive(Debug, Clone)] +pub struct SelectSecurityUpgrade(A, B); + +impl SelectSecurityUpgrade { + /// Combines two upgrades into an `SelectUpgrade`. 
+ /// + /// The protocols supported by the first element have a higher priority. + pub fn new(a: A, b: B) -> Self { + SelectSecurityUpgrade(a, b) + } +} + +impl UpgradeInfo for SelectSecurityUpgrade +where + A: UpgradeInfo, + B: UpgradeInfo, +{ + type Info = Either; + type InfoIter = Chain< + Map<::IntoIter, fn(A::Info) -> Self::Info>, + Map<::IntoIter, fn(B::Info) -> Self::Info>, + >; + + fn protocol_info(&self) -> Self::InfoIter { + let a = self + .0 + .protocol_info() + .into_iter() + .map(Either::Left as fn(A::Info) -> _); + let b = self + .1 + .protocol_info() + .into_iter() + .map(Either::Right as fn(B::Info) -> _); + + a.chain(b) + } +} + +impl InboundConnectionUpgrade for SelectSecurityUpgrade +where + A: InboundConnectionUpgrade, + B: InboundConnectionUpgrade, +{ + type Output = (PeerId, future::Either); + type Error = Either; + type Future = MapOk< + EitherFuture, + fn(future::Either<(PeerId, TA), (PeerId, TB)>) -> (PeerId, future::Either), + >; + + fn upgrade_inbound(self, sock: C, info: Self::Info) -> Self::Future { + match info { + Either::Left(info) => EitherFuture::First(self.0.upgrade_inbound(sock, info)), + Either::Right(info) => EitherFuture::Second(self.1.upgrade_inbound(sock, info)), + } + .map_ok(future::Either::factor_first) + } +} + +impl OutboundConnectionUpgrade for SelectSecurityUpgrade +where + A: OutboundConnectionUpgrade, + B: OutboundConnectionUpgrade, +{ + type Output = (PeerId, future::Either); + type Error = Either; + type Future = MapOk< + EitherFuture, + fn(future::Either<(PeerId, TA), (PeerId, TB)>) -> (PeerId, future::Either), + >; + + fn upgrade_outbound(self, sock: C, info: Self::Info) -> Self::Future { + match info { + Either::Left(info) => EitherFuture::First(self.0.upgrade_outbound(sock, info)), + Either::Right(info) => EitherFuture::Second(self.1.upgrade_outbound(sock, info)), + } + .map_ok(future::Either::factor_first) + } +} diff --git a/libp2p/src/lib.rs b/libp2p/src/lib.rs index e3934e6bc349..58f911e94459 100644 --- 
a/libp2p/src/lib.rs +++ b/libp2p/src/lib.rs @@ -22,8 +22,9 @@ //! //! To learn more about the general libp2p multi-language framework visit . //! -//! To get started with this libp2p implementation in Rust, please take a look at the [`tutorials`]. -//! Further examples can be found in the [examples] directory. +//! To get started with this libp2p implementation in Rust, please take a look +//! at the [`tutorials`]. Further examples can be found in the +//! [examples] directory. //! //! [examples]: https://github.com/libp2p/rust-libp2p/tree/master/examples @@ -50,15 +51,6 @@ pub use libp2p_core as core; #[cfg(feature = "dcutr")] #[doc(inline)] pub use libp2p_dcutr as dcutr; - -#[cfg(feature = "deflate")] -#[cfg(not(target_arch = "wasm32"))] -#[deprecated( - note = "Will be removed in the next release, see https://github.com/libp2p/rust-libp2p/issues/4522 for details." -)] -pub mod deflate { - pub use libp2p_deflate::*; -} #[cfg(feature = "dns")] #[cfg_attr(docsrs, doc(cfg(feature = "dns")))] #[cfg(not(target_arch = "wasm32"))] @@ -134,13 +126,13 @@ pub use libp2p_uds as uds; #[cfg(not(target_arch = "wasm32"))] #[doc(inline)] pub use libp2p_upnp as upnp; -#[cfg(feature = "wasm-ext")] -#[doc(inline)] -pub use libp2p_wasm_ext as wasm_ext; #[cfg(feature = "websocket")] #[cfg(not(target_arch = "wasm32"))] #[doc(inline)] pub use libp2p_websocket as websocket; +#[cfg(feature = "websocket-websys")] +#[doc(inline)] +pub use libp2p_websocket_websys as websocket_websys; #[cfg(feature = "webtransport-websys")] #[cfg_attr(docsrs, doc(cfg(feature = "webtransport-websys")))] #[doc(inline)] @@ -149,6 +141,7 @@ pub use libp2p_webtransport_websys as webtransport_websys; #[doc(inline)] pub use libp2p_yamux as yamux; +mod builder; mod transport_ext; pub mod bandwidth; @@ -156,6 +149,7 @@ pub mod bandwidth; #[cfg(doc)] pub mod tutorials; +pub use self::builder::SwarmBuilder; pub use self::core::{ transport::TransportError, upgrade::{InboundUpgrade, OutboundUpgrade}, @@ -167,89 +161,3 @@ 
pub use self::transport_ext::TransportExt; pub use libp2p_identity as identity; pub use libp2p_identity::PeerId; pub use libp2p_swarm::{Stream, StreamProtocol}; - -/// Builds a `Transport` based on TCP/IP that supports the most commonly-used features of libp2p: -/// -/// * DNS resolution. -/// * Noise protocol encryption. -/// * Websockets. -/// * Yamux for substream multiplexing. -/// -/// All async I/O of the transport is based on `async-std`. -/// -/// > **Note**: This `Transport` is not suitable for production usage, as its implementation -/// > reserves the right to support additional protocols or remove deprecated protocols. -#[cfg(all( - not(target_arch = "wasm32"), - feature = "tcp", - feature = "dns", - feature = "websocket", - feature = "noise", - feature = "yamux", - feature = "async-std", -))] -pub async fn development_transport( - keypair: identity::Keypair, -) -> std::io::Result> { - let transport = { - let dns_tcp = dns::async_std::Transport::system(tcp::async_io::Transport::new( - tcp::Config::new().nodelay(true), - )) - .await?; - let ws_dns_tcp = websocket::WsConfig::new( - dns::async_std::Transport::system(tcp::async_io::Transport::new( - tcp::Config::new().nodelay(true), - )) - .await?, - ); - dns_tcp.or_transport(ws_dns_tcp) - }; - - Ok(transport - .upgrade(core::upgrade::Version::V1) - .authenticate(noise::Config::new(&keypair).unwrap()) - .multiplex(yamux::Config::default()) - .timeout(std::time::Duration::from_secs(20)) - .boxed()) -} - -/// Builds a `Transport` based on TCP/IP that supports the most commonly-used features of libp2p: -/// -/// * DNS resolution. -/// * Noise protocol encryption. -/// * Websockets. -/// * Yamux for substream multiplexing. -/// -/// All async I/O of the transport is based on `tokio`. -/// -/// > **Note**: This `Transport` is not suitable for production usage, as its implementation -/// > reserves the right to support additional protocols or remove deprecated protocols. 
-#[cfg(all( - not(target_arch = "wasm32"), - feature = "tcp", - feature = "dns", - feature = "websocket", - feature = "noise", - feature = "yamux", - feature = "tokio", -))] -pub fn tokio_development_transport( - keypair: identity::Keypair, -) -> std::io::Result> { - let transport = { - let dns_tcp = dns::tokio::Transport::system(tcp::tokio::Transport::new( - tcp::Config::new().nodelay(true), - ))?; - let ws_dns_tcp = websocket::WsConfig::new(dns::tokio::Transport::system( - tcp::tokio::Transport::new(tcp::Config::new().nodelay(true)), - )?); - dns_tcp.or_transport(ws_dns_tcp) - }; - - Ok(transport - .upgrade(core::upgrade::Version::V1) - .authenticate(noise::Config::new(&keypair).unwrap()) - .multiplex(yamux::Config::default()) - .timeout(std::time::Duration::from_secs(20)) - .boxed()) -} diff --git a/libp2p/src/transport_ext.rs b/libp2p/src/transport_ext.rs index 8f7c16574f64..4f07484fc1ff 100644 --- a/libp2p/src/transport_ext.rs +++ b/libp2p/src/transport_ext.rs @@ -20,21 +20,20 @@ //! Provides the `TransportExt` trait. +#[allow(deprecated)] +use crate::bandwidth::{BandwidthLogging, BandwidthSinks}; use crate::core::{ muxing::{StreamMuxer, StreamMuxerBox}, transport::Boxed, }; -use crate::{ - bandwidth::{BandwidthLogging, BandwidthSinks}, - Transport, -}; +use crate::Transport; use libp2p_identity::PeerId; use std::sync::Arc; /// Trait automatically implemented on all objects that implement `Transport`. Provides some /// additional utilities. pub trait TransportExt: Transport { - /// Adds a layer on the `Transport` that logs all trafic that passes through the streams + /// Adds a layer on the `Transport` that logs all traffic that passes through the streams /// created by it. 
/// /// This method returns an `Arc` that can be used to retrieve the total number @@ -66,6 +65,10 @@ pub trait TransportExt: Transport { /// /// let (transport, sinks) = transport.with_bandwidth_logging(); /// ``` + #[allow(deprecated)] + #[deprecated( + note = "Use `libp2p::SwarmBuilder::with_bandwidth_metrics` or `libp2p_metrics::BandwidthTransport` instead." + )] fn with_bandwidth_logging(self) -> (Boxed<(PeerId, StreamMuxerBox)>, Arc) where Self: Sized + Send + Unpin + 'static, diff --git a/libp2p/src/tutorials/hole_punching.rs b/libp2p/src/tutorials/hole_punching.rs index 5fd74fe754eb..f9f42432ba41 100644 --- a/libp2p/src/tutorials/hole_punching.rs +++ b/libp2p/src/tutorials/hole_punching.rs @@ -166,18 +166,9 @@ //! [2022-01-30T12:54:10Z INFO client] Established connection to PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") via Dialer { address: "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X", role_override: Dialer } //! ``` //! -//! 2. The listening client initiating a direct connection upgrade for the new relayed connection. -//! Reported by [`dcutr`](crate::dcutr) through -//! [`Event::RemoteInitiatedDirectConnectionUpgrade`](crate::dcutr::Event::RemoteInitiatedDirectConnectionUpgrade). +//! 2. The direct connection upgrade, also known as hole punch, succeeding. +//! Reported by [`dcutr`](crate::dcutr) through [`Event`](crate::dcutr::Event) containing [`Result::Ok`] with the [`ConnectionId`](libp2p_swarm::ConnectionId) of the new direct connection. //! //! ``` ignore -//! [2022-01-30T12:54:11Z INFO client] RemoteInitiatedDirectConnectionUpgrade { remote_peer_id: PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), remote_relayed_addr: "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X" } -//! ``` -//! -//! 3. 
The direct connection upgrade, also known as hole punch, succeeding. Reported by -//! [`dcutr`](crate::dcutr) through -//! [`Event::RemoteInitiatedDirectConnectionUpgrade`](crate::dcutr::Event::DirectConnectionUpgradeSucceeded). -//! -//! ``` ignore -//! [2022-01-30T12:54:11Z INFO client] DirectConnectionUpgradeSucceeded { remote_peer_id: PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") } +//! [2022-01-30T12:54:11Z INFO client] Event { remote_peer_id: PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), result: Ok(2) } //! ``` diff --git a/libp2p/src/tutorials/ping.rs b/libp2p/src/tutorials/ping.rs index aedc149228e5..1413531cd721 100644 --- a/libp2p/src/tutorials/ping.rs +++ b/libp2p/src/tutorials/ping.rs @@ -55,10 +55,10 @@ //! edition = "2021" //! //! [dependencies] -//! libp2p = { version = "0.50", features = ["tcp", "dns", "async-std", "noise", "yamux", "websocket", "ping", "macros"] } +//! libp2p = { version = "0.52", features = ["tcp", "tls", "dns", "async-std", "noise", "yamux", "websocket", "ping", "macros"] } //! futures = "0.3.21" -//! env_logger = "0.10.0" //! async-std = { version = "1.12.0", features = ["attributes"] } +//! tracing-subscriber = { version = "0.3", features = ["env-filter"] } //! ``` //! //! ## Network identity @@ -71,50 +71,44 @@ //! derived from their public key. Now, replace the contents of main.rs by: //! //! ```rust -//! use libp2p::{identity, PeerId}; //! use std::error::Error; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! let local_key = identity::Keypair::generate_ed25519(); -//! let local_peer_id = PeerId::from(local_key.public()); -//! println!("Local peer id: {local_peer_id:?}"); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); +//! +//! let mut swarm = libp2p::SwarmBuilder::with_new_identity(); //! //! Ok(()) //! } //! ``` //! -//! Go ahead and build and run the above code with: `cargo run`. 
A unique -//! [`PeerId`](crate::PeerId) should be displayed. +//! Go ahead and build and run the above code with: `cargo run`. Nothing happening thus far. //! //! ## Transport //! -//! Next up we need to construct a transport. A transport in libp2p provides -//! connection-oriented communication channels (e.g. TCP) as well as upgrades -//! on top of those like authentication and encryption protocols. Technically, -//! a libp2p transport is anything that implements the [`Transport`] trait. -//! -//! Instead of constructing a transport ourselves for this tutorial, we use the -//! convenience function [`development_transport`](crate::development_transport) -//! that creates a TCP transport with [`noise`](crate::noise) for authenticated -//! encryption. -//! -//! Furthermore, [`development_transport`] builds a multiplexed transport, -//! whereby multiple logical substreams can coexist on the same underlying (TCP) -//! connection. For further details on substream multiplexing, take a look at -//! [`crate::core::muxing`] and [`yamux`](crate::yamux). +//! Next up we need to construct a transport. Each transport in libp2p provides encrypted streams. +//! E.g. combining TCP to establish connections, TLS to encrypt these connections and Yamux to run +//! one or more streams on a connection. Another libp2p transport is QUIC, providing encrypted +//! streams out-of-the-box. We will stick to TCP for now. Each of these implement the [`Transport`] +//! trait. //! //! ```rust -//! use libp2p::{identity, PeerId}; //! use std::error::Error; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! let local_key = identity::Keypair::generate_ed25519(); -//! let local_peer_id = PeerId::from(local_key.public()); -//! println!("Local peer id: {local_peer_id:?}"); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); //! -//! let transport = libp2p::development_transport(local_key).await?; +//! 
let mut swarm = libp2p::SwarmBuilder::with_new_identity() +//! .with_async_std() +//! .with_tcp( +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, +//! )?; //! //! Ok(()) //! } @@ -125,37 +119,40 @@ //! Now it is time to look at another core trait of rust-libp2p: the //! [`NetworkBehaviour`]. While the previously introduced trait [`Transport`] //! defines _how_ to send bytes on the network, a [`NetworkBehaviour`] defines -//! _what_ bytes to send on the network. +//! _what_ bytes and to _whom_ to send on the network. //! //! To make this more concrete, let's take a look at a simple implementation of //! the [`NetworkBehaviour`] trait: the [`ping::Behaviour`](crate::ping::Behaviour). -//! As you might have guessed, similar to the good old `ping` network tool, +//! As you might have guessed, similar to the good old ICMP `ping` network tool, //! libp2p [`ping::Behaviour`](crate::ping::Behaviour) sends a ping to a peer and expects //! to receive a pong in turn. The [`ping::Behaviour`](crate::ping::Behaviour) does not care _how_ //! the ping and pong messages are sent on the network, whether they are sent via //! TCP, whether they are encrypted via [noise](crate::noise) or just in -//! [plaintext](crate::plaintext). It only cares about _what_ messages are sent -//! on the network. +//! [plaintext](crate::plaintext). It only cares about _what_ messages and to _whom_ to sent on the +//! network. //! //! The two traits [`Transport`] and [`NetworkBehaviour`] allow us to cleanly -//! separate _how_ to send bytes from _what_ bytes to send. +//! separate _how_ to send bytes from _what_ bytes and to _whom_ to send. //! //! With the above in mind, let's extend our example, creating a [`ping::Behaviour`](crate::ping::Behaviour) at the end: //! //! ```rust -//! use libp2p::swarm::NetworkBehaviour; -//! use libp2p::{identity, ping, PeerId}; +//! use libp2p::ping; +//! use tracing_subscriber::EnvFilter; //! 
use std::error::Error; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! let local_key = identity::Keypair::generate_ed25519(); -//! let local_peer_id = PeerId::from(local_key.public()); -//! println!("Local peer id: {local_peer_id:?}"); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); //! -//! let transport = libp2p::development_transport(local_key).await?; -//! -//! let behaviour = ping::Behaviour::default(); +//! let mut swarm = libp2p::SwarmBuilder::with_new_identity() +//! .with_async_std() +//! .with_tcp( +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, +//! )? +//! .with_behaviour(|_| ping::Behaviour::default())?; //! //! Ok(()) //! } @@ -163,33 +160,29 @@ //! //! ## Swarm //! -//! Now that we have a [`Transport`] and a [`NetworkBehaviour`], we need -//! something that connects the two, allowing both to make progress. This job is -//! carried out by a [`Swarm`]. Put simply, a [`Swarm`] drives both a -//! [`Transport`] and a [`NetworkBehaviour`] forward, passing commands from the -//! [`NetworkBehaviour`] to the [`Transport`] as well as events from the -//! [`Transport`] to the [`NetworkBehaviour`]. As you can see, after [`Swarm`] initialization, we -//! removed the print of the local [`PeerId`](crate::PeerId) because every time a [`Swarm`] is -//! created, it prints the local [`PeerId`](crate::PeerId) in the logs at the INFO level. In order -//! to continue to see the local [`PeerId`](crate::PeerId) you must initialize the logger -//! (In our example, `env_logger` is used) +//! Now that we have a [`Transport`] and a [`NetworkBehaviour`], we can build the [`Swarm`] +//! which connects the two, allowing both to make progress. Put simply, a [`Swarm`] drives both a +//! [`Transport`] and a [`NetworkBehaviour`] forward, passing commands from the [`NetworkBehaviour`] +//! 
to the [`Transport`] as well as events from the [`Transport`] to the [`NetworkBehaviour`]. //! //! ```rust -//! use libp2p::swarm::{NetworkBehaviour, SwarmBuilder}; -//! use libp2p::{identity, ping, PeerId}; +//! use libp2p::ping; //! use std::error::Error; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! env_logger::init(); -//! let local_key = identity::Keypair::generate_ed25519(); -//! let local_peer_id = PeerId::from(local_key.public()); -//! -//! let transport = libp2p::development_transport(local_key).await?; -//! -//! let behaviour = ping::Behaviour::default(); -//! -//! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build(); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); +//! +//! let mut swarm = libp2p::SwarmBuilder::with_new_identity() +//! .with_async_std() +//! .with_tcp( +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, +//! )? +//! .with_behaviour(|_| ping::Behaviour::default())? +//! .build(); //! //! Ok(()) //! } @@ -206,24 +199,24 @@ //! Thus, without any other behaviour in place, we would not be able to observe the pings. //! //! ```rust -//! use libp2p::swarm::{NetworkBehaviour, SwarmBuilder}; -//! use libp2p::{identity, ping, PeerId}; +//! use libp2p::ping; //! use std::error::Error; //! use std::time::Duration; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! use std::time::Duration; -//! let local_key = identity::Keypair::generate_ed25519(); -//! let local_peer_id = PeerId::from(local_key.public()); -//! println!("Local peer id: {local_peer_id:?}"); -//! -//! let transport = libp2p::development_transport(local_key).await?; -//! -//! let behaviour = ping::Behaviour::default(); -//! -//! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id) -//! 
.idle_connection_timeout(Duration::from_secs(30)) // Allows us to observe pings for 30 seconds. +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); +//! +//! let mut swarm = libp2p::SwarmBuilder::with_new_identity() +//! .with_async_std() +//! .with_tcp( +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, +//! )? +//! .with_behaviour(|_| ping::Behaviour::default())? +//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(30))) // Allows us to observe pings for 30 seconds. //! .build(); //! //! Ok(()) @@ -257,23 +250,24 @@ //! remote peer. //! //! ```rust -//! use libp2p::swarm::{NetworkBehaviour, SwarmBuilder}; -//! use libp2p::{identity, ping, Multiaddr, PeerId}; +//! use libp2p::{ping, Multiaddr}; //! use std::error::Error; //! use std::time::Duration; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! env_logger::init(); -//! let local_key = identity::Keypair::generate_ed25519(); -//! let local_peer_id = PeerId::from(local_key.public()); -//! -//! let transport = libp2p::development_transport(local_key).await?; -//! -//! let behaviour = ping::Behaviour::default(); -//! -//! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id) -//! .idle_connection_timeout(Duration::from_secs(30)) // Allows us to observe pings for 30 seconds. +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); +//! +//! let mut swarm = libp2p::SwarmBuilder::with_new_identity() +//! .with_async_std() +//! .with_tcp( +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, +//! )? +//! .with_behaviour(|_| ping::Behaviour::default())? +//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(30))) // Allows us to observe pings for 30 seconds. //! .build(); //! //! 
// Tell the swarm to listen on all interfaces and a random, OS-assigned @@ -300,23 +294,25 @@ //! //! ```no_run //! use futures::prelude::*; -//! use libp2p::swarm::{NetworkBehaviour, SwarmEvent, SwarmBuilder}; -//! use libp2p::{identity, ping, Multiaddr, PeerId}; +//! use libp2p::swarm::SwarmEvent; +//! use libp2p::{ping, Multiaddr}; //! use std::error::Error; //! use std::time::Duration; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! env_logger::init(); -//! let local_key = identity::Keypair::generate_ed25519(); -//! let local_peer_id = PeerId::from(local_key.public()); -//! -//! let transport = libp2p::development_transport(local_key).await?; -//! -//! let behaviour = ping::Behaviour::default(); -//! -//! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id) -//! .idle_connection_timeout(Duration::from_secs(30)) // Allows us to observe pings for 30 seconds. +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); +//! +//! let mut swarm = libp2p::SwarmBuilder::with_new_identity() +//! .with_async_std() +//! .with_tcp( +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, +//! )? +//! .with_behaviour(|_| ping::Behaviour::default())? +//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(30))) // Allows us to observe pings for 30 seconds. //! .build(); //! //! // Tell the swarm to listen on all interfaces and a random, OS-assigned @@ -383,4 +379,3 @@ //! [`Transport`]: crate::core::Transport //! [`PeerId`]: crate::core::PeerId //! [`Swarm`]: crate::swarm::Swarm -//! 
[`development_transport`]: crate::development_transport diff --git a/misc/allow-block-list/CHANGELOG.md b/misc/allow-block-list/CHANGELOG.md index 4ce2f66b2bfa..7778e9248861 100644 --- a/misc/allow-block-list/CHANGELOG.md +++ b/misc/allow-block-list/CHANGELOG.md @@ -1,4 +1,7 @@ -## 0.2.0 +## 0.3.0 + + +## 0.2.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/misc/allow-block-list/Cargo.toml b/misc/allow-block-list/Cargo.toml index 84125f0c0a75..c620e7f4a2bb 100644 --- a/misc/allow-block-list/Cargo.toml +++ b/misc/allow-block-list/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-allow-block-list" edition = "2021" rust-version = { workspace = true } description = "Allow/block list connection management for libp2p." -version = "0.2.0" +version = "0.3.0" license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] diff --git a/misc/allow-block-list/src/lib.rs b/misc/allow-block-list/src/lib.rs index 1950c47f28b9..c1d31433db1b 100644 --- a/misc/allow-block-list/src/lib.rs +++ b/misc/allow-block-list/src/lib.rs @@ -64,8 +64,8 @@ use libp2p_core::{Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ - dummy, CloseConnection, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, - PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + dummy, CloseConnection, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::collections::{HashSet, VecDeque}; use std::fmt; @@ -231,23 +231,7 @@ where Ok(dummy::ConnectionHandler) } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionClosed(_) => {} - FromSwarm::ConnectionEstablished(_) => {} - FromSwarm::AddressChange(_) => {} - FromSwarm::DialFailure(_) => {} - FromSwarm::ListenFailure(_) => {} - FromSwarm::NewListener(_) => {} - FromSwarm::NewListenAddr(_) => {} - FromSwarm::ExpiredListenAddr(_) => {} - FromSwarm::ListenerError(_) 
=> {} - FromSwarm::ListenerClosed(_) => {} - FromSwarm::NewExternalAddrCandidate(_) => {} - FromSwarm::ExternalAddrExpired(_) => {} - FromSwarm::ExternalAddrConfirmed(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} fn on_connection_handler_event( &mut self, @@ -261,7 +245,6 @@ where fn poll( &mut self, cx: &mut Context<'_>, - _: &mut impl PollParameters, ) -> Poll>> { if let Some(peer) = self.close_connections.pop_front() { return Poll::Ready(ToSwarm::CloseConnection { @@ -285,7 +268,7 @@ mod tests { async fn cannot_dial_blocked_peer() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; dialer.behaviour_mut().block_peer(*listener.local_peer_id()); @@ -299,7 +282,7 @@ mod tests { async fn can_dial_unblocked_peer() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; dialer.behaviour_mut().block_peer(*listener.local_peer_id()); dialer @@ -313,7 +296,7 @@ mod tests { async fn blocked_peer_cannot_dial_us() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; listener.behaviour_mut().block_peer(*dialer.local_peer_id()); dial(&mut dialer, &listener).unwrap(); @@ -335,7 +318,7 @@ mod tests { async fn connections_get_closed_upon_blocked() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; dialer.connect(&mut listener).await; dialer.behaviour_mut().block_peer(*listener.local_peer_id()); @@ 
-361,7 +344,7 @@ mod tests { async fn cannot_dial_peer_unless_allowed() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; let DialError::Denied { cause } = dial(&mut dialer, &listener).unwrap_err() else { panic!("unexpected dial error") @@ -376,7 +359,7 @@ mod tests { async fn cannot_dial_disallowed_peer() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; dialer.behaviour_mut().allow_peer(*listener.local_peer_id()); dialer @@ -393,7 +376,7 @@ mod tests { async fn not_allowed_peer_cannot_dial_us() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; dialer .dial( @@ -430,7 +413,7 @@ mod tests { async fn connections_get_closed_upon_disallow() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; dialer.behaviour_mut().allow_peer(*listener.local_peer_id()); listener.behaviour_mut().allow_peer(*dialer.local_peer_id()); diff --git a/misc/connection-limits/CHANGELOG.md b/misc/connection-limits/CHANGELOG.md index a8bd071e6fe6..4654281a83e6 100644 --- a/misc/connection-limits/CHANGELOG.md +++ b/misc/connection-limits/CHANGELOG.md @@ -1,3 +1,11 @@ +## 0.3.1 + +- Add function to mutate `ConnectionLimits`. + See [PR 4964](https://github.com/libp2p/rust-libp2p/pull/4964). + +## 0.3.0 + + ## 0.2.1 - Do not count a connection as established when it is denied by another sibling `NetworkBehaviour`. 
diff --git a/misc/connection-limits/Cargo.toml b/misc/connection-limits/Cargo.toml index 1ba551470a00..8ecb0005cb10 100644 --- a/misc/connection-limits/Cargo.toml +++ b/misc/connection-limits/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-connection-limits" edition = "2021" rust-version = { workspace = true } description = "Connection limits for libp2p." -version = "0.2.1" +version = "0.3.1" license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] diff --git a/misc/connection-limits/src/lib.rs b/misc/connection-limits/src/lib.rs index 880904f7c170..dbe68a8ad11e 100644 --- a/misc/connection-limits/src/lib.rs +++ b/misc/connection-limits/src/lib.rs @@ -22,8 +22,8 @@ use libp2p_core::{ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ behaviour::{ConnectionEstablished, DialFailure, ListenFailure}, - dummy, ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, - PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + dummy, ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::collections::{HashMap, HashSet}; use std::fmt; @@ -80,21 +80,22 @@ impl Behaviour { } } - fn check_limit( - &mut self, - limit: Option, - current: usize, - kind: Kind, - ) -> Result<(), ConnectionDenied> { - let limit = limit.unwrap_or(u32::MAX); - let current = current as u32; + /// Returns a mutable reference to [`ConnectionLimits`]. + /// > **Note**: A new limit will not be enforced against existing connections. 
+ pub fn limits_mut(&mut self) -> &mut ConnectionLimits { + &mut self.limits + } +} - if current >= limit { - return Err(ConnectionDenied::new(Exceeded { limit, kind })); - } +fn check_limit(limit: Option, current: usize, kind: Kind) -> Result<(), ConnectionDenied> { + let limit = limit.unwrap_or(u32::MAX); + let current = current as u32; - Ok(()) + if current >= limit { + return Err(ConnectionDenied::new(Exceeded { limit, kind })); } + + Ok(()) } /// A connection limit has been exceeded. @@ -210,7 +211,7 @@ impl NetworkBehaviour for Behaviour { _: &Multiaddr, _: &Multiaddr, ) -> Result<(), ConnectionDenied> { - self.check_limit( + check_limit( self.limits.max_pending_incoming, self.pending_inbound_connections.len(), Kind::PendingIncoming, @@ -230,12 +231,12 @@ impl NetworkBehaviour for Behaviour { ) -> Result, ConnectionDenied> { self.pending_inbound_connections.remove(&connection_id); - self.check_limit( + check_limit( self.limits.max_established_incoming, self.established_inbound_connections.len(), Kind::EstablishedIncoming, )?; - self.check_limit( + check_limit( self.limits.max_established_per_peer, self.established_per_peer .get(&peer) @@ -243,7 +244,7 @@ impl NetworkBehaviour for Behaviour { .unwrap_or(0), Kind::EstablishedPerPeer, )?; - self.check_limit( + check_limit( self.limits.max_established_total, self.established_inbound_connections.len() + self.established_outbound_connections.len(), @@ -260,7 +261,7 @@ impl NetworkBehaviour for Behaviour { _: &[Multiaddr], _: Endpoint, ) -> Result, ConnectionDenied> { - self.check_limit( + check_limit( self.limits.max_pending_outgoing, self.pending_outbound_connections.len(), Kind::PendingOutgoing, @@ -280,12 +281,12 @@ impl NetworkBehaviour for Behaviour { ) -> Result, ConnectionDenied> { self.pending_outbound_connections.remove(&connection_id); - self.check_limit( + check_limit( self.limits.max_established_outgoing, self.established_outbound_connections.len(), Kind::EstablishedOutgoing, )?; - self.check_limit( + 
check_limit( self.limits.max_established_per_peer, self.established_per_peer .get(&peer) @@ -293,7 +294,7 @@ impl NetworkBehaviour for Behaviour { .unwrap_or(0), Kind::EstablishedPerPeer, )?; - self.check_limit( + check_limit( self.limits.max_established_total, self.established_inbound_connections.len() + self.established_outbound_connections.len(), @@ -303,7 +304,7 @@ impl NetworkBehaviour for Behaviour { Ok(dummy::ConnectionHandler) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, @@ -340,18 +341,10 @@ impl NetworkBehaviour for Behaviour { FromSwarm::DialFailure(DialFailure { connection_id, .. }) => { self.pending_outbound_connections.remove(&connection_id); } - FromSwarm::AddressChange(_) => {} FromSwarm::ListenFailure(ListenFailure { connection_id, .. }) => { self.pending_inbound_connections.remove(&connection_id); } - FromSwarm::NewListener(_) => {} - FromSwarm::NewListenAddr(_) => {} - FromSwarm::ExpiredListenAddr(_) => {} - FromSwarm::ListenerError(_) => {} - FromSwarm::ListenerClosed(_) => {} - FromSwarm::NewExternalAddrCandidate(_) => {} - FromSwarm::ExternalAddrExpired(_) => {} - FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } @@ -364,11 +357,7 @@ impl NetworkBehaviour for Behaviour { void::unreachable(event) } - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { Poll::Pending } } @@ -377,7 +366,8 @@ impl NetworkBehaviour for Behaviour { mod tests { use super::*; use libp2p_swarm::{ - behaviour::toggle::Toggle, dial_opts::DialOpts, DialError, ListenError, Swarm, SwarmEvent, + behaviour::toggle::Toggle, dial_opts::DialOpts, dial_opts::PeerCondition, DialError, + ListenError, Swarm, SwarmEvent, }; use libp2p_swarm_test::SwarmExt; use quickcheck::*; @@ -401,6 +391,8 @@ mod tests { network .dial( DialOpts::peer_id(target) + // Dial 
always, even if already dialing or connected. + .condition(PeerCondition::Always) .addresses(vec![addr.clone()]) .build(), ) @@ -408,7 +400,12 @@ mod tests { } match network - .dial(DialOpts::peer_id(target).addresses(vec![addr]).build()) + .dial( + DialOpts::peer_id(target) + .condition(PeerCondition::Always) + .addresses(vec![addr]) + .build(), + ) .expect_err("Unexpected dialing success.") { DialError::Denied { cause } => { @@ -444,7 +441,7 @@ mod tests { }); async_std::task::block_on(async { - let (listen_addr, _) = swarm1.listen().await; + let (listen_addr, _) = swarm1.listen().with_memory_addr_external().await; for _ in 0..limit { swarm2.connect(&mut swarm1).await; @@ -579,7 +576,7 @@ mod tests { ))) } - fn on_swarm_event(&mut self, _event: FromSwarm) {} + fn on_swarm_event(&mut self, _event: FromSwarm) {} fn on_connection_handler_event( &mut self, @@ -592,8 +589,7 @@ mod tests { fn poll( &mut self, - _cx: &mut Context<'_>, - _params: &mut impl PollParameters, + _: &mut Context<'_>, ) -> Poll>> { Poll::Pending } diff --git a/misc/futures-bounded/CHANGELOG.md b/misc/futures-bounded/CHANGELOG.md index bd05a0f82618..72b0b4f457df 100644 --- a/misc/futures-bounded/CHANGELOG.md +++ b/misc/futures-bounded/CHANGELOG.md @@ -1,3 +1,23 @@ +## 0.2.3 + +- Introduce `FuturesTupleSet`, holding tuples of a `Future` together with an arbitrary piece of data. + See [PR 4841](https://github.com/libp2p/rust-libp2p/pull/4841). + +## 0.2.2 + +- Fix an issue where `{Futures,Stream}Map` returns `Poll::Pending` despite being ready after an item has been replaced as part of `try_push`. + See [PR 4865](https://github.com/libp2p/rust-libp2p/pull/4865). + +## 0.2.1 + +- Add `.len()` getter to `FuturesMap`, `FuturesSet`, `StreamMap` and `StreamSet`. + See [PR 4745](https://github.com/libp2p/rust-libp2p/pull/4745). + +## 0.2.0 + +- Add `StreamMap` type and remove `Future`-suffix from `PushError::ReplacedFuture` to reuse it for `StreamMap`. 
+ See [PR 4616](https://github.com/libp2p/rust-libp2p/pull/4616). + ## 0.1.0 Initial release. diff --git a/misc/futures-bounded/Cargo.toml b/misc/futures-bounded/Cargo.toml index 4d70779e282d..89288b6f70cc 100644 --- a/misc/futures-bounded/Cargo.toml +++ b/misc/futures-bounded/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "futures-bounded" -version = "0.1.0" +version = "0.2.3" edition = "2021" rust-version.workspace = true license = "MIT" @@ -13,11 +13,12 @@ publish = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -futures-util = { version = "0.3.28" } +futures-util = { version = "0.3.30" } futures-timer = "3.0.2" [dev-dependencies] -tokio = { version = "1.29.1", features = ["macros", "rt"] } +tokio = { version = "1.36.0", features = ["macros", "rt", "sync"] } +futures = "0.3.30" [lints] workspace = true diff --git a/misc/futures-bounded/src/map.rs b/misc/futures-bounded/src/futures_map.rs similarity index 66% rename from misc/futures-bounded/src/map.rs rename to misc/futures-bounded/src/futures_map.rs index cecf6070efe2..fba3543f67bb 100644 --- a/misc/futures-bounded/src/map.rs +++ b/misc/futures-bounded/src/futures_map.rs @@ -1,16 +1,16 @@ use std::future::Future; use std::hash::Hash; -use std::mem; use std::pin::Pin; use std::task::{Context, Poll, Waker}; use std::time::Duration; +use std::{future, mem}; use futures_timer::Delay; use futures_util::future::BoxFuture; use futures_util::stream::FuturesUnordered; use futures_util::{FutureExt, StreamExt}; -use crate::Timeout; +use crate::{PushError, Timeout}; /// Represents a map of [`Future`]s. 
/// @@ -23,15 +23,6 @@ pub struct FuturesMap { full_waker: Option, } -/// Error of a future pushing -#[derive(PartialEq, Debug)] -pub enum PushError { - /// The length of the set is equal to the capacity - BeyondCapacity(F), - /// The set already contains the given future's ID - ReplacedFuture(F), -} - impl FuturesMap { pub fn new(timeout: Duration, capacity: usize) -> Self { Self { @@ -47,6 +38,7 @@ impl FuturesMap { impl FuturesMap where ID: Clone + Hash + Eq + Send + Unpin + 'static, + O: 'static, { /// Push a future into the map. /// @@ -54,7 +46,7 @@ where /// If the length of the map is equal to the capacity, this method returns [PushError::BeyondCapacity], /// that contains the passed future. In that case, the future is not inserted to the map. /// If a future with the given `future_id` already exists, then the old future will be replaced by a new one. - /// In that case, the returned error [PushError::ReplacedFuture] contains the old future. + /// In that case, the returned error [PushError::Replaced] contains the old future. 
pub fn try_push(&mut self, future_id: ID, future: F) -> Result<(), PushError>> where F: Future + Send + 'static, @@ -67,32 +59,34 @@ where waker.wake(); } - match self.inner.iter_mut().find(|tagged| tagged.tag == future_id) { - None => { - self.inner.push(TaggedFuture { - tag: future_id, - inner: TimeoutFuture { - inner: future.boxed(), - timeout: Delay::new(self.timeout), - }, - }); - - Ok(()) - } - Some(existing) => { - let old_future = mem::replace( - &mut existing.inner, - TimeoutFuture { - inner: future.boxed(), - timeout: Delay::new(self.timeout), - }, - ); - - Err(PushError::ReplacedFuture(old_future.inner)) - } + let old = self.remove(future_id.clone()); + self.inner.push(TaggedFuture { + tag: future_id, + inner: TimeoutFuture { + inner: future.boxed(), + timeout: Delay::new(self.timeout), + cancelled: false, + }, + }); + match old { + None => Ok(()), + Some(old) => Err(PushError::Replaced(old)), } } + pub fn remove(&mut self, id: ID) -> Option> { + let tagged = self.inner.iter_mut().find(|s| s.tag == id)?; + + let inner = mem::replace(&mut tagged.inner.inner, future::pending().boxed()); + tagged.inner.cancelled = true; + + Some(inner) + } + + pub fn len(&self) -> usize { + self.inner.len() + } + pub fn is_empty(&self) -> bool { self.inner.is_empty() } @@ -109,15 +103,20 @@ where } pub fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll<(ID, Result)> { - let maybe_result = futures_util::ready!(self.inner.poll_next_unpin(cx)); + loop { + let maybe_result = futures_util::ready!(self.inner.poll_next_unpin(cx)); - match maybe_result { - None => { - self.empty_waker = Some(cx.waker().clone()); - Poll::Pending + match maybe_result { + None => { + self.empty_waker = Some(cx.waker().clone()); + return Poll::Pending; + } + Some((id, Ok(output))) => return Poll::Ready((id, Ok(output))), + Some((id, Err(TimeoutError::Timeout))) => { + return Poll::Ready((id, Err(Timeout::new(self.timeout)))) + } + Some((_, Err(TimeoutError::Cancelled))) => continue, } - Some((id, 
Ok(output))) => Poll::Ready((id, Ok(output))), - Some((id, Err(_timeout))) => Poll::Ready((id, Err(Timeout::new(self.timeout)))), } } } @@ -125,23 +124,34 @@ where struct TimeoutFuture { inner: F, timeout: Delay, + + cancelled: bool, } impl Future for TimeoutFuture where F: Future + Unpin, { - type Output = Result; + type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + if self.cancelled { + return Poll::Ready(Err(TimeoutError::Cancelled)); + } + if self.timeout.poll_unpin(cx).is_ready() { - return Poll::Ready(Err(())); + return Poll::Ready(Err(TimeoutError::Timeout)); } self.inner.poll_unpin(cx).map(Ok) } } +enum TimeoutError { + Timeout, + Cancelled, +} + struct TaggedFuture { tag: T, inner: F, @@ -163,6 +173,8 @@ where #[cfg(test)] mod tests { + use futures::channel::oneshot; + use futures_util::task::noop_waker_ref; use std::future::{pending, poll_fn, ready}; use std::pin::Pin; use std::time::Instant; @@ -187,7 +199,7 @@ mod tests { assert!(futures.try_push("ID", ready(())).is_ok()); matches!( futures.try_push("ID", ready(())), - Err(PushError::ReplacedFuture(_)) + Err(PushError::Replaced(_)) ); } @@ -202,6 +214,45 @@ mod tests { assert!(result.is_err()) } + #[test] + fn resources_of_removed_future_are_cleaned_up() { + let mut futures = FuturesMap::new(Duration::from_millis(100), 1); + + let _ = futures.try_push("ID", pending::<()>()); + futures.remove("ID"); + + let poll = futures.poll_unpin(&mut Context::from_waker(noop_waker_ref())); + assert!(poll.is_pending()); + + assert_eq!(futures.len(), 0); + } + + #[tokio::test] + async fn replaced_pending_future_is_polled() { + let mut streams = FuturesMap::new(Duration::from_millis(100), 3); + + let (_tx1, rx1) = oneshot::channel(); + let (tx2, rx2) = oneshot::channel(); + + let _ = streams.try_push("ID1", rx1); + let _ = streams.try_push("ID2", rx2); + + let _ = tx2.send(2); + let (id, res) = poll_fn(|cx| streams.poll_unpin(cx)).await; + assert_eq!(id, "ID2"); + 
assert_eq!(res.unwrap().unwrap(), 2); + + let (new_tx1, new_rx1) = oneshot::channel(); + let replaced = streams.try_push("ID1", new_rx1); + assert!(matches!(replaced.unwrap_err(), PushError::Replaced(_))); + + let _ = new_tx1.send(4); + let (id, res) = poll_fn(|cx| streams.poll_unpin(cx)).await; + + assert_eq!(id, "ID1"); + assert_eq!(res.unwrap().unwrap(), 4); + } + // Each future causes a delay, `Task` only has a capacity of 1, meaning they must be processed in sequence. // We stop after NUM_FUTURES tasks, meaning the overall execution must at least take DELAY * NUM_FUTURES. #[tokio::test] diff --git a/misc/futures-bounded/src/set.rs b/misc/futures-bounded/src/futures_set.rs similarity index 89% rename from misc/futures-bounded/src/set.rs rename to misc/futures-bounded/src/futures_set.rs index 96140d82f9a0..af7cedfcc85b 100644 --- a/misc/futures-bounded/src/set.rs +++ b/misc/futures-bounded/src/futures_set.rs @@ -23,7 +23,10 @@ impl FuturesSet { } } -impl FuturesSet { +impl FuturesSet +where + O: 'static, +{ /// Push a future into the list. /// /// This method adds the given future to the list. 
@@ -38,10 +41,14 @@ impl FuturesSet { match self.inner.try_push(self.id, future) { Ok(()) => Ok(()), Err(PushError::BeyondCapacity(w)) => Err(w), - Err(PushError::ReplacedFuture(_)) => unreachable!("we never reuse IDs"), + Err(PushError::Replaced(_)) => unreachable!("we never reuse IDs"), } } + pub fn len(&self) -> usize { + self.inner.len() + } + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } diff --git a/misc/futures-bounded/src/futures_tuple_set.rs b/misc/futures-bounded/src/futures_tuple_set.rs new file mode 100644 index 000000000000..e19b236aaf84 --- /dev/null +++ b/misc/futures-bounded/src/futures_tuple_set.rs @@ -0,0 +1,94 @@ +use std::collections::HashMap; +use std::future::Future; +use std::task::{ready, Context, Poll}; +use std::time::Duration; + +use futures_util::future::BoxFuture; + +use crate::{FuturesMap, PushError, Timeout}; + +/// Represents a list of tuples of a [Future] and an associated piece of data. +/// +/// Each future must finish within the specified time and the list never outgrows its capacity. +pub struct FuturesTupleSet { + id: u32, + inner: FuturesMap, + data: HashMap, +} + +impl FuturesTupleSet { + pub fn new(timeout: Duration, capacity: usize) -> Self { + Self { + id: 0, + inner: FuturesMap::new(timeout, capacity), + data: HashMap::new(), + } + } +} + +impl FuturesTupleSet +where + O: 'static, +{ + /// Push a future into the list. + /// + /// This method adds the given future to the list. + /// If the length of the list is equal to the capacity, this method returns an error that contains the passed future. + /// In that case, the future is not added to the set.
+ pub fn try_push(&mut self, future: F, data: D) -> Result<(), (BoxFuture, D)> + where + F: Future + Send + 'static, + { + self.id = self.id.wrapping_add(1); + + match self.inner.try_push(self.id, future) { + Ok(()) => {} + Err(PushError::BeyondCapacity(w)) => return Err((w, data)), + Err(PushError::Replaced(_)) => unreachable!("we never reuse IDs"), + } + self.data.insert(self.id, data); + + Ok(()) + } + + pub fn len(&self) -> usize { + self.inner.len() + } + + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + pub fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll<()> { + self.inner.poll_ready_unpin(cx) + } + + pub fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll<(Result, D)> { + let (id, res) = ready!(self.inner.poll_unpin(cx)); + let data = self.data.remove(&id).expect("must have data for future"); + + Poll::Ready((res, data)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use futures_util::future::poll_fn; + use futures_util::FutureExt; + use std::future::ready; + + #[test] + fn tracks_associated_data_of_future() { + let mut set = FuturesTupleSet::new(Duration::from_secs(10), 10); + + let _ = set.try_push(ready(1), 1); + let _ = set.try_push(ready(2), 2); + + let (res1, data1) = poll_fn(|cx| set.poll_unpin(cx)).now_or_never().unwrap(); + let (res2, data2) = poll_fn(|cx| set.poll_unpin(cx)).now_or_never().unwrap(); + + assert_eq!(res1.unwrap(), data1); + assert_eq!(res2.unwrap(), data2); + } +} diff --git a/misc/futures-bounded/src/lib.rs b/misc/futures-bounded/src/lib.rs index e7b461dc8229..da8483a595fc 100644 --- a/misc/futures-bounded/src/lib.rs +++ b/misc/futures-bounded/src/lib.rs @@ -1,8 +1,15 @@ -mod map; -mod set; +mod futures_map; +mod futures_set; +mod futures_tuple_set; +mod stream_map; +mod stream_set; + +pub use futures_map::FuturesMap; +pub use futures_set::FuturesSet; +pub use futures_tuple_set::FuturesTupleSet; +pub use stream_map::StreamMap; +pub use stream_set::StreamSet; -pub use map::{FuturesMap, 
PushError}; -pub use set::FuturesSet; use std::fmt; use std::fmt::Formatter; use std::time::Duration; @@ -25,4 +32,15 @@ impl fmt::Display for Timeout { } } +/// Error of a future pushing +#[derive(PartialEq, Debug)] +pub enum PushError { + /// The length of the set is equal to the capacity + BeyondCapacity(T), + /// The map already contained an item with this key. + /// + /// The old item is returned. + Replaced(T), +} + impl std::error::Error for Timeout {} diff --git a/misc/futures-bounded/src/stream_map.rs b/misc/futures-bounded/src/stream_map.rs new file mode 100644 index 000000000000..8464f432d027 --- /dev/null +++ b/misc/futures-bounded/src/stream_map.rs @@ -0,0 +1,362 @@ +use std::mem; +use std::pin::Pin; +use std::task::{Context, Poll, Waker}; +use std::time::Duration; + +use futures_timer::Delay; +use futures_util::stream::{BoxStream, SelectAll}; +use futures_util::{stream, FutureExt, Stream, StreamExt}; + +use crate::{PushError, Timeout}; + +/// Represents a map of [`Stream`]s. +/// +/// Each stream must finish within the specified time and the map never outgrows its capacity. +pub struct StreamMap { + timeout: Duration, + capacity: usize, + inner: SelectAll>>>, + empty_waker: Option, + full_waker: Option, +} + +impl StreamMap +where + ID: Clone + Unpin, +{ + pub fn new(timeout: Duration, capacity: usize) -> Self { + Self { + timeout, + capacity, + inner: Default::default(), + empty_waker: None, + full_waker: None, + } + } +} + +impl StreamMap +where + ID: Clone + PartialEq + Send + Unpin + 'static, + O: Send + 'static, +{ + /// Push a stream into the map. 
+ pub fn try_push(&mut self, id: ID, stream: F) -> Result<(), PushError>> + where + F: Stream + Send + 'static, + { + if self.inner.len() >= self.capacity { + return Err(PushError::BeyondCapacity(stream.boxed())); + } + + if let Some(waker) = self.empty_waker.take() { + waker.wake(); + } + + let old = self.remove(id.clone()); + self.inner.push(TaggedStream::new( + id, + TimeoutStream { + inner: stream.boxed(), + timeout: Delay::new(self.timeout), + }, + )); + + match old { + None => Ok(()), + Some(old) => Err(PushError::Replaced(old)), + } + } + + pub fn remove(&mut self, id: ID) -> Option> { + let tagged = self.inner.iter_mut().find(|s| s.key == id)?; + + let inner = mem::replace(&mut tagged.inner.inner, stream::pending().boxed()); + tagged.exhausted = true; // Setting this will emit `None` on the next poll and ensure `SelectAll` cleans up the resources. + + Some(inner) + } + + pub fn len(&self) -> usize { + self.inner.len() + } + + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + #[allow(unknown_lints, clippy::needless_pass_by_ref_mut)] // &mut Context is idiomatic. + pub fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll<()> { + if self.inner.len() < self.capacity { + return Poll::Ready(()); + } + + self.full_waker = Some(cx.waker().clone()); + + Poll::Pending + } + + pub fn poll_next_unpin( + &mut self, + cx: &mut Context<'_>, + ) -> Poll<(ID, Option>)> { + match futures_util::ready!(self.inner.poll_next_unpin(cx)) { + None => { + self.empty_waker = Some(cx.waker().clone()); + Poll::Pending + } + Some((id, Some(Ok(output)))) => Poll::Ready((id, Some(Ok(output)))), + Some((id, Some(Err(())))) => { + self.remove(id.clone()); // Remove stream, otherwise we keep reporting the timeout. 
+ + Poll::Ready((id, Some(Err(Timeout::new(self.timeout))))) + } + Some((id, None)) => Poll::Ready((id, None)), + } + } +} + +struct TimeoutStream { + inner: S, + timeout: Delay, +} + +impl Stream for TimeoutStream +where + F: Stream + Unpin, +{ + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.timeout.poll_unpin(cx).is_ready() { + return Poll::Ready(Some(Err(()))); + } + + self.inner.poll_next_unpin(cx).map(|a| a.map(Ok)) + } +} + +struct TaggedStream { + key: K, + inner: S, + + exhausted: bool, +} + +impl TaggedStream { + fn new(key: K, inner: S) -> Self { + Self { + key, + inner, + exhausted: false, + } + } +} + +impl Stream for TaggedStream +where + K: Clone + Unpin, + S: Stream + Unpin, +{ + type Item = (K, Option); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.exhausted { + return Poll::Ready(None); + } + + match futures_util::ready!(self.inner.poll_next_unpin(cx)) { + Some(item) => Poll::Ready(Some((self.key.clone(), Some(item)))), + None => { + self.exhausted = true; + + Poll::Ready(Some((self.key.clone(), None))) + } + } + } +} + +#[cfg(test)] +mod tests { + use futures::channel::mpsc; + use futures_util::stream::{once, pending}; + use futures_util::SinkExt; + use std::future::{poll_fn, ready, Future}; + use std::pin::Pin; + use std::time::Instant; + + use super::*; + + #[test] + fn cannot_push_more_than_capacity_tasks() { + let mut streams = StreamMap::new(Duration::from_secs(10), 1); + + assert!(streams.try_push("ID_1", once(ready(()))).is_ok()); + matches!( + streams.try_push("ID_2", once(ready(()))), + Err(PushError::BeyondCapacity(_)) + ); + } + + #[test] + fn cannot_push_the_same_id_few_times() { + let mut streams = StreamMap::new(Duration::from_secs(10), 5); + + assert!(streams.try_push("ID", once(ready(()))).is_ok()); + matches!( + streams.try_push("ID", once(ready(()))), + Err(PushError::Replaced(_)) + ); + } + + #[tokio::test] + async fn 
streams_timeout() { + let mut streams = StreamMap::new(Duration::from_millis(100), 1); + + let _ = streams.try_push("ID", pending::<()>()); + Delay::new(Duration::from_millis(150)).await; + let (_, result) = poll_fn(|cx| streams.poll_next_unpin(cx)).await; + + assert!(result.unwrap().is_err()) + } + + #[tokio::test] + async fn timed_out_stream_gets_removed() { + let mut streams = StreamMap::new(Duration::from_millis(100), 1); + + let _ = streams.try_push("ID", pending::<()>()); + Delay::new(Duration::from_millis(150)).await; + poll_fn(|cx| streams.poll_next_unpin(cx)).await; + + let poll = streams.poll_next_unpin(&mut Context::from_waker( + futures_util::task::noop_waker_ref(), + )); + assert!(poll.is_pending()) + } + + #[test] + fn removing_stream() { + let mut streams = StreamMap::new(Duration::from_millis(100), 1); + + let _ = streams.try_push("ID", stream::once(ready(()))); + + { + let cancelled_stream = streams.remove("ID"); + assert!(cancelled_stream.is_some()); + } + + let poll = streams.poll_next_unpin(&mut Context::from_waker( + futures_util::task::noop_waker_ref(), + )); + + assert!(poll.is_pending()); + assert_eq!( + streams.len(), + 0, + "resources of cancelled streams are cleaned up properly" + ); + } + + #[tokio::test] + async fn replaced_stream_is_still_registered() { + let mut streams = StreamMap::new(Duration::from_millis(100), 3); + + let (mut tx1, rx1) = mpsc::channel(5); + let (mut tx2, rx2) = mpsc::channel(5); + + let _ = streams.try_push("ID1", rx1); + let _ = streams.try_push("ID2", rx2); + + let _ = tx2.send(2).await; + let _ = tx1.send(1).await; + let _ = tx2.send(3).await; + let (id, res) = poll_fn(|cx| streams.poll_next_unpin(cx)).await; + assert_eq!(id, "ID1"); + assert_eq!(res.unwrap().unwrap(), 1); + let (id, res) = poll_fn(|cx| streams.poll_next_unpin(cx)).await; + assert_eq!(id, "ID2"); + assert_eq!(res.unwrap().unwrap(), 2); + let (id, res) = poll_fn(|cx| streams.poll_next_unpin(cx)).await; + assert_eq!(id, "ID2"); + 
assert_eq!(res.unwrap().unwrap(), 3); + + let (mut new_tx1, new_rx1) = mpsc::channel(5); + let replaced = streams.try_push("ID1", new_rx1); + assert!(matches!(replaced.unwrap_err(), PushError::Replaced(_))); + + let _ = new_tx1.send(4).await; + let (id, res) = poll_fn(|cx| streams.poll_next_unpin(cx)).await; + + assert_eq!(id, "ID1"); + assert_eq!(res.unwrap().unwrap(), 4); + } + + // Each stream emits 1 item with delay, `Task` only has a capacity of 1, meaning they must be processed in sequence. + // We stop after NUM_STREAMS tasks, meaning the overall execution must at least take DELAY * NUM_STREAMS. + #[tokio::test] + async fn backpressure() { + const DELAY: Duration = Duration::from_millis(100); + const NUM_STREAMS: u32 = 10; + + let start = Instant::now(); + Task::new(DELAY, NUM_STREAMS, 1).await; + let duration = start.elapsed(); + + assert!(duration >= DELAY * NUM_STREAMS); + } + + struct Task { + item_delay: Duration, + num_streams: usize, + num_processed: usize, + inner: StreamMap, + } + + impl Task { + fn new(item_delay: Duration, num_streams: u32, capacity: usize) -> Self { + Self { + item_delay, + num_streams: num_streams as usize, + num_processed: 0, + inner: StreamMap::new(Duration::from_secs(60), capacity), + } + } + } + + impl Future for Task { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + + while this.num_processed < this.num_streams { + match this.inner.poll_next_unpin(cx) { + Poll::Ready((_, Some(result))) => { + if result.is_err() { + panic!("Timeout is great than item delay") + } + + this.num_processed += 1; + continue; + } + Poll::Ready((_, None)) => { + continue; + } + _ => {} + } + + if let Poll::Ready(()) = this.inner.poll_ready_unpin(cx) { + // We push the constant ID to prove that user can use the same ID if the stream was finished + let maybe_future = this.inner.try_push(1u8, once(Delay::new(this.item_delay))); + assert!(maybe_future.is_ok(), "we polled for 
readiness"); + + continue; + } + + return Poll::Pending; + } + + Poll::Ready(()) + } + } +} diff --git a/misc/futures-bounded/src/stream_set.rs b/misc/futures-bounded/src/stream_set.rs new file mode 100644 index 000000000000..bb32835065f0 --- /dev/null +++ b/misc/futures-bounded/src/stream_set.rs @@ -0,0 +1,64 @@ +use futures_util::stream::BoxStream; +use futures_util::Stream; +use std::task::{ready, Context, Poll}; +use std::time::Duration; + +use crate::{PushError, StreamMap, Timeout}; + +/// Represents a set of [Stream]s. +/// +/// Each stream must finish within the specified time and the list never outgrows its capacity. +pub struct StreamSet { + id: u32, + inner: StreamMap, +} + +impl StreamSet { + pub fn new(timeout: Duration, capacity: usize) -> Self { + Self { + id: 0, + inner: StreamMap::new(timeout, capacity), + } + } +} + +impl StreamSet +where + O: Send + 'static, +{ + /// Push a stream into the list. + /// + /// This method adds the given stream to the list. + /// If the length of the list is equal to the capacity, this method returns an error that contains the passed stream. + /// In that case, the stream is not added to the set.
+ pub fn try_push(&mut self, stream: F) -> Result<(), BoxStream> + where + F: Stream + Send + 'static, + { + self.id = self.id.wrapping_add(1); + + match self.inner.try_push(self.id, stream) { + Ok(()) => Ok(()), + Err(PushError::BeyondCapacity(w)) => Err(w), + Err(PushError::Replaced(_)) => unreachable!("we never reuse IDs"), + } + } + + pub fn len(&self) -> usize { + self.inner.len() + } + + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + pub fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll<()> { + self.inner.poll_ready_unpin(cx) + } + + pub fn poll_next_unpin(&mut self, cx: &mut Context<'_>) -> Poll>> { + let (_, res) = ready!(self.inner.poll_next_unpin(cx)); + + Poll::Ready(res) + } +} diff --git a/misc/keygen/Cargo.toml b/misc/keygen/Cargo.toml index da3e4a143958..c7989fb47930 100644 --- a/misc/keygen/Cargo.toml +++ b/misc/keygen/Cargo.toml @@ -9,13 +9,16 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] publish = false +[package.metadata.release] +release = false + [dependencies] -clap = { version = "4.3.23", features = ["derive"] } +clap = { version = "4.4.16", features = ["derive"] } zeroize = "1" -serde = { version = "1.0.188", features = ["derive"] } -serde_json = "1.0.107" +serde = { version = "1.0.196", features = ["derive"] } +serde_json = "1.0.113" libp2p-core = { workspace = true } -base64 = "0.21.4" +base64 = "0.21.7" libp2p-identity = { workspace = true } [lints] diff --git a/misc/memory-connection-limits/CHANGELOG.md b/misc/memory-connection-limits/CHANGELOG.md index 951a5a3f138a..fc598872d505 100644 --- a/misc/memory-connection-limits/CHANGELOG.md +++ b/misc/memory-connection-limits/CHANGELOG.md @@ -1,3 +1,6 @@ +## 0.2.0 + + ## 0.1.0 - Initial release. 
diff --git a/misc/memory-connection-limits/Cargo.toml b/misc/memory-connection-limits/Cargo.toml index eb65d26ac660..ae6bb3863733 100644 --- a/misc/memory-connection-limits/Cargo.toml +++ b/misc/memory-connection-limits/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-memory-connection-limits" edition = "2021" rust-version = { workspace = true } description = "Memory usage based connection limits for libp2p." -version = "0.1.0" +version = "0.2.0" license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] @@ -14,8 +14,8 @@ memory-stats = { version = "1", features = ["always_use_statm"] } libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true, features = ["peerid"] } -log = "0.4" sysinfo = "0.29" +tracing = "0.1.37" void = "1" [dev-dependencies] diff --git a/misc/memory-connection-limits/src/lib.rs b/misc/memory-connection-limits/src/lib.rs index 36f0d1648d72..ac911654979f 100644 --- a/misc/memory-connection-limits/src/lib.rs +++ b/misc/memory-connection-limits/src/lib.rs @@ -21,8 +21,8 @@ use libp2p_core::{Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, PollParameters, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, }; use void::Void; @@ -124,12 +124,9 @@ impl Behaviour { return; } - let stats = match memory_stats::memory_stats() { - Some(stats) => stats, - None => { - log::warn!("Failed to retrieve process memory stats"); - return; - } + let Some(stats) = memory_stats::memory_stats() else { + tracing::warn!("Failed to retrieve process memory stats"); + return; }; self.last_refreshed = now; @@ -181,7 +178,7 @@ impl NetworkBehaviour for Behaviour { Ok(dummy::ConnectionHandler) } - fn on_swarm_event(&mut self, _: FromSwarm) {} + fn 
on_swarm_event(&mut self, _: FromSwarm) {} fn on_connection_handler_event( &mut self, @@ -192,11 +189,7 @@ impl NetworkBehaviour for Behaviour { void::unreachable(event) } - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { Poll::Pending } } diff --git a/misc/memory-connection-limits/tests/max_bytes.rs b/misc/memory-connection-limits/tests/max_bytes.rs index af86b0487855..7f89e2c7a9a1 100644 --- a/misc/memory-connection-limits/tests/max_bytes.rs +++ b/misc/memory-connection-limits/tests/max_bytes.rs @@ -61,6 +61,8 @@ fn max_bytes() { network .dial( DialOpts::peer_id(target) + // Always dial, even if connected or already dialing. + .condition(libp2p_swarm::dial_opts::PeerCondition::Always) .addresses(vec![addr.clone()]) .build(), ) @@ -70,7 +72,12 @@ fn max_bytes() { std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try to exceed it. 
match network - .dial(DialOpts::peer_id(target).addresses(vec![addr]).build()) + .dial( + DialOpts::peer_id(target) + .condition(libp2p_swarm::dial_opts::PeerCondition::Always) + .addresses(vec![addr]) + .build(), + ) .expect_err("Unexpected dialing success.") { DialError::Denied { cause } => { diff --git a/misc/memory-connection-limits/tests/max_percentage.rs b/misc/memory-connection-limits/tests/max_percentage.rs index ea3f20e6cbc5..daee20703eeb 100644 --- a/misc/memory-connection-limits/tests/max_percentage.rs +++ b/misc/memory-connection-limits/tests/max_percentage.rs @@ -27,7 +27,10 @@ use std::time::Duration; use sysinfo::{RefreshKind, SystemExt}; use util::*; -use libp2p_swarm::{dial_opts::DialOpts, DialError, Swarm}; +use libp2p_swarm::{ + dial_opts::{DialOpts, PeerCondition}, + DialError, Swarm, +}; use libp2p_swarm_test::SwarmExt; #[test] @@ -63,6 +66,8 @@ fn max_percentage() { network .dial( DialOpts::peer_id(target) + // Always dial, even if already dialing or connected. + .condition(PeerCondition::Always) .addresses(vec![addr.clone()]) .build(), ) @@ -72,7 +77,12 @@ fn max_percentage() { std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try to exceed it. 
match network - .dial(DialOpts::peer_id(target).addresses(vec![addr]).build()) + .dial( + DialOpts::peer_id(target) + .condition(PeerCondition::Always) + .addresses(vec![addr]) + .build(), + ) .expect_err("Unexpected dialing success.") { DialError::Denied { cause } => { diff --git a/misc/memory-connection-limits/tests/util.rs b/misc/memory-connection-limits/tests/util.rs index a2fd7c20fedb..f40ce3199290 100644 --- a/misc/memory-connection-limits/tests/util.rs +++ b/misc/memory-connection-limits/tests/util.rs @@ -23,8 +23,8 @@ use std::task::{Context, Poll}; use libp2p_core::{Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, PollParameters, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, }; use void::Void; @@ -107,7 +107,7 @@ impl NetworkBehaviour Ok(dummy::ConnectionHandler) } - fn on_swarm_event(&mut self, _: FromSwarm) {} + fn on_swarm_event(&mut self, _: FromSwarm) {} fn on_connection_handler_event( &mut self, @@ -118,11 +118,7 @@ impl NetworkBehaviour void::unreachable(event) } - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { Poll::Pending } } diff --git a/misc/metrics/CHANGELOG.md b/misc/metrics/CHANGELOG.md index 5c3d3c140ba7..67c304680db5 100644 --- a/misc/metrics/CHANGELOG.md +++ b/misc/metrics/CHANGELOG.md @@ -1,3 +1,14 @@ +## 0.14.1 + +- Add `BandwidthTransport`, wrapping an existing `Transport`, exposing Prometheus bandwidth metrics. + See also `SwarmBuilder::with_bandwidth_metrics`. + See [PR 4727](https://github.com/libp2p/rust-libp2p/pull/4727). + +## 0.14.0 + +- Add metrics for `SwarmEvent::{NewExternalAddrCandidate,ExternalAddrConfirmed,ExternalAddrExpired}`. + See [PR 4721](https://github.com/libp2p/rust-libp2p/pull/4721). 
+ ## 0.13.1 - Enable gossipsub related data-type fields when compiling for wasm. diff --git a/misc/metrics/Cargo.toml b/misc/metrics/Cargo.toml index 0cb9840955bc..38c4777d4a6e 100644 --- a/misc/metrics/Cargo.toml +++ b/misc/metrics/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-metrics" edition = "2021" rust-version = { workspace = true } description = "Metrics for libp2p" -version = "0.13.1" +version = "0.14.1" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -19,6 +19,7 @@ ping = ["libp2p-ping"] relay = ["libp2p-relay"] [dependencies] +futures = "0.3.30" instant = "0.1.12" libp2p-core = { workspace = true } libp2p-dcutr = { workspace = true, optional = true } @@ -29,8 +30,11 @@ libp2p-kad = { workspace = true, optional = true } libp2p-ping = { workspace = true, optional = true } libp2p-relay = { workspace = true, optional = true } libp2p-swarm = { workspace = true } -once_cell = "1.18.0" -prometheus-client = { version = "0.21.2"} +pin-project = "1.1.4" +prometheus-client = { workspace = true } + +[dev-dependencies] +libp2p-identity = { workspace = true, features = ["rand"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/misc/metrics/src/bandwidth.rs b/misc/metrics/src/bandwidth.rs new file mode 100644 index 000000000000..2792e00612cb --- /dev/null +++ b/misc/metrics/src/bandwidth.rs @@ -0,0 +1,312 @@ +use crate::protocol_stack; +use futures::{ + future::{MapOk, TryFutureExt}, + io::{IoSlice, IoSliceMut}, + prelude::*, + ready, +}; +use libp2p_core::{ + muxing::{StreamMuxer, StreamMuxerEvent}, + transport::{ListenerId, TransportError, TransportEvent}, + Multiaddr, +}; +use libp2p_identity::PeerId; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{counter::Counter, family::Family}, + registry::{Registry, Unit}, +}; +use std::{ + convert::TryFrom as _, + io, + pin::Pin, + task::{Context, Poll}, +}; + +#[derive(Debug, Clone)] +#[pin_project::pin_project] +pub struct Transport { + #[pin] + transport: T, + metrics: Family, +} + +impl Transport { + pub fn new(transport: T, registry: &mut Registry) -> Self { + let metrics = Family::::default(); + registry + .sub_registry_with_prefix("libp2p") + .register_with_unit( + "bandwidth", + "Bandwidth usage by direction and transport protocols", + Unit::Bytes, + metrics.clone(), + ); + + Transport { transport, metrics } + } +} + +#[derive(EncodeLabelSet, Hash, Clone, Eq, PartialEq, Debug)] +struct Labels { + protocols: String, + direction: Direction, +} + +#[derive(Clone, Hash, PartialEq, Eq, EncodeLabelValue, Debug)] +enum Direction { + Inbound, + Outbound, +} + +impl libp2p_core::Transport for Transport +where + T: libp2p_core::Transport, + M: StreamMuxer + Send + 'static, + M::Substream: Send + 'static, + M::Error: Send + Sync + 'static, +{ + type Output = (PeerId, Muxer); + type Error = T::Error; + type ListenerUpgrade = + MapOk (PeerId, Muxer) + Send>>; + type Dial = MapOk (PeerId, Muxer) + Send>>; + + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + self.transport.listen_on(id, 
addr) + } + + fn remove_listener(&mut self, id: ListenerId) -> bool { + self.transport.remove_listener(id) + } + + fn dial(&mut self, addr: Multiaddr) -> Result> { + let metrics = ConnectionMetrics::from_family_and_addr(&self.metrics, &addr); + Ok(self + .transport + .dial(addr.clone())? + .map_ok(Box::new(|(peer_id, stream_muxer)| { + (peer_id, Muxer::new(stream_muxer, metrics)) + }))) + } + + fn dial_as_listener( + &mut self, + addr: Multiaddr, + ) -> Result> { + let metrics = ConnectionMetrics::from_family_and_addr(&self.metrics, &addr); + Ok(self + .transport + .dial_as_listener(addr.clone())? + .map_ok(Box::new(|(peer_id, stream_muxer)| { + (peer_id, Muxer::new(stream_muxer, metrics)) + }))) + } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + self.transport.address_translation(server, observed) + } + + fn poll( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let this = self.project(); + match this.transport.poll(cx) { + Poll::Ready(TransportEvent::Incoming { + listener_id, + upgrade, + local_addr, + send_back_addr, + }) => { + let metrics = + ConnectionMetrics::from_family_and_addr(this.metrics, &send_back_addr); + Poll::Ready(TransportEvent::Incoming { + listener_id, + upgrade: upgrade.map_ok(Box::new(|(peer_id, stream_muxer)| { + (peer_id, Muxer::new(stream_muxer, metrics)) + })), + local_addr, + send_back_addr, + }) + } + Poll::Ready(other) => { + let mapped = other.map_upgrade(|_upgrade| unreachable!("case already matched")); + Poll::Ready(mapped) + } + Poll::Pending => Poll::Pending, + } + } +} + +#[derive(Clone, Debug)] +struct ConnectionMetrics { + outbound: Counter, + inbound: Counter, +} + +impl ConnectionMetrics { + fn from_family_and_addr(family: &Family, protocols: &Multiaddr) -> Self { + let protocols = protocol_stack::as_string(protocols); + + // Additional scope to make sure to drop the lock guard from `get_or_create`. 
+ let outbound = { + let m = family.get_or_create(&Labels { + protocols: protocols.clone(), + direction: Direction::Outbound, + }); + m.clone() + }; + // Additional scope to make sure to drop the lock guard from `get_or_create`. + let inbound = { + let m = family.get_or_create(&Labels { + protocols, + direction: Direction::Inbound, + }); + m.clone() + }; + ConnectionMetrics { outbound, inbound } + } +} + +/// Wraps around a [`StreamMuxer`] and counts the number of bytes that go through all the opened +/// streams. +#[derive(Clone)] +#[pin_project::pin_project] +pub struct Muxer { + #[pin] + inner: SMInner, + metrics: ConnectionMetrics, +} + +impl Muxer { + /// Creates a new [`Muxer`] wrapping around the provided stream muxer. + fn new(inner: SMInner, metrics: ConnectionMetrics) -> Self { + Self { inner, metrics } + } +} + +impl StreamMuxer for Muxer +where + SMInner: StreamMuxer, +{ + type Substream = InstrumentedStream; + type Error = SMInner::Error; + + fn poll( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let this = self.project(); + this.inner.poll(cx) + } + + fn poll_inbound( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let this = self.project(); + let inner = ready!(this.inner.poll_inbound(cx)?); + let logged = InstrumentedStream { + inner, + metrics: this.metrics.clone(), + }; + Poll::Ready(Ok(logged)) + } + + fn poll_outbound( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let this = self.project(); + let inner = ready!(this.inner.poll_outbound(cx)?); + let logged = InstrumentedStream { + inner, + metrics: this.metrics.clone(), + }; + Poll::Ready(Ok(logged)) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + this.inner.poll_close(cx) + } +} + +/// Wraps around an [`AsyncRead`] + [`AsyncWrite`] and logs the bandwidth that goes through it. 
+#[pin_project::pin_project] +pub struct InstrumentedStream { + #[pin] + inner: SMInner, + metrics: ConnectionMetrics, +} + +impl AsyncRead for InstrumentedStream { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + let this = self.project(); + let num_bytes = ready!(this.inner.poll_read(cx, buf))?; + this.metrics + .inbound + .inc_by(u64::try_from(num_bytes).unwrap_or(u64::max_value())); + Poll::Ready(Ok(num_bytes)) + } + + fn poll_read_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &mut [IoSliceMut<'_>], + ) -> Poll> { + let this = self.project(); + let num_bytes = ready!(this.inner.poll_read_vectored(cx, bufs))?; + this.metrics + .inbound + .inc_by(u64::try_from(num_bytes).unwrap_or(u64::max_value())); + Poll::Ready(Ok(num_bytes)) + } +} + +impl AsyncWrite for InstrumentedStream { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + let this = self.project(); + let num_bytes = ready!(this.inner.poll_write(cx, buf))?; + this.metrics + .outbound + .inc_by(u64::try_from(num_bytes).unwrap_or(u64::max_value())); + Poll::Ready(Ok(num_bytes)) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[IoSlice<'_>], + ) -> Poll> { + let this = self.project(); + let num_bytes = ready!(this.inner.poll_write_vectored(cx, bufs))?; + this.metrics + .outbound + .inc_by(u64::try_from(num_bytes).unwrap_or(u64::max_value())); + Poll::Ready(Ok(num_bytes)) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + this.inner.poll_flush(cx) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + this.inner.poll_close(cx) + } +} diff --git a/misc/metrics/src/dcutr.rs b/misc/metrics/src/dcutr.rs index 18ee8a14d1e2..3e60dca2cab5 100644 --- a/misc/metrics/src/dcutr.rs +++ b/misc/metrics/src/dcutr.rs @@ -49,8 +49,6 @@ struct EventLabels { 
#[derive(Debug, Clone, Hash, PartialEq, Eq, EncodeLabelValue)] enum EventType { - InitiateDirectConnectionUpgrade, - RemoteInitiatedDirectConnectionUpgrade, DirectConnectionUpgradeSucceeded, DirectConnectionUpgradeFailed, } @@ -58,20 +56,13 @@ enum EventType { impl From<&libp2p_dcutr::Event> for EventType { fn from(event: &libp2p_dcutr::Event) -> Self { match event { - libp2p_dcutr::Event::InitiatedDirectConnectionUpgrade { + libp2p_dcutr::Event { remote_peer_id: _, - local_relayed_addr: _, - } => EventType::InitiateDirectConnectionUpgrade, - libp2p_dcutr::Event::RemoteInitiatedDirectConnectionUpgrade { + result: Ok(_), + } => EventType::DirectConnectionUpgradeSucceeded, + libp2p_dcutr::Event { remote_peer_id: _, - remote_relayed_addr: _, - } => EventType::RemoteInitiatedDirectConnectionUpgrade, - libp2p_dcutr::Event::DirectConnectionUpgradeSucceeded { remote_peer_id: _ } => { - EventType::DirectConnectionUpgradeSucceeded - } - libp2p_dcutr::Event::DirectConnectionUpgradeFailed { - remote_peer_id: _, - error: _, + result: Err(_), } => EventType::DirectConnectionUpgradeFailed, } } diff --git a/misc/metrics/src/identify.rs b/misc/metrics/src/identify.rs index 4e320e7b0a05..b1d4e9f0c899 100644 --- a/misc/metrics/src/identify.rs +++ b/misc/metrics/src/identify.rs @@ -21,45 +21,15 @@ use crate::protocol_stack; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; -use once_cell::sync::Lazy; use prometheus_client::collector::Collector; -use prometheus_client::encoding::EncodeLabelSet; +use prometheus_client::encoding::{DescriptorEncoder, EncodeLabelSet, EncodeMetric}; use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::ConstFamily; use prometheus_client::metrics::gauge::ConstGauge; -use prometheus_client::registry::{Descriptor, LocalMetric, Registry}; -use prometheus_client::MaybeOwned; -use std::borrow::Cow; +use prometheus_client::metrics::MetricType; +use prometheus_client::registry::Registry; use 
std::collections::HashMap; use std::sync::{Arc, Mutex}; -static PROTOCOLS_DESCRIPTOR: Lazy = Lazy::new(|| { - Descriptor::new( - "remote_protocols", - "Number of connected nodes supporting a specific protocol, with \"unrecognized\" for each peer supporting one or more unrecognized protocols", - None, - None, - vec![], - ) -}); -static LISTEN_ADDRESSES_DESCRIPTOR: Lazy = Lazy::new(|| { - Descriptor::new( - "remote_listen_addresses", - "Number of connected nodes advertising a specific listen address", - None, - None, - vec![], - ) -}); -static OBSERVED_ADDRESSES_DESCRIPTOR: Lazy = Lazy::new(|| { - Descriptor::new( - "local_observed_addresses", - "Number of connected nodes observing the local node at a specific address", - None, - None, - vec![], - ) -}); const ALLOWED_PROTOCOLS: &[StreamProtocol] = &[ #[cfg(feature = "dcutr")] libp2p_dcutr::PROTOCOL_NAME, @@ -153,8 +123,8 @@ impl super::Recorder for Metrics { } } -impl super::Recorder> for Metrics { - fn record(&self, event: &libp2p_swarm::SwarmEvent) { +impl super::Recorder> for Metrics { + fn record(&self, event: &libp2p_swarm::SwarmEvent) { if let libp2p_swarm::SwarmEvent::ConnectionClosed { peer_id, num_established, @@ -187,10 +157,7 @@ impl Peers { } impl Collector for Peers { - fn collect<'a>( - &'a self, - ) -> Box, MaybeOwned<'a, Box>)> + 'a> - { + fn encode(&self, mut encoder: DescriptorEncoder) -> Result<(), std::fmt::Error> { let mut count_by_protocols: HashMap = Default::default(); let mut count_by_listen_addresses: HashMap = Default::default(); let mut count_by_observed_addresses: HashMap = Default::default(); @@ -240,40 +207,49 @@ impl Collector for Peers { } } - let count_by_protocols: Box = - Box::new(ConstFamily::new(count_by_protocols.into_iter().map( - |(protocol, count)| ([("protocol", protocol)], ConstGauge::new(count)), - ))); + { + let mut family_encoder = encoder.encode_descriptor( + "remote_protocols", + "Number of connected nodes supporting a specific protocol, with \"unrecognized\" for each 
peer supporting one or more unrecognized protocols", + None, + MetricType::Gauge, + )?; + for (protocol, count) in count_by_protocols.into_iter() { + let labels = [("protocol", protocol)]; + let metric_encoder = family_encoder.encode_family(&labels)?; + let metric = ConstGauge::new(count); + metric.encode(metric_encoder)?; + } + } - let count_by_listen_addresses: Box = - Box::new(ConstFamily::new(count_by_listen_addresses.into_iter().map( - |(protocol, count)| ([("listen_address", protocol)], ConstGauge::new(count)), - ))); + { + let mut family_encoder = encoder.encode_descriptor( + "remote_listen_addresses", + "Number of connected nodes advertising a specific listen address", + None, + MetricType::Gauge, + )?; + for (protocol, count) in count_by_listen_addresses.into_iter() { + let labels = [("listen_address", protocol)]; + let metric_encoder = family_encoder.encode_family(&labels)?; + ConstGauge::new(count).encode(metric_encoder)?; + } + } - let count_by_observed_addresses: Box = Box::new(ConstFamily::new( - count_by_observed_addresses - .into_iter() - .map(|(protocol, count)| { - ([("observed_address", protocol)], ConstGauge::new(count)) - }), - )); + { + let mut family_encoder = encoder.encode_descriptor( + "local_observed_addresses", + "Number of connected nodes observing the local node at a specific address", + None, + MetricType::Gauge, + )?; + for (protocol, count) in count_by_observed_addresses.into_iter() { + let labels = [("observed_address", protocol)]; + let metric_encoder = family_encoder.encode_family(&labels)?; + ConstGauge::new(count).encode(metric_encoder)?; + } + } - Box::new( - [ - ( - Cow::Borrowed(&*PROTOCOLS_DESCRIPTOR), - MaybeOwned::Owned(count_by_protocols), - ), - ( - Cow::Borrowed(&*LISTEN_ADDRESSES_DESCRIPTOR), - MaybeOwned::Owned(count_by_listen_addresses), - ), - ( - Cow::Borrowed(&*OBSERVED_ADDRESSES_DESCRIPTOR), - MaybeOwned::Owned(count_by_observed_addresses), - ), - ] - .into_iter(), - ) + Ok(()) } } diff --git 
a/misc/metrics/src/lib.rs b/misc/metrics/src/lib.rs index 2132dd5d7fb3..74fd15e2181e 100644 --- a/misc/metrics/src/lib.rs +++ b/misc/metrics/src/lib.rs @@ -27,6 +27,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +mod bandwidth; #[cfg(feature = "dcutr")] mod dcutr; #[cfg(feature = "gossipsub")] @@ -42,7 +43,8 @@ mod protocol_stack; mod relay; mod swarm; -use prometheus_client::registry::Registry; +pub use bandwidth::Transport as BandwidthTransport; +pub use prometheus_client::registry::Registry; /// Set of Swarm and protocol metrics derived from emitted events. pub struct Metrics { @@ -138,8 +140,8 @@ impl Recorder for Metrics { } } -impl Recorder> for Metrics { - fn record(&self, event: &libp2p_swarm::SwarmEvent) { +impl Recorder> for Metrics { + fn record(&self, event: &libp2p_swarm::SwarmEvent) { self.swarm.record(event); #[cfg(feature = "identify")] diff --git a/misc/metrics/src/relay.rs b/misc/metrics/src/relay.rs index 9ba692721e9b..607daf3f1e12 100644 --- a/misc/metrics/src/relay.rs +++ b/misc/metrics/src/relay.rs @@ -66,20 +66,25 @@ impl From<&libp2p_relay::Event> for EventType { fn from(event: &libp2p_relay::Event) -> Self { match event { libp2p_relay::Event::ReservationReqAccepted { .. } => EventType::ReservationReqAccepted, + #[allow(deprecated)] libp2p_relay::Event::ReservationReqAcceptFailed { .. } => { EventType::ReservationReqAcceptFailed } libp2p_relay::Event::ReservationReqDenied { .. } => EventType::ReservationReqDenied, + #[allow(deprecated)] libp2p_relay::Event::ReservationReqDenyFailed { .. } => { EventType::ReservationReqDenyFailed } libp2p_relay::Event::ReservationTimedOut { .. } => EventType::ReservationTimedOut, libp2p_relay::Event::CircuitReqDenied { .. } => EventType::CircuitReqDenied, + #[allow(deprecated)] libp2p_relay::Event::CircuitReqOutboundConnectFailed { .. } => { EventType::CircuitReqOutboundConnectFailed } + #[allow(deprecated)] libp2p_relay::Event::CircuitReqDenyFailed { .. 
} => EventType::CircuitReqDenyFailed, libp2p_relay::Event::CircuitReqAccepted { .. } => EventType::CircuitReqAccepted, + #[allow(deprecated)] libp2p_relay::Event::CircuitReqAcceptFailed { .. } => EventType::CircuitReqAcceptFailed, libp2p_relay::Event::CircuitClosed { .. } => EventType::CircuitClosed, } diff --git a/misc/metrics/src/swarm.rs b/misc/metrics/src/swarm.rs index 8837457d36aa..ad83401f316a 100644 --- a/misc/metrics/src/swarm.rs +++ b/misc/metrics/src/swarm.rs @@ -23,7 +23,7 @@ use std::sync::{Arc, Mutex}; use crate::protocol_stack; use instant::Instant; -use libp2p_swarm::ConnectionId; +use libp2p_swarm::{ConnectionId, DialError, SwarmEvent}; use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; use prometheus_client::metrics::counter::Counter; use prometheus_client::metrics::family::Family; @@ -41,6 +41,10 @@ pub(crate) struct Metrics { new_listen_addr: Family, expired_listen_addr: Family, + external_addr_candidates: Family, + external_addr_confirmed: Family, + external_addr_expired: Family, + listener_closed: Family, listener_error: Counter, @@ -82,6 +86,27 @@ impl Metrics { expired_listen_addr.clone(), ); + let external_addr_candidates = Family::default(); + sub_registry.register( + "external_addr_candidates", + "Number of new external address candidates", + external_addr_candidates.clone(), + ); + + let external_addr_confirmed = Family::default(); + sub_registry.register( + "external_addr_confirmed", + "Number of confirmed external addresses", + external_addr_confirmed.clone(), + ); + + let external_addr_expired = Family::default(); + sub_registry.register( + "external_addr_expired", + "Number of expired external addresses", + external_addr_expired.clone(), + ); + let listener_closed = Family::default(); sub_registry.register( "listener_closed", @@ -146,6 +171,9 @@ impl Metrics { connections_established, new_listen_addr, expired_listen_addr, + external_addr_candidates, + external_addr_confirmed, + external_addr_expired, 
listener_closed, listener_error, dial_attempt, @@ -157,11 +185,11 @@ impl Metrics { } } -impl super::Recorder> for Metrics { - fn record(&self, event: &libp2p_swarm::SwarmEvent) { +impl super::Recorder> for Metrics { + fn record(&self, event: &SwarmEvent) { match event { - libp2p_swarm::SwarmEvent::Behaviour(_) => {} - libp2p_swarm::SwarmEvent::ConnectionEstablished { + SwarmEvent::Behaviour(_) => {} + SwarmEvent::ConnectionEstablished { endpoint, established_in: time_taken, connection_id, @@ -180,7 +208,7 @@ impl super::Recorder super::Recorder { + SwarmEvent::IncomingConnection { send_back_addr, .. } => { self.connections_incoming .get_or_create(&AddressLabels { protocols: protocol_stack::as_string(send_back_addr), }) .inc(); } - libp2p_swarm::SwarmEvent::IncomingConnectionError { + SwarmEvent::IncomingConnectionError { error, send_back_addr, .. @@ -222,7 +250,7 @@ impl super::Recorder { + SwarmEvent::OutgoingConnectionError { error, peer_id, .. } => { let peer = match peer_id { Some(_) => PeerStatus::Known, None => PeerStatus::Unknown, @@ -235,7 +263,7 @@ impl super::Recorder { + DialError::Transport(errors) => { for (_multiaddr, error) in errors { match error { libp2p_core::transport::TransportError::MultiaddrNotSupported( @@ -249,39 +277,31 @@ impl super::Recorder { - record(OutgoingConnectionError::LocalPeerId) - } - libp2p_swarm::DialError::NoAddresses => { - record(OutgoingConnectionError::NoAddresses) - } - libp2p_swarm::DialError::DialPeerConditionFalse(_) => { + DialError::LocalPeerId { .. } => record(OutgoingConnectionError::LocalPeerId), + DialError::NoAddresses => record(OutgoingConnectionError::NoAddresses), + DialError::DialPeerConditionFalse(_) => { record(OutgoingConnectionError::DialPeerConditionFalse) } - libp2p_swarm::DialError::Aborted => record(OutgoingConnectionError::Aborted), - libp2p_swarm::DialError::WrongPeerId { .. } => { - record(OutgoingConnectionError::WrongPeerId) - } - libp2p_swarm::DialError::Denied { .. 
} => { - record(OutgoingConnectionError::Denied) - } + DialError::Aborted => record(OutgoingConnectionError::Aborted), + DialError::WrongPeerId { .. } => record(OutgoingConnectionError::WrongPeerId), + DialError::Denied { .. } => record(OutgoingConnectionError::Denied), }; } - libp2p_swarm::SwarmEvent::NewListenAddr { address, .. } => { + SwarmEvent::NewListenAddr { address, .. } => { self.new_listen_addr .get_or_create(&AddressLabels { protocols: protocol_stack::as_string(address), }) .inc(); } - libp2p_swarm::SwarmEvent::ExpiredListenAddr { address, .. } => { + SwarmEvent::ExpiredListenAddr { address, .. } => { self.expired_listen_addr .get_or_create(&AddressLabels { protocols: protocol_stack::as_string(address), }) .inc(); } - libp2p_swarm::SwarmEvent::ListenerClosed { addresses, .. } => { + SwarmEvent::ListenerClosed { addresses, .. } => { for address in addresses { self.listener_closed .get_or_create(&AddressLabels { @@ -290,12 +310,34 @@ impl super::Recorder { + SwarmEvent::ListenerError { .. } => { self.listener_error.inc(); } - libp2p_swarm::SwarmEvent::Dialing { .. } => { + SwarmEvent::Dialing { .. 
} => { self.dial_attempt.inc(); } + SwarmEvent::NewExternalAddrCandidate { address } => { + self.external_addr_candidates + .get_or_create(&AddressLabels { + protocols: protocol_stack::as_string(address), + }) + .inc(); + } + SwarmEvent::ExternalAddrConfirmed { address } => { + self.external_addr_confirmed + .get_or_create(&AddressLabels { + protocols: protocol_stack::as_string(address), + }) + .inc(); + } + SwarmEvent::ExternalAddrExpired { address } => { + self.external_addr_expired + .get_or_create(&AddressLabels { + protocols: protocol_stack::as_string(address), + }) + .inc(); + } + _ => {} } } } @@ -317,15 +359,13 @@ struct ConnectionClosedLabels { enum ConnectionError { Io, KeepAliveTimeout, - Handler, } -impl From<&libp2p_swarm::ConnectionError> for ConnectionError { - fn from(value: &libp2p_swarm::ConnectionError) -> Self { +impl From<&libp2p_swarm::ConnectionError> for ConnectionError { + fn from(value: &libp2p_swarm::ConnectionError) -> Self { match value { libp2p_swarm::ConnectionError::IO(_) => ConnectionError::Io, libp2p_swarm::ConnectionError::KeepAliveTimeout => ConnectionError::KeepAliveTimeout, - libp2p_swarm::ConnectionError::Handler(_) => ConnectionError::Handler, } } } diff --git a/misc/multistream-select/Cargo.toml b/misc/multistream-select/Cargo.toml index 6bd072070e79..b24562432524 100644 --- a/misc/multistream-select/Cargo.toml +++ b/misc/multistream-select/Cargo.toml @@ -13,18 +13,18 @@ categories = ["network-programming", "asynchronous"] [dependencies] bytes = "1" futures = "0.3" -log = "0.4" -pin-project = "1.1.3" -smallvec = "1.11.1" -unsigned-varint = "0.7" +tracing = "0.1.37" +pin-project = "1.1.4" +smallvec = "1.12.0" +unsigned-varint = { workspace = true } [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } -env_logger = "0.10" futures_ringbuf = "0.4.0" quickcheck = { workspace = true } rand = "0.8" rw-stream-sink = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # 
Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/misc/multistream-select/src/dialer_select.rs b/misc/multistream-select/src/dialer_select.rs index af9f79d876a0..83bb4909041d 100644 --- a/misc/multistream-select/src/dialer_select.rs +++ b/misc/multistream-select/src/dialer_select.rs @@ -131,7 +131,7 @@ where if let Err(err) = Pin::new(&mut io).start_send(Message::Protocol(p.clone())) { return Poll::Ready(Err(From::from(err))); } - log::debug!("Dialer: Proposed protocol: {}", p); + tracing::debug!(protocol=%p, "Dialer: Proposed protocol"); if this.protocols.peek().is_some() { *this.state = State::FlushProtocol { io, protocol } @@ -143,7 +143,7 @@ where // the dialer supports for this negotiation. Notably, // the dialer expects a regular `V1` response. Version::V1Lazy => { - log::debug!("Dialer: Expecting proposed protocol: {}", p); + tracing::debug!(protocol=%p, "Dialer: Expecting proposed protocol"); let hl = HeaderLine::from(Version::V1Lazy); let io = Negotiated::expecting(io.into_reader(), p, Some(hl)); return Poll::Ready(Ok((protocol, io))); @@ -180,14 +180,14 @@ where *this.state = State::AwaitProtocol { io, protocol }; } Message::Protocol(ref p) if p.as_ref() == protocol.as_ref() => { - log::debug!("Dialer: Received confirmation for protocol: {}", p); + tracing::debug!(protocol=%p, "Dialer: Received confirmation for protocol"); let io = Negotiated::completed(io.into_inner()); return Poll::Ready(Ok((protocol, io))); } Message::NotAvailable => { - log::debug!( - "Dialer: Received rejection of protocol: {}", - protocol.as_ref() + tracing::debug!( + protocol=%protocol.as_ref(), + "Dialer: Received rejection of protocol" ); let protocol = this.protocols.next().ok_or(NegotiationError::Failed)?; *this.state = State::SendProtocol { io, protocol } @@ -201,3 +201,204 @@ where } } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::listener_select_proto; + use 
async_std::future::timeout; + use async_std::net::{TcpListener, TcpStream}; + use quickcheck::{Arbitrary, Gen, GenRange}; + use std::time::Duration; + use tracing::metadata::LevelFilter; + use tracing_subscriber::EnvFilter; + + #[test] + fn select_proto_basic() { + async fn run(version: Version) { + let (client_connection, server_connection) = futures_ringbuf::Endpoint::pair(100, 100); + + let server = async_std::task::spawn(async move { + let protos = vec!["/proto1", "/proto2"]; + let (proto, mut io) = listener_select_proto(server_connection, protos) + .await + .unwrap(); + assert_eq!(proto, "/proto2"); + + let mut out = vec![0; 32]; + let n = io.read(&mut out).await.unwrap(); + out.truncate(n); + assert_eq!(out, b"ping"); + + io.write_all(b"pong").await.unwrap(); + io.flush().await.unwrap(); + }); + + let client = async_std::task::spawn(async move { + let protos = vec!["/proto3", "/proto2"]; + let (proto, mut io) = dialer_select_proto(client_connection, protos, version) + .await + .unwrap(); + assert_eq!(proto, "/proto2"); + + io.write_all(b"ping").await.unwrap(); + io.flush().await.unwrap(); + + let mut out = vec![0; 32]; + let n = io.read(&mut out).await.unwrap(); + out.truncate(n); + assert_eq!(out, b"pong"); + }); + + server.await; + client.await; + } + + async_std::task::block_on(run(Version::V1)); + async_std::task::block_on(run(Version::V1Lazy)); + } + + /// Tests the expected behaviour of failed negotiations. 
+ #[test] + fn negotiation_failed() { + fn prop( + version: Version, + DialerProtos(dial_protos): DialerProtos, + ListenerProtos(listen_protos): ListenerProtos, + DialPayload(dial_payload): DialPayload, + ) { + let _ = tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::builder() + .with_default_directive(LevelFilter::DEBUG.into()) + .from_env_lossy(), + ) + .try_init(); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("0.0.0.0:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + + let server = async_std::task::spawn(async move { + let server_connection = listener.accept().await.unwrap().0; + + let io = match timeout( + Duration::from_secs(2), + listener_select_proto(server_connection, listen_protos), + ) + .await + .unwrap() + { + Ok((_, io)) => io, + Err(NegotiationError::Failed) => return, + Err(NegotiationError::ProtocolError(e)) => { + panic!("Unexpected protocol error {e}") + } + }; + match io.complete().await { + Err(NegotiationError::Failed) => {} + _ => panic!(), + } + }); + + let client = async_std::task::spawn(async move { + let client_connection = TcpStream::connect(addr).await.unwrap(); + + let mut io = match timeout( + Duration::from_secs(2), + dialer_select_proto(client_connection, dial_protos, version), + ) + .await + .unwrap() + { + Err(NegotiationError::Failed) => return, + Ok((_, io)) => io, + Err(_) => panic!(), + }; + // The dialer may write a payload that is even sent before it + // got confirmation of the last proposed protocol, when `V1Lazy` + // is used. 
+ + tracing::info!("Writing early data"); + + io.write_all(&dial_payload).await.unwrap(); + match io.complete().await { + Err(NegotiationError::Failed) => {} + _ => panic!(), + } + }); + + server.await; + client.await; + + tracing::info!("---------------------------------------") + }); + } + + quickcheck::QuickCheck::new() + .tests(1000) + .quickcheck(prop as fn(_, _, _, _)); + } + + #[async_std::test] + async fn v1_lazy_do_not_wait_for_negotiation_on_poll_close() { + let (client_connection, _server_connection) = + futures_ringbuf::Endpoint::pair(1024 * 1024, 1); + + let client = async_std::task::spawn(async move { + // Single protocol to allow for lazy (or optimistic) protocol negotiation. + let protos = vec!["/proto1"]; + let (proto, mut io) = dialer_select_proto(client_connection, protos, Version::V1Lazy) + .await + .unwrap(); + assert_eq!(proto, "/proto1"); + + // client can close the connection even though protocol negotiation is not yet done, i.e. + // `_server_connection` had been untouched. + io.close().await.unwrap(); + }); + + async_std::future::timeout(Duration::from_secs(10), client) + .await + .unwrap(); + } + + #[derive(Clone, Debug)] + struct DialerProtos(Vec<&'static str>); + + impl Arbitrary for DialerProtos { + fn arbitrary(g: &mut Gen) -> Self { + if bool::arbitrary(g) { + DialerProtos(vec!["/proto1"]) + } else { + DialerProtos(vec!["/proto1", "/proto2"]) + } + } + } + + #[derive(Clone, Debug)] + struct ListenerProtos(Vec<&'static str>); + + impl Arbitrary for ListenerProtos { + fn arbitrary(g: &mut Gen) -> Self { + if bool::arbitrary(g) { + ListenerProtos(vec!["/proto3"]) + } else { + ListenerProtos(vec!["/proto3", "/proto4"]) + } + } + } + + #[derive(Clone, Debug)] + struct DialPayload(Vec); + + impl Arbitrary for DialPayload { + fn arbitrary(g: &mut Gen) -> Self { + DialPayload( + (0..g.gen_range(0..2u8)) + .map(|_| g.gen_range(1..255)) // We can generate 0 as that will produce a different error. 
+ .collect(), + ) + } + } +} diff --git a/misc/multistream-select/src/length_delimited.rs b/misc/multistream-select/src/length_delimited.rs index cff2f4abc391..6515d00c7176 100644 --- a/misc/multistream-select/src/length_delimited.rs +++ b/misc/multistream-select/src/length_delimited.rs @@ -170,7 +170,7 @@ where if (buf[*pos - 1] & 0x80) == 0 { // MSB is not set, indicating the end of the length prefix. let (len, _) = unsigned_varint::decode::u16(buf).map_err(|e| { - log::debug!("invalid length prefix: {}", e); + tracing::debug!("invalid length prefix: {e}"); io::Error::new(io::ErrorKind::InvalidData, "invalid length prefix") })?; diff --git a/misc/multistream-select/src/lib.rs b/misc/multistream-select/src/lib.rs index e85fae0f9777..5565623f25e3 100644 --- a/misc/multistream-select/src/lib.rs +++ b/misc/multistream-select/src/lib.rs @@ -140,3 +140,11 @@ pub enum Version { // Draft: https://github.com/libp2p/specs/pull/95 // V2, } + +#[cfg(test)] +impl quickcheck::Arbitrary for Version { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + *g.choose(&[Version::V1, Version::V1Lazy]) + .expect("slice not empty") + } +} diff --git a/misc/multistream-select/src/listener_select.rs b/misc/multistream-select/src/listener_select.rs index 5386114fab85..21c507096e20 100644 --- a/misc/multistream-select/src/listener_select.rs +++ b/misc/multistream-select/src/listener_select.rs @@ -52,7 +52,7 @@ where .filter_map(|n| match Protocol::try_from(n.as_ref()) { Ok(p) => Some((n, p)), Err(e) => { - log::warn!( + tracing::warn!( "Listener: Ignoring invalid protocol: {} due to {}", n.as_ref(), e @@ -124,9 +124,9 @@ where match mem::replace(this.state, State::Done) { State::RecvHeader { mut io } => { match io.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(Message::Header(h)))) => match h { - HeaderLine::V1 => *this.state = State::SendHeader { io }, - }, + Poll::Ready(Some(Ok(Message::Header(HeaderLine::V1)))) => { + *this.state = State::SendHeader { io } + } Poll::Ready(Some(Ok(_))) => { 
return Poll::Ready(Err(ProtocolError::InvalidMessage.into())) } @@ -186,7 +186,7 @@ where // the dialer also raises `NegotiationError::Failed` when finally // reading the `N/A` response. if let ProtocolError::InvalidMessage = &err { - log::trace!( + tracing::trace!( "Listener: Negotiation failed with invalid \ message after protocol rejection." ); @@ -194,7 +194,7 @@ where } if let ProtocolError::IoError(e) = &err { if e.kind() == std::io::ErrorKind::UnexpectedEof { - log::trace!( + tracing::trace!( "Listener: Negotiation failed with EOF \ after protocol rejection." ); @@ -228,10 +228,10 @@ where }); let message = if protocol.is_some() { - log::debug!("Listener: confirming protocol: {}", p); + tracing::debug!(protocol=%p, "Listener: confirming protocol"); Message::Protocol(p.clone()) } else { - log::debug!("Listener: rejecting protocol: {}", p.as_ref()); + tracing::debug!(protocol=%p.as_ref(), "Listener: rejecting protocol"); Message::NotAvailable }; @@ -287,9 +287,9 @@ where // Otherwise expect to receive another message. match protocol { Some(protocol) => { - log::debug!( - "Listener: sent confirmed protocol: {}", - protocol.as_ref() + tracing::debug!( + protocol=%protocol.as_ref(), + "Listener: sent confirmed protocol" ); let io = Negotiated::completed(io.into_inner()); return Poll::Ready(Ok((protocol, io))); diff --git a/misc/multistream-select/src/negotiated.rs b/misc/multistream-select/src/negotiated.rs index 941b60765ca3..a24014a4f5f2 100644 --- a/misc/multistream-select/src/negotiated.rs +++ b/misc/multistream-select/src/negotiated.rs @@ -171,7 +171,7 @@ impl Negotiated { if let Message::Protocol(p) = &msg { if p.as_ref() == protocol.as_ref() { - log::debug!("Negotiated: Received confirmation for protocol: {}", p); + tracing::debug!(protocol=%p, "Negotiated: Received confirmation for protocol"); *this.state = State::Completed { io: io.into_inner(), }; @@ -317,7 +317,7 @@ where StateProj::Expecting { io, .. 
} => { let close_poll = io.poll_close(cx); if let Poll::Ready(Ok(())) = close_poll { - log::debug!("Stream closed. Confirmation from remote for optimstic protocol negotiation still pending.") + tracing::debug!("Stream closed. Confirmation from remote for optimstic protocol negotiation still pending") } close_poll } diff --git a/misc/multistream-select/src/protocol.rs b/misc/multistream-select/src/protocol.rs index be2f3122da03..d5c2bfa773a5 100644 --- a/misc/multistream-select/src/protocol.rs +++ b/misc/multistream-select/src/protocol.rs @@ -403,7 +403,7 @@ where return Poll::Ready(None); }; - log::trace!("Received message: {:?}", msg); + tracing::trace!(message=?msg, "Received message"); Poll::Ready(Some(Ok(msg))) } diff --git a/misc/multistream-select/tests/dialer_select.rs b/misc/multistream-select/tests/dialer_select.rs deleted file mode 100644 index d1b276458c4b..000000000000 --- a/misc/multistream-select/tests/dialer_select.rs +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2017 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Integration tests for protocol negotiation. - -use futures::prelude::*; -use multistream_select::{dialer_select_proto, listener_select_proto, NegotiationError, Version}; -use std::time::Duration; - -#[test] -fn select_proto_basic() { - async fn run(version: Version) { - let (client_connection, server_connection) = futures_ringbuf::Endpoint::pair(100, 100); - - let server = async_std::task::spawn(async move { - let protos = vec!["/proto1", "/proto2"]; - let (proto, mut io) = listener_select_proto(server_connection, protos) - .await - .unwrap(); - assert_eq!(proto, "/proto2"); - - let mut out = vec![0; 32]; - let n = io.read(&mut out).await.unwrap(); - out.truncate(n); - assert_eq!(out, b"ping"); - - io.write_all(b"pong").await.unwrap(); - io.flush().await.unwrap(); - }); - - let client = async_std::task::spawn(async move { - let protos = vec!["/proto3", "/proto2"]; - let (proto, mut io) = dialer_select_proto(client_connection, protos, version) - .await - .unwrap(); - assert_eq!(proto, "/proto2"); - - io.write_all(b"ping").await.unwrap(); - io.flush().await.unwrap(); - - let mut out = vec![0; 32]; - let n = io.read(&mut out).await.unwrap(); - out.truncate(n); - assert_eq!(out, b"pong"); - }); - - server.await; - client.await; - } - - async_std::task::block_on(run(Version::V1)); - async_std::task::block_on(run(Version::V1Lazy)); -} - -/// Tests the expected behaviour of failed negotiations. 
-#[test] -fn negotiation_failed() { - let _ = env_logger::try_init(); - - async fn run( - Test { - version, - listen_protos, - dial_protos, - dial_payload, - }: Test, - ) { - let (client_connection, server_connection) = futures_ringbuf::Endpoint::pair(100, 100); - - let server = async_std::task::spawn(async move { - let io = match listener_select_proto(server_connection, listen_protos).await { - Ok((_, io)) => io, - Err(NegotiationError::Failed) => return, - Err(NegotiationError::ProtocolError(e)) => { - panic!("Unexpected protocol error {e}") - } - }; - match io.complete().await { - Err(NegotiationError::Failed) => {} - _ => panic!(), - } - }); - - let client = async_std::task::spawn(async move { - let mut io = match dialer_select_proto(client_connection, dial_protos, version).await { - Err(NegotiationError::Failed) => return, - Ok((_, io)) => io, - Err(_) => panic!(), - }; - // The dialer may write a payload that is even sent before it - // got confirmation of the last proposed protocol, when `V1Lazy` - // is used. - io.write_all(&dial_payload).await.unwrap(); - match io.complete().await { - Err(NegotiationError::Failed) => {} - _ => panic!(), - } - }); - - server.await; - client.await; - } - - /// Parameters for a single test run. - #[derive(Clone)] - struct Test { - version: Version, - listen_protos: Vec<&'static str>, - dial_protos: Vec<&'static str>, - dial_payload: Vec, - } - - // Disjunct combinations of listen and dial protocols to test. - // - // The choices here cover the main distinction between a single - // and multiple protocols. - let protos = vec![ - (vec!["/proto1"], vec!["/proto2"]), - (vec!["/proto1", "/proto2"], vec!["/proto3", "/proto4"]), - ]; - - // The payloads that the dialer sends after "successful" negotiation, - // which may be sent even before the dialer got protocol confirmation - // when `V1Lazy` is used. 
- // - // The choices here cover the specific situations that can arise with - // `V1Lazy` and which must nevertheless behave identically to `V1` w.r.t. - // the outcome of the negotiation. - let payloads = vec![ - // No payload, in which case all versions should behave identically - // in any case, i.e. the baseline test. - vec![], - // With this payload and `V1Lazy`, the listener interprets the first - // `1` as a message length and encounters an invalid message (the - // second `1`). The listener is nevertheless expected to fail - // negotiation normally, just like with `V1`. - vec![1, 1], - // With this payload and `V1Lazy`, the listener interprets the first - // `42` as a message length and encounters unexpected EOF trying to - // read a message of that length. The listener is nevertheless expected - // to fail negotiation normally, just like with `V1` - vec![42, 1], - ]; - - for (listen_protos, dial_protos) in protos { - for dial_payload in payloads.clone() { - for &version in &[Version::V1, Version::V1Lazy] { - async_std::task::block_on(run(Test { - version, - listen_protos: listen_protos.clone(), - dial_protos: dial_protos.clone(), - dial_payload: dial_payload.clone(), - })) - } - } - } -} - -#[async_std::test] -async fn v1_lazy_do_not_wait_for_negotiation_on_poll_close() { - let (client_connection, _server_connection) = futures_ringbuf::Endpoint::pair(1024 * 1024, 1); - - let client = async_std::task::spawn(async move { - // Single protocol to allow for lazy (or optimistic) protocol negotiation. - let protos = vec!["/proto1"]; - let (proto, mut io) = dialer_select_proto(client_connection, protos, Version::V1Lazy) - .await - .unwrap(); - assert_eq!(proto, "/proto1"); - - // client can close the connection even though protocol negotiation is not yet done, i.e. - // `_server_connection` had been untouched. 
- io.close().await.unwrap(); - }); - - async_std::future::timeout(Duration::from_secs(10), client) - .await - .unwrap(); -} diff --git a/misc/quick-protobuf-codec/CHANGELOG.md b/misc/quick-protobuf-codec/CHANGELOG.md index 740201f80d7d..a301293621f4 100644 --- a/misc/quick-protobuf-codec/CHANGELOG.md +++ b/misc/quick-protobuf-codec/CHANGELOG.md @@ -1,3 +1,13 @@ +## 0.3.1 + +- Reduce allocations during encoding. + See [PR 4782](https://github.com/libp2p/rust-libp2p/pull/4782). + +## 0.3.0 + +- Update to `asynchronous-codec` `v0.7.0`. + See [PR 4636](https://github.com/libp2p/rust-libp2p/pull/4636). + ## 0.2.0 - Raise MSRV to 1.65. diff --git a/misc/quick-protobuf-codec/Cargo.toml b/misc/quick-protobuf-codec/Cargo.toml index 0f793d1f1851..bc07b86b4272 100644 --- a/misc/quick-protobuf-codec/Cargo.toml +++ b/misc/quick-protobuf-codec/Cargo.toml @@ -3,7 +3,7 @@ name = "quick-protobuf-codec" edition = "2021" rust-version = { workspace = true } description = "Asynchronous de-/encoding of Protobuf structs using asynchronous-codec, unsigned-varint and quick-protobuf." -version = "0.2.0" +version = "0.3.1" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,12 +11,21 @@ keywords = ["networking"] categories = ["asynchronous"] [dependencies] -asynchronous-codec = { version = "0.6" } +asynchronous-codec = { workspace = true } bytes = { version = "1" } thiserror = "1.0" -unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } +unsigned-varint = { workspace = true, features = ["std"] } quick-protobuf = "0.8" +[dev-dependencies] +criterion = "0.5.1" +futures = "0.3.30" +quickcheck = { workspace = true } + +[[bench]] +name = "codec" +harness = false + # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling [package.metadata.docs.rs] diff --git a/misc/quick-protobuf-codec/benches/codec.rs b/misc/quick-protobuf-codec/benches/codec.rs new file mode 100644 index 000000000000..0f6ce9469c52 --- /dev/null +++ b/misc/quick-protobuf-codec/benches/codec.rs @@ -0,0 +1,28 @@ +use asynchronous_codec::Encoder; +use bytes::BytesMut; +use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; +use quick_protobuf_codec::{proto, Codec}; + +pub fn benchmark(c: &mut Criterion) { + for size in [1000, 10_000, 100_000, 1_000_000, 10_000_000] { + c.bench_with_input(BenchmarkId::new("encode", size), &size, |b, i| { + b.iter_batched( + || { + let mut out = BytesMut::new(); + out.reserve(i + 100); + let codec = Codec::::new(i + 100); + let msg = proto::Message { + data: vec![0; size], + }; + + (codec, out, msg) + }, + |(mut codec, mut out, msg)| codec.encode(msg, &mut out).unwrap(), + BatchSize::SmallInput, + ); + }); + } +} + +criterion_group!(benches, benchmark); +criterion_main!(benches); diff --git a/misc/quick-protobuf-codec/src/generated/mod.rs b/misc/quick-protobuf-codec/src/generated/mod.rs new file mode 100644 index 000000000000..b9f982f8dfd8 --- /dev/null +++ b/misc/quick-protobuf-codec/src/generated/mod.rs @@ -0,0 +1,2 @@ +// Automatically generated mod.rs +pub mod test; diff --git a/misc/quick-protobuf-codec/src/generated/test.proto b/misc/quick-protobuf-codec/src/generated/test.proto new file mode 100644 index 000000000000..5b1f46c0bfac --- /dev/null +++ b/misc/quick-protobuf-codec/src/generated/test.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package test; + +message Message { + bytes data = 1; +} diff --git a/misc/quick-protobuf-codec/src/generated/test.rs b/misc/quick-protobuf-codec/src/generated/test.rs new file mode 100644 index 000000000000..b353e6d9183d --- /dev/null +++ b/misc/quick-protobuf-codec/src/generated/test.rs @@ -0,0 +1,47 @@ +// Automatically generated rust module for 
'test.proto' file + +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_imports)] +#![allow(unknown_lints)] +#![allow(clippy::all)] +#![cfg_attr(rustfmt, rustfmt_skip)] + + +use quick_protobuf::{MessageInfo, MessageRead, MessageWrite, BytesReader, Writer, WriterBackend, Result}; +use quick_protobuf::sizeofs::*; +use super::*; + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct Message { + pub data: Vec, +} + +impl<'a> MessageRead<'a> for Message { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.data = r.read_bytes(bytes)?.to_owned(), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for Message { + fn get_size(&self) -> usize { + 0 + + if self.data.is_empty() { 0 } else { 1 + sizeof_len((&self.data).len()) } + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if !self.data.is_empty() { w.write_with_tag(10, |w| w.write_bytes(&**&self.data))?; } + Ok(()) + } +} + diff --git a/misc/quick-protobuf-codec/src/lib.rs b/misc/quick-protobuf-codec/src/lib.rs index 04ee4980d3a8..c50b1264af6c 100644 --- a/misc/quick-protobuf-codec/src/lib.rs +++ b/misc/quick-protobuf-codec/src/lib.rs @@ -1,16 +1,21 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use asynchronous_codec::{Decoder, Encoder}; -use bytes::{Bytes, BytesMut}; -use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; +use bytes::{Buf, BufMut, BytesMut}; +use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer, WriterBackend}; +use std::io; use std::marker::PhantomData; -use unsigned_varint::codec::UviBytes; + +mod generated; + +#[doc(hidden)] // NOT public API. Do not use. 
+pub use generated::test as proto; /// [`Codec`] implements [`Encoder`] and [`Decoder`], uses [`unsigned_varint`] /// to prefix messages with their length and uses [`quick_protobuf`] and a provided /// `struct` implementing [`MessageRead`] and [`MessageWrite`] to do the encoding. pub struct Codec { - uvi: UviBytes, + max_message_len_bytes: usize, phantom: PhantomData<(In, Out)>, } @@ -21,30 +26,44 @@ impl Codec { /// Protobuf message. The limit does not include the bytes needed for the /// [`unsigned_varint`]. pub fn new(max_message_len_bytes: usize) -> Self { - let mut uvi = UviBytes::default(); - uvi.set_max_len(max_message_len_bytes); Self { - uvi, + max_message_len_bytes, phantom: PhantomData, } } } impl Encoder for Codec { - type Item = In; + type Item<'a> = In; type Error = Error; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { - let mut encoded_msg = Vec::new(); - let mut writer = Writer::new(&mut encoded_msg); - item.write_message(&mut writer) - .expect("Encoding to succeed"); - self.uvi.encode(Bytes::from(encoded_msg), dst)?; + fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { + write_length(&item, dst); + write_message(&item, dst)?; Ok(()) } } +/// Write the message's length (i.e. `size`) to `dst` as a variable-length integer. +fn write_length(message: &impl MessageWrite, dst: &mut BytesMut) { + let message_length = message.get_size(); + + let mut uvi_buf = unsigned_varint::encode::usize_buffer(); + let encoded_length = unsigned_varint::encode::usize(message_length, &mut uvi_buf); + + dst.extend_from_slice(encoded_length); +} + +/// Write the message itself to `dst`. 
+fn write_message(item: &impl MessageWrite, dst: &mut BytesMut) -> io::Result<()> { + let mut writer = Writer::new(BytesMutWriterBackend::new(dst)); + item.write_message(&mut writer) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + + Ok(()) +} + impl Decoder for Codec where Out: for<'a> MessageRead<'a>, @@ -53,24 +72,203 @@ where type Error = Error; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - let msg = match self.uvi.decode(src)? { - None => return Ok(None), - Some(msg) => msg, + let (message_length, remaining) = match unsigned_varint::decode::usize(src) { + Ok((len, remaining)) => (len, remaining), + Err(unsigned_varint::decode::Error::Insufficient) => return Ok(None), + Err(e) => return Err(Error(io::Error::new(io::ErrorKind::InvalidData, e))), }; - let mut reader = BytesReader::from_bytes(&msg); - let message = Self::Item::from_reader(&mut reader, &msg) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + if message_length > self.max_message_len_bytes { + return Err(Error(io::Error::new( + io::ErrorKind::PermissionDenied, + format!( + "message with {message_length}b exceeds maximum of {}b", + self.max_message_len_bytes + ), + ))); + } + + // Compute how many bytes the varint itself consumed. + let varint_length = src.len() - remaining.len(); + + // Ensure we can read an entire message. + if src.len() < (message_length + varint_length) { + return Ok(None); + } + + // Safe to advance buffer now. 
+ src.advance(varint_length); + + let message = src.split_to(message_length); + + let mut reader = BytesReader::from_bytes(&message); + let message = Self::Item::from_reader(&mut reader, &message) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + Ok(Some(message)) } } +struct BytesMutWriterBackend<'a> { + dst: &'a mut BytesMut, +} + +impl<'a> BytesMutWriterBackend<'a> { + fn new(dst: &'a mut BytesMut) -> Self { + Self { dst } + } +} + +impl<'a> WriterBackend for BytesMutWriterBackend<'a> { + fn pb_write_u8(&mut self, x: u8) -> quick_protobuf::Result<()> { + self.dst.put_u8(x); + + Ok(()) + } + + fn pb_write_u32(&mut self, x: u32) -> quick_protobuf::Result<()> { + self.dst.put_u32_le(x); + + Ok(()) + } + + fn pb_write_i32(&mut self, x: i32) -> quick_protobuf::Result<()> { + self.dst.put_i32_le(x); + + Ok(()) + } + + fn pb_write_f32(&mut self, x: f32) -> quick_protobuf::Result<()> { + self.dst.put_f32_le(x); + + Ok(()) + } + + fn pb_write_u64(&mut self, x: u64) -> quick_protobuf::Result<()> { + self.dst.put_u64_le(x); + + Ok(()) + } + + fn pb_write_i64(&mut self, x: i64) -> quick_protobuf::Result<()> { + self.dst.put_i64_le(x); + + Ok(()) + } + + fn pb_write_f64(&mut self, x: f64) -> quick_protobuf::Result<()> { + self.dst.put_f64_le(x); + + Ok(()) + } + + fn pb_write_all(&mut self, buf: &[u8]) -> quick_protobuf::Result<()> { + self.dst.put_slice(buf); + + Ok(()) + } +} + #[derive(thiserror::Error, Debug)] #[error("Failed to encode/decode message")] -pub struct Error(#[from] std::io::Error); +pub struct Error(#[from] io::Error); -impl From for std::io::Error { +impl From for io::Error { fn from(e: Error) -> Self { e.0 } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::proto; + use asynchronous_codec::FramedRead; + use futures::io::Cursor; + use futures::{FutureExt, StreamExt}; + use quickcheck::{Arbitrary, Gen, QuickCheck}; + use std::error::Error; + + #[test] + fn honors_max_message_length() { + let codec = Codec::::new(1); + let mut src = 
varint_zeroes(100); + + let mut read = FramedRead::new(Cursor::new(&mut src), codec); + let err = read.next().now_or_never().unwrap().unwrap().unwrap_err(); + + assert_eq!( + err.source().unwrap().to_string(), + "message with 100b exceeds maximum of 1b" + ) + } + + #[test] + fn empty_bytes_mut_does_not_panic() { + let mut codec = Codec::::new(100); + + let mut src = varint_zeroes(100); + src.truncate(50); + + let result = codec.decode(&mut src); + + assert!(result.unwrap().is_none()); + assert_eq!( + src.len(), + 50, + "to not modify `src` if we cannot read a full message" + ) + } + + #[test] + fn only_partial_message_in_bytes_mut_does_not_panic() { + let mut codec = Codec::::new(100); + + let result = codec.decode(&mut BytesMut::new()); + + assert!(result.unwrap().is_none()); + } + + #[test] + fn handles_arbitrary_initial_capacity() { + fn prop(message: proto::Message, initial_capacity: u16) { + let mut buffer = BytesMut::with_capacity(initial_capacity as usize); + let mut codec = Codec::::new(u32::MAX as usize); + + codec.encode(message.clone(), &mut buffer).unwrap(); + let decoded = codec.decode(&mut buffer).unwrap().unwrap(); + + assert_eq!(message, decoded); + } + + QuickCheck::new().quickcheck(prop as fn(_, _) -> _) + } + + /// Constructs a [`BytesMut`] of the provided length where the message is all zeros. 
+ fn varint_zeroes(length: usize) -> BytesMut { + let mut buf = unsigned_varint::encode::usize_buffer(); + let encoded_length = unsigned_varint::encode::usize(length, &mut buf); + + let mut src = BytesMut::new(); + src.extend_from_slice(encoded_length); + src.extend(std::iter::repeat(0).take(length)); + src + } + + impl Arbitrary for proto::Message { + fn arbitrary(g: &mut Gen) -> Self { + Self { + data: Vec::arbitrary(g), + } + } + } + + #[derive(Debug)] + struct Dummy; + + impl<'a> MessageRead<'a> for Dummy { + fn from_reader(_: &mut BytesReader, _: &'a [u8]) -> quick_protobuf::Result { + todo!() + } + } +} diff --git a/misc/quick-protobuf-codec/tests/large_message.rs b/misc/quick-protobuf-codec/tests/large_message.rs new file mode 100644 index 000000000000..65dafe065d10 --- /dev/null +++ b/misc/quick-protobuf-codec/tests/large_message.rs @@ -0,0 +1,16 @@ +use asynchronous_codec::Encoder; +use bytes::BytesMut; +use quick_protobuf_codec::proto; +use quick_protobuf_codec::Codec; + +#[test] +fn encode_large_message() { + let mut codec = Codec::::new(1_001_000); + let mut dst = BytesMut::new(); + dst.reserve(1_001_000); + let message = proto::Message { + data: vec![0; 1_000_000], + }; + + codec.encode(message, &mut dst).unwrap(); +} diff --git a/misc/quickcheck-ext/Cargo.toml b/misc/quickcheck-ext/Cargo.toml index 65e45e47ab8e..9fe3cbf25c1f 100644 --- a/misc/quickcheck-ext/Cargo.toml +++ b/misc/quickcheck-ext/Cargo.toml @@ -5,6 +5,9 @@ edition = "2021" publish = false license = "Unlicense/MIT" +[package.metadata.release] +release = false + [dependencies] quickcheck = "1" num-traits = "0.2" diff --git a/misc/rw-stream-sink/Cargo.toml b/misc/rw-stream-sink/Cargo.toml index f1537e9e7ac7..f8f103bd6d90 100644 --- a/misc/rw-stream-sink/Cargo.toml +++ b/misc/rw-stream-sink/Cargo.toml @@ -11,8 +11,8 @@ keywords = ["networking"] categories = ["network-programming", "asynchronous"] [dependencies] -futures = "0.3.28" -pin-project = "1.1.3" +futures = "0.3.30" +pin-project = 
"1.1.4" static_assertions = "1" [dev-dependencies] diff --git a/misc/server/CHANGELOG.md b/misc/server/CHANGELOG.md index 5fd4313f35cf..484964e27e95 100644 --- a/misc/server/CHANGELOG.md +++ b/misc/server/CHANGELOG.md @@ -1,10 +1,25 @@ -# Changelog -All notable changes to this project will be documented in this file. +## 0.12.6 -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +### Changed +- Stop using kad default protocol. + See [PR 5122](https://github.com/libp2p/rust-libp2p/pull/5122) + +## 0.12.5 + +### Added + +- Add `/wss` support. + See [PR 4937](https://github.com/libp2p/rust-libp2p/pull/4937). + +## 0.12.4 + +### Added + +- Expose `libp2p_bandwidth_bytes` Prometheus metrics. + See [PR 4727](https://github.com/libp2p/rust-libp2p/pull/4727). + +## 0.12.3 -## [0.12.3] ### Changed - Add libp2p-lookup to Dockerfile to enable healthchecks. @@ -16,14 +31,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 [PR 4467]: https://github.com/libp2p/rust-libp2p/pull/4467 -## [0.12.2] +## 0.12.2 ### Fixed - Adhere to `--metrics-path` flag and listen on `0.0.0.0:8888` (default IPFS metrics port). [PR 4392] [PR 4392]: https://github.com/libp2p/rust-libp2p/pull/4392 -## [0.12.1] +## 0.12.1 ### Changed - Move to tokio and hyper. See [PR 4311]. @@ -32,40 +47,40 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 [PR 4311]: https://github.com/libp2p/rust-libp2p/pull/4311 -## [0.8.0] +## 0.8.0 ### Changed - Remove mplex support. -## [0.7.0] +## 0.7.0 ### Changed - Update to libp2p v0.47.0. -## [0.6.0] - [2022-05-05] +## 0.6.0 - 2022-05-05 ### Changed - Update to libp2p v0.44.0. -## [0.5.4] - [2022-01-11] +## 0.5.4 - 2022-01-11 ### Changed - Pull latest autonat changes. -## [0.5.3] - [2021-12-25] +## 0.5.3 - 2021-12-25 ### Changed - Update dependencies. - Pull in autonat fixes. 
-## [0.5.2] - [2021-12-20] +## 0.5.2 - 2021-12-20 ### Added - Add support for libp2p autonat protocol via `--enable-autonat`. -## [0.5.1] - [2021-12-20] +## 0.5.1 - 2021-12-20 ### Fixed - Update dependencies. - Fix typo in command line flag `--enable-kademlia`. -## [0.5.0] - 2021-11-18 +## 0.5.0 - 2021-11-18 ### Changed - Disable Kademlia protocol by default. -## [0.4.0] - 2021-11-18 +## 0.4.0 - 2021-11-18 ### Fixed - Update dependencies. diff --git a/misc/server/Cargo.toml b/misc/server/Cargo.toml index e0d7af3a823a..9653ddc5d761 100644 --- a/misc/server/Cargo.toml +++ b/misc/server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-server" -version = "0.12.3" +version = "0.12.6" authors = ["Max Inden "] edition = "2021" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,18 +12,18 @@ license = "MIT" [dependencies] base64 = "0.21" -clap = { version = "4.3.12", features = ["derive"] } -env_logger = "0.10.0" +clap = { version = "4.4.16", features = ["derive"] } futures = "0.3" futures-timer = "3" hyper = { version = "0.14", features = ["server", "tcp", "http1"] } -libp2p = { workspace = true, features = ["autonat", "dns", "tokio", "noise", "tcp", "yamux", "identify", "kad", "ping", "relay", "metrics", "rsa", "macros", "quic"] } -log = "0.4" -prometheus-client = "0.21.2" -serde = "1.0.188" +libp2p = { workspace = true, features = ["autonat", "dns", "tokio", "noise", "tcp", "yamux", "identify", "kad", "ping", "relay", "metrics", "rsa", "macros", "quic", "websocket"] } +prometheus-client = { workspace = true } +serde = "1.0.196" serde_derive = "1.0.125" serde_json = "1.0" tokio = { version = "1", features = ["rt-multi-thread", "macros"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } zeroize = "1" [lints] diff --git a/misc/server/Dockerfile b/misc/server/Dockerfile index 72641cc3b2e4..9d2742f97e85 100644 --- a/misc/server/Dockerfile +++ b/misc/server/Dockerfile @@ -1,19 +1,20 @@ -FROM rust:1.72-bullseye as builder 
-WORKDIR /usr/src/rust-libp2p-server +# syntax=docker/dockerfile:1.5-labs +FROM rust:1.73.0 as chef +RUN wget -q -O- https://github.com/LukeMathWalker/cargo-chef/releases/download/v0.1.62/cargo-chef-x86_64-unknown-linux-gnu.tar.gz | tar -zx -C /usr/local/bin +RUN cargo install --locked --root /usr/local libp2p-lookup --version 0.6.4 +WORKDIR /app -# Run with access to the target cache to speed up builds -WORKDIR /workspace +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json -RUN --mount=type=cache,target=/usr/local/cargo/registry \ - cargo install --locked --root /usr/local libp2p-lookup --version 0.6.4 - -ADD . . -RUN --mount=type=cache,target=./target \ - --mount=type=cache,target=/usr/local/cargo/registry \ - cargo build --release --package libp2p-server - -RUN --mount=type=cache,target=./target \ - mv ./target/release/libp2p-server /usr/local/bin/libp2p-server +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json +# Build dependencies - this is the caching Docker layer! +RUN cargo chef cook --release --package libp2p-server --recipe-path recipe.json +# Build application +COPY . . 
+RUN cargo build --release --package libp2p-server FROM gcr.io/distroless/cc COPY --from=builder /usr/local/bin/libp2p-server /usr/local/bin/libp2p-lookup /usr/local/bin/ diff --git a/misc/server/src/behaviour.rs b/misc/server/src/behaviour.rs index 2f7741b9317e..36b18c9798d7 100644 --- a/misc/server/src/behaviour.rs +++ b/misc/server/src/behaviour.rs @@ -4,7 +4,8 @@ use libp2p::kad; use libp2p::ping; use libp2p::relay; use libp2p::swarm::behaviour::toggle::Toggle; -use libp2p::{identity, swarm::NetworkBehaviour, Multiaddr, PeerId}; +use libp2p::swarm::{NetworkBehaviour, StreamProtocol}; +use libp2p::{identity, Multiaddr, PeerId}; use std::str::FromStr; use std::time::Duration; @@ -15,12 +16,14 @@ const BOOTNODES: [&str; 4] = [ "QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", ]; +const IPFS_PROTO_NAME: StreamProtocol = StreamProtocol::new("/ipfs/kad/1.0.0"); + #[derive(NetworkBehaviour)] pub(crate) struct Behaviour { relay: relay::Behaviour, ping: ping::Behaviour, identify: identify::Behaviour, - pub(crate) kademlia: Toggle>, + pub(crate) kademlia: Toggle>, autonat: Toggle, } @@ -31,7 +34,7 @@ impl Behaviour { enable_autonat: bool, ) -> Self { let kademlia = if enable_kademlia { - let mut kademlia_config = kad::Config::default(); + let mut kademlia_config = kad::Config::new(IPFS_PROTO_NAME); // Instantly remove records and provider records. // // TODO: Replace hack with option to disable both. 
@@ -39,7 +42,7 @@ impl Behaviour { kademlia_config.set_provider_record_ttl(Some(Duration::from_secs(0))); let mut kademlia = kad::Behaviour::with_config( pub_key.to_peer_id(), - kad::record::store::MemoryStore::new(pub_key.to_peer_id()), + kad::store::MemoryStore::new(pub_key.to_peer_id()), kademlia_config, ); let bootaddr = Multiaddr::from_str("/dnsaddr/bootstrap.libp2p.io").unwrap(); diff --git a/misc/server/src/http_service.rs b/misc/server/src/http_service.rs index 1f5ebaff5930..7905933fbf58 100644 --- a/misc/server/src/http_service.rs +++ b/misc/server/src/http_service.rs @@ -21,7 +21,6 @@ use hyper::http::StatusCode; use hyper::service::Service; use hyper::{Body, Method, Request, Response, Server}; -use log::info; use prometheus_client::encoding::text::encode; use prometheus_client::registry::Registry; use std::future::Future; @@ -38,11 +37,7 @@ pub(crate) async fn metrics_server( let addr = ([0, 0, 0, 0], 8888).into(); let server = Server::bind(&addr).serve(MakeMetricService::new(registry, metrics_path.clone())); - info!( - "Metrics server on http://{}{}", - server.local_addr(), - metrics_path - ); + tracing::info!(metrics_server=%format!("http://{}{}", server.local_addr(), metrics_path)); server.await?; Ok(()) } diff --git a/misc/server/src/main.rs b/misc/server/src/main.rs index e885301d5908..16e6530e9462 100644 --- a/misc/server/src/main.rs +++ b/misc/server/src/main.rs @@ -1,31 +1,22 @@ use base64::Engine; use clap::Parser; -use futures::future::Either; use futures::stream::StreamExt; use futures_timer::Delay; -use libp2p::core::muxing::StreamMuxerBox; -use libp2p::core::upgrade; -use libp2p::dns; -use libp2p::identify; use libp2p::identity; use libp2p::identity::PeerId; use libp2p::kad; use libp2p::metrics::{Metrics, Recorder}; -use libp2p::noise; -use libp2p::quic; -use libp2p::swarm::{SwarmBuilder, SwarmEvent}; +use libp2p::swarm::SwarmEvent; use libp2p::tcp; -use libp2p::yamux; -use libp2p::Transport; -use log::{debug, info, warn}; +use 
libp2p::{identify, noise, yamux}; use prometheus_client::metrics::info::Info; use prometheus_client::registry::Registry; use std::error::Error; -use std::io; use std::path::PathBuf; use std::str::FromStr; use std::task::Poll; use std::time::Duration; +use tracing_subscriber::EnvFilter; use zeroize::Zeroizing; mod behaviour; @@ -56,13 +47,17 @@ struct Opts { #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opts::parse(); let config = Zeroizing::new(config::Config::from_file(opt.config.as_path())?); - let (local_peer_id, local_keypair) = { + let mut metric_registry = Registry::default(); + + let local_keypair = { let keypair = identity::Keypair::from_protobuf_encoding(&Zeroizing::new( base64::engine::general_purpose::STANDARD .decode(config.identity.priv_key.as_bytes())?, @@ -75,63 +70,50 @@ async fn main() -> Result<(), Box> { "Expect peer id derived from private key and peer id retrieved from config to match." ); - (peer_id, keypair) - }; - - let transport = { - let tcp_transport = - tcp::tokio::Transport::new(tcp::Config::new().port_reuse(true).nodelay(true)) - .upgrade(upgrade::Version::V1) - .authenticate(noise::Config::new(&local_keypair)?) - .multiplex(yamux::Config::default()) - .timeout(Duration::from_secs(20)); - - let quic_transport = quic::tokio::Transport::new(quic::Config::new(&local_keypair)); - - dns::tokio::Transport::system(libp2p::core::transport::OrTransport::new( - quic_transport, - tcp_transport, - ))? 
- .map(|either_output, _| match either_output { - Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - }) - .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) - .boxed() + keypair }; - let behaviour = behaviour::Behaviour::new( - local_keypair.public(), - opt.enable_kademlia, - opt.enable_autonat, - ); - let mut swarm = SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id).build(); + let mut swarm = libp2p::SwarmBuilder::with_existing_identity(local_keypair) + .with_tokio() + .with_tcp( + tcp::Config::default().port_reuse(true).nodelay(true), + noise::Config::new, + yamux::Config::default, + )? + .with_quic() + .with_dns()? + .with_websocket(noise::Config::new, yamux::Config::default) + .await? + .with_bandwidth_metrics(&mut metric_registry) + .with_behaviour(|key| { + behaviour::Behaviour::new(key.public(), opt.enable_kademlia, opt.enable_autonat) + })? + .build(); if config.addresses.swarm.is_empty() { - warn!("No listen addresses configured."); + tracing::warn!("No listen addresses configured"); } for address in &config.addresses.swarm { match swarm.listen_on(address.clone()) { Ok(_) => {} Err(e @ libp2p::TransportError::MultiaddrNotSupported(_)) => { - warn!("Failed to listen on {address}, continuing anyways, {e}") + tracing::warn!(%address, "Failed to listen on address, continuing anyways, {e}") } Err(e) => return Err(e.into()), } } if config.addresses.append_announce.is_empty() { - warn!("No external addresses configured."); + tracing::warn!("No external addresses configured"); } for address in &config.addresses.append_announce { swarm.add_external_address(address.clone()) } - info!( + tracing::info!( "External addresses: {:?}", swarm.external_addresses().collect::>() ); - let mut metric_registry = Registry::default(); let metrics = Metrics::new(&mut metric_registry); let build_info = Info::new(vec![("version".to_string(), 
env!("CARGO_PKG_VERSION"))]); metric_registry.register( @@ -141,7 +123,7 @@ async fn main() -> Result<(), Box> { ); tokio::spawn(async move { if let Err(e) = http_service::metrics_server(metric_registry, opt.metrics_path).await { - log::error!("Metrics server failed: {e}"); + tracing::error!("Metrics server failed: {e}"); } }); @@ -161,7 +143,7 @@ async fn main() -> Result<(), Box> { metrics.record(&event); match event { SwarmEvent::Behaviour(behaviour::BehaviourEvent::Identify(e)) => { - info!("{:?}", e); + tracing::info!("{:?}", e); metrics.record(&e); if let identify::Event::Received { @@ -186,24 +168,24 @@ async fn main() -> Result<(), Box> { } } SwarmEvent::Behaviour(behaviour::BehaviourEvent::Ping(e)) => { - debug!("{:?}", e); + tracing::debug!("{:?}", e); metrics.record(&e); } SwarmEvent::Behaviour(behaviour::BehaviourEvent::Kademlia(e)) => { - debug!("{:?}", e); + tracing::debug!("{:?}", e); metrics.record(&e); } SwarmEvent::Behaviour(behaviour::BehaviourEvent::Relay(e)) => { - info!("{:?}", e); + tracing::info!("{:?}", e); metrics.record(&e) } SwarmEvent::Behaviour(behaviour::BehaviourEvent::Autonat(e)) => { - info!("{:?}", e); + tracing::info!("{:?}", e); // TODO: Add metric recording for `NatStatus`. // metrics.record(&e) } SwarmEvent::NewListenAddr { address, .. } => { - info!("Listening on {address:?}"); + tracing::info!(%address, "Listening on address"); } _ => {} } diff --git a/misc/webrtc-utils/CHANGELOG.md b/misc/webrtc-utils/CHANGELOG.md index c3485aa1dbf4..6949113a3771 100644 --- a/misc/webrtc-utils/CHANGELOG.md +++ b/misc/webrtc-utils/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.2.0 + +- Update to latest version of `libp2p-noise`. + See [PR 4968](https://github.com/libp2p/rust-libp2p/pull/4968). + ## 0.1.0 - Initial release. 
diff --git a/misc/webrtc-utils/Cargo.toml b/misc/webrtc-utils/Cargo.toml index 4401ef9bc449..7173dedae7be 100644 --- a/misc/webrtc-utils/Cargo.toml +++ b/misc/webrtc-utils/Cargo.toml @@ -7,17 +7,17 @@ license = "MIT" name = "libp2p-webrtc-utils" repository = "https://github.com/libp2p/rust-libp2p" rust-version = { workspace = true } -version = "0.1.0" +version = "0.2.0" publish = true [dependencies] +asynchronous-codec = { workspace = true } bytes = "1" futures = "0.3" hex = "0.4" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } libp2p-noise = { workspace = true } -log = "0.4.19" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" @@ -25,11 +25,10 @@ serde = { version = "1.0", features = ["derive"] } sha2 = "0.10.8" thiserror = "1" tinytemplate = "1.2" -asynchronous-codec = "0.6" +tracing = "0.1.37" [dev-dependencies] hex-literal = "0.4" -unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } [lints] workspace = true diff --git a/misc/webrtc-utils/src/noise.rs b/misc/webrtc-utils/src/noise.rs index 023766bc1df9..9180acfc1ca4 100644 --- a/misc/webrtc-utils/src/noise.rs +++ b/misc/webrtc-utils/src/noise.rs @@ -19,19 +19,22 @@ // DEALINGS IN THE SOFTWARE. 
use futures::{AsyncRead, AsyncWrite, AsyncWriteExt}; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::UpgradeInfo; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_noise as noise; use crate::fingerprint::Fingerprint; +pub use noise::Error; + pub async fn inbound( id_keys: identity::Keypair, stream: T, client_fingerprint: Fingerprint, server_fingerprint: Fingerprint, -) -> Result +) -> Result where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { @@ -53,7 +56,7 @@ pub async fn outbound( stream: T, server_fingerprint: Fingerprint, client_fingerprint: Fingerprint, -) -> Result +) -> Result where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { diff --git a/misc/webrtc-utils/src/sdp.rs b/misc/webrtc-utils/src/sdp.rs index 7c4facaf27e3..0796548f4490 100644 --- a/misc/webrtc-utils/src/sdp.rs +++ b/misc/webrtc-utils/src/sdp.rs @@ -34,7 +34,7 @@ pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: & client_ufrag, ); - log::trace!("Created SDP answer: {answer}"); + tracing::trace!(%answer, "Created SDP answer"); answer } diff --git a/misc/webrtc-utils/src/stream.rs b/misc/webrtc-utils/src/stream.rs index a6de759a4125..0e1496eb6402 100644 --- a/misc/webrtc-utils/src/stream.rs +++ b/misc/webrtc-utils/src/stream.rs @@ -260,10 +260,9 @@ where #[cfg(test)] mod tests { use super::*; + use crate::stream::framed_dc::codec; use asynchronous_codec::Encoder; use bytes::BytesMut; - use quick_protobuf::{MessageWrite, Writer}; - use unsigned_varint::codec::UviBytes; #[test] fn max_data_len() { @@ -275,21 +274,13 @@ mod tests { message: Some(message.to_vec()), }; - let mut encoded_msg = Vec::new(); - let mut writer = Writer::new(&mut encoded_msg); - protobuf - .write_message(&mut writer) - .expect("Encoding to succeed"); - assert_eq!(encoded_msg.len(), message.len() + PROTO_OVERHEAD); + let mut codec = codec(); - 
let mut uvi = UviBytes::default(); let mut dst = BytesMut::new(); - uvi.encode(encoded_msg.as_slice(), &mut dst).unwrap(); + codec.encode(protobuf, &mut dst).unwrap(); // Ensure the varint prefixed and protobuf encoded largest message is no longer than the // maximum limit specified in the libp2p WebRTC specification. assert_eq!(dst.len(), MAX_MSG_LEN); - - assert_eq!(dst.len() - encoded_msg.len(), VARINT_LEN); } } diff --git a/misc/webrtc-utils/src/stream/drop_listener.rs b/misc/webrtc-utils/src/stream/drop_listener.rs index b638ea84b09f..9745e3d43640 100644 --- a/misc/webrtc-utils/src/stream/drop_listener.rs +++ b/misc/webrtc-utils/src/stream/drop_listener.rs @@ -79,7 +79,7 @@ where return Poll::Ready(Ok(())); } Poll::Ready(Err(Canceled)) => { - log::info!("Stream dropped without graceful close, sending Reset"); + tracing::info!("Stream dropped without graceful close, sending Reset"); *state = State::SendingReset { stream }; continue; } diff --git a/misc/webrtc-utils/src/stream/framed_dc.rs b/misc/webrtc-utils/src/stream/framed_dc.rs index 4409b79a0ed0..721178fdcd3d 100644 --- a/misc/webrtc-utils/src/stream/framed_dc.rs +++ b/misc/webrtc-utils/src/stream/framed_dc.rs @@ -29,12 +29,13 @@ pub(crate) fn new(inner: T) -> FramedDc where T: AsyncRead + AsyncWrite, { - let mut framed = Framed::new( - inner, - quick_protobuf_codec::Codec::new(MAX_MSG_LEN - VARINT_LEN), - ); + let mut framed = Framed::new(inner, codec()); // If not set, `Framed` buffers up to 131kB of data before sending, which leads to "outbound // packet larger than maximum message size" error in webrtc-rs. 
framed.set_send_high_water_mark(MAX_DATA_LEN); framed } + +pub(crate) fn codec() -> quick_protobuf_codec::Codec { + quick_protobuf_codec::Codec::new(MAX_MSG_LEN - VARINT_LEN) +} diff --git a/muxers/mplex/CHANGELOG.md b/muxers/mplex/CHANGELOG.md index 41fc39625803..48ab616e1310 100644 --- a/muxers/mplex/CHANGELOG.md +++ b/muxers/mplex/CHANGELOG.md @@ -1,4 +1,9 @@ -## 0.40.0 +## 0.41.0 + +- Migrate to `{In,Out}boundConnectionUpgrade` traits. + See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). + +## 0.40.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/muxers/mplex/Cargo.toml b/muxers/mplex/Cargo.toml index aca3ec6eadf5..8de1d66c7d02 100644 --- a/muxers/mplex/Cargo.toml +++ b/muxers/mplex/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-mplex" edition = "2021" rust-version = { workspace = true } description = "Mplex multiplexing protocol for libp2p" -version = "0.40.0" +version = "0.41.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,26 +12,27 @@ categories = ["network-programming", "asynchronous"] [dependencies] bytes = "1" -futures = "0.3.28" -asynchronous-codec = "0.6" +futures = "0.3.30" +asynchronous-codec = { workspace = true } libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" nohash-hasher = "0.2" parking_lot = "0.12" rand = "0.8" -smallvec = "1.11.1" -unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } +smallvec = "1.12.0" +tracing = "0.1.37" +unsigned-varint = { workspace = true, features = ["asynchronous_codec"] } [dev-dependencies] async-std = { version = "1.7.0", features = ["attributes"] } criterion = "0.5" -env_logger = "0.10" futures = "0.3" +libp2p-identity = { workspace = true, features = ["rand"] } libp2p-muxer-test-harness = { path = "../test-harness" } libp2p-plaintext = { workspace = true } libp2p-tcp = { workspace = true, features = ["async-io"] } quickcheck = { workspace = true } +tracing-subscriber = { version = 
"0.3", features = ["env-filter"] } [[bench]] name = "split_send_size" diff --git a/muxers/mplex/benches/split_send_size.rs b/muxers/mplex/benches/split_send_size.rs index 86f84ceab2c5..0125d49dcef7 100644 --- a/muxers/mplex/benches/split_send_size.rs +++ b/muxers/mplex/benches/split_send_size.rs @@ -35,6 +35,7 @@ use libp2p_mplex as mplex; use libp2p_plaintext as plaintext; use std::pin::Pin; use std::time::Duration; +use tracing_subscriber::EnvFilter; type BenchTransport = transport::Boxed<(PeerId, muxing::StreamMuxerBox)>; @@ -51,7 +52,9 @@ const BENCH_SIZES: [usize; 8] = [ ]; fn prepare(c: &mut Criterion) { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let payload: Vec = vec![1; 1024 * 1024]; @@ -98,7 +101,7 @@ fn prepare(c: &mut Criterion) { fn run( receiver_trans: &mut BenchTransport, sender_trans: &mut BenchTransport, - payload: &Vec, + payload: &[u8], listen_addr: &Multiaddr, ) { receiver_trans diff --git a/muxers/mplex/src/codec.rs b/muxers/mplex/src/codec.rs index ec605edc6a7d..014ee8992806 100644 --- a/muxers/mplex/src/codec.rs +++ b/muxers/mplex/src/codec.rs @@ -285,10 +285,10 @@ impl Decoder for Codec { } impl Encoder for Codec { - type Item = Frame; + type Item<'a> = Frame; type Error = io::Error; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { let (header, data) = match item { Frame::Open { stream_id } => (stream_id.num << 3, Bytes::new()), Frame::Data { diff --git a/muxers/mplex/src/io.rs b/muxers/mplex/src/io.rs index 8002ad383d67..50fc0fc1d3f2 100644 --- a/muxers/mplex/src/io.rs +++ b/muxers/mplex/src/io.rs @@ -24,7 +24,6 @@ use asynchronous_codec::Framed; use bytes::Bytes; use futures::task::{waker_ref, ArcWake, AtomicWaker, WakerRef}; use futures::{prelude::*, ready, stream::Fuse}; -use log::{debug, trace}; use 
nohash_hasher::{IntMap, IntSet}; use parking_lot::Mutex; use smallvec::SmallVec; @@ -117,7 +116,7 @@ where /// Creates a new multiplexed I/O stream. pub(crate) fn new(io: C, config: MplexConfig) -> Self { let id = ConnectionId(rand::random()); - debug!("New multiplexed connection: {}", id); + tracing::debug!(connection=%id, "New multiplexed connection"); Multiplexed { id, config, @@ -225,7 +224,7 @@ where // yield to give the current task a chance to read // from the respective substreams. if num_buffered == self.config.max_buffer_len { - cx.waker().clone().wake(); + cx.waker().wake_by_ref(); return Poll::Pending; } @@ -254,9 +253,11 @@ where // Check the stream limits. if self.substreams.len() >= self.config.max_substreams { - debug!( - "{}: Maximum number of substreams reached ({})", - self.id, self.config.max_substreams + tracing::debug!( + connection=%self.id, + total_substreams=%self.substreams.len(), + max_substreams=%self.config.max_substreams, + "Maximum number of substreams reached" ); self.notifier_open.register(cx.waker()); return Poll::Pending; @@ -276,11 +277,11 @@ where buf: Default::default(), }, ); - debug!( - "{}: New outbound substream: {} (total {})", - self.id, - stream_id, - self.substreams.len() + tracing::debug!( + connection=%self.id, + substream=%stream_id, + total_substreams=%self.substreams.len(), + "New outbound substream" ); // The flush is delayed and the `Open` frame may be sent // together with other frames in the same transport packet. 
@@ -348,7 +349,11 @@ where if self.check_max_pending_frames().is_err() { return; } - trace!("{}: Pending close for stream {}", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Pending close for substream" + ); self.pending_frames .push_front(Frame::Close { stream_id: id }); } @@ -356,7 +361,11 @@ where if self.check_max_pending_frames().is_err() { return; } - trace!("{}: Pending reset for stream {}", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Pending reset for substream" + ); self.pending_frames .push_front(Frame::Reset { stream_id: id }); } @@ -447,7 +456,7 @@ where // next frame for `id`, yield to give the current task // a chance to read from the other substream(s). if num_buffered == self.config.max_buffer_len { - cx.waker().clone().wake(); + cx.waker().wake_by_ref(); return Poll::Pending; } @@ -476,11 +485,11 @@ where frame @ Frame::Open { .. } => { if let Some(id) = self.on_open(frame.remote_id())? { self.open_buffer.push_front(id); - trace!( - "{}: Buffered new inbound stream {} (total: {})", - self.id, - id, - self.open_buffer.len() + tracing::trace!( + connection=%self.id, + inbound_stream=%id, + inbound_buffer_len=%self.open_buffer.len(), + "Buffered new inbound stream" ); self.notifier_read.wake_next_stream(); } @@ -516,7 +525,11 @@ where self.guard_open()?; ready!(self.poll_flush(cx))?; - trace!("{}: Flushed substream {}", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Flushed substream" + ); Poll::Ready(Ok(())) } @@ -554,7 +567,11 @@ where self.substreams.insert(id, SubstreamState::Open { buf }); Poll::Pending } else { - debug!("{}: Closed substream {} (half-close)", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Closed substream (half-close)" + ); self.substreams .insert(id, SubstreamState::SendClosed { buf }); Poll::Ready(Ok(())) @@ -569,7 +586,11 @@ where .insert(id, SubstreamState::RecvClosed { buf }); Poll::Pending } else { - 
debug!("{}: Closed substream {}", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Closed substream" + ); self.substreams.insert(id, SubstreamState::Closed { buf }); Poll::Ready(Ok(())) } @@ -589,7 +610,7 @@ where match ready!(self.io.poll_ready_unpin(&mut Context::from_waker(&waker))) { Ok(()) => { let frame = frame(); - trace!("{}: Sending {:?}", self.id, frame); + tracing::trace!(connection=%self.id, ?frame, "Sending frame"); match self.io.start_send_unpin(frame) { Ok(()) => Poll::Ready(Ok(())), Err(e) => Poll::Ready(self.on_error(e)), @@ -618,7 +639,11 @@ where // Perform any pending flush before reading. if let Some(id) = &stream_id { if self.pending_flush_open.contains(id) { - trace!("{}: Executing pending flush for {}.", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Executing pending flush for substream" + ); ready!(self.poll_flush(cx))?; self.pending_flush_open = Default::default(); } @@ -634,11 +659,11 @@ where if !self.notifier_read.wake_read_stream(*blocked_id) { // No task dedicated to the blocked stream woken, so schedule // this task again to have a chance at progress. - trace!( - "{}: No task to read from blocked stream. Waking current task.", - self.id + tracing::trace!( + connection=%self.id, + "No task to read from blocked stream. Waking current task." ); - cx.waker().clone().wake(); + cx.waker().wake_by_ref(); } else if let Some(id) = stream_id { // We woke some other task, but are still interested in // reading `Data` frames from the current stream when unblocked. 
@@ -664,7 +689,7 @@ where }; match ready!(self.io.poll_next_unpin(&mut Context::from_waker(&waker))) { Some(Ok(frame)) => { - trace!("{}: Received {:?}", self.id, frame); + tracing::trace!(connection=%self.id, ?frame, "Received frame"); Poll::Ready(Ok(frame)) } Some(Err(e)) => Poll::Ready(self.on_error(e)), @@ -677,9 +702,10 @@ where let id = id.into_local(); if self.substreams.contains_key(&id) { - debug!( - "{}: Received unexpected `Open` frame for open substream {}", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Received unexpected `Open` frame for open substream", ); return self.on_error(io::Error::new( io::ErrorKind::Other, @@ -688,12 +714,17 @@ where } if self.substreams.len() >= self.config.max_substreams { - debug!( - "{}: Maximum number of substreams exceeded: {}", - self.id, self.config.max_substreams + tracing::debug!( + connection=%self.id, + max_substreams=%self.config.max_substreams, + "Maximum number of substreams exceeded" ); self.check_max_pending_frames()?; - debug!("{}: Pending reset for new stream {}", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Pending reset for new substream" + ); self.pending_frames .push_front(Frame::Reset { stream_id: id }); return Ok(None); @@ -706,11 +737,11 @@ where }, ); - debug!( - "{}: New inbound substream: {} (total {})", - self.id, - id, - self.substreams.len() + tracing::debug!( + connection=%self.id, + substream=%id, + total_substreams=%self.substreams.len(), + "New inbound substream" ); Ok(Some(id)) @@ -721,23 +752,27 @@ where if let Some(state) = self.substreams.remove(&id) { match state { SubstreamState::Closed { .. } => { - trace!( - "{}: Ignoring reset for mutually closed substream {}.", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring reset for mutually closed substream" ); } SubstreamState::Reset { .. 
} => { - trace!( - "{}: Ignoring redundant reset for already reset substream {}", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring redundant reset for already reset substream" ); } SubstreamState::RecvClosed { buf } | SubstreamState::SendClosed { buf } | SubstreamState::Open { buf } => { - debug!("{}: Substream {} reset by remote.", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Substream reset by remote" + ); self.substreams.insert(id, SubstreamState::Reset { buf }); // Notify tasks interested in reading from that stream, // so they may read the EOF. @@ -745,10 +780,10 @@ where } } } else { - trace!( - "{}: Ignoring `Reset` for unknown substream {}. Possibly dropped earlier.", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring `Reset` for unknown substream, possibly dropped earlier" ); } } @@ -758,32 +793,36 @@ where if let Some(state) = self.substreams.remove(&id) { match state { SubstreamState::RecvClosed { .. } | SubstreamState::Closed { .. } => { - debug!( - "{}: Ignoring `Close` frame for closed substream {}", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Ignoring `Close` frame for closed substream" ); self.substreams.insert(id, state); } SubstreamState::Reset { buf } => { - debug!( - "{}: Ignoring `Close` frame for already reset substream {}", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Ignoring `Close` frame for already reset substream" ); self.substreams.insert(id, SubstreamState::Reset { buf }); } SubstreamState::SendClosed { buf } => { - debug!( - "{}: Substream {} closed by remote (SendClosed -> Closed).", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Substream closed by remote (SendClosed -> Closed)" ); self.substreams.insert(id, SubstreamState::Closed { buf }); // Notify tasks interested in reading, so they may read the EOF. 
self.notifier_read.wake_read_stream(id); } SubstreamState::Open { buf } => { - debug!( - "{}: Substream {} closed by remote (Open -> RecvClosed)", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Substream closed by remote (Open -> RecvClosed)" ); self.substreams .insert(id, SubstreamState::RecvClosed { buf }); @@ -792,10 +831,10 @@ where } } } else { - trace!( - "{}: Ignoring `Close` for unknown substream {}. Possibly dropped earlier.", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring `Close` for unknown substream, possibly dropped earlier." ); } } @@ -829,7 +868,11 @@ where /// Records a fatal error for the multiplexed I/O stream. fn on_error(&mut self, e: io::Error) -> io::Result { - debug!("{}: Multiplexed connection failed: {:?}", self.id, e); + tracing::debug!( + connection=%self.id, + "Multiplexed connection failed: {:?}", + e + ); self.status = Status::Err(io::Error::new(e.kind(), e.to_string())); self.pending_frames = Default::default(); self.substreams = Default::default(); @@ -869,48 +912,52 @@ where /// Fails the entire multiplexed stream if too many pending `Reset` /// frames accumulate when using [`MaxBufferBehaviour::ResetStream`]. 
fn buffer(&mut self, id: LocalStreamId, data: Bytes) -> io::Result<()> { - let state = if let Some(state) = self.substreams.get_mut(&id) { - state - } else { - trace!( - "{}: Dropping data {:?} for unknown substream {}", - self.id, - data, - id + let Some(state) = self.substreams.get_mut(&id) else { + tracing::trace!( + connection=%self.id, + substream=%id, + data=?data, + "Dropping data for unknown substream" ); return Ok(()); }; - let buf = if let Some(buf) = state.recv_buf_open() { - buf - } else { - trace!( - "{}: Dropping data {:?} for closed or reset substream {}", - self.id, - data, - id + let Some(buf) = state.recv_buf_open() else { + tracing::trace!( + connection=%self.id, + substream=%id, + data=?data, + "Dropping data for closed or reset substream", ); return Ok(()); }; debug_assert!(buf.len() <= self.config.max_buffer_len); - trace!( - "{}: Buffering {:?} for stream {} (total: {})", - self.id, - data, - id, - buf.len() + 1 + tracing::trace!( + connection=%self.id, + substream=%id, + data=?data, + data_buffer=%buf.len() + 1, + "Buffering data for substream" ); buf.push(data); self.notifier_read.wake_read_stream(id); if buf.len() > self.config.max_buffer_len { - debug!("{}: Frame buffer of stream {} is full.", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Frame buffer of substream is full" + ); match self.config.max_buffer_behaviour { MaxBufferBehaviour::ResetStream => { let buf = buf.clone(); self.check_max_pending_frames()?; self.substreams.insert(id, SubstreamState::Reset { buf }); - debug!("{}: Pending reset for stream {}", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Pending reset for stream" + ); self.pending_frames .push_front(Frame::Reset { stream_id: id }); } @@ -1179,7 +1226,10 @@ mod tests { #[test] fn max_buffer_behaviour() { - let _ = env_logger::try_init(); + use tracing_subscriber::EnvFilter; + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + 
.try_init(); fn prop(cfg: MplexConfig, overflow: NonZeroU8) { let mut r_buf = BytesMut::new(); @@ -1314,7 +1364,10 @@ mod tests { #[test] fn close_on_error() { - let _ = env_logger::try_init(); + use tracing_subscriber::EnvFilter; + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(cfg: MplexConfig, num_streams: NonZeroU8) { let num_streams = cmp::min(cfg.max_substreams, num_streams.get() as usize); diff --git a/muxers/mplex/src/lib.rs b/muxers/mplex/src/lib.rs index 81c5147af69d..c67e0e3baeca 100644 --- a/muxers/mplex/src/lib.rs +++ b/muxers/mplex/src/lib.rs @@ -32,7 +32,7 @@ use bytes::Bytes; use codec::LocalStreamId; use futures::{future, prelude::*, ready}; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; use parking_lot::Mutex; use std::{cmp, iter, pin::Pin, sync::Arc, task::Context, task::Poll}; @@ -45,7 +45,7 @@ impl UpgradeInfo for MplexConfig { } } -impl InboundUpgrade for MplexConfig +impl InboundConnectionUpgrade for MplexConfig where C: AsyncRead + AsyncWrite + Unpin, { @@ -61,7 +61,7 @@ where } } -impl OutboundUpgrade for MplexConfig +impl OutboundConnectionUpgrade for MplexConfig where C: AsyncRead + AsyncWrite + Unpin, { diff --git a/muxers/test-harness/Cargo.toml b/muxers/test-harness/Cargo.toml index 5304509b9cc8..7aad5f1985a7 100644 --- a/muxers/test-harness/Cargo.toml +++ b/muxers/test-harness/Cargo.toml @@ -5,14 +5,17 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] libp2p-core = { workspace = true } -futures = "0.3.28" -log = "0.4" +futures = "0.3.30" futures-timer = "3.0.2" futures_ringbuf = "0.4.0" +tracing = "0.1.37" [lints] workspace = true diff 
--git a/muxers/test-harness/src/lib.rs b/muxers/test-harness/src/lib.rs index 544e057c1085..16c71f414f0c 100644 --- a/muxers/test-harness/src/lib.rs +++ b/muxers/test-harness/src/lib.rs @@ -3,7 +3,8 @@ use futures::{future, AsyncRead, AsyncWrite}; use futures::{AsyncReadExt, Stream}; use futures::{AsyncWriteExt, StreamExt}; use libp2p_core::muxing::StreamMuxerExt; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, StreamMuxer, UpgradeInfo}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::{StreamMuxer, UpgradeInfo}; use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; @@ -12,15 +13,15 @@ use std::{fmt, mem}; pub async fn connected_muxers_on_memory_ring_buffer() -> (M, M) where - MC: InboundUpgrade - + OutboundUpgrade + MC: InboundConnectionUpgrade + + OutboundConnectionUpgrade + Send + 'static + Default, ::Info: Send, <::InfoIter as IntoIterator>::IntoIter: Send, - >::Future: Send, - >::Future: Send, + >::Future: Send, + >::Future: Send, E: std::error::Error + Send + Sync + 'static, { let (alice, bob) = futures_ringbuf::Endpoint::pair(100, 100); @@ -148,20 +149,20 @@ async fn run( loop { match futures::future::select(dialer.next(), listener.next()).await { Either::Left((Some(Event::SetupComplete), _)) => { - log::info!("Dialer opened outbound stream"); + tracing::info!("Dialer opened outbound stream"); } Either::Left((Some(Event::ProtocolComplete), _)) => { - log::info!("Dialer completed protocol"); + tracing::info!("Dialer completed protocol"); dialer_complete = true } Either::Left((Some(Event::Timeout), _)) => { panic!("Dialer protocol timed out"); } Either::Right((Some(Event::SetupComplete), _)) => { - log::info!("Listener received inbound stream"); + tracing::info!("Listener received inbound stream"); } Either::Right((Some(Event::ProtocolComplete), _)) => { - log::info!("Listener completed protocol"); + tracing::info!("Listener completed protocol"); listener_complete = true } 
Either::Right((Some(Event::Timeout), _)) => { diff --git a/muxers/yamux/CHANGELOG.md b/muxers/yamux/CHANGELOG.md index 92e9fbebc021..de608b195f86 100644 --- a/muxers/yamux/CHANGELOG.md +++ b/muxers/yamux/CHANGELOG.md @@ -1,3 +1,21 @@ +## 0.45.1 + +- Deprecate `WindowUpdateMode::on_receive`. + It does not enforce flow-control, i.e. breaks backpressure. + Use `WindowUpdateMode::on_read` instead. + See `yamux` crate version `v0.12.1` and [Yamux PR #177](https://github.com/libp2p/rust-yamux/pull/177). +- `yamux` `v0.13` enables auto-tuning for the Yamux stream receive window. + While preserving small buffers on low-latency and/or low-bandwidth connections, this change allows for high-latency and/or high-bandwidth connections to exhaust the available bandwidth on a single stream. + Have `libp2p-yamux` use `yamux` `v0.13` (new version) by default and fall back to `yamux` `v0.12` (old version) when setting any configuration options. + Thus default users benefit from the increased performance, while power users with custom configurations maintain the old behavior. + `libp2p-yamux` will switch over to `yamux` `v0.13` entirely with the next breaking release. + See [PR 4970](https://github.com/libp2p/rust-libp2p/pull/4970). + +## 0.45.0 + +- Migrate to `{In,Out}boundConnectionUpgrade` traits. + See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). + ## 0.44.1 - Update to `yamux` `v0.12` which brings performance improvements and introduces an ACK backlog of 256 inbound streams. 
diff --git a/muxers/yamux/Cargo.toml b/muxers/yamux/Cargo.toml index 3073c040ff30..14a5c0fe145b 100644 --- a/muxers/yamux/Cargo.toml +++ b/muxers/yamux/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-yamux" edition = "2021" rust-version = { workspace = true } description = "Yamux multiplexing protocol for libp2p" -version = "0.44.1" +version = "0.45.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,11 +11,13 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -futures = "0.3.28" +either = "1" +futures = "0.3.30" libp2p-core = { workspace = true } thiserror = "1.0" -yamux = "0.12" -log = "0.4" +yamux012 = { version = "0.12.1", package = "yamux" } +yamux013 = { version = "0.13.1", package = "yamux" } +tracing = "0.1.37" [dev-dependencies] async-std = { version = "1.7.0", features = ["attributes"] } diff --git a/muxers/yamux/src/lib.rs b/muxers/yamux/src/lib.rs index 12e5dd8c1fff..2b5eb52a11ec 100644 --- a/muxers/yamux/src/lib.rs +++ b/muxers/yamux/src/lib.rs @@ -22,9 +22,10 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use either::Either; use futures::{future, prelude::*, ready}; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; use std::collections::VecDeque; use std::io::{IoSlice, IoSliceMut}; use std::task::Waker; @@ -34,15 +35,14 @@ use std::{ task::{Context, Poll}, }; use thiserror::Error; -use yamux::ConnectionError; /// A Yamux connection. #[derive(Debug)] pub struct Muxer { - connection: yamux::Connection, + connection: Either, yamux013::Connection>, /// Temporarily buffers inbound streams in case our node is performing backpressure on the remote. 
/// - /// The only way how yamux can make progress is by calling [`yamux::Connection::poll_next_inbound`]. However, the + /// The only way how yamux can make progress is by calling [`yamux013::Connection::poll_next_inbound`]. However, the /// [`StreamMuxer`] interface is designed to allow a caller to selectively make progress via /// [`StreamMuxer::poll_inbound`] and [`StreamMuxer::poll_outbound`] whilst the more general /// [`StreamMuxer::poll`] is designed to make progress on existing streams etc. @@ -65,9 +65,9 @@ where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { /// Create a new Yamux connection. - fn new(io: C, cfg: yamux::Config, mode: yamux::Mode) -> Self { + fn new(connection: Either, yamux013::Connection>) -> Self { Muxer { - connection: yamux::Connection::new(io, cfg, mode), + connection, inbound_stream_buffer: VecDeque::default(), inbound_stream_waker: None, } @@ -81,6 +81,7 @@ where type Substream = Stream; type Error = Error; + #[tracing::instrument(level = "trace", name = "StreamMuxer::poll_inbound", skip(self, cx))] fn poll_inbound( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -97,21 +98,31 @@ where Poll::Pending } + #[tracing::instrument(level = "trace", name = "StreamMuxer::poll_outbound", skip(self, cx))] fn poll_outbound( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { - let stream = ready!(self.connection.poll_new_outbound(cx).map_err(Error)?); - - Poll::Ready(Ok(Stream(stream))) + let stream = match self.connection.as_mut() { + Either::Left(c) => ready!(c.poll_new_outbound(cx)) + .map_err(|e| Error(Either::Left(e))) + .map(|s| Stream(Either::Left(s))), + Either::Right(c) => ready!(c.poll_new_outbound(cx)) + .map_err(|e| Error(Either::Right(e))) + .map(|s| Stream(Either::Right(s))), + }?; + Poll::Ready(Ok(stream)) } + #[tracing::instrument(level = "trace", name = "StreamMuxer::poll_close", skip(self, cx))] fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - 
ready!(self.connection.poll_close(cx).map_err(Error)?); - - Poll::Ready(Ok(())) + match self.connection.as_mut() { + Either::Left(c) => c.poll_close(cx).map_err(|e| Error(Either::Left(e))), + Either::Right(c) => c.poll_close(cx).map_err(|e| Error(Either::Right(e))), + } } + #[tracing::instrument(level = "trace", name = "StreamMuxer::poll", skip(self, cx))] fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -121,7 +132,10 @@ where let inbound_stream = ready!(this.poll_inner(cx))?; if this.inbound_stream_buffer.len() >= MAX_BUFFERED_INBOUND_STREAMS { - log::warn!("dropping {} because buffer is full", inbound_stream.0); + tracing::warn!( + stream=%inbound_stream.0, + "dropping stream because buffer is full" + ); drop(inbound_stream); } else { this.inbound_stream_buffer.push_back(inbound_stream); @@ -139,7 +153,7 @@ where /// A stream produced by the yamux multiplexer. #[derive(Debug)] -pub struct Stream(yamux::Stream); +pub struct Stream(Either); impl AsyncRead for Stream { fn poll_read( @@ -147,7 +161,7 @@ impl AsyncRead for Stream { cx: &mut Context<'_>, buf: &mut [u8], ) -> Poll> { - Pin::new(&mut self.0).poll_read(cx, buf) + either::for_both!(self.0.as_mut(), s => Pin::new(s).poll_read(cx, buf)) } fn poll_read_vectored( @@ -155,7 +169,7 @@ impl AsyncRead for Stream { cx: &mut Context<'_>, bufs: &mut [IoSliceMut<'_>], ) -> Poll> { - Pin::new(&mut self.0).poll_read_vectored(cx, bufs) + either::for_both!(self.0.as_mut(), s => Pin::new(s).poll_read_vectored(cx, bufs)) } } @@ -165,7 +179,7 @@ impl AsyncWrite for Stream { cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { - Pin::new(&mut self.0).poll_write(cx, buf) + either::for_both!(self.0.as_mut(), s => Pin::new(s).poll_write(cx, buf)) } fn poll_write_vectored( @@ -173,15 +187,15 @@ impl AsyncWrite for Stream { cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll> { - Pin::new(&mut self.0).poll_write_vectored(cx, bufs) + either::for_both!(self.0.as_mut(), s => Pin::new(s).poll_write_vectored(cx, bufs)) } fn 
poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.0).poll_flush(cx) + either::for_both!(self.0.as_mut(), s => Pin::new(s).poll_flush(cx)) } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.0).poll_close(cx) + either::for_both!(self.0.as_mut(), s => Pin::new(s).poll_close(cx)) } } @@ -190,11 +204,16 @@ where C: AsyncRead + AsyncWrite + Unpin + 'static, { fn poll_inner(&mut self, cx: &mut Context<'_>) -> Poll> { - let stream = ready!(self.connection.poll_next_inbound(cx)) - .transpose() - .map_err(Error)? - .map(Stream) - .ok_or(Error(ConnectionError::Closed))?; + let stream = match self.connection.as_mut() { + Either::Left(c) => ready!(c.poll_next_inbound(cx)) + .ok_or(Error(Either::Left(yamux012::ConnectionError::Closed)))? + .map_err(|e| Error(Either::Left(e))) + .map(|s| Stream(Either::Left(s)))?, + Either::Right(c) => ready!(c.poll_next_inbound(cx)) + .ok_or(Error(Either::Right(yamux013::ConnectionError::Closed)))? + .map_err(|e| Error(Either::Right(e))) + .map(|s| Stream(Either::Right(s)))?, + }; Poll::Ready(Ok(stream)) } @@ -202,14 +221,33 @@ where /// The yamux configuration. #[derive(Debug, Clone)] -pub struct Config { - inner: yamux::Config, - mode: Option, +pub struct Config(Either); + +impl Default for Config { + fn default() -> Self { + Self(Either::Right(Config013::default())) + } +} + +#[derive(Debug, Clone)] +struct Config012 { + inner: yamux012::Config, + mode: Option, +} + +impl Default for Config012 { + fn default() -> Self { + let mut inner = yamux012::Config::default(); + // For conformity with mplex, read-after-close on a multiplexed + // connection is never permitted and not configurable. + inner.set_read_after_close(false); + Self { inner, mode: None } + } } /// The window update mode determines when window updates are /// sent to the remote, giving it new credit to send more data. 
-pub struct WindowUpdateMode(yamux::WindowUpdateMode); +pub struct WindowUpdateMode(yamux012::WindowUpdateMode); impl WindowUpdateMode { /// The window update mode whereby the remote is given @@ -224,8 +262,10 @@ impl WindowUpdateMode { /// > size must be tuned appropriately for the desired /// > throughput and level of tolerance for (temporarily) /// > slow receivers. + #[deprecated(note = "Use `WindowUpdateMode::on_read` instead.")] pub fn on_receive() -> Self { - WindowUpdateMode(yamux::WindowUpdateMode::OnReceive) + #[allow(deprecated)] + WindowUpdateMode(yamux012::WindowUpdateMode::OnReceive) } /// The window update mode whereby the remote is given new @@ -243,62 +283,71 @@ impl WindowUpdateMode { /// > **Note**: With this strategy, there is usually no point in the /// > receive buffer being larger than the window size. pub fn on_read() -> Self { - WindowUpdateMode(yamux::WindowUpdateMode::OnRead) + WindowUpdateMode(yamux012::WindowUpdateMode::OnRead) } } impl Config { /// Creates a new `YamuxConfig` in client mode, regardless of whether /// it will be used for an inbound or outbound upgrade. + #[deprecated(note = "Will be removed with the next breaking release.")] pub fn client() -> Self { - Self { - mode: Some(yamux::Mode::Client), + Self(Either::Left(Config012 { + mode: Some(yamux012::Mode::Client), ..Default::default() - } + })) } /// Creates a new `YamuxConfig` in server mode, regardless of whether /// it will be used for an inbound or outbound upgrade. + #[deprecated(note = "Will be removed with the next breaking release.")] pub fn server() -> Self { - Self { - mode: Some(yamux::Mode::Server), + Self(Either::Left(Config012 { + mode: Some(yamux012::Mode::Server), ..Default::default() - } + })) } /// Sets the size (in bytes) of the receive window per substream. + #[deprecated( + note = "Will be replaced in the next breaking release with a connection receive window size limit." 
+ )] pub fn set_receive_window_size(&mut self, num_bytes: u32) -> &mut Self { - self.inner.set_receive_window(num_bytes); - self + self.set(|cfg| cfg.set_receive_window(num_bytes)) } /// Sets the maximum size (in bytes) of the receive buffer per substream. + #[deprecated(note = "Will be removed with the next breaking release.")] pub fn set_max_buffer_size(&mut self, num_bytes: usize) -> &mut Self { - self.inner.set_max_buffer_size(num_bytes); - self + self.set(|cfg| cfg.set_max_buffer_size(num_bytes)) } /// Sets the maximum number of concurrent substreams. pub fn set_max_num_streams(&mut self, num_streams: usize) -> &mut Self { - self.inner.set_max_num_streams(num_streams); - self + self.set(|cfg| cfg.set_max_num_streams(num_streams)) } /// Sets the window update mode that determines when the remote /// is given new credit for sending more data. + #[deprecated( + note = "`WindowUpdate::OnRead` is the default. `WindowUpdate::OnReceive` breaks backpressure, is thus not recommended, and will be removed in the next breaking release. Thus this method becomes obsolete and will be removed with the next breaking release." + )] pub fn set_window_update_mode(&mut self, mode: WindowUpdateMode) -> &mut Self { - self.inner.set_window_update_mode(mode.0); - self + self.set(|cfg| cfg.set_window_update_mode(mode.0)) } -} -impl Default for Config { - fn default() -> Self { - let mut inner = yamux::Config::default(); - // For conformity with mplex, read-after-close on a multiplexed - // connection is never permitted and not configurable. 
- inner.set_read_after_close(false); - Config { inner, mode: None } + fn set(&mut self, f: impl FnOnce(&mut yamux012::Config) -> &mut yamux012::Config) -> &mut Self { + let cfg012 = match self.0.as_mut() { + Either::Left(c) => &mut c.inner, + Either::Right(_) => { + self.0 = Either::Left(Config012::default()); + &mut self.0.as_mut().unwrap_left().inner + } + }; + + f(cfg012); + + self } } @@ -311,7 +360,7 @@ impl UpgradeInfo for Config { } } -impl InboundUpgrade for Config +impl InboundConnectionUpgrade for Config where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { @@ -320,12 +369,22 @@ where type Future = future::Ready>; fn upgrade_inbound(self, io: C, _: Self::Info) -> Self::Future { - let mode = self.mode.unwrap_or(yamux::Mode::Server); - future::ready(Ok(Muxer::new(io, self.inner, mode))) + let connection = match self.0 { + Either::Left(Config012 { inner, mode }) => Either::Left(yamux012::Connection::new( + io, + inner, + mode.unwrap_or(yamux012::Mode::Server), + )), + Either::Right(Config013(cfg)) => { + Either::Right(yamux013::Connection::new(io, cfg, yamux013::Mode::Server)) + } + }; + + future::ready(Ok(Muxer::new(connection))) } } -impl OutboundUpgrade for Config +impl OutboundConnectionUpgrade for Config where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { @@ -334,21 +393,69 @@ where type Future = future::Ready>; fn upgrade_outbound(self, io: C, _: Self::Info) -> Self::Future { - let mode = self.mode.unwrap_or(yamux::Mode::Client); - future::ready(Ok(Muxer::new(io, self.inner, mode))) + let connection = match self.0 { + Either::Left(Config012 { inner, mode }) => Either::Left(yamux012::Connection::new( + io, + inner, + mode.unwrap_or(yamux012::Mode::Client), + )), + Either::Right(Config013(cfg)) => { + Either::Right(yamux013::Connection::new(io, cfg, yamux013::Mode::Client)) + } + }; + + future::ready(Ok(Muxer::new(connection))) + } +} + +#[derive(Debug, Clone)] +struct Config013(yamux013::Config); + +impl Default for Config013 { + fn 
default() -> Self { + let mut cfg = yamux013::Config::default(); + // For conformity with mplex, read-after-close on a multiplexed + // connection is never permitted and not configurable. + cfg.set_read_after_close(false); + Self(cfg) } } /// The Yamux [`StreamMuxer`] error type. #[derive(Debug, Error)] #[error(transparent)] -pub struct Error(yamux::ConnectionError); +pub struct Error(Either); impl From for io::Error { fn from(err: Error) -> Self { match err.0 { - yamux::ConnectionError::Io(e) => e, - e => io::Error::new(io::ErrorKind::Other, e), + Either::Left(err) => match err { + yamux012::ConnectionError::Io(e) => e, + e => io::Error::new(io::ErrorKind::Other, e), + }, + Either::Right(err) => match err { + yamux013::ConnectionError::Io(e) => e, + e => io::Error::new(io::ErrorKind::Other, e), + }, } } } + +#[cfg(test)] +mod test { + use super::*; + #[test] + fn config_set_switches_to_v012() { + // By default we use yamux v0.13. Thus we provide the benefits of yamux v0.13 to all users + // that do not depend on any of the behaviors (i.e. configuration options) of v0.12. + let mut cfg = Config::default(); + assert!(matches!( + cfg, + Config(Either::Right(Config013(yamux013::Config { .. }))) + )); + + // In case a user makes any configurations, use yamux v0.12 instead. + cfg.set_max_num_streams(42); + assert!(matches!(cfg, Config(Either::Left(Config012 { .. })))); + } +} diff --git a/protocols/autonat/CHANGELOG.md b/protocols/autonat/CHANGELOG.md index 2c7e0b211313..1259dd01fd41 100644 --- a/protocols/autonat/CHANGELOG.md +++ b/protocols/autonat/CHANGELOG.md @@ -1,4 +1,10 @@ -## 0.11.0 +## 0.12.0 + +- Remove `Clone`, `PartialEq` and `Eq` implementations on `Event` and its sub-structs. + The `Event` also contains errors which are not clonable or comparable. + See [PR 3914](https://github.com/libp2p/rust-libp2p/pull/3914). + +## 0.11.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/protocols/autonat/Cargo.toml b/protocols/autonat/Cargo.toml index 3b81d62b516b..fce64ad0c12a 100644 --- a/protocols/autonat/Cargo.toml +++ b/protocols/autonat/Cargo.toml @@ -3,8 +3,8 @@ name = "libp2p-autonat" edition = "2021" rust-version = { workspace = true } description = "NAT and firewall detection for libp2p" -version = "0.11.0" authors = ["David Craven ", "Elena Frank "] +version = "0.12.0" license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] @@ -19,14 +19,16 @@ libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-request-response = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" -rand = "0.8" quick-protobuf = "0.8" +rand = "0.8" +tracing = "0.1.37" +quick-protobuf-codec = { workspace = true } +asynchronous-codec = { workspace = true } [dev-dependencies] async-std = { version = "1.10", features = ["attributes"] } -env_logger = "0.10" libp2p-swarm-test = { path = "../../swarm-test" } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/autonat/src/behaviour.rs b/protocols/autonat/src/behaviour.rs index 439543f8318d..a770e61e88af 100644 --- a/protocols/autonat/src/behaviour.rs +++ b/protocols/autonat/src/behaviour.rs @@ -32,15 +32,12 @@ use instant::Instant; use libp2p_core::{multiaddr::Protocol, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_request_response::{ - self as request_response, ProtocolSupport, RequestId, ResponseChannel, + self as request_response, InboundRequestId, OutboundRequestId, ProtocolSupport, ResponseChannel, }; use libp2p_swarm::{ - behaviour::{ - AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, ExpiredListenAddr, - ExternalAddrExpired, FromSwarm, - }, - ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, NewExternalAddrCandidate, - PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}, + ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, }; use std::{ collections::{HashMap, HashSet, VecDeque}, @@ -133,7 +130,7 @@ impl ProbeId { } /// Event produced by [`Behaviour`]. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub enum Event { /// Event on an inbound probe. InboundProbe(InboundProbeEvent), @@ -187,14 +184,14 @@ pub struct Behaviour { PeerId, ( ProbeId, - RequestId, + InboundRequestId, Vec, ResponseChannel, ), >, // Ongoing outbound probes and mapped to the inner request id. - ongoing_outbound: HashMap, + ongoing_outbound: HashMap, // Connected peers with the observed address of each connection. 
// If the endpoint of a connection is relayed or not global (in case of Config::only_global_ips), @@ -220,9 +217,11 @@ pub struct Behaviour { impl Behaviour { pub fn new(local_peer_id: PeerId, config: Config) -> Self { let protocols = iter::once((DEFAULT_PROTOCOL_NAME, ProtocolSupport::Full)); - let mut cfg = request_response::Config::default(); - cfg.set_request_timeout(config.timeout); - let inner = request_response::Behaviour::with_codec(AutoNatCodec, protocols, cfg); + let inner = request_response::Behaviour::with_codec( + AutoNatCodec, + protocols, + request_response::Config::default().with_request_timeout(config.timeout), + ); Self { local_peer_id, inner, @@ -269,6 +268,7 @@ impl Behaviour { pub fn add_server(&mut self, peer: PeerId, address: Option) { self.servers.insert(peer); if let Some(addr) = address { + #[allow(deprecated)] self.inner.add_address(&peer, addr); } } @@ -361,20 +361,10 @@ impl Behaviour { ConnectionClosed { peer_id, connection_id, - endpoint, - handler, remaining_established, - }: ConnectionClosed<::ConnectionHandler>, + .. + }: ConnectionClosed, ) { - self.inner - .on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { - peer_id, - connection_id, - endpoint, - handler, - remaining_established, - })); - if remaining_established == 0 { self.connected.remove(&peer_id); } else { @@ -386,20 +376,7 @@ impl Behaviour { } } - fn on_dial_failure( - &mut self, - DialFailure { - peer_id, - connection_id, - error, - }: DialFailure, - ) { - self.inner - .on_swarm_event(FromSwarm::DialFailure(DialFailure { - peer_id, - connection_id, - error, - })); + fn on_dial_failure(&mut self, DialFailure { peer_id, error, .. 
}: DialFailure) { if let Some(event) = self.as_server().on_outbound_dial_error(peer_id, error) { self.pending_actions .push_back(ToSwarm::GenerateEvent(Event::InboundProbe(event))); @@ -435,13 +412,17 @@ impl NetworkBehaviour for Behaviour { as NetworkBehaviour>::ConnectionHandler; type ToSwarm = Event; - fn poll(&mut self, cx: &mut Context<'_>, params: &mut impl PollParameters) -> Poll { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll>> { loop { if let Some(event) = self.pending_actions.pop_front() { return Poll::Ready(event); } - match self.inner.poll(cx, params) { + match self.inner.poll(cx) { Poll::Ready(ToSwarm::GenerateEvent(event)) => { let actions = match event { request_response::Event::Message { @@ -449,14 +430,14 @@ impl NetworkBehaviour for Behaviour { .. } | request_response::Event::OutboundFailure { .. } => { - self.as_client().handle_event(params, event) + self.as_client().handle_event(event) } request_response::Event::Message { message: request_response::Message::Request { .. }, .. } | request_response::Event::InboundFailure { .. } => { - self.as_server().handle_event(params, event) + self.as_server().handle_event(event) } request_response::Event::ResponseSent { .. 
} => VecDeque::new(), }; @@ -536,59 +517,28 @@ impl NetworkBehaviour for Behaviour { .handle_established_outbound_connection(connection_id, peer, addr, role_override) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { self.listen_addresses.on_swarm_event(&event); + self.inner.on_swarm_event(event); match event { - FromSwarm::ConnectionEstablished(connection_established) => { - self.inner - .on_swarm_event(FromSwarm::ConnectionEstablished(connection_established)); - self.on_connection_established(connection_established) - } - FromSwarm::ConnectionClosed(connection_closed) => { - self.on_connection_closed(connection_closed) - } - FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure), - FromSwarm::AddressChange(address_change) => { - self.inner - .on_swarm_event(FromSwarm::AddressChange(address_change)); - self.on_address_change(address_change) - } - listen_addr @ FromSwarm::NewListenAddr(_) => { - self.inner.on_swarm_event(listen_addr); + FromSwarm::ConnectionEstablished(e) => self.on_connection_established(e), + FromSwarm::ConnectionClosed(e) => self.on_connection_closed(e), + FromSwarm::DialFailure(e) => self.on_dial_failure(e), + FromSwarm::AddressChange(e) => self.on_address_change(e), + FromSwarm::NewListenAddr(_) => { self.as_client().on_new_address(); } - FromSwarm::ExpiredListenAddr(ExpiredListenAddr { listener_id, addr }) => { - self.inner - .on_swarm_event(FromSwarm::ExpiredListenAddr(ExpiredListenAddr { - listener_id, - addr, - })); - self.as_client().on_expired_address(addr); - } - FromSwarm::ExternalAddrExpired(ExternalAddrExpired { addr }) => { - self.inner - .on_swarm_event(FromSwarm::ExternalAddrExpired(ExternalAddrExpired { addr })); - self.as_client().on_expired_address(addr); - } - FromSwarm::NewExternalAddrCandidate(NewExternalAddrCandidate { addr }) => { - self.inner - .on_swarm_event(FromSwarm::NewExternalAddrCandidate( - NewExternalAddrCandidate { addr }, - )); - 
self.probe_address(addr.to_owned()); - } - listen_failure @ FromSwarm::ListenFailure(_) => { - self.inner.on_swarm_event(listen_failure) + FromSwarm::ExpiredListenAddr(e) => { + self.as_client().on_expired_address(e.addr); } - new_listener @ FromSwarm::NewListener(_) => self.inner.on_swarm_event(new_listener), - listener_error @ FromSwarm::ListenerError(_) => { - self.inner.on_swarm_event(listener_error) + FromSwarm::ExternalAddrExpired(e) => { + self.as_client().on_expired_address(e.addr); } - listener_closed @ FromSwarm::ListenerClosed(_) => { - self.inner.on_swarm_event(listener_closed) + FromSwarm::NewExternalAddrCandidate(e) => { + self.probe_address(e.addr.to_owned()); } - confirmed @ FromSwarm::ExternalAddrConfirmed(_) => self.inner.on_swarm_event(confirmed), + _ => {} } } @@ -609,7 +559,6 @@ type Action = ToSwarm<::ToSwarm, THandlerInEvent< trait HandleInnerEvent { fn handle_event( &mut self, - params: &mut impl PollParameters, event: request_response::Event, ) -> VecDeque; } diff --git a/protocols/autonat/src/behaviour/as_client.rs b/protocols/autonat/src/behaviour/as_client.rs index e57523afaf8a..668f3b93719c 100644 --- a/protocols/autonat/src/behaviour/as_client.rs +++ b/protocols/autonat/src/behaviour/as_client.rs @@ -29,8 +29,8 @@ use futures_timer::Delay; use instant::Instant; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; -use libp2p_request_response::{self as request_response, OutboundFailure, RequestId}; -use libp2p_swarm::{ConnectionId, ListenAddresses, PollParameters, ToSwarm}; +use libp2p_request_response::{self as request_response, OutboundFailure, OutboundRequestId}; +use libp2p_swarm::{ConnectionId, ListenAddresses, ToSwarm}; use rand::{seq::SliceRandom, thread_rng}; use std::{ collections::{HashMap, HashSet, VecDeque}, @@ -39,7 +39,7 @@ use std::{ }; /// Outbound probe failed or was aborted. 
-#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub enum OutboundProbeError { /// Probe was aborted because no server is known, or all servers /// are throttled through [`Config::throttle_server_period`]. @@ -53,7 +53,7 @@ pub enum OutboundProbeError { Response(ResponseError), } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub enum OutboundProbeEvent { /// A dial-back request was sent to a remote peer. Request { @@ -91,7 +91,7 @@ pub(crate) struct AsClient<'a> { pub(crate) throttled_servers: &'a mut Vec<(PeerId, Instant)>, pub(crate) nat_status: &'a mut NatStatus, pub(crate) confidence: &'a mut usize, - pub(crate) ongoing_outbound: &'a mut HashMap, + pub(crate) ongoing_outbound: &'a mut HashMap, pub(crate) last_probe: &'a mut Option, pub(crate) schedule_probe: &'a mut Delay, pub(crate) listen_addresses: &'a ListenAddresses, @@ -101,7 +101,6 @@ pub(crate) struct AsClient<'a> { impl<'a> HandleInnerEvent for AsClient<'a> { fn handle_event( &mut self, - _: &mut impl PollParameters, event: request_response::Event, ) -> VecDeque { match event { @@ -113,12 +112,12 @@ impl<'a> HandleInnerEvent for AsClient<'a> { response, }, } => { - log::debug!("Outbound dial-back request returned {:?}.", response); + tracing::debug!(?response, "Outbound dial-back request returned response"); let probe_id = self .ongoing_outbound .remove(&request_id) - .expect("RequestId exists."); + .expect("OutboundRequestId exists."); let event = match response.result.clone() { Ok(address) => OutboundProbeEvent::Response { @@ -155,10 +154,10 @@ impl<'a> HandleInnerEvent for AsClient<'a> { error, request_id, } => { - log::debug!( - "Outbound Failure {} when on dial-back request to peer {}.", + tracing::debug!( + %peer, + "Outbound Failure {} when on dial-back request to peer.", error, - peer ); let probe_id = self .ongoing_outbound @@ -276,16 +275,12 @@ impl<'a> AsClient<'a> { ) -> Result { let _ = self.last_probe.insert(Instant::now()); if addresses.is_empty() { - 
log::debug!("Outbound dial-back request aborted: No dial-back addresses."); + tracing::debug!("Outbound dial-back request aborted: No dial-back addresses"); return Err(OutboundProbeError::NoAddresses); } - let server = match self.random_server() { - Some(s) => s, - None => { - log::debug!("Outbound dial-back request aborted: No qualified server."); - return Err(OutboundProbeError::NoServer); - } - }; + + let server = self.random_server().ok_or(OutboundProbeError::NoServer)?; + let request_id = self.inner.send_request( &server, DialRequest { @@ -294,7 +289,7 @@ impl<'a> AsClient<'a> { }, ); self.throttled_servers.push((server, Instant::now())); - log::debug!("Send dial-back request to peer {}.", server); + tracing::debug!(peer=%server, "Send dial-back request to peer"); self.ongoing_outbound.insert(request_id, probe_id); Ok(server) } @@ -302,11 +297,8 @@ impl<'a> AsClient<'a> { // Set the delay to the next probe based on the time of our last probe // and the specified delay. fn schedule_next_probe(&mut self, delay: Duration) { - let last_probe_instant = match self.last_probe { - Some(instant) => instant, - None => { - return; - } + let Some(last_probe_instant) = self.last_probe else { + return; }; let schedule_next = *last_probe_instant + delay; self.schedule_probe @@ -345,10 +337,10 @@ impl<'a> AsClient<'a> { return None; } - log::debug!( - "Flipped assumed NAT status from {:?} to {:?}", - self.nat_status, - reported_status + tracing::debug!( + old_status=?self.nat_status, + new_status=?reported_status, + "Flipped assumed NAT status" ); let old_status = self.nat_status.clone(); diff --git a/protocols/autonat/src/behaviour/as_server.rs b/protocols/autonat/src/behaviour/as_server.rs index 09c70a27e937..878fd713ddaf 100644 --- a/protocols/autonat/src/behaviour/as_server.rs +++ b/protocols/autonat/src/behaviour/as_server.rs @@ -26,11 +26,11 @@ use instant::Instant; use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_identity::PeerId; use 
libp2p_request_response::{ - self as request_response, InboundFailure, RequestId, ResponseChannel, + self as request_response, InboundFailure, InboundRequestId, ResponseChannel, }; use libp2p_swarm::{ dial_opts::{DialOpts, PeerCondition}, - ConnectionId, DialError, PollParameters, ToSwarm, + ConnectionId, DialError, ToSwarm, }; use std::{ collections::{HashMap, HashSet, VecDeque}, @@ -38,7 +38,7 @@ use std::{ }; /// Inbound probe failed. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub enum InboundProbeError { /// Receiving the dial-back request or sending a response failed. InboundRequest(InboundFailure), @@ -46,7 +46,7 @@ pub enum InboundProbeError { Response(ResponseError), } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub enum InboundProbeEvent { /// A dial-back request was received from a remote peer. Request { @@ -85,7 +85,7 @@ pub(crate) struct AsServer<'a> { PeerId, ( ProbeId, - RequestId, + InboundRequestId, Vec, ResponseChannel, ), @@ -95,7 +95,6 @@ pub(crate) struct AsServer<'a> { impl<'a> HandleInnerEvent for AsServer<'a> { fn handle_event( &mut self, - _params: &mut impl PollParameters, event: request_response::Event, ) -> VecDeque { match event { @@ -111,9 +110,9 @@ impl<'a> HandleInnerEvent for AsServer<'a> { let probe_id = self.probe_id.next(); match self.resolve_inbound_request(peer, request) { Ok(addrs) => { - log::debug!( - "Inbound dial request from Peer {} with dial-back addresses {:?}.", - peer, + tracing::debug!( + %peer, + "Inbound dial request from peer with dial-back addresses {:?}", addrs ); @@ -141,10 +140,10 @@ impl<'a> HandleInnerEvent for AsServer<'a> { ]) } Err((status_text, error)) => { - log::debug!( - "Reject inbound dial request from peer {}: {}.", - peer, - status_text + tracing::debug!( + %peer, + status=%status_text, + "Reject inbound dial request from peer" ); let response = DialResponse { @@ -168,10 +167,10 @@ impl<'a> HandleInnerEvent for AsServer<'a> { error, request_id, } => { - log::debug!( - 
"Inbound Failure {} when on dial-back request from peer {}.", - error, - peer + tracing::debug!( + %peer, + "Inbound Failure {} when on dial-back request from peer", + error ); let probe_id = match self.ongoing_inbound.get(&peer) { @@ -207,10 +206,10 @@ impl<'a> AsServer<'a> { return None; } - log::debug!( - "Dial-back to peer {} succeeded at addr {:?}.", - peer, - address + tracing::debug!( + %peer, + %address, + "Dial-back to peer succeeded" ); let (probe_id, _, _, channel) = self.ongoing_inbound.remove(peer).unwrap(); @@ -233,11 +232,19 @@ impl<'a> AsServer<'a> { error: &DialError, ) -> Option { let (probe_id, _, _, channel) = peer.and_then(|p| self.ongoing_inbound.remove(&p))?; - log::debug!( - "Dial-back to peer {} failed with error {:?}.", - peer.unwrap(), - error - ); + + match peer { + Some(p) => tracing::debug!( + peer=%p, + "Dial-back to peer failed with error {:?}", + error + ), + None => tracing::debug!( + "Dial-back to non existent peer failed with error {:?}", + error + ), + }; + let response_error = ResponseError::DialError; let response = DialResponse { result: Err(response_error.clone()), @@ -319,13 +326,13 @@ impl<'a> AsServer<'a> { demanded: Vec, observed_remote_at: &Multiaddr, ) -> Vec { - let observed_ip = match observed_remote_at + let Some(observed_ip) = observed_remote_at .into_iter() .find(|p| matches!(p, Protocol::Ip4(_) | Protocol::Ip6(_))) - { - Some(ip) => ip, - None => return Vec::new(), + else { + return Vec::new(); }; + let mut distinct = HashSet::new(); demanded .into_iter() diff --git a/protocols/autonat/src/protocol.rs b/protocols/autonat/src/protocol.rs index a63fd8cdf4dc..c18620584002 100644 --- a/protocols/autonat/src/protocol.rs +++ b/protocols/autonat/src/protocol.rs @@ -20,12 +20,13 @@ use crate::proto; use async_trait::async_trait; -use futures::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; -use libp2p_core::{upgrade, Multiaddr}; +use asynchronous_codec::{FramedRead, FramedWrite}; +use futures::io::{AsyncRead, AsyncWrite}; 
+use futures::{SinkExt, StreamExt}; +use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_request_response::{self as request_response}; use libp2p_swarm::StreamProtocol; -use quick_protobuf::{BytesReader, Writer}; use std::{convert::TryFrom, io}; /// The protocol name used for negotiating with multistream-select. @@ -44,8 +45,12 @@ impl request_response::Codec for AutoNatCodec { where T: AsyncRead + Send + Unpin, { - let bytes = upgrade::read_length_prefixed(io, 1024).await?; - let request = DialRequest::from_bytes(&bytes)?; + let message = FramedRead::new(io, codec()) + .next() + .await + .ok_or(io::ErrorKind::UnexpectedEof)??; + let request = DialRequest::from_proto(message)?; + Ok(request) } @@ -57,8 +62,12 @@ impl request_response::Codec for AutoNatCodec { where T: AsyncRead + Send + Unpin, { - let bytes = upgrade::read_length_prefixed(io, 1024).await?; - let response = DialResponse::from_bytes(&bytes)?; + let message = FramedRead::new(io, codec()) + .next() + .await + .ok_or(io::ErrorKind::UnexpectedEof)??; + let response = DialResponse::from_proto(message)?; + Ok(response) } @@ -71,8 +80,11 @@ impl request_response::Codec for AutoNatCodec { where T: AsyncWrite + Send + Unpin, { - upgrade::write_length_prefixed(io, data.into_bytes()).await?; - io.close().await + let mut framed = FramedWrite::new(io, codec()); + framed.send(data.into_proto()).await?; + framed.close().await?; + + Ok(()) } async fn write_response( @@ -84,11 +96,18 @@ impl request_response::Codec for AutoNatCodec { where T: AsyncWrite + Send + Unpin, { - upgrade::write_length_prefixed(io, data.into_bytes()).await?; - io.close().await + let mut framed = FramedWrite::new(io, codec()); + framed.send(data.into_proto()).await?; + framed.close().await?; + + Ok(()) } } +fn codec() -> quick_protobuf_codec::Codec { + quick_protobuf_codec::Codec::::new(1024) +} + #[derive(Clone, Debug, Eq, PartialEq)] pub struct DialRequest { pub peer_id: PeerId, @@ -96,31 +115,22 @@ pub struct DialRequest { 
} impl DialRequest { - pub fn from_bytes(bytes: &[u8]) -> Result { - use quick_protobuf::MessageRead; - - let mut reader = BytesReader::from_bytes(bytes); - let msg = proto::Message::from_reader(&mut reader, bytes) - .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; + pub fn from_proto(msg: proto::Message) -> Result { if msg.type_pb != Some(proto::MessageType::DIAL) { return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid type")); } - let (peer_id, addrs) = if let Some(proto::Dial { - peer: - Some(proto::PeerInfo { - id: Some(peer_id), - addrs, - }), - }) = msg.dial - { - (peer_id, addrs) - } else { - log::debug!("Received malformed dial message."); - return Err(io::Error::new( - io::ErrorKind::InvalidData, - "invalid dial message", - )); - }; + + let peer_id_result = msg.dial.and_then(|dial| { + dial.peer.and_then(|peer_info| { + let Some(peer_id) = peer_info.id else { + return None; + }; + Some((peer_id, peer_info.addrs)) + }) + }); + + let (peer_id, addrs) = peer_id_result + .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "invalid dial message"))?; let peer_id = { PeerId::try_from(peer_id.to_vec()) @@ -132,7 +142,7 @@ impl DialRequest { .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { Ok(a) => Some(a), Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); + tracing::debug!("Unable to parse multiaddr: {e}"); None } }) @@ -143,9 +153,7 @@ impl DialRequest { }) } - pub fn into_bytes(self) -> Vec { - use quick_protobuf::MessageWrite; - + pub fn into_proto(self) -> proto::Message { let peer_id = self.peer_id.to_bytes(); let addrs = self .addresses @@ -153,7 +161,7 @@ impl DialRequest { .map(|addr| addr.to_vec()) .collect(); - let msg = proto::Message { + proto::Message { type_pb: Some(proto::MessageType::DIAL), dial: Some(proto::Dial { peer: Some(proto::PeerInfo { @@ -162,12 +170,7 @@ impl DialRequest { }), }), dialResponse: None, - }; - - let mut buf = Vec::with_capacity(msg.get_size()); - let mut writer = 
Writer::new(&mut buf); - msg.write_message(&mut writer).expect("Encoding to succeed"); - buf + } } } @@ -200,7 +203,7 @@ impl TryFrom for ResponseError { proto::ResponseStatus::E_BAD_REQUEST => Ok(ResponseError::BadRequest), proto::ResponseStatus::E_INTERNAL_ERROR => Ok(ResponseError::InternalError), proto::ResponseStatus::OK => { - log::debug!("Received response with status code OK but expected error."); + tracing::debug!("Received response with status code OK but expected error"); Err(io::Error::new( io::ErrorKind::InvalidData, "invalid response error type", @@ -217,12 +220,7 @@ pub struct DialResponse { } impl DialResponse { - pub fn from_bytes(bytes: &[u8]) -> Result { - use quick_protobuf::MessageRead; - - let mut reader = BytesReader::from_bytes(bytes); - let msg = proto::Message::from_reader(&mut reader, bytes) - .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; + pub fn from_proto(msg: proto::Message) -> Result { if msg.type_pb != Some(proto::MessageType::DIAL_RESPONSE) { return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid type")); } @@ -249,7 +247,7 @@ impl DialResponse { result: Err(ResponseError::try_from(status)?), }, _ => { - log::debug!("Received malformed response message."); + tracing::debug!("Received malformed response message"); return Err(io::Error::new( io::ErrorKind::InvalidData, "invalid dial response message", @@ -258,9 +256,7 @@ impl DialResponse { }) } - pub fn into_bytes(self) -> Vec { - use quick_protobuf::MessageWrite; - + pub fn into_proto(self) -> proto::Message { let dial_response = match self.result { Ok(addr) => proto::DialResponse { status: Some(proto::ResponseStatus::OK), @@ -274,23 +270,17 @@ impl DialResponse { }, }; - let msg = proto::Message { + proto::Message { type_pb: Some(proto::MessageType::DIAL_RESPONSE), dial: None, dialResponse: Some(dial_response), - }; - - let mut buf = Vec::with_capacity(msg.get_size()); - let mut writer = Writer::new(&mut buf); - msg.write_message(&mut 
writer).expect("Encoding to succeed"); - buf + } } } #[cfg(test)] mod tests { use super::*; - use quick_protobuf::MessageWrite; #[test] fn test_request_encode_decode() { @@ -301,8 +291,8 @@ mod tests { "/ip4/192.168.1.42/tcp/30333".parse().unwrap(), ], }; - let bytes = request.clone().into_bytes(); - let request2 = DialRequest::from_bytes(&bytes).unwrap(); + let proto = request.clone().into_proto(); + let request2 = DialRequest::from_proto(proto).unwrap(); assert_eq!(request, request2); } @@ -312,8 +302,8 @@ mod tests { result: Ok("/ip4/8.8.8.8/tcp/30333".parse().unwrap()), status_text: None, }; - let bytes = response.clone().into_bytes(); - let response2 = DialResponse::from_bytes(&bytes).unwrap(); + let proto = response.clone().into_proto(); + let response2 = DialResponse::from_proto(proto).unwrap(); assert_eq!(response, response2); } @@ -323,8 +313,8 @@ mod tests { result: Err(ResponseError::DialError), status_text: Some("dial failed".to_string()), }; - let bytes = response.clone().into_bytes(); - let response2 = DialResponse::from_bytes(&bytes).unwrap(); + let proto = response.clone().into_proto(); + let response2 = DialResponse::from_proto(proto).unwrap(); assert_eq!(response, response2); } @@ -350,11 +340,7 @@ mod tests { dialResponse: None, }; - let mut bytes = Vec::with_capacity(msg.get_size()); - let mut writer = Writer::new(&mut bytes); - msg.write_message(&mut writer).expect("Encoding to succeed"); - - let request = DialRequest::from_bytes(&bytes).expect("not to fail"); + let request = DialRequest::from_proto(msg).expect("not to fail"); assert_eq!(request.addresses, vec![valid_multiaddr]) } diff --git a/protocols/autonat/tests/test_client.rs b/protocols/autonat/tests/test_client.rs index 1911d1a6b2d2..7509d3ef425e 100644 --- a/protocols/autonat/tests/test_client.rs +++ b/protocols/autonat/tests/test_client.rs @@ -61,7 +61,7 @@ async fn test_auto_probe() { match client.next_behaviour_event().await { Event::OutboundProbe(OutboundProbeEvent::Error { peer, 
error, .. }) => { assert!(peer.is_none()); - assert_eq!(error, OutboundProbeError::NoAddresses); + assert!(matches!(error, OutboundProbeError::NoAddresses)); } other => panic!("Unexpected behaviour event: {other:?}."), } @@ -155,7 +155,7 @@ async fn test_confidence() { // Randomly test either for public or for private status the confidence. let test_public = rand::random::(); if test_public { - client.listen().await; + client.listen().with_memory_addr_external().await; } else { let unreachable_addr = "/ip4/127.0.0.1/tcp/42".parse().unwrap(); client.behaviour_mut().probe_address(unreachable_addr); @@ -181,10 +181,10 @@ async fn test_confidence() { peer, error, } if !test_public => { - assert_eq!( + assert!(matches!( error, OutboundProbeError::Response(ResponseError::DialError) - ); + )); (peer.unwrap(), probe_id) } other => panic!("Unexpected Outbound Event: {other:?}"), @@ -261,7 +261,7 @@ async fn test_throttle_server_period() { match client.next_behaviour_event().await { Event::OutboundProbe(OutboundProbeEvent::Error { peer, error, .. 
}) => { assert!(peer.is_none()); - assert_eq!(error, OutboundProbeError::NoServer); + assert!(matches!(error, OutboundProbeError::NoServer)); } other => panic!("Unexpected behaviour event: {other:?}."), } diff --git a/protocols/autonat/tests/test_server.rs b/protocols/autonat/tests/test_server.rs index 1bb5f6247933..b0610ef59a4d 100644 --- a/protocols/autonat/tests/test_server.rs +++ b/protocols/autonat/tests/test_server.rs @@ -168,7 +168,10 @@ async fn test_dial_error() { }) => { assert_eq!(probe_id, request_probe_id); assert_eq!(peer, client_id); - assert_eq!(error, InboundProbeError::Response(ResponseError::DialError)); + assert!(matches!( + error, + InboundProbeError::Response(ResponseError::DialError) + )); } other => panic!("Unexpected behaviour event: {other:?}."), } @@ -252,10 +255,10 @@ async fn test_throttle_peer_max() { }) => { assert_eq!(client_id, peer); assert_ne!(first_probe_id, probe_id); - assert_eq!( + assert!(matches!( error, InboundProbeError::Response(ResponseError::DialRefused) - ) + )); } other => panic!("Unexpected behaviour event: {other:?}."), }; @@ -305,7 +308,7 @@ async fn test_dial_multiple_addr() { let dial_errors = concurrent_dial_errors.unwrap(); // The concurrent dial might not be fast enough to produce a dial error. - if let Some((addr, _)) = dial_errors.get(0) { + if let Some((addr, _)) = dial_errors.first() { assert_eq!(addr, &dial_addresses[0]); } diff --git a/protocols/dcutr/CHANGELOG.md b/protocols/dcutr/CHANGELOG.md index 18a166477cc8..d3857373658e 100644 --- a/protocols/dcutr/CHANGELOG.md +++ b/protocols/dcutr/CHANGELOG.md @@ -1,4 +1,15 @@ -## 0.10.0 +## 0.11.0 + +- Add `ConnectionId` to `Event::DirectConnectionUpgradeSucceeded` and `Event::DirectConnectionUpgradeFailed`. + See [PR 4558](https://github.com/libp2p/rust-libp2p/pull/4558). +- Exchange address _candidates_ instead of external addresses in `CONNECT`. + If hole-punching wasn't working properly for you until now, this might be the reason why. 
+ See [PR 4624](https://github.com/libp2p/rust-libp2p/pull/4624). +- Simplify public API. + We now only emit a single event: whether the hole-punch was successful or not. + See [PR 4749](https://github.com/libp2p/rust-libp2p/pull/4749). + +## 0.10.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index f12684816c68..aed7e2bf762e 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-dcutr" edition = "2021" rust-version = { workspace = true } description = "Direct connection upgrade through relay" -version = "0.10.0" +version = "0.11.0" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,24 +11,25 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -asynchronous-codec = "0.6" +asynchronous-codec = { workspace = true } either = "1.9.0" -futures = "0.3.28" +futures = "0.3.30" futures-timer = "3.0" instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } thiserror = "1.0" +tracing = "0.1.37" void = "1" +lru = "0.12.1" +futures-bounded = { workspace = true } [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } -clap = { version = "4.3.23", features = ["derive"] } -env_logger = "0.10.0" +clap = { version = "4.4.16", features = ["derive"] } libp2p-dns = { workspace = true, features = ["async-std"] } libp2p-identify = { workspace = true } libp2p-noise = { workspace = true } @@ -40,6 +41,7 @@ libp2p-swarm-test = { path = "../../swarm-test" } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } rand = "0.8" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to 
properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/dcutr/src/behaviour_impl.rs b/protocols/dcutr/src/behaviour.rs similarity index 69% rename from protocols/dcutr/src/behaviour_impl.rs rename to protocols/dcutr/src/behaviour.rs index 748e163429db..3742eb512f57 100644 --- a/protocols/dcutr/src/behaviour_impl.rs +++ b/protocols/dcutr/src/behaviour.rs @@ -20,7 +20,7 @@ //! [`NetworkBehaviour`] to act as a direct connection upgrade through relay node. -use crate::handler; +use crate::{handler, protocol}; use either::Either; use libp2p_core::connection::ConnectedPoint; use libp2p_core::multiaddr::Protocol; @@ -29,13 +29,13 @@ use libp2p_identity::PeerId; use libp2p_swarm::behaviour::{ConnectionClosed, DialFailure, FromSwarm}; use libp2p_swarm::dial_opts::{self, DialOpts}; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionHandler, ConnectionId, THandler, THandlerOutEvent, -}; -use libp2p_swarm::{ - ExternalAddresses, NetworkBehaviour, NotifyHandler, PollParameters, StreamUpgradeError, - THandlerInEvent, ToSwarm, + dummy, ConnectionDenied, ConnectionHandler, ConnectionId, NewExternalAddrCandidate, THandler, + THandlerOutEvent, }; +use libp2p_swarm::{NetworkBehaviour, NotifyHandler, THandlerInEvent, ToSwarm}; +use lru::LruCache; use std::collections::{HashMap, HashSet, VecDeque}; +use std::num::NonZeroUsize; use std::task::{Context, Poll}; use thiserror::Error; use void::Void; @@ -44,30 +44,25 @@ pub(crate) const MAX_NUMBER_OF_UPGRADE_ATTEMPTS: u8 = 3; /// The events produced by the [`Behaviour`]. 
#[derive(Debug)] -pub enum Event { - InitiatedDirectConnectionUpgrade { - remote_peer_id: PeerId, - local_relayed_addr: Multiaddr, - }, - RemoteInitiatedDirectConnectionUpgrade { - remote_peer_id: PeerId, - remote_relayed_addr: Multiaddr, - }, - DirectConnectionUpgradeSucceeded { - remote_peer_id: PeerId, - }, - DirectConnectionUpgradeFailed { - remote_peer_id: PeerId, - error: Error, - }, +pub struct Event { + pub remote_peer_id: PeerId, + pub result: Result, +} + +#[derive(Debug, Error)] +#[error("Failed to hole-punch connection: {inner}")] +pub struct Error { + inner: InnerError, } #[derive(Debug, Error)] -pub enum Error { - #[error("Failed to dial peer.")] - Dial, - #[error("Failed to establish substream: {0}.")] - Handler(StreamUpgradeError), +enum InnerError { + #[error("Giving up after {0} dial attempts")] + AttemptsExceeded(u8), + #[error("Inbound stream error: {0}")] + InboundError(protocol::inbound::Error), + #[error("Outbound stream error: {0}")] + OutboundError(protocol::outbound::Error), } pub struct Behaviour { @@ -77,9 +72,7 @@ pub struct Behaviour { /// All direct (non-relayed) connections. direct_connections: HashMap>, - external_addresses: ExternalAddresses, - - local_peer_id: PeerId, + address_candidates: Candidates, direct_to_relayed_connections: HashMap, @@ -93,20 +86,14 @@ impl Behaviour { Behaviour { queued_events: Default::default(), direct_connections: Default::default(), - external_addresses: Default::default(), - local_peer_id, + address_candidates: Candidates::new(local_peer_id), direct_to_relayed_connections: Default::default(), outgoing_direct_connection_attempts: Default::default(), } } fn observed_addresses(&self) -> Vec { - self.external_addresses - .iter() - .filter(|a| !a.iter().any(|p| p == Protocol::P2pCircuit)) - .cloned() - .map(|a| a.with(Protocol::P2p(self.local_peer_id))) - .collect() + self.address_candidates.iter().cloned().collect() } fn on_dial_failure( @@ -117,43 +104,37 @@ impl Behaviour { .. 
}: DialFailure, ) { - let peer_id = if let Some(peer_id) = peer_id { - peer_id - } else { + let Some(peer_id) = peer_id else { return; }; - let relayed_connection_id = if let Some(relayed_connection_id) = self + let Some(relayed_connection_id) = self .direct_to_relayed_connections .get(&failed_direct_connection) - { - *relayed_connection_id - } else { + else { return; }; - let attempt = if let Some(attempt) = self + let Some(attempt) = self .outgoing_direct_connection_attempts - .get(&(relayed_connection_id, peer_id)) - { - *attempt - } else { + .get(&(*relayed_connection_id, peer_id)) + else { return; }; - if attempt < MAX_NUMBER_OF_UPGRADE_ATTEMPTS { + if *attempt < MAX_NUMBER_OF_UPGRADE_ATTEMPTS { self.queued_events.push_back(ToSwarm::NotifyHandler { - handler: NotifyHandler::One(relayed_connection_id), + handler: NotifyHandler::One(*relayed_connection_id), peer_id, event: Either::Left(handler::relayed::Command::Connect), }) } else { - self.queued_events.extend([ToSwarm::GenerateEvent( - Event::DirectConnectionUpgradeFailed { - remote_peer_id: peer_id, - error: Error::Dial, - }, - )]); + self.queued_events.extend([ToSwarm::GenerateEvent(Event { + remote_peer_id: peer_id, + result: Err(Error { + inner: InnerError::AttemptsExceeded(MAX_NUMBER_OF_UPGRADE_ATTEMPTS), + }), + })]); } } @@ -164,7 +145,7 @@ impl Behaviour { connection_id, endpoint: connected_point, .. 
- }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { if !connected_point.is_relayed() { let connections = self @@ -202,13 +183,6 @@ impl NetworkBehaviour for Behaviour { handler::relayed::Handler::new(connected_point, self.observed_addresses()); handler.on_behaviour_event(handler::relayed::Command::Connect); - self.queued_events.extend([ToSwarm::GenerateEvent( - Event::InitiatedDirectConnectionUpgrade { - remote_peer_id: peer, - local_relayed_addr: local_addr.clone(), - }, - )]); - return Ok(Either::Left(handler)); // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound. } self.direct_connections @@ -260,13 +234,11 @@ impl NetworkBehaviour for Behaviour { ); } - self.queued_events.extend([ToSwarm::GenerateEvent( - Event::DirectConnectionUpgradeSucceeded { - remote_peer_id: peer, - }, - )]); + self.queued_events.extend([ToSwarm::GenerateEvent(Event { + remote_peer_id: peer, + result: Ok(connection_id), + })]); } - Ok(Either::Right(dummy::ConnectionHandler)) } @@ -288,23 +260,9 @@ impl NetworkBehaviour for Behaviour { }; match handler_event { - Either::Left(handler::relayed::Event::InboundConnectRequest { remote_addr }) => { - self.queued_events.extend([ToSwarm::GenerateEvent( - Event::RemoteInitiatedDirectConnectionUpgrade { - remote_peer_id: event_source, - remote_relayed_addr: remote_addr, - }, - )]); - } - Either::Left(handler::relayed::Event::InboundNegotiationFailed { error }) => { - self.queued_events.push_back(ToSwarm::GenerateEvent( - Event::DirectConnectionUpgradeFailed { - remote_peer_id: event_source, - error: Error::Handler(error), - }, - )); - } - Either::Left(handler::relayed::Event::InboundConnectNegotiated(remote_addrs)) => { + Either::Left(handler::relayed::Event::InboundConnectNegotiated { remote_addrs }) => { + tracing::debug!(target=%event_source, addresses=?remote_addrs, "Attempting to hole-punch as dialer"); + let opts = DialOpts::peer_id(event_source) .addresses(remote_addrs) 
.condition(dial_opts::PeerCondition::Always) @@ -316,15 +274,27 @@ impl NetworkBehaviour for Behaviour { .insert(maybe_direct_connection_id, relayed_connection_id); self.queued_events.push_back(ToSwarm::Dial { opts }); } - Either::Left(handler::relayed::Event::OutboundNegotiationFailed { error }) => { - self.queued_events.push_back(ToSwarm::GenerateEvent( - Event::DirectConnectionUpgradeFailed { - remote_peer_id: event_source, - error: Error::Handler(error), - }, - )); + Either::Left(handler::relayed::Event::InboundConnectFailed { error }) => { + self.queued_events.push_back(ToSwarm::GenerateEvent(Event { + remote_peer_id: event_source, + result: Err(Error { + inner: InnerError::InboundError(error), + }), + })); + } + Either::Left(handler::relayed::Event::OutboundConnectFailed { error }) => { + self.queued_events.push_back(ToSwarm::GenerateEvent(Event { + remote_peer_id: event_source, + result: Err(Error { + inner: InnerError::OutboundError(error), + }), + })); + + // Maybe treat these as transient and retry? 
} Either::Left(handler::relayed::Event::OutboundConnectNegotiated { remote_addrs }) => { + tracing::debug!(target=%event_source, addresses=?remote_addrs, "Attempting to hole-punch as listener"); + let opts = DialOpts::peer_id(event_source) .condition(dial_opts::PeerCondition::Always) .addresses(remote_addrs) @@ -345,11 +315,8 @@ impl NetworkBehaviour for Behaviour { }; } - fn poll( - &mut self, - _cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); } @@ -357,27 +324,54 @@ impl NetworkBehaviour for Behaviour { Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { - self.external_addresses.on_swarm_event(&event); - + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionClosed(connection_closed) => { self.on_connection_closed(connection_closed) } FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure), - FromSwarm::AddressChange(_) - | FromSwarm::ConnectionEstablished(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} + FromSwarm::NewExternalAddrCandidate(NewExternalAddrCandidate { addr }) => { + self.address_candidates.add(addr.clone()); + } + _ => {} + } + } +} + +/// Stores our address candidates. +/// +/// We use an [`LruCache`] to favor addresses that are reported more often. +/// When attempting a hole-punch, we will try more frequent addresses first. +/// Most of these addresses will come from observations by other nodes (via e.g. the identify protocol). 
+/// More common observations mean a more likely stable port-mapping and thus a higher chance of a successful hole-punch. +struct Candidates { + inner: LruCache, + me: PeerId, +} + +impl Candidates { + fn new(me: PeerId) -> Self { + Self { + inner: LruCache::new(NonZeroUsize::new(20).expect("20 > 0")), + me, } } + + fn add(&mut self, mut address: Multiaddr) { + if is_relayed(&address) { + return; + } + + if address.iter().last() != Some(Protocol::P2p(self.me)) { + address.push(Protocol::P2p(self.me)); + } + + self.inner.push(address, ()); + } + + fn iter(&self) -> impl Iterator { + self.inner.iter().map(|(a, _)| a) + } } fn is_relayed(addr: &Multiaddr) -> bool { diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs index ff22f2b18e10..eba58f89313d 100644 --- a/protocols/dcutr/src/handler/relayed.rs +++ b/protocols/dcutr/src/handler/relayed.rs @@ -20,23 +20,26 @@ //! [`ConnectionHandler`] handling relayed connection potentially upgraded to a direct connection. 
-use crate::behaviour_impl::MAX_NUMBER_OF_UPGRADE_ATTEMPTS; -use crate::protocol; +use crate::behaviour::MAX_NUMBER_OF_UPGRADE_ATTEMPTS; +use crate::{protocol, PROTOCOL_NAME}; use either::Either; use futures::future; -use futures::future::{BoxFuture, FutureExt}; use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::upgrade::DeniedUpgrade; +use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; use libp2p_core::ConnectedPoint; use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, }; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, KeepAlive, StreamUpgradeError, SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, + SubstreamProtocol, }; +use protocol::{inbound, outbound}; use std::collections::VecDeque; +use std::io; use std::task::{Context, Poll}; +use std::time::Duration; #[derive(Debug)] pub enum Command { @@ -45,41 +48,28 @@ pub enum Command { #[derive(Debug)] pub enum Event { - InboundConnectRequest { - remote_addr: Multiaddr, - }, - InboundNegotiationFailed { - error: StreamUpgradeError, - }, - InboundConnectNegotiated(Vec), - OutboundNegotiationFailed { - error: StreamUpgradeError, - }, - OutboundConnectNegotiated { - remote_addrs: Vec, - }, + InboundConnectNegotiated { remote_addrs: Vec }, + OutboundConnectNegotiated { remote_addrs: Vec }, + InboundConnectFailed { error: inbound::Error }, + OutboundConnectFailed { error: outbound::Error }, } pub struct Handler { endpoint: ConnectedPoint, - /// A pending fatal error that results in the connection being closed. - pending_error: Option< - StreamUpgradeError< - Either, - >, - >, /// Queue of events to return when polled. queued_events: VecDeque< ConnectionHandlerEvent< ::OutboundProtocol, ::OutboundOpenInfo, ::ToBehaviour, - ::Error, >, >, - /// Inbound connect, accepted by the behaviour, pending completion. 
- inbound_connect: - Option, protocol::inbound::UpgradeError>>>, + + // Inbound DCUtR handshakes + inbound_stream: futures_bounded::FuturesSet, inbound::Error>>, + + // Outbound DCUtR handshake. + outbound_stream: futures_bounded::FuturesSet, outbound::Error>>, /// The addresses we will send to the other party for hole-punching attempts. holepunch_candidates: Vec, @@ -91,9 +81,9 @@ impl Handler { pub fn new(endpoint: ConnectedPoint, holepunch_candidates: Vec) -> Self { Self { endpoint, - pending_error: Default::default(), queued_events: Default::default(), - inbound_connect: Default::default(), + inbound_stream: futures_bounded::FuturesSet::new(Duration::from_secs(10), 1), + outbound_stream: futures_bounded::FuturesSet::new(Duration::from_secs(10), 1), holepunch_candidates, attempts: 0, } @@ -109,29 +99,19 @@ impl Handler { >, ) { match output { - future::Either::Left(inbound_connect) => { + future::Either::Left(stream) => { if self - .inbound_connect - .replace( - inbound_connect - .accept(self.holepunch_candidates.clone()) - .boxed(), - ) - .is_some() + .inbound_stream + .try_push(inbound::handshake( + stream, + self.holepunch_candidates.clone(), + )) + .is_err() { - log::warn!( - "New inbound connect stream while still upgrading previous one. \ - Replacing previous with new.", + tracing::warn!( + "New inbound connect stream while still upgrading previous one. Replacing previous with new.", ); } - let remote_addr = match &self.endpoint { - ConnectedPoint::Dialer { address, role_override: _ } => address.clone(), - ConnectedPoint::Listener { ..} => unreachable!("`::listen_protocol` denies all incoming substreams as a listener."), - }; - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::InboundConnectRequest { remote_addr }, - )); self.attempts += 1; } // A connection listener denies all incoming substreams, thus none can ever be fully negotiated. 
@@ -142,8 +122,7 @@ impl Handler { fn on_fully_negotiated_outbound( &mut self, FullyNegotiatedOutbound { - protocol: protocol::outbound::Connect { obs_addrs }, - .. + protocol: stream, .. }: FullyNegotiatedOutbound< ::OutboundProtocol, ::OutboundOpenInfo, @@ -153,12 +132,18 @@ impl Handler { self.endpoint.is_listener(), "A connection dialer never initiates a connection upgrade." ); - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::OutboundConnectNegotiated { - remote_addrs: obs_addrs, - }, - )); + if self + .outbound_stream + .try_push(outbound::handshake( + stream, + self.holepunch_candidates.clone(), + )) + .is_err() + { + tracing::warn!( + "New outbound connect stream while still upgrading previous one. Replacing previous with new.", + ); + } } fn on_listen_upgrade_error( @@ -168,10 +153,7 @@ impl Handler { ::InboundProtocol, >, ) { - self.pending_error = Some(StreamUpgradeError::Apply(match error { - Either::Left(e) => Either::Left(e), - Either::Right(v) => void::unreachable(v), - })); + void::unreachable(error.into_inner()); } fn on_dial_upgrade_error( @@ -181,50 +163,32 @@ impl Handler { ::OutboundProtocol, >, ) { - match error { - StreamUpgradeError::Timeout => { - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::OutboundNegotiationFailed { - error: StreamUpgradeError::Timeout, - }, - )); - } - StreamUpgradeError::NegotiationFailed => { - // The remote merely doesn't support the DCUtR protocol. - // This is no reason to close the connection, which may - // successfully communicate with other protocols already. - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::OutboundNegotiationFailed { - error: StreamUpgradeError::NegotiationFailed, - }, - )); - } - _ => { - // Anything else is considered a fatal error or misbehaviour of - // the remote peer and results in closing the connection. 
- self.pending_error = Some(error.map_upgrade_err(Either::Right)); - } - } + let error = match error { + StreamUpgradeError::Apply(v) => void::unreachable(v), + StreamUpgradeError::NegotiationFailed => outbound::Error::Unsupported, + StreamUpgradeError::Io(e) => outbound::Error::Io(e), + StreamUpgradeError::Timeout => outbound::Error::Io(io::ErrorKind::TimedOut.into()), + }; + + self.queued_events + .push_back(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectFailed { error }, + )) } } impl ConnectionHandler for Handler { type FromBehaviour = Command; type ToBehaviour = Event; - type Error = StreamUpgradeError< - Either, - >; - type InboundProtocol = Either; - type OutboundProtocol = protocol::outbound::Upgrade; + type InboundProtocol = Either, DeniedUpgrade>; + type OutboundProtocol = ReadyUpgrade; type OutboundOpenInfo = (); type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { match self.endpoint { ConnectedPoint::Dialer { .. } => { - SubstreamProtocol::new(Either::Left(protocol::inbound::Upgrade {}), ()) + SubstreamProtocol::new(Either::Left(ReadyUpgrade::new(PROTOCOL_NAME)), ()) } ConnectedPoint::Listener { .. 
} => { // By the protocol specification the listening side of a relayed connection @@ -242,68 +206,77 @@ impl ConnectionHandler for Handler { Command::Connect => { self.queued_events .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new( - protocol::outbound::Upgrade::new(self.holepunch_candidates.clone()), - (), - ), + protocol: SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()), }); self.attempts += 1; } } } - fn connection_keep_alive(&self) -> KeepAlive { - if !self.queued_events.is_empty() { - return KeepAlive::Yes; - } - - if self.inbound_connect.is_some() { - return KeepAlive::Yes; - } - + fn connection_keep_alive(&self) -> bool { if self.attempts < MAX_NUMBER_OF_UPGRADE_ATTEMPTS { - return KeepAlive::Yes; + return true; } - KeepAlive::No + false } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { - // Check for a pending (fatal) error. - if let Some(err) = self.pending_error.take() { - // The handler will not be polled again by the `Swarm`. - return Poll::Ready(ConnectionHandlerEvent::Close(err)); - } - // Return queued events. 
if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); } - if let Some(Poll::Ready(result)) = self.inbound_connect.as_mut().map(|f| f.poll_unpin(cx)) { - self.inbound_connect = None; - match result { - Ok(addresses) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::InboundConnectNegotiated(addresses), - )); - } - Err(e) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Apply( - Either::Left(e), - ))) - } + match self.inbound_stream.poll_unpin(cx) { + Poll::Ready(Ok(Ok(addresses))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundConnectNegotiated { + remote_addrs: addresses, + }, + )) + } + Poll::Ready(Ok(Err(error))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundConnectFailed { error }, + )) + } + Poll::Ready(Err(futures_bounded::Timeout { .. })) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundConnectFailed { + error: inbound::Error::Io(io::ErrorKind::TimedOut.into()), + }, + )) + } + Poll::Pending => {} + } + + match self.outbound_stream.poll_unpin(cx) { + Poll::Ready(Ok(Ok(addresses))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectNegotiated { + remote_addrs: addresses, + }, + )) + } + Poll::Ready(Ok(Err(error))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectFailed { error }, + )) + } + Poll::Ready(Err(futures_bounded::Timeout { .. 
})) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectFailed { + error: outbound::Error::Io(io::ErrorKind::TimedOut.into()), + }, + )) } + Poll::Pending => {} } Poll::Pending @@ -331,9 +304,7 @@ impl ConnectionHandler for Handler { ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { self.on_dial_upgrade_error(dial_upgrade_error) } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::LocalProtocolsChange(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} + _ => {} } } } diff --git a/protocols/dcutr/src/lib.rs b/protocols/dcutr/src/lib.rs index 6001c9144e75..7c5d28aba193 100644 --- a/protocols/dcutr/src/lib.rs +++ b/protocols/dcutr/src/lib.rs @@ -23,7 +23,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod behaviour_impl; // TODO: Rename back `behaviour` once deprecation symbols are removed. +mod behaviour; mod handler; mod protocol; @@ -33,13 +33,11 @@ mod proto { pub(crate) use self::holepunch::pb::{mod_HolePunch::*, HolePunch}; } -pub use behaviour_impl::Behaviour; -pub use behaviour_impl::Error; -pub use behaviour_impl::Event; +pub use behaviour::{Behaviour, Error, Event}; pub use protocol::PROTOCOL_NAME; pub mod inbound { - pub use crate::protocol::inbound::UpgradeError; + pub use crate::protocol::inbound::ProtocolViolation; } pub mod outbound { - pub use crate::protocol::outbound::UpgradeError; + pub use crate::protocol::outbound::ProtocolViolation; } diff --git a/protocols/dcutr/src/protocol/inbound.rs b/protocols/dcutr/src/protocol/inbound.rs index d38b6f4559ab..b8f90daf3a17 100644 --- a/protocols/dcutr/src/protocol/inbound.rs +++ b/protocols/dcutr/src/protocol/inbound.rs @@ -20,114 +20,91 @@ use crate::proto; use asynchronous_codec::Framed; -use futures::{future::BoxFuture, prelude::*}; -use libp2p_core::{multiaddr::Protocol, upgrade, Multiaddr}; -use libp2p_swarm::{Stream, StreamProtocol}; +use futures::prelude::*; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; +use 
libp2p_swarm::Stream; use std::convert::TryFrom; -use std::iter; +use std::io; use thiserror::Error; -pub struct Upgrade {} - -impl upgrade::UpgradeInfo for Upgrade { - type Info = StreamProtocol; - type InfoIter = iter::Once; +pub(crate) async fn handshake( + stream: Stream, + candidates: Vec, +) -> Result, Error> { + let mut stream = Framed::new( + stream, + quick_protobuf_codec::Codec::new(super::MAX_MESSAGE_SIZE_BYTES), + ); + + let proto::HolePunch { type_pb, ObsAddrs } = stream + .next() + .await + .ok_or(io::Error::from(io::ErrorKind::UnexpectedEof))??; + + if ObsAddrs.is_empty() { + return Err(Error::Protocol(ProtocolViolation::NoAddresses)); + }; + + let obs_addrs = ObsAddrs + .into_iter() + .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { + Ok(a) => Some(a), + Err(e) => { + tracing::debug!("Unable to parse multiaddr: {e}"); + None + } + }) + // Filter out relayed addresses. + .filter(|a| { + if a.iter().any(|p| p == Protocol::P2pCircuit) { + tracing::debug!(address=%a, "Dropping relayed address"); + false + } else { + true + } + }) + .collect(); - fn protocol_info(&self) -> Self::InfoIter { - iter::once(super::PROTOCOL_NAME) + if !matches!(type_pb, proto::Type::CONNECT) { + return Err(Error::Protocol(ProtocolViolation::UnexpectedTypeSync)); } -} - -impl upgrade::InboundUpgrade for Upgrade { - type Output = PendingConnect; - type Error = UpgradeError; - type Future = BoxFuture<'static, Result>; - fn upgrade_inbound(self, substream: Stream, _: Self::Info) -> Self::Future { - let mut substream = Framed::new( - substream, - quick_protobuf_codec::Codec::new(super::MAX_MESSAGE_SIZE_BYTES), - ); + let msg = proto::HolePunch { + type_pb: proto::Type::CONNECT, + ObsAddrs: candidates.into_iter().map(|a| a.to_vec()).collect(), + }; - async move { - let proto::HolePunch { type_pb, ObsAddrs } = - substream.next().await.ok_or(UpgradeError::StreamClosed)??; - - let obs_addrs = if ObsAddrs.is_empty() { - return Err(UpgradeError::NoAddresses); - } else { - ObsAddrs 
- .into_iter() - .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { - Ok(a) => Some(a), - Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); - None - } - }) - // Filter out relayed addresses. - .filter(|a| { - if a.iter().any(|p| p == Protocol::P2pCircuit) { - log::debug!("Dropping relayed address {a}"); - false - } else { - true - } - }) - .collect::>() - }; + stream.send(msg).await?; + let proto::HolePunch { type_pb, .. } = stream + .next() + .await + .ok_or(io::Error::from(io::ErrorKind::UnexpectedEof))??; - match type_pb { - proto::Type::CONNECT => {} - proto::Type::SYNC => return Err(UpgradeError::UnexpectedTypeSync), - } - - Ok(PendingConnect { - substream, - remote_obs_addrs: obs_addrs, - }) - } - .boxed() + if !matches!(type_pb, proto::Type::SYNC) { + return Err(Error::Protocol(ProtocolViolation::UnexpectedTypeConnect)); } -} -pub struct PendingConnect { - substream: Framed>, - remote_obs_addrs: Vec, + Ok(obs_addrs) } -impl PendingConnect { - pub async fn accept( - mut self, - local_obs_addrs: Vec, - ) -> Result, UpgradeError> { - let msg = proto::HolePunch { - type_pb: proto::Type::CONNECT, - ObsAddrs: local_obs_addrs.into_iter().map(|a| a.to_vec()).collect(), - }; - - self.substream.send(msg).await?; - let proto::HolePunch { type_pb, .. 
} = self - .substream - .next() - .await - .ok_or(UpgradeError::StreamClosed)??; - - match type_pb { - proto::Type::CONNECT => return Err(UpgradeError::UnexpectedTypeConnect), - proto::Type::SYNC => {} - } +#[derive(Debug, Error)] +pub enum Error { + #[error("IO error")] + Io(#[from] io::Error), + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), +} - Ok(self.remote_obs_addrs) +impl From for Error { + fn from(e: quick_protobuf_codec::Error) -> Self { + Error::Protocol(ProtocolViolation::Codec(e)) } } #[derive(Debug, Error)] -pub enum UpgradeError { +pub enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, #[error("Expected at least one address in reservation.")] NoAddresses, #[error("Failed to parse response type field.")] diff --git a/protocols/dcutr/src/protocol/outbound.rs b/protocols/dcutr/src/protocol/outbound.rs index 960d98cbe662..d9cb60a01f69 100644 --- a/protocols/dcutr/src/protocol/outbound.rs +++ b/protocols/dcutr/src/protocol/outbound.rs @@ -19,115 +19,102 @@ // DEALINGS IN THE SOFTWARE. 
use crate::proto; +use crate::PROTOCOL_NAME; use asynchronous_codec::Framed; -use futures::{future::BoxFuture, prelude::*}; +use futures::prelude::*; use futures_timer::Delay; use instant::Instant; -use libp2p_core::{multiaddr::Protocol, upgrade, Multiaddr}; -use libp2p_swarm::{Stream, StreamProtocol}; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; +use libp2p_swarm::Stream; use std::convert::TryFrom; -use std::iter; +use std::io; use thiserror::Error; -pub struct Upgrade { - obs_addrs: Vec, -} +pub(crate) async fn handshake( + stream: Stream, + candidates: Vec, +) -> Result, Error> { + let mut stream = Framed::new( + stream, + quick_protobuf_codec::Codec::new(super::MAX_MESSAGE_SIZE_BYTES), + ); -impl upgrade::UpgradeInfo for Upgrade { - type Info = StreamProtocol; - type InfoIter = iter::Once; + let msg = proto::HolePunch { + type_pb: proto::Type::CONNECT, + ObsAddrs: candidates.into_iter().map(|a| a.to_vec()).collect(), + }; - fn protocol_info(&self) -> Self::InfoIter { - iter::once(super::PROTOCOL_NAME) - } -} + stream.send(msg).await?; -impl Upgrade { - pub fn new(obs_addrs: Vec) -> Self { - Self { obs_addrs } - } -} + let sent_time = Instant::now(); -impl upgrade::OutboundUpgrade for Upgrade { - type Output = Connect; - type Error = UpgradeError; - type Future = BoxFuture<'static, Result>; + let proto::HolePunch { type_pb, ObsAddrs } = stream + .next() + .await + .ok_or(io::Error::from(io::ErrorKind::UnexpectedEof))??; - fn upgrade_outbound(self, substream: Stream, _: Self::Info) -> Self::Future { - let mut substream = Framed::new( - substream, - quick_protobuf_codec::Codec::new(super::MAX_MESSAGE_SIZE_BYTES), - ); + let rtt = sent_time.elapsed(); - let msg = proto::HolePunch { - type_pb: proto::Type::CONNECT, - ObsAddrs: self.obs_addrs.into_iter().map(|a| a.to_vec()).collect(), - }; + if !matches!(type_pb, proto::Type::CONNECT) { + return Err(Error::Protocol(ProtocolViolation::UnexpectedTypeSync)); + } - async move { - substream.send(msg).await?; + if 
ObsAddrs.is_empty() { + return Err(Error::Protocol(ProtocolViolation::NoAddresses)); + } - let sent_time = Instant::now(); + let obs_addrs = ObsAddrs + .into_iter() + .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { + Ok(a) => Some(a), + Err(e) => { + tracing::debug!("Unable to parse multiaddr: {e}"); + None + } + }) + // Filter out relayed addresses. + .filter(|a| { + if a.iter().any(|p| p == Protocol::P2pCircuit) { + tracing::debug!(address=%a, "Dropping relayed address"); + false + } else { + true + } + }) + .collect(); - let proto::HolePunch { type_pb, ObsAddrs } = - substream.next().await.ok_or(UpgradeError::StreamClosed)??; + let msg = proto::HolePunch { + type_pb: proto::Type::SYNC, + ObsAddrs: vec![], + }; - let rtt = sent_time.elapsed(); + stream.send(msg).await?; - match type_pb { - proto::Type::CONNECT => {} - proto::Type::SYNC => return Err(UpgradeError::UnexpectedTypeSync), - } + Delay::new(rtt / 2).await; - let obs_addrs = if ObsAddrs.is_empty() { - return Err(UpgradeError::NoAddresses); - } else { - ObsAddrs - .into_iter() - .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { - Ok(a) => Some(a), - Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); - None - } - }) - // Filter out relayed addresses. 
- .filter(|a| { - if a.iter().any(|p| p == Protocol::P2pCircuit) { - log::debug!("Dropping relayed address {a}"); - false - } else { - true - } - }) - .collect::>() - }; - - let msg = proto::HolePunch { - type_pb: proto::Type::SYNC, - ObsAddrs: vec![], - }; - - substream.send(msg).await?; - - Delay::new(rtt / 2).await; - - Ok(Connect { obs_addrs }) - } - .boxed() - } + Ok(obs_addrs) } -pub struct Connect { - pub obs_addrs: Vec, +#[derive(Debug, Error)] +pub enum Error { + #[error("IO error")] + Io(#[from] io::Error), + #[error("Remote does not support the `{PROTOCOL_NAME}` protocol")] + Unsupported, + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), +} + +impl From for Error { + fn from(e: quick_protobuf_codec::Error) -> Self { + Error::Protocol(ProtocolViolation::Codec(e)) + } } #[derive(Debug, Error)] -pub enum UpgradeError { +pub enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, #[error("Expected 'status' field to be set.")] MissingStatusField, #[error("Expected 'reservation' field to be set.")] diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs index 162b4a5ec789..084ee7441456 100644 --- a/protocols/dcutr/tests/lib.rs +++ b/protocols/dcutr/tests/lib.rs @@ -22,33 +22,40 @@ use libp2p_core::multiaddr::{Multiaddr, Protocol}; use libp2p_core::transport::upgrade::Version; use libp2p_core::transport::{MemoryTransport, Transport}; use libp2p_dcutr as dcutr; +use libp2p_identify as identify; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_plaintext as plaintext; use libp2p_relay as relay; -use libp2p_swarm::{NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p_swarm::{Config, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[async_std::test] async fn connect() { - let _ = env_logger::try_init(); + let _ = 
tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut relay = build_relay(); let mut dst = build_client(); let mut src = build_client(); - // Have all swarms listen on a local memory address. - let (relay_addr, _) = relay.listen().await; - let (dst_addr, _) = dst.listen().await; + // Have all swarms listen on a local TCP address. + let (_, relay_tcp_addr) = relay.listen().with_tcp_addr_external().await; + let (_, dst_tcp_addr) = dst.listen().await; src.listen().await; + assert!(src.external_addresses().next().is_none()); + assert!(dst.external_addresses().next().is_none()); + let relay_peer_id = *relay.local_peer_id(); let dst_peer_id = *dst.local_peer_id(); async_std::task::spawn(relay.loop_on_next()); - let dst_relayed_addr = relay_addr + let dst_relayed_addr = relay_tcp_addr .with(Protocol::P2p(relay_peer_id)) .with(Protocol::P2pCircuit) .with(Protocol::P2p(dst_peer_id)); @@ -65,50 +72,59 @@ async fn connect() { src.dial_and_wait(dst_relayed_addr.clone()).await; - loop { - match src - .next_swarm_event() - .await - .try_into_behaviour_event() - .unwrap() - { - ClientEvent::Dcutr(dcutr::Event::RemoteInitiatedDirectConnectionUpgrade { - remote_peer_id, - remote_relayed_addr, - }) => { - if remote_peer_id == dst_peer_id && remote_relayed_addr == dst_relayed_addr { - break; - } - } - other => panic!("Unexpected event: {other:?}."), - } - } + let dst_addr = dst_tcp_addr.with(Protocol::P2p(dst_peer_id)); - let dst_addr = dst_addr.with(Protocol::P2p(dst_peer_id)); + let established_conn_id = src + .wait(move |e| match e { + SwarmEvent::ConnectionEstablished { + endpoint, + connection_id, + .. + } => (*endpoint.get_remote_address() == dst_addr).then_some(connection_id), + _ => None, + }) + .await; + + let reported_conn_id = src + .wait(move |e| match e { + SwarmEvent::Behaviour(ClientEvent::Dcutr(dcutr::Event { + result: Ok(connection_id), + .. 
+ })) => Some(connection_id), + _ => None, + }) + .await; - src.wait(move |e| match e { - SwarmEvent::ConnectionEstablished { endpoint, .. } => { - (*endpoint.get_remote_address() == dst_addr).then_some(()) - } - _ => None, - }) - .await; + assert_eq!(established_conn_id, reported_conn_id); } -fn build_relay() -> Swarm { +fn build_relay() -> Swarm { Swarm::new_ephemeral(|identity| { let local_peer_id = identity.public().to_peer_id(); - relay::Behaviour::new( - local_peer_id, - relay::Config { - reservation_duration: Duration::from_secs(2), - ..Default::default() - }, - ) + Relay { + relay: relay::Behaviour::new( + local_peer_id, + relay::Config { + reservation_duration: Duration::from_secs(2), + ..Default::default() + }, + ), + identify: identify::Behaviour::new(identify::Config::new( + "/relay".to_owned(), + identity.public(), + )), + } }) } +#[derive(NetworkBehaviour)] +#[behaviour(prelude = "libp2p_swarm::derive_prelude")] +struct Relay { + relay: relay::Behaviour, + identify: identify::Behaviour, +} + fn build_client() -> Swarm { let local_key = identity::Keypair::generate_ed25519(); let local_peer_id = local_key.public().to_peer_id(); @@ -123,15 +139,19 @@ fn build_client() -> Swarm { .multiplex(libp2p_yamux::Config::default()) .boxed(); - SwarmBuilder::without_executor( + Swarm::new( transport, Client { relay: behaviour, dcutr: dcutr::Behaviour::new(local_peer_id), + identify: identify::Behaviour::new(identify::Config::new( + "/client".to_owned(), + local_key.public(), + )), }, local_peer_id, + Config::with_async_std_executor(), ) - .build() } #[derive(NetworkBehaviour)] @@ -139,6 +159,7 @@ fn build_client() -> Swarm { struct Client { relay: relay::client::Behaviour, dcutr: dcutr::Behaviour, + identify: identify::Behaviour, } async fn wait_for_reservation( @@ -149,14 +170,16 @@ async fn wait_for_reservation( ) { let mut new_listen_addr_for_relayed_addr = false; let mut reservation_req_accepted = false; + let mut addr_observed = false; + loop { + if 
new_listen_addr_for_relayed_addr && reservation_req_accepted && addr_observed { + break; + } + match client.next_swarm_event().await { - SwarmEvent::NewListenAddr { address, .. } if address != client_addr => {} SwarmEvent::NewListenAddr { address, .. } if address == client_addr => { new_listen_addr_for_relayed_addr = true; - if reservation_req_accepted { - break; - } } SwarmEvent::Behaviour(ClientEvent::Relay( relay::client::Event::ReservationReqAccepted { @@ -166,15 +189,21 @@ async fn wait_for_reservation( }, )) if relay_peer_id == peer_id && renewal == is_renewal => { reservation_req_accepted = true; - if new_listen_addr_for_relayed_addr { - break; - } } SwarmEvent::Dialing { peer_id: Some(peer_id), .. } if peer_id == relay_peer_id => {} SwarmEvent::ConnectionEstablished { peer_id, .. } if peer_id == relay_peer_id => {} + SwarmEvent::Behaviour(ClientEvent::Identify(identify::Event::Received { .. })) => { + addr_observed = true; + } + SwarmEvent::Behaviour(ClientEvent::Identify(_)) => {} + SwarmEvent::NewExternalAddrCandidate { .. } => {} + SwarmEvent::ExternalAddrConfirmed { address } if !is_renewal => { + assert_eq!(address, client_addr); + } + SwarmEvent::NewExternalAddrOfPeer { .. } => {} e => panic!("{e:?}"), } } diff --git a/protocols/floodsub/CHANGELOG.md b/protocols/floodsub/CHANGELOG.md index e59aaa9225f9..8e3cb70ddf14 100644 --- a/protocols/floodsub/CHANGELOG.md +++ b/protocols/floodsub/CHANGELOG.md @@ -1,4 +1,9 @@ -## 0.43.0 +## 0.44.0 + +- Change publish to require `data: impl Into` to internally avoid any costly cloning / allocation. + See [PR 4754](https://github.com/libp2p/rust-libp2p/pull/4754). + +## 0.43.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/protocols/floodsub/Cargo.toml b/protocols/floodsub/Cargo.toml index dfa9ad63bd2f..e1bd23fe2d13 100644 --- a/protocols/floodsub/Cargo.toml +++ b/protocols/floodsub/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-floodsub" edition = "2021" rust-version = { workspace = true } description = "Floodsub protocol for libp2p" -version = "0.43.0" +version = "0.44.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,19 +11,20 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -asynchronous-codec = "0.6" +asynchronous-codec = { workspace = true } cuckoofilter = "0.5.0" fnv = "1.0" -futures = "0.3.28" +bytes = "1.5" +futures = "0.3.30" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" -smallvec = "1.11.1" -thiserror = "1.0.49" +smallvec = "1.12.0" +thiserror = "1.0.57" +tracing = "0.1.37" # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/floodsub/src/layer.rs b/protocols/floodsub/src/layer.rs index 29fe8ba250fc..35711408a8d0 100644 --- a/protocols/floodsub/src/layer.rs +++ b/protocols/floodsub/src/layer.rs @@ -24,16 +24,16 @@ use crate::protocol::{ }; use crate::topic::Topic; use crate::FloodsubConfig; +use bytes::Bytes; use cuckoofilter::{CuckooError, CuckooFilter}; use fnv::FnvHashSet; use libp2p_core::{Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}; use libp2p_swarm::{ - dial_opts::DialOpts, ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, - OneShotHandler, PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + dial_opts::DialOpts, CloseConnection, ConnectionDenied, ConnectionId, NetworkBehaviour, + NotifyHandler, OneShotHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use log::warn; use smallvec::SmallVec; use std::collections::hash_map::{DefaultHasher, HashMap}; use std::task::{Context, Poll}; @@ -146,9 +146,8 @@ impl Floodsub { /// /// Returns true if we were subscribed to this topic. pub fn unsubscribe(&mut self, topic: Topic) -> bool { - let pos = match self.subscribed_topics.iter().position(|t| *t == topic) { - Some(pos) => pos, - None => return false, + let Some(pos) = self.subscribed_topics.iter().position(|t| *t == topic) else { + return false; }; self.subscribed_topics.remove(pos); @@ -171,12 +170,12 @@ impl Floodsub { } /// Publishes a message to the network, if we're subscribed to the topic only. - pub fn publish(&mut self, topic: impl Into, data: impl Into>) { + pub fn publish(&mut self, topic: impl Into, data: impl Into) { self.publish_many(iter::once(topic), data) } /// Publishes a message to the network, even if we're not subscribed to the topic. 
- pub fn publish_any(&mut self, topic: impl Into, data: impl Into>) { + pub fn publish_any(&mut self, topic: impl Into, data: impl Into) { self.publish_many_any(iter::once(topic), data) } @@ -187,7 +186,7 @@ impl Floodsub { pub fn publish_many( &mut self, topic: impl IntoIterator>, - data: impl Into>, + data: impl Into, ) { self.publish_many_inner(topic, data, true) } @@ -196,7 +195,7 @@ impl Floodsub { pub fn publish_many_any( &mut self, topic: impl IntoIterator>, - data: impl Into>, + data: impl Into, ) { self.publish_many_inner(topic, data, false) } @@ -204,7 +203,7 @@ impl Floodsub { fn publish_many_inner( &mut self, topic: impl IntoIterator>, - data: impl Into>, + data: impl Into, check_self_subscriptions: bool, ) { let message = FloodsubMessage { @@ -223,7 +222,7 @@ impl Floodsub { .any(|t| message.topics.iter().any(|u| t == u)); if self_subscribed { if let Err(e @ CuckooError::NotEnoughSpace) = self.received.add(&message) { - warn!( + tracing::warn!( "Message was added to 'received' Cuckoofilter but some \ other message was removed as a consequence: {}", e, @@ -307,7 +306,7 @@ impl Floodsub { peer_id, remaining_established, .. - }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { if remaining_established > 0 { // we only care about peer disconnections @@ -354,13 +353,21 @@ impl NetworkBehaviour for Floodsub { fn on_connection_handler_event( &mut self, propagation_source: PeerId, - _connection_id: ConnectionId, + connection_id: ConnectionId, event: THandlerOutEvent, ) { // We ignore successful sends or timeouts. 
let event = match event { - InnerMessage::Rx(event) => event, - InnerMessage::Sent => return, + Ok(InnerMessage::Rx(event)) => event, + Ok(InnerMessage::Sent) => return, + Err(e) => { + tracing::debug!("Failed to send floodsub message: {e}"); + self.events.push_back(ToSwarm::CloseConnection { + peer_id: propagation_source, + connection: CloseConnection::One(connection_id), + }); + return; + } }; // Update connected peers topics @@ -406,7 +413,7 @@ impl NetworkBehaviour for Floodsub { Ok(false) => continue, // Message already existed. Err(e @ CuckooError::NotEnoughSpace) => { // Message added, but some other removed. - warn!( + tracing::warn!( "Message was added to 'received' Cuckoofilter but some \ other message was removed as a consequence: {}", e, @@ -466,11 +473,8 @@ impl NetworkBehaviour for Floodsub { } } - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event); } @@ -478,7 +482,7 @@ impl NetworkBehaviour for Floodsub { Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(connection_established) => { self.on_connection_established(connection_established) @@ -486,17 +490,7 @@ impl NetworkBehaviour for Floodsub { FromSwarm::ConnectionClosed(connection_closed) => { self.on_connection_closed(connection_closed) } - FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } } diff 
--git a/protocols/floodsub/src/protocol.rs b/protocols/floodsub/src/protocol.rs index ebd3d8b3bc88..edc842be8cec 100644 --- a/protocols/floodsub/src/protocol.rs +++ b/protocols/floodsub/src/protocol.rs @@ -21,6 +21,7 @@ use crate::proto; use crate::topic::Topic; use asynchronous_codec::Framed; +use bytes::Bytes; use futures::{ io::{AsyncRead, AsyncWrite}, Future, @@ -81,7 +82,7 @@ where messages.push(FloodsubMessage { source: PeerId::from_bytes(&publish.from.unwrap_or_default()) .map_err(|_| FloodsubError::InvalidPeerId)?, - data: publish.data.unwrap_or_default(), + data: publish.data.unwrap_or_default().into(), sequence_number: publish.seqno.unwrap_or_default(), topics: publish.topic_ids.into_iter().map(Topic::new).collect(), }); @@ -172,7 +173,7 @@ impl FloodsubRpc { .into_iter() .map(|msg| proto::Message { from: Some(msg.source.to_bytes()), - data: Some(msg.data), + data: Some(msg.data.to_vec()), seqno: Some(msg.sequence_number), topic_ids: msg.topics.into_iter().map(|topic| topic.into()).collect(), }) @@ -197,7 +198,7 @@ pub struct FloodsubMessage { pub source: PeerId, /// Content of the message. Its meaning is out of scope of this library. - pub data: Vec, + pub data: Bytes, /// An incrementing sequence number. pub sequence_number: Vec, diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index a1f4ef6c9736..5ff4cfa27d68 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -1,6 +1,41 @@ +## 0.46.1 + +- Deprecate `Rpc` in preparation for removing it from the public API because it is an internal type. + See [PR 4833](https://github.com/libp2p/rust-libp2p/pull/4833). + +## 0.46.0 + +- Remove `fast_message_id_fn` mechanism from `Config`. + See [PR 4285](https://github.com/libp2p/rust-libp2p/pull/4285). +- Remove deprecated `gossipsub::Config::idle_timeout` in favor of `SwarmBuilder::idle_connection_timeout`. + See [PR 4642](https://github.com/libp2p/rust-libp2p/pull/4642). 
+- Return typed error from config builder. + See [PR 4445](https://github.com/libp2p/rust-libp2p/pull/4445). +- Process outbound stream before inbound stream in `EnabledHandler::poll(..)`. + See [PR 4778](https://github.com/libp2p/rust-libp2p/pull/4778). + +## 0.45.2 + +- Deprecate `gossipsub::Config::idle_timeout` in favor of `SwarmBuilder::idle_connection_timeout`. + See [PR 4648]. + + + +[PR 4648]: (https://github.com/libp2p/rust-libp2p/pull/4648) + + + ## 0.45.1 -- Add getter function to obtain `TopicScoreParams`. +- Add getter function to o btain `TopicScoreParams`. See [PR 4231]. [PR 4231]: https://github.com/libp2p/rust-libp2p/pull/4231 diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index 684cc8786542..4d484a812494 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-gossipsub" edition = "2021" rust-version = { workspace = true } description = "Gossipsub protocol for libp2p" -version = "0.45.1" +version = "0.46.1" authors = ["Age Manning "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,43 +14,42 @@ categories = ["network-programming", "asynchronous"] wasm-bindgen = ["getrandom/js", "instant/wasm-bindgen"] [dependencies] -asynchronous-codec = "0.6" -base64 = "0.21.4" -byteorder = "1.3.4" +asynchronous-codec = { workspace = true } +base64 = "0.21.7" +byteorder = "1.5.0" bytes = "1.5" either = "1.9" fnv = "1.0.7" -futures = "0.3.28" +futures = "0.3.30" futures-ticker = "0.0.3" -getrandom = "0.2.9" +getrandom = "0.2.12" hex_fmt = "0.3.0" instant = "0.1.12" libp2p-core = { workspace = true } -libp2p-identity = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } libp2p-swarm = { workspace = true } -log = "0.4.20" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" -regex = "1.9.6" +regex = "1.10.3" serde = { version = "1", optional = true, features = ["derive"] } sha2 = "0.10.8" -smallvec = 
"1.11.1" -unsigned-varint = { version = "0.7.2", features = ["asynchronous_codec"] } +smallvec = "1.12.0" +tracing = "0.1.37" void = "1.0.2" # Metrics dependencies -prometheus-client = "0.21.2" +prometheus-client = { workspace = true } [dev-dependencies] async-std = { version = "1.6.3", features = ["unstable"] } -env_logger = "0.10.0" hex = "0.4.2" libp2p-core = { workspace = true } libp2p-yamux = { workspace = true } libp2p-noise = { workspace = true } libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 402420f378ec..24a32de4cc79 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -31,7 +31,6 @@ use std::{ use futures::StreamExt; use futures_ticker::Ticker; -use log::{debug, error, trace, warn}; use prometheus_client::registry::Registry; use rand::{seq::SliceRandom, thread_rng}; @@ -42,8 +41,8 @@ use libp2p_identity::PeerId; use libp2p_swarm::{ behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, FromSwarm}, dial_opts::DialOpts, - ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, PollParameters, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, }; use crate::backoff::BackoffStorage; @@ -55,14 +54,14 @@ use crate::metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty use crate::peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason}; use crate::protocol::SIGNING_PREFIX; use crate::subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}; -use crate::time_cache::{DuplicateCache, 
TimeCache}; +use crate::time_cache::DuplicateCache; use crate::topic::{Hasher, Topic, TopicHash}; use crate::transform::{DataTransform, IdentityTransform}; use crate::types::{ - ControlAction, FastMessageId, Message, MessageAcceptance, MessageId, PeerInfo, RawMessage, - Subscription, SubscriptionAction, + ControlAction, Message, MessageAcceptance, MessageId, PeerInfo, RawMessage, Subscription, + SubscriptionAction, }; -use crate::types::{PeerConnections, PeerKind, Rpc}; +use crate::types::{PeerConnections, PeerKind, RpcOut}; use crate::{rpc_proto::proto, TopicScoreParams}; use crate::{PublishError, SubscriptionError, ValidationError}; use instant::SystemTime; @@ -323,9 +322,6 @@ pub struct Behaviour { /// our own messages back if the messages are anonymous or use a random author. published_message_ids: DuplicateCache, - /// Short term cache for fast message ids mapping them to the real message ids - fast_message_id_cache: TimeCache, - /// The filter used to handle message subscriptions. subscription_filter: F, @@ -446,7 +442,6 @@ where control_pool: HashMap::new(), publish_config: privacy.into(), duplicate_cache: DuplicateCache::new(config.duplicate_cache_time()), - fast_message_id_cache: TimeCache::new(config.duplicate_cache_time()), topic_peers: HashMap::new(), peer_topics: HashMap::new(), explicit_peers: HashSet::new(), @@ -527,41 +522,28 @@ where /// Returns [`Ok(true)`] if the subscription worked. Returns [`Ok(false)`] if we were already /// subscribed. 
pub fn subscribe(&mut self, topic: &Topic) -> Result { - debug!("Subscribing to topic: {}", topic); + tracing::debug!(%topic, "Subscribing to topic"); let topic_hash = topic.hash(); if !self.subscription_filter.can_subscribe(&topic_hash) { return Err(SubscriptionError::NotAllowed); } if self.mesh.get(&topic_hash).is_some() { - debug!("Topic: {} is already in the mesh.", topic); + tracing::debug!(%topic, "Topic is already in the mesh"); return Ok(false); } // send subscription request to all peers - let peer_list = self.peer_topics.keys().cloned().collect::>(); - if !peer_list.is_empty() { - let event = Rpc { - messages: Vec::new(), - subscriptions: vec![Subscription { - topic_hash: topic_hash.clone(), - action: SubscriptionAction::Subscribe, - }], - control_msgs: Vec::new(), - } - .into_protobuf(); - - for peer in peer_list { - debug!("Sending SUBSCRIBE to peer: {:?}", peer); - self.send_message(peer, event.clone()) - .map_err(SubscriptionError::PublishError)?; - } + for peer in self.peer_topics.keys().copied().collect::>() { + tracing::debug!(%peer, "Sending SUBSCRIBE to peer"); + let event = RpcOut::Subscribe(topic_hash.clone()); + self.send_message(peer, event); } // call JOIN(topic) // this will add new peers to the mesh for the topic self.join(&topic_hash); - debug!("Subscribed to topic: {}", topic); + tracing::debug!(%topic, "Subscribed to topic"); Ok(true) } @@ -569,39 +551,27 @@ where /// /// Returns [`Ok(true)`] if we were subscribed to this topic. 
pub fn unsubscribe(&mut self, topic: &Topic) -> Result { - debug!("Unsubscribing from topic: {}", topic); + tracing::debug!(%topic, "Unsubscribing from topic"); let topic_hash = topic.hash(); if self.mesh.get(&topic_hash).is_none() { - debug!("Already unsubscribed from topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Already unsubscribed from topic"); // we are not subscribed return Ok(false); } // announce to all peers - let peer_list = self.peer_topics.keys().cloned().collect::>(); - if !peer_list.is_empty() { - let event = Rpc { - messages: Vec::new(), - subscriptions: vec![Subscription { - topic_hash: topic_hash.clone(), - action: SubscriptionAction::Unsubscribe, - }], - control_msgs: Vec::new(), - } - .into_protobuf(); - - for peer in peer_list { - debug!("Sending UNSUBSCRIBE to peer: {}", peer.to_string()); - self.send_message(peer, event.clone())?; - } + for peer in self.peer_topics.keys().copied().collect::>() { + tracing::debug!(%peer, "Sending UNSUBSCRIBE to peer"); + let event = RpcOut::Unsubscribe(topic_hash.clone()); + self.send_message(peer, event); } // call LEAVE(topic) // this will remove the topic from the mesh self.leave(&topic_hash); - debug!("Unsubscribed from topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Unsubscribed from topic"); Ok(true) } @@ -629,15 +599,8 @@ where topic: raw_message.topic.clone(), }); - let event = Rpc { - subscriptions: Vec::new(), - messages: vec![raw_message.clone()], - control_msgs: Vec::new(), - } - .into_protobuf(); - // check that the size doesn't exceed the max transmission size - if event.get_size() > self.config.max_transmit_size() { + if raw_message.raw_protobuf_len() > self.config.max_transmit_size() { return Err(PublishError::MessageTooLarge); } @@ -645,34 +608,71 @@ where if self.duplicate_cache.contains(&msg_id) { // This message has already been seen. We don't re-publish messages that have already // been published on the network. 
- warn!( - "Not publishing a message that has already been published. Msg-id {}", - msg_id + tracing::warn!( + message=%msg_id, + "Not publishing a message that has already been published" ); return Err(PublishError::Duplicate); } - trace!("Publishing message: {:?}", msg_id); + tracing::trace!(message=%msg_id, "Publishing message"); let topic_hash = raw_message.topic.clone(); - // If we are not flood publishing forward the message to mesh peers. - let mesh_peers_sent = !self.config.flood_publish() - && self.forward_msg(&msg_id, raw_message.clone(), None, HashSet::new())?; - let mut recipient_peers = HashSet::new(); if let Some(set) = self.topic_peers.get(&topic_hash) { if self.config.flood_publish() { // Forward to all peers above score and all explicit peers - recipient_peers.extend( - set.iter() - .filter(|p| { - self.explicit_peers.contains(*p) - || !self.score_below_threshold(p, |ts| ts.publish_threshold).0 - }) - .cloned(), - ); + recipient_peers.extend(set.iter().filter(|p| { + self.explicit_peers.contains(*p) + || !self.score_below_threshold(p, |ts| ts.publish_threshold).0 + })); } else { + match self.mesh.get(&raw_message.topic) { + // Mesh peers + Some(mesh_peers) => { + recipient_peers.extend(mesh_peers); + } + // Gossipsub peers + None => { + tracing::debug!(topic=%topic_hash, "Topic not in the mesh"); + // If we have fanout peers add them to the map. 
+ if self.fanout.contains_key(&topic_hash) { + for peer in self.fanout.get(&topic_hash).expect("Topic must exist") { + recipient_peers.insert(*peer); + } + } else { + // We have no fanout peers, select mesh_n of them and add them to the fanout + let mesh_n = self.config.mesh_n(); + let new_peers = get_random_peers( + &self.topic_peers, + &self.connected_peers, + &topic_hash, + mesh_n, + { + |p| { + !self.explicit_peers.contains(p) + && !self + .score_below_threshold(p, |pst| { + pst.publish_threshold + }) + .0 + } + }, + ); + // Add the new peers to the fanout and recipient peers + self.fanout.insert(topic_hash.clone(), new_peers.clone()); + for peer in new_peers { + tracing::debug!(%peer, "Peer added to fanout"); + recipient_peers.insert(peer); + } + } + // We are publishing to fanout peers - update the time we published + self.fanout_last_pub + .insert(topic_hash.clone(), Instant::now()); + } + } + // Explicit peers for peer in &self.explicit_peers { if set.contains(peer) { @@ -690,54 +690,17 @@ where recipient_peers.insert(*peer); } } - - // Gossipsub peers - if self.mesh.get(&topic_hash).is_none() { - debug!("Topic: {:?} not in the mesh", topic_hash); - // If we have fanout peers add them to the map. 
- if self.fanout.contains_key(&topic_hash) { - for peer in self.fanout.get(&topic_hash).expect("Topic must exist") { - recipient_peers.insert(*peer); - } - } else { - // We have no fanout peers, select mesh_n of them and add them to the fanout - let mesh_n = self.config.mesh_n(); - let new_peers = get_random_peers( - &self.topic_peers, - &self.connected_peers, - &topic_hash, - mesh_n, - { - |p| { - !self.explicit_peers.contains(p) - && !self - .score_below_threshold(p, |pst| pst.publish_threshold) - .0 - } - }, - ); - // Add the new peers to the fanout and recipient peers - self.fanout.insert(topic_hash.clone(), new_peers.clone()); - for peer in new_peers { - debug!("Peer added to fanout: {:?}", peer); - recipient_peers.insert(peer); - } - } - // We are publishing to fanout peers - update the time we published - self.fanout_last_pub - .insert(topic_hash.clone(), Instant::now()); - } } } - if recipient_peers.is_empty() && !mesh_peers_sent { + if recipient_peers.is_empty() { return Err(PublishError::InsufficientPeers); } // If the message isn't a duplicate and we have sent it to some peers add it to the // duplicate cache and memcache. self.duplicate_cache.insert(msg_id.clone()); - self.mcache.put(&msg_id, raw_message); + self.mcache.put(&msg_id, raw_message.clone()); // If the message is anonymous or has a random author add it to the published message ids // cache. @@ -748,17 +711,12 @@ where } // Send to peers we know are subscribed to the topic. 
- let msg_bytes = event.get_size(); for peer_id in recipient_peers.iter() { - trace!("Sending message to peer: {:?}", peer_id); - self.send_message(*peer_id, event.clone())?; - - if let Some(m) = self.metrics.as_mut() { - m.msg_sent(&topic_hash, msg_bytes); - } + tracing::trace!(peer=%peer_id, "Sending message to peer"); + self.send_message(*peer_id, RpcOut::Publish(raw_message.clone())); } - debug!("Published message: {:?}", &msg_id); + tracing::debug!(message=%msg_id, "Published message"); if let Some(metrics) = self.metrics.as_mut() { metrics.register_published_message(&topic_hash); @@ -799,9 +757,9 @@ where (raw_message.clone(), originating_peers) } None => { - warn!( - "Message not in cache. Ignoring forwarding. Message Id: {}", - msg_id + tracing::warn!( + message=%msg_id, + "Message not in cache. Ignoring forwarding" ); if let Some(metrics) = self.metrics.as_mut() { metrics.memcache_miss(); @@ -846,14 +804,14 @@ where } Ok(true) } else { - warn!("Rejected message not in cache. Message Id: {}", msg_id); + tracing::warn!(message=%msg_id, "Rejected message not in cache"); Ok(false) } } /// Adds a new peer to the list of explicitly connected peers. pub fn add_explicit_peer(&mut self, peer_id: &PeerId) { - debug!("Adding explicit peer {}", peer_id); + tracing::debug!(peer=%peer_id, "Adding explicit peer"); self.explicit_peers.insert(*peer_id); @@ -863,7 +821,7 @@ where /// This removes the peer from explicitly connected peers, note that this does not disconnect /// the peer. pub fn remove_explicit_peer(&mut self, peer_id: &PeerId) { - debug!("Removing explicit peer {}", peer_id); + tracing::debug!(peer=%peer_id, "Removing explicit peer"); self.explicit_peers.remove(peer_id); } @@ -871,14 +829,14 @@ where /// created by this peer will be rejected. 
pub fn blacklist_peer(&mut self, peer_id: &PeerId) { if self.blacklisted_peers.insert(*peer_id) { - debug!("Peer has been blacklisted: {}", peer_id); + tracing::debug!(peer=%peer_id, "Peer has been blacklisted"); } } /// Removes a peer from the blacklist if it has previously been blacklisted. pub fn remove_blacklisted_peer(&mut self, peer_id: &PeerId) { if self.blacklisted_peers.remove(peer_id) { - debug!("Peer has been removed from the blacklist: {}", peer_id); + tracing::debug!(peer=%peer_id, "Peer has been removed from the blacklist"); } } @@ -947,11 +905,11 @@ where /// Gossipsub JOIN(topic) - adds topic peers to mesh and sends them GRAFT messages. fn join(&mut self, topic_hash: &TopicHash) { - debug!("Running JOIN for topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Running JOIN for topic"); // if we are already in the mesh, return if self.mesh.contains_key(topic_hash) { - debug!("JOIN: The topic is already in the mesh, ignoring JOIN"); + tracing::debug!(topic=%topic_hash, "JOIN: The topic is already in the mesh, ignoring JOIN"); return; } @@ -964,9 +922,9 @@ where // check if we have mesh_n peers in fanout[topic] and add them to the mesh if we do, // removing the fanout entry. 
if let Some((_, mut peers)) = self.fanout.remove_entry(topic_hash) { - debug!( - "JOIN: Removing peers from the fanout for topic: {:?}", - topic_hash + tracing::debug!( + topic=%topic_hash, + "JOIN: Removing peers from the fanout for topic" ); // remove explicit peers, peers with negative scores, and backoffed peers @@ -979,11 +937,12 @@ where // Add up to mesh_n of them them to the mesh // NOTE: These aren't randomly added, currently FIFO let add_peers = std::cmp::min(peers.len(), self.config.mesh_n()); - debug!( - "JOIN: Adding {:?} peers from the fanout for topic: {:?}", - add_peers, topic_hash + tracing::debug!( + topic=%topic_hash, + "JOIN: Adding {:?} peers from the fanout for topic", + add_peers ); - added_peers.extend(peers.iter().cloned().take(add_peers)); + added_peers.extend(peers.iter().take(add_peers)); self.mesh.insert( topic_hash.clone(), @@ -1016,7 +975,7 @@ where ); added_peers.extend(new_peers.clone()); // add them to the mesh - debug!( + tracing::debug!( "JOIN: Inserting {:?} random peers into the mesh", new_peers.len() ); @@ -1031,7 +990,7 @@ where for peer_id in added_peers { // Send a GRAFT control message - debug!("JOIN: Sending Graft message to peer: {:?}", peer_id); + tracing::debug!(peer=%peer_id, "JOIN: Sending Graft message to peer"); if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.graft(&peer_id, topic_hash.clone()); } @@ -1059,7 +1018,7 @@ where m.set_mesh_peers(topic_hash, mesh_peers) } - debug!("Completed JOIN for topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Completed JOIN for topic"); } /// Creates a PRUNE gossipsub action. 
@@ -1076,7 +1035,7 @@ where match self.connected_peers.get(peer).map(|v| &v.kind) { Some(PeerKind::Floodsub) => { - error!("Attempted to prune a Floodsub peer"); + tracing::error!("Attempted to prune a Floodsub peer"); } Some(PeerKind::Gossipsub) => { // GossipSub v1.0 -- no peer exchange, the peer won't be able to parse it anyway @@ -1087,7 +1046,7 @@ where }; } None => { - error!("Attempted to Prune an unknown peer"); + tracing::error!("Attempted to Prune an unknown peer"); } _ => {} // Gossipsub 1.1 peer perform the `Prune` } @@ -1126,7 +1085,7 @@ where /// Gossipsub LEAVE(topic) - Notifies mesh\[topic\] peers with PRUNE messages. fn leave(&mut self, topic_hash: &TopicHash) { - debug!("Running LEAVE for topic {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Running LEAVE for topic"); // If our mesh contains the topic, send prune to peers and delete it from the mesh if let Some((_, peers)) = self.mesh.remove_entry(topic_hash) { @@ -1135,7 +1094,7 @@ where } for peer in peers { // Send a PRUNE control message - debug!("LEAVE: Sending PRUNE to peer: {:?}", peer); + tracing::debug!(%peer, "LEAVE: Sending PRUNE to peer"); let on_unsubscribe = true; let control = self.make_prune(topic_hash, &peer, self.config.do_px(), on_unsubscribe); @@ -1152,14 +1111,14 @@ where ); } } - debug!("Completed LEAVE for topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Completed LEAVE for topic"); } /// Checks if the given peer is still connected and if not dials the peer again. 
fn check_explicit_peer_connection(&mut self, peer_id: &PeerId) { if !self.peer_topics.contains_key(peer_id) { // Connect to peer - debug!("Connecting to explicit peer {:?}", peer_id); + tracing::debug!(peer=%peer_id, "Connecting to explicit peer"); self.events.push_back(ToSwarm::Dial { opts: DialOpts::peer_id(*peer_id).build(), }); @@ -1197,9 +1156,10 @@ where fn handle_ihave(&mut self, peer_id: &PeerId, ihave_msgs: Vec<(TopicHash, Vec)>) { // We ignore IHAVE gossip from any peer whose score is below the gossip threshold if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) { - debug!( - "IHAVE: ignoring peer {:?} with score below threshold [score = {}]", - peer_id, score + tracing::debug!( + peer=%peer_id, + %score, + "IHAVE: ignoring peer with score below threshold" ); return; } @@ -1208,25 +1168,27 @@ where let peer_have = self.count_received_ihave.entry(*peer_id).or_insert(0); *peer_have += 1; if *peer_have > self.config.max_ihave_messages() { - debug!( - "IHAVE: peer {} has advertised too many times ({}) within this heartbeat \ + tracing::debug!( + peer=%peer_id, + "IHAVE: peer has advertised too many times ({}) within this heartbeat \ interval; ignoring", - peer_id, *peer_have + *peer_have ); return; } if let Some(iasked) = self.count_sent_iwant.get(peer_id) { if *iasked >= self.config.max_ihave_length() { - debug!( - "IHAVE: peer {} has already advertised too many messages ({}); ignoring", - peer_id, *iasked + tracing::debug!( + peer=%peer_id, + "IHAVE: peer has already advertised too many messages ({}); ignoring", + *iasked ); return; } } - trace!("Handling IHAVE for peer: {:?}", peer_id); + tracing::trace!(peer=%peer_id, "Handling IHAVE for peer"); let mut iwant_ids = HashSet::new(); @@ -1248,9 +1210,9 @@ where for (topic, ids) in ihave_msgs { // only process the message if we are subscribed if !self.mesh.contains_key(&topic) { - debug!( - "IHAVE: Ignoring IHAVE - Not subscribed to topic: {:?}", - topic + tracing::debug!( + 
%topic, + "IHAVE: Ignoring IHAVE - Not subscribed to topic" ); continue; } @@ -1274,11 +1236,11 @@ where } // Send the list of IWANT control messages - debug!( - "IHAVE: Asking for {} out of {} messages from {}", + tracing::debug!( + peer=%peer_id, + "IHAVE: Asking for {} out of {} messages from peer", iask, - iwant_ids.len(), - peer_id + iwant_ids.len() ); // Ask in random order @@ -1301,9 +1263,9 @@ where Instant::now() + self.config.iwant_followup_time(), ); } - trace!( - "IHAVE: Asking for the following messages from {}: {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "IHAVE: Asking for the following messages from peer: {:?}", iwant_ids_vec ); @@ -1315,7 +1277,7 @@ where }, ); } - trace!("Completed IHAVE handling for peer: {:?}", peer_id); + tracing::trace!(peer=%peer_id, "Completed IHAVE handling for peer"); } /// Handles an IWANT control message. Checks our cache of messages. If the message exists it is @@ -1323,68 +1285,43 @@ where fn handle_iwant(&mut self, peer_id: &PeerId, iwant_msgs: Vec) { // We ignore IWANT gossip from any peer whose score is below the gossip threshold if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) { - debug!( - "IWANT: ignoring peer {:?} with score below threshold [score = {}]", - peer_id, score + tracing::debug!( + peer=%peer_id, + "IWANT: ignoring peer with score below threshold [score = {}]", + score ); return; } - debug!("Handling IWANT for peer: {:?}", peer_id); - // build a hashmap of available messages - let mut cached_messages = HashMap::new(); + tracing::debug!(peer=%peer_id, "Handling IWANT for peer"); for id in iwant_msgs { - // If we have it and the IHAVE count is not above the threshold, add it do the - // cached_messages mapping - if let Some((msg, count)) = self.mcache.get_with_iwant_counts(&id, peer_id) { + // If we have it and the IHAVE count is not above the threshold, + // forward the message. 
+ if let Some((msg, count)) = self + .mcache + .get_with_iwant_counts(&id, peer_id) + .map(|(msg, count)| (msg.clone(), count)) + { if count > self.config.gossip_retransimission() { - debug!( - "IWANT: Peer {} has asked for message {} too many times; ignoring \ - request", - peer_id, &id + tracing::debug!( + peer=%peer_id, + message=%id, + "IWANT: Peer has asked for message too many times; ignoring request" ); } else { - cached_messages.insert(id.clone(), msg.clone()); + tracing::debug!(peer=%peer_id, "IWANT: Sending cached messages to peer"); + self.send_message(*peer_id, RpcOut::Forward(msg)); } } } - - if !cached_messages.is_empty() { - debug!("IWANT: Sending cached messages to peer: {:?}", peer_id); - // Send the messages to the peer - let message_list: Vec<_> = cached_messages.into_iter().map(|entry| entry.1).collect(); - - let topics = message_list - .iter() - .map(|message| message.topic.clone()) - .collect::>(); - - let message = Rpc { - subscriptions: Vec::new(), - messages: message_list, - control_msgs: Vec::new(), - } - .into_protobuf(); - - let msg_bytes = message.get_size(); - - if self.send_message(*peer_id, message).is_err() { - error!("Failed to send cached messages. Messages too large"); - } else if let Some(m) = self.metrics.as_mut() { - // Sending of messages succeeded, register them on the internal metrics. - for topic in topics.iter() { - m.msg_sent(topic, msg_bytes); - } - } - } - debug!("Completed IWANT handling for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Completed IWANT handling for peer"); } /// Handles GRAFT control messages. If subscribed to the topic, adds the peer to mesh, if not, /// responds with PRUNE messages. 
fn handle_graft(&mut self, peer_id: &PeerId, topics: Vec) { - debug!("Handling GRAFT message for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Handling GRAFT message for peer"); let mut to_prune_topics = HashSet::new(); @@ -1405,7 +1342,7 @@ where // we don't GRAFT to/from explicit peers; complain loudly if this happens if self.explicit_peers.contains(peer_id) { - warn!("GRAFT: ignoring request from direct peer {}", peer_id); + tracing::warn!(peer=%peer_id, "GRAFT: ignoring request from direct peer"); // this is possibly a bug from non-reciprocal configuration; send a PRUNE for all topics to_prune_topics = topics.into_iter().collect(); // but don't PX @@ -1417,9 +1354,10 @@ where if let Some(peers) = self.mesh.get_mut(&topic_hash) { // if the peer is already in the mesh ignore the graft if peers.contains(peer_id) { - debug!( - "GRAFT: Received graft for peer {:?} that is already in topic {:?}", - peer_id, &topic_hash + tracing::debug!( + peer=%peer_id, + topic=%&topic_hash, + "GRAFT: Received graft for peer that is already in topic" ); continue; } @@ -1428,9 +1366,9 @@ where if let Some(backoff_time) = self.backoffs.get_backoff_time(&topic_hash, peer_id) { if backoff_time > now { - warn!( - "[Penalty] Peer attempted graft within backoff time, penalizing {}", - peer_id + tracing::warn!( + peer=%peer_id, + "[Penalty] Peer attempted graft within backoff time, penalizing" ); // add behavioural penalty if let Some((peer_score, ..)) = &mut self.peer_score { @@ -1461,10 +1399,11 @@ where // check the score if below_zero { // we don't GRAFT peers with negative score - debug!( - "GRAFT: ignoring peer {:?} with negative score [score = {}, \ - topic = {}]", - peer_id, score, topic_hash + tracing::debug!( + peer=%peer_id, + %score, + topic=%topic_hash, + "GRAFT: ignoring peer with negative score" ); // we do send them PRUNE however, because it's a matter of protocol correctness to_prune_topics.insert(topic_hash.clone()); @@ -1483,9 +1422,10 @@ where } // add peer to 
the mesh - debug!( - "GRAFT: Mesh link added for peer: {:?} in topic: {:?}", - peer_id, &topic_hash + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "GRAFT: Mesh link added for peer in topic" ); if peers.insert(*peer_id) { @@ -1510,9 +1450,10 @@ where } else { // don't do PX when there is an unknown topic to avoid leaking our peers do_px = false; - debug!( - "GRAFT: Received graft for unknown topic {:?} from peer {:?}", - &topic_hash, peer_id + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "GRAFT: Received graft for unknown topic from peer" ); // spam hardening: ignore GRAFTs for unknown topics continue; @@ -1523,29 +1464,20 @@ where if !to_prune_topics.is_empty() { // build the prune messages to send let on_unsubscribe = false; - let prune_messages = to_prune_topics + for action in to_prune_topics .iter() .map(|t| self.make_prune(t, peer_id, do_px, on_unsubscribe)) - .collect(); + .collect::>() + { + self.send_message(*peer_id, RpcOut::Control(action)); + } // Send the prune messages to the peer - debug!( - "GRAFT: Not subscribed to topics - Sending PRUNE to peer: {}", - peer_id + tracing::debug!( + peer=%peer_id, + "GRAFT: Not subscribed to topics - Sending PRUNE to peer" ); - - if let Err(e) = self.send_message( - *peer_id, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: prune_messages, - } - .into_protobuf(), - ) { - error!("Failed to send PRUNE: {:?}", e); - } } - debug!("Completed GRAFT handling for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Completed GRAFT handling for peer"); } fn remove_peer_from_mesh( @@ -1560,10 +1492,10 @@ where if let Some(peers) = self.mesh.get_mut(topic_hash) { // remove the peer if it exists in the mesh if peers.remove(peer_id) { - debug!( - "PRUNE: Removing peer: {} from the mesh for topic: {}", - peer_id.to_string(), - topic_hash + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "PRUNE: Removing peer from the mesh for topic" ); if let Some(m) = 
self.metrics.as_mut() { m.peers_removed(topic_hash, reason, 1) @@ -1603,7 +1535,7 @@ where peer_id: &PeerId, prune_data: Vec<(TopicHash, Vec, Option)>, ) { - debug!("Handling PRUNE message for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Handling PRUNE message for peer"); let (below_threshold, score) = self.score_below_threshold(peer_id, |pst| pst.accept_px_threshold); for (topic_hash, px, backoff) in prune_data { @@ -1614,10 +1546,11 @@ where if !px.is_empty() { // we ignore PX from peers with insufficient score if below_threshold { - debug!( - "PRUNE: ignoring PX from peer {:?} with insufficient score \ - [score ={} topic = {}]", - peer_id, score, topic_hash + tracing::debug!( + peer=%peer_id, + %score, + topic=%topic_hash, + "PRUNE: ignoring PX from peer with insufficient score" ); continue; } @@ -1634,7 +1567,7 @@ where } } } - debug!("Completed PRUNE handling for peer: {}", peer_id.to_string()); + tracing::debug!(peer=%peer_id, "Completed PRUNE handling for peer"); } fn px_connect(&mut self, mut px: Vec) { @@ -1674,17 +1607,17 @@ where raw_message: &mut RawMessage, propagation_source: &PeerId, ) -> bool { - debug!( - "Handling message: {:?} from peer: {}", - msg_id, - propagation_source.to_string() + tracing::debug!( + peer=%propagation_source, + message=%msg_id, + "Handling message from peer" ); // Reject any message from a blacklisted peer if self.blacklisted_peers.contains(propagation_source) { - debug!( - "Rejecting message from blacklisted peer: {}", - propagation_source + tracing::debug!( + peer=%propagation_source, + "Rejecting message from blacklisted peer" ); if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { peer_score.reject_message( @@ -1701,9 +1634,10 @@ where // Also reject any message that originated from a blacklisted peer if let Some(source) = raw_message.source.as_ref() { if self.blacklisted_peers.contains(source) { - debug!( - "Rejecting message from peer {} because of blacklisted source: {}", - 
propagation_source, source + tracing::debug!( + peer=%propagation_source, + %source, + "Rejecting message from peer because of blacklisted source" ); self.handle_invalid_message( propagation_source, @@ -1731,9 +1665,10 @@ where }; if self_published { - debug!( - "Dropping message {} claiming to be from self but forwarded from {}", - msg_id, propagation_source + tracing::debug!( + message=%msg_id, + source=%propagation_source, + "Dropping message claiming to be from self but forwarded from source" ); self.handle_invalid_message(propagation_source, raw_message, RejectReason::SelfOrigin); return false; @@ -1755,36 +1690,11 @@ where metrics.msg_recvd_unfiltered(&raw_message.topic, raw_message.raw_protobuf_len()); } - let fast_message_id = self.config.fast_message_id(&raw_message); - - if let Some(fast_message_id) = fast_message_id.as_ref() { - if let Some(msg_id) = self.fast_message_id_cache.get(fast_message_id) { - let msg_id = msg_id.clone(); - // Report the duplicate - if self.message_is_valid(&msg_id, &mut raw_message, propagation_source) { - if let Some((peer_score, ..)) = &mut self.peer_score { - peer_score.duplicated_message( - propagation_source, - &msg_id, - &raw_message.topic, - ); - } - // Update the cache, informing that we have received a duplicate from another peer. - // The peers in this cache are used to prevent us forwarding redundant messages onto - // these peers. - self.mcache.observe_duplicate(&msg_id, propagation_source); - } - - // This message has been seen previously. Ignore it - return; - } - } - // Try and perform the data transform to the message. If it fails, consider it invalid. let message = match self.data_transform.inbound_transform(raw_message.clone()) { Ok(message) => message, Err(e) => { - debug!("Invalid message. Transform error: {:?}", e); + tracing::debug!("Invalid message. 
Transform error: {:?}", e); // Reject the message and return self.handle_invalid_message( propagation_source, @@ -1805,25 +1715,17 @@ where return; } - // Add the message to the duplicate caches - if let Some(fast_message_id) = fast_message_id { - // add id to cache - self.fast_message_id_cache - .entry(fast_message_id) - .or_insert_with(|| msg_id.clone()); - } - if !self.duplicate_cache.insert(msg_id.clone()) { - debug!("Message already received, ignoring. Message: {}", msg_id); + tracing::debug!(message=%msg_id, "Message already received, ignoring"); if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.duplicated_message(propagation_source, &msg_id, &message.topic); } self.mcache.observe_duplicate(&msg_id, propagation_source); return; } - debug!( - "Put message {:?} in duplicate_cache and resolve promises", - msg_id + tracing::debug!( + message=%msg_id, + "Put message in duplicate_cache and resolve promises" ); // Record the received message with the metrics @@ -1843,7 +1745,7 @@ where // Dispatch the message to the user if we are subscribed to any of the topics if self.mesh.contains_key(&message.topic) { - debug!("Sending received message to user"); + tracing::debug!("Sending received message to user"); self.events .push_back(ToSwarm::GenerateEvent(Event::Message { propagation_source: *propagation_source, @@ -1851,9 +1753,9 @@ where message, })); } else { - debug!( - "Received message on a topic we are not subscribed to: {:?}", - message.topic + tracing::debug!( + topic=%message.topic, + "Received message on a topic we are not subscribed to" ); return; } @@ -1869,9 +1771,9 @@ where ) .is_err() { - error!("Failed to forward message. Too large"); + tracing::error!("Failed to forward message. 
Too large"); } - debug!("Completed message handling for message: {:?}", msg_id); + tracing::debug!(message=%msg_id, "Completed message handling for message"); } } @@ -1887,20 +1789,17 @@ where metrics.register_invalid_message(&raw_message.topic); } - let fast_message_id_cache = &self.fast_message_id_cache; + if let Ok(message) = self.data_transform.inbound_transform(raw_message.clone()) { + let message_id = self.config.message_id(&message); - if let Some(msg_id) = self - .config - .fast_message_id(raw_message) - .and_then(|id| fast_message_id_cache.get(&id)) - { peer_score.reject_message( propagation_source, - msg_id, - &raw_message.topic, + &message_id, + &message.topic, reject_reason, ); - gossip_promises.reject_message(msg_id, &reject_reason); + + gossip_promises.reject_message(&message_id, &reject_reason); } else { // The message is invalid, we reject it ignoring any gossip promises. If a peer is // advertising this message via an IHAVE and it's invalid it will be double @@ -1916,23 +1815,20 @@ where subscriptions: &[Subscription], propagation_source: &PeerId, ) { - debug!( - "Handling subscriptions: {:?}, from source: {}", + tracing::debug!( + source=%propagation_source, + "Handling subscriptions: {:?}", subscriptions, - propagation_source.to_string() ); let mut unsubscribed_peers = Vec::new(); - let subscribed_topics = match self.peer_topics.get_mut(propagation_source) { - Some(topics) => topics, - None => { - error!( - "Subscription by unknown peer: {}", - propagation_source.to_string() - ); - return; - } + let Some(subscribed_topics) = self.peer_topics.get_mut(propagation_source) else { + tracing::error!( + peer=%propagation_source, + "Subscription by unknown peer" + ); + return; }; // Collect potential graft topics for the peer. 
@@ -1947,10 +1843,10 @@ where { Ok(topics) => topics, Err(s) => { - error!( - "Subscription filter error: {}; ignoring RPC from peer {}", - s, - propagation_source.to_string() + tracing::error!( + peer=%propagation_source, + "Subscription filter error: {}; ignoring RPC from peer", + s ); return; } @@ -1964,10 +1860,10 @@ where match subscription.action { SubscriptionAction::Subscribe => { if peer_list.insert(*propagation_source) { - debug!( - "SUBSCRIPTION: Adding gossip peer: {} to topic: {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Adding gossip peer to topic" ); } @@ -1996,19 +1892,19 @@ where if peers.len() < self.config.mesh_n_low() && peers.insert(*propagation_source) { - debug!( - "SUBSCRIPTION: Adding peer {} to the mesh for topic {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Adding peer to the mesh for topic" ); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Subscribed, 1) } // send graft to the peer - debug!( - "Sending GRAFT to peer {} for topic {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "Sending GRAFT to peer for topic" ); if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.graft(propagation_source, topic_hash.clone()); @@ -2025,10 +1921,10 @@ where } SubscriptionAction::Unsubscribe => { if peer_list.remove(propagation_source) { - debug!( - "SUBSCRIPTION: Removing gossip peer: {} from topic: {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Removing gossip peer from topic" ); } @@ -2068,23 +1964,12 @@ where // If we need to send grafts to peer, do so immediately, rather than waiting for the // heartbeat. 
- if !topics_to_graft.is_empty() - && self - .send_message( - *propagation_source, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: topics_to_graft - .into_iter() - .map(|topic_hash| ControlAction::Graft { topic_hash }) - .collect(), - } - .into_protobuf(), - ) - .is_err() + for action in topics_to_graft + .into_iter() + .map(|topic_hash| ControlAction::Graft { topic_hash }) + .collect::>() { - error!("Failed sending grafts. Message too large"); + self.send_message(*propagation_source, RpcOut::Control(action)) } // Notify the application of the subscriptions @@ -2092,9 +1977,9 @@ where self.events.push_back(event); } - trace!( - "Completed handling subscriptions from source: {:?}", - propagation_source + tracing::trace!( + source=%propagation_source, + "Completed handling subscriptions from source" ); } @@ -2112,7 +1997,7 @@ where /// Heartbeat function which shifts the memcache and updates the mesh. fn heartbeat(&mut self) { - debug!("Starting heartbeat"); + tracing::debug!("Starting heartbeat"); let start = Instant::now(); self.heartbeat_ticks += 1; @@ -2168,10 +2053,11 @@ where } if peer_score < 0.0 { - debug!( - "HEARTBEAT: Prune peer {:?} with negative score [score = {}, topic = \ - {}]", - peer_id, peer_score, topic_hash + tracing::debug!( + peer=%peer_id, + score=%peer_score, + topic=%topic_hash, + "HEARTBEAT: Prune peer with negative score" ); let current_topic = to_prune.entry(*peer_id).or_insert_with(Vec::new); @@ -2191,9 +2077,9 @@ where // too little peers - add some if peers.len() < self.config.mesh_n_low() { - debug!( - "HEARTBEAT: Mesh low. Topic: {} Contains: {} needs: {}", - topic_hash, + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Mesh low. 
Topic contains: {} needs: {}", peers.len(), self.config.mesh_n_low() ); @@ -2216,7 +2102,7 @@ where current_topic.push(topic_hash.clone()); } // update the mesh - debug!("Updating mesh, new mesh: {:?}", peer_list); + tracing::debug!("Updating mesh, new mesh: {:?}", peer_list); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Random, peer_list.len()) } @@ -2225,9 +2111,9 @@ where // too many peers - remove some if peers.len() > self.config.mesh_n_high() { - debug!( - "HEARTBEAT: Mesh high. Topic: {} Contains: {} needs: {}", - topic_hash, + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Mesh high. Topic contains: {} needs: {}", peers.len(), self.config.mesh_n_high() ); @@ -2235,7 +2121,7 @@ where // shuffle the peers and then sort by score ascending beginning with the worst let mut rng = thread_rng(); - let mut shuffled = peers.iter().cloned().collect::>(); + let mut shuffled = peers.iter().copied().collect::>(); shuffled.shuffle(&mut rng); shuffled.sort_by(|p1, p2| { let score_p1 = *scores.get(p1).unwrap_or(&0.0); @@ -2310,7 +2196,7 @@ where current_topic.push(topic_hash.clone()); } // update the mesh - debug!("Updating mesh, new mesh: {:?}", peer_list); + tracing::debug!("Updating mesh, new mesh: {:?}", peer_list); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Outbound, peer_list.len()) } @@ -2377,9 +2263,10 @@ where current_topic.push(topic_hash.clone()); } // update the mesh - debug!( - "Opportunistically graft in topic {} with peers {:?}", - topic_hash, peer_list + tracing::debug!( + topic=%topic_hash, + "Opportunistically graft in topic with peers {:?}", + peer_list ); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Random, peer_list.len()) @@ -2400,9 +2287,9 @@ where let fanout_ttl = self.config.fanout_ttl(); self.fanout_last_pub.retain(|topic_hash, last_pub_time| { if *last_pub_time + fanout_ttl < Instant::now() { - debug!( - "HEARTBEAT: Fanout topic 
removed due to timeout. Topic: {:?}", - topic_hash + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Fanout topic removed due to timeout" ); fanout.remove(topic_hash); return false; @@ -2425,9 +2312,9 @@ where match self.peer_topics.get(peer) { Some(topics) => { if !topics.contains(topic_hash) || peer_score < publish_threshold { - debug!( - "HEARTBEAT: Peer removed from fanout for topic: {:?}", - topic_hash + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Peer removed from fanout for topic" ); to_remove_peers.push(*peer); } @@ -2444,7 +2331,7 @@ where // not enough peers if peers.len() < self.config.mesh_n() { - debug!( + tracing::debug!( "HEARTBEAT: Fanout low. Contains: {:?} needs: {:?}", peers.len(), self.config.mesh_n() @@ -2467,7 +2354,7 @@ where } if self.peer_score.is_some() { - trace!("Mesh message deliveries: {:?}", { + tracing::trace!("Mesh message deliveries: {:?}", { self.mesh .iter() .map(|(t, peers)| { @@ -2506,7 +2393,7 @@ where // shift the memcache self.mcache.shift(); - debug!("Completed Heartbeat"); + tracing::debug!("Completed Heartbeat"); if let Some(metrics) = self.metrics.as_mut() { let duration = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX); metrics.observe_heartbeat_duration(duration); @@ -2526,7 +2413,7 @@ where // if we are emitting more than GossipSubMaxIHaveLength message_ids, truncate the list if message_ids.len() > self.config.max_ihave_length() { // we do the truncation (with shuffling) per peer below - debug!( + tracing::debug!( "too many messages for gossip; will truncate IHAVE list ({} messages)", message_ids.len() ); @@ -2555,7 +2442,7 @@ where }, ); - debug!("Gossiping IHAVE to {} peers.", to_msg_peers.len()); + tracing::debug!("Gossiping IHAVE to {} peers", to_msg_peers.len()); for peer in to_msg_peers { let mut peer_message_ids = message_ids.clone(); @@ -2608,12 +2495,9 @@ where &self.connected_peers, ); } - let mut control_msgs: Vec = topics - .iter() - .map(|topic_hash| ControlAction::Graft { - 
topic_hash: topic_hash.clone(), - }) - .collect(); + let control_msgs = topics.iter().map(|topic_hash| ControlAction::Graft { + topic_hash: topic_hash.clone(), + }); // If there are prunes associated with the same peer add them. // NOTE: In this case a peer has been added to a topic mesh, and removed from another. @@ -2621,52 +2505,37 @@ where // of its removal from another. // The following prunes are not due to unsubscribing. - let on_unsubscribe = false; - if let Some(topics) = to_prune.remove(&peer) { - let mut prunes = topics - .iter() - .map(|topic_hash| { - self.make_prune( - topic_hash, - &peer, - self.config.do_px() && !no_px.contains(&peer), - on_unsubscribe, - ) - }) - .collect::>(); - control_msgs.append(&mut prunes); - } + let prunes = to_prune + .remove(&peer) + .into_iter() + .flatten() + .map(|topic_hash| { + self.make_prune( + &topic_hash, + &peer, + self.config.do_px() && !no_px.contains(&peer), + false, + ) + }); // send the control messages - if self - .send_message( - peer, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs, - } - .into_protobuf(), - ) - .is_err() - { - error!("Failed to send control messages. Message too large"); + for msg in control_msgs.chain(prunes).collect::>() { + self.send_message(peer, RpcOut::Control(msg)); } } // handle the remaining prunes // The following prunes are not due to unsubscribing. 
- let on_unsubscribe = false; for (peer, topics) in to_prune.iter() { - let mut remaining_prunes = Vec::new(); for topic_hash in topics { let prune = self.make_prune( topic_hash, peer, self.config.do_px() && !no_px.contains(peer), - on_unsubscribe, + false, ); - remaining_prunes.push(prune); + self.send_message(*peer, RpcOut::Control(prune)); + // inform the handler peer_removed_from_mesh( *peer, @@ -2677,21 +2546,6 @@ where &self.connected_peers, ); } - - if self - .send_message( - *peer, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: remaining_prunes, - } - .into_protobuf(), - ) - .is_err() - { - error!("Failed to send prune messages. Message too large"); - } } } @@ -2712,7 +2566,7 @@ where } } - debug!("Forwarding message: {:?}", msg_id); + tracing::debug!(message=%msg_id, "Forwarding message"); let mut recipient_peers = HashSet::new(); { @@ -2748,22 +2602,13 @@ where // forward the message to peers if !recipient_peers.is_empty() { - let event = Rpc { - subscriptions: Vec::new(), - messages: vec![message.clone()], - control_msgs: Vec::new(), - } - .into_protobuf(); + let event = RpcOut::Forward(message.clone()); - let msg_bytes = event.get_size(); for peer in recipient_peers.iter() { - debug!("Sending message: {:?} to peer {:?}", msg_id, peer); - self.send_message(*peer, event.clone())?; - if let Some(m) = self.metrics.as_mut() { - m.msg_sent(&message.topic, msg_bytes); - } + tracing::debug!(%peer, message=%msg_id, "Sending message to peer"); + self.send_message(*peer, event.clone()); } - debug!("Completed forwarding message"); + tracing::debug!("Completed forwarding message"); Ok(true) } else { Ok(false) @@ -2787,7 +2632,7 @@ where let signature = { let message = proto::Message { - from: Some(author.clone().to_bytes()), + from: Some(author.to_bytes()), data: Some(data.clone()), seqno: Some(sequence_number.to_be_bytes().to_vec()), topic: topic.clone().into_string(), @@ -2874,19 +2719,8 @@ where /// Takes each control action mapping 
and turns it into a message fn flush_control_pool(&mut self) { for (peer, controls) in self.control_pool.drain().collect::>() { - if self - .send_message( - peer, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: controls, - } - .into_protobuf(), - ) - .is_err() - { - error!("Failed to flush control pool. Message too large"); + for msg in controls { + self.send_message(peer, RpcOut::Control(msg)); } } @@ -2894,144 +2728,21 @@ where self.pending_iwant_msgs.clear(); } - /// Send a [`Rpc`] message to a peer. This will wrap the message in an arc if it + /// Send a [`RpcOut`] message to a peer. This will wrap the message in an arc if it /// is not already an arc. - fn send_message(&mut self, peer_id: PeerId, message: proto::RPC) -> Result<(), PublishError> { - // If the message is oversized, try and fragment it. If it cannot be fragmented, log an - // error and drop the message (all individual messages should be small enough to fit in the - // max_transmit_size) - - let messages = self.fragment_message(message)?; - - for message in messages { - self.events.push_back(ToSwarm::NotifyHandler { - peer_id, - event: HandlerIn::Message(message), - handler: NotifyHandler::Any, - }) - } - Ok(()) - } - - // If a message is too large to be sent as-is, this attempts to fragment it into smaller RPC - // messages to be sent. - fn fragment_message(&self, rpc: proto::RPC) -> Result, PublishError> { - if rpc.get_size() < self.config.max_transmit_size() { - return Ok(vec![rpc]); - } - - let new_rpc = proto::RPC { - subscriptions: Vec::new(), - publish: Vec::new(), - control: None, - }; - - let mut rpc_list = vec![new_rpc.clone()]; - - // Gets an RPC if the object size will fit, otherwise create a new RPC. The last element - // will be the RPC to add an object. - macro_rules! 
create_or_add_rpc { - ($object_size: ident ) => { - let list_index = rpc_list.len() - 1; // the list is never empty - - // create a new RPC if the new object plus 5% of its size (for length prefix - // buffers) exceeds the max transmit size. - if rpc_list[list_index].get_size() + (($object_size as f64) * 1.05) as usize - > self.config.max_transmit_size() - && rpc_list[list_index] != new_rpc - { - // create a new rpc and use this as the current - rpc_list.push(new_rpc.clone()); - } - }; - } - - macro_rules! add_item { - ($object: ident, $type: ident ) => { - let object_size = $object.get_size(); - - if object_size + 2 > self.config.max_transmit_size() { - // This should not be possible. All received and published messages have already - // been vetted to fit within the size. - error!("Individual message too large to fragment"); - return Err(PublishError::MessageTooLarge); - } - - create_or_add_rpc!(object_size); - rpc_list - .last_mut() - .expect("Must have at least one element") - .$type - .push($object.clone()); - }; - } - - // Add messages until the limit - for message in &rpc.publish { - add_item!(message, publish); - } - for subscription in &rpc.subscriptions { - add_item!(subscription, subscriptions); - } - - // handle the control messages. 
If all are within the max_transmit_size, send them without - // fragmenting, otherwise, fragment the control messages - let empty_control = proto::ControlMessage::default(); - if let Some(control) = rpc.control.as_ref() { - if control.get_size() + 2 > self.config.max_transmit_size() { - // fragment the RPC - for ihave in &control.ihave { - let len = ihave.get_size(); - create_or_add_rpc!(len); - rpc_list - .last_mut() - .expect("Always an element") - .control - .get_or_insert_with(|| empty_control.clone()) - .ihave - .push(ihave.clone()); - } - for iwant in &control.iwant { - let len = iwant.get_size(); - create_or_add_rpc!(len); - rpc_list - .last_mut() - .expect("Always an element") - .control - .get_or_insert_with(|| empty_control.clone()) - .iwant - .push(iwant.clone()); - } - for graft in &control.graft { - let len = graft.get_size(); - create_or_add_rpc!(len); - rpc_list - .last_mut() - .expect("Always an element") - .control - .get_or_insert_with(|| empty_control.clone()) - .graft - .push(graft.clone()); - } - for prune in &control.prune { - let len = prune.get_size(); - create_or_add_rpc!(len); - rpc_list - .last_mut() - .expect("Always an element") - .control - .get_or_insert_with(|| empty_control.clone()) - .prune - .push(prune.clone()); - } - } else { - let len = control.get_size(); - create_or_add_rpc!(len); - rpc_list.last_mut().expect("Always an element").control = Some(control.clone()); + fn send_message(&mut self, peer_id: PeerId, rpc: RpcOut) { + if let Some(m) = self.metrics.as_mut() { + if let RpcOut::Publish(ref message) | RpcOut::Forward(ref message) = rpc { + // register bytes sent on the internal metrics. 
+ m.msg_sent(&message.topic, message.raw_protobuf_len()); } } - Ok(rpc_list) + self.events.push_back(ToSwarm::NotifyHandler { + peer_id, + event: HandlerIn::Message(rpc), + handler: NotifyHandler::Any, + }); } fn on_connection_established( @@ -3058,9 +2769,9 @@ where if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) { peer_score.add_ip(&peer_id, ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint ) } @@ -3080,46 +2791,27 @@ where .connections .push(connection_id); - if other_established == 0 { - // Ignore connections from blacklisted peers. - if self.blacklisted_peers.contains(&peer_id) { - debug!("Ignoring connection from blacklisted peer: {}", peer_id); - } else { - debug!("New peer connected: {}", peer_id); - // We need to send our subscriptions to the newly-connected node. - let mut subscriptions = vec![]; - for topic_hash in self.mesh.keys() { - subscriptions.push(Subscription { - topic_hash: topic_hash.clone(), - action: SubscriptionAction::Subscribe, - }); - } + if other_established > 0 { + return; // Not our first connection to this peer, hence nothing to do. + } - if !subscriptions.is_empty() { - // send our subscriptions to the peer - if self - .send_message( - peer_id, - Rpc { - messages: Vec::new(), - subscriptions, - control_msgs: Vec::new(), - } - .into_protobuf(), - ) - .is_err() - { - error!("Failed to send subscriptions, message too large"); - } - } - } + // Insert an empty set of the topics of this peer until known. + self.peer_topics.insert(peer_id, Default::default()); - // Insert an empty set of the topics of this peer until known. 
- self.peer_topics.insert(peer_id, Default::default()); + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.add_peer(peer_id); + } - if let Some((peer_score, ..)) = &mut self.peer_score { - peer_score.add_peer(peer_id); - } + // Ignore connections from blacklisted peers. + if self.blacklisted_peers.contains(&peer_id) { + tracing::debug!(peer=%peer_id, "Ignoring connection from blacklisted peer"); + return; + } + + tracing::debug!(peer=%peer_id, "New peer connected"); + // We need to send our subscriptions to the newly-connected node. + for topic_hash in self.mesh.clone().into_keys() { + self.send_message(peer_id, RpcOut::Subscribe(topic_hash)); } } @@ -3131,16 +2823,16 @@ where endpoint, remaining_established, .. - }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { // Remove IP from peer scoring system if let Some((peer_score, ..)) = &mut self.peer_score { if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) { peer_score.remove_ip(&peer_id, &ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint ) } @@ -3177,17 +2869,14 @@ where } } else { // remove from mesh, topic_peers, peer_topic and the fanout - debug!("Peer disconnected: {}", peer_id); + tracing::debug!(peer=%peer_id, "Peer disconnected"); { - let topics = match self.peer_topics.get(&peer_id) { - Some(topics) => topics, - None => { - debug_assert!( - self.blacklisted_peers.contains(&peer_id), - "Disconnected node not in connected list" - ); - return; - } + let Some(topics) = self.peer_topics.get(&peer_id) else { + debug_assert!( + self.blacklisted_peers.contains(&peer_id), + "Disconnected node not in connected list" + ); + return; }; // remove peer from all mappings @@ -3207,18 +2896,19 @@ where if let Some(peer_list) = self.topic_peers.get_mut(topic) { if !peer_list.remove(&peer_id) { // debugging purposes - 
warn!( - "Disconnected node: {} not in topic_peers peer list", - peer_id + tracing::warn!( + peer=%peer_id, + "Disconnected node: peer not in topic_peers" ); } if let Some(m) = self.metrics.as_mut() { m.set_topic_peers(topic, peer_list.len()) } } else { - warn!( - "Disconnected node: {} with topic: {:?} not in topic_peers", - &peer_id, &topic + tracing::warn!( + peer=%peer_id, + topic=%topic, + "Disconnected node: peer with topic not in topic_peers" ); } @@ -3270,18 +2960,18 @@ where if let Some(ip) = get_ip_addr(endpoint_old.get_remote_address()) { peer_score.remove_ip(&peer_id, &ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - &peer_id, + tracing::trace!( + peer=%&peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint_old ) } if let Some(ip) = get_ip_addr(endpoint_new.get_remote_address()) { peer_score.add_ip(&peer_id, ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint_new ) } @@ -3312,10 +3002,7 @@ where _: &Multiaddr, _: &Multiaddr, ) -> Result, ConnectionDenied> { - Ok(Handler::new( - self.config.protocol_config(), - self.config.idle_timeout(), - )) + Ok(Handler::new(self.config.protocol_config())) } fn handle_established_outbound_connection( @@ -3325,10 +3012,7 @@ where _: &Multiaddr, _: Endpoint, ) -> Result, ConnectionDenied> { - Ok(Handler::new( - self.config.protocol_config(), - self.config.idle_timeout(), - )) + Ok(Handler::new(self.config.protocol_config())) } fn on_connection_handler_event( @@ -3346,9 +3030,9 @@ where } if let PeerKind::NotSupported = kind { - debug!( - "Peer does not support gossipsub protocols. 
{}", - propagation_source + tracing::debug!( + peer=%propagation_source, + "Peer does not support gossipsub protocols" ); self.events .push_back(ToSwarm::GenerateEvent(Event::GossipsubNotSupported { @@ -3358,9 +3042,10 @@ where // Only change the value if the old value is Floodsub (the default set in // `NetworkBehaviour::on_event` with FromSwarm::ConnectionEstablished). // All other PeerKind changes are ignored. - debug!( - "New peer type found: {} for peer: {}", - kind, propagation_source + tracing::debug!( + peer=%propagation_source, + peer_type=%kind, + "New peer type found for peer" ); if let PeerKind::Floodsub = conn.kind { conn.kind = kind; @@ -3383,7 +3068,7 @@ where if let (true, _) = self.score_below_threshold(&propagation_source, |pst| pst.graylist_threshold) { - debug!("RPC Dropped from greylisted peer {}", propagation_source); + tracing::debug!(peer=%propagation_source, "RPC Dropped from greylisted peer"); return; } @@ -3399,11 +3084,11 @@ where } else { // log the invalid messages for (message, validation_error) in invalid_messages { - warn!( - "Invalid message. Reason: {:?} propagation_peer {} source {:?}", + tracing::warn!( + peer=%propagation_source, + source=?message.source, + "Invalid message from peer. Reason: {:?}", validation_error, - propagation_source.to_string(), - message.source ); } } @@ -3414,7 +3099,7 @@ where if self.config.max_messages_per_rpc().is_some() && Some(count) >= self.config.max_messages_per_rpc() { - warn!("Received more messages than permitted. Ignoring further messages. Processed: {}", count); + tracing::warn!("Received more messages than permitted. Ignoring further messages. 
Processed: {}", count); break; } self.handle_received_message(raw_message, &propagation_source); @@ -3457,10 +3142,10 @@ where } } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - _: &mut impl PollParameters, ) -> Poll>> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event); @@ -3480,7 +3165,7 @@ where Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(connection_established) => { self.on_connection_established(connection_established) @@ -3489,16 +3174,7 @@ where self.on_connection_closed(connection_closed) } FromSwarm::AddressChange(address_change) => self.on_address_change(address_change), - FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } } @@ -3560,7 +3236,7 @@ fn peer_removed_from_mesh( .get(&peer_id) .expect("To be connected to peer.") .connections - .get(0) + .first() .expect("There should be at least one connection to a peer."); if let Some(topics) = known_topics { @@ -3598,7 +3274,7 @@ fn get_random_peers_dynamic( // if they exist, filter the peers by `f` Some(peer_list) => peer_list .iter() - .cloned() + .copied() .filter(|p| { f(p) && match connected_peers.get(p) { Some(connections) if connections.kind == PeerKind::Gossipsub => true, @@ -3613,7 +3289,7 @@ fn get_random_peers_dynamic( // if we have less than needed, return them let n = n_map(gossip_peers.len()); if gossip_peers.len() <= n { - debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len()); + tracing::debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len()); return 
gossip_peers.into_iter().collect(); } @@ -3621,7 +3297,7 @@ fn get_random_peers_dynamic( let mut rng = thread_rng(); gossip_peers.partial_shuffle(&mut rng, n); - debug!("RANDOM PEERS: Got {:?} peers", n); + tracing::debug!("RANDOM PEERS: Got {:?} peers", n); gossip_peers.into_iter().take(n).collect() } @@ -3705,17 +3381,8 @@ impl fmt::Debug for PublishConfig { mod local_test { use super::*; use crate::IdentTopic; - use asynchronous_codec::Encoder; use quickcheck::*; - fn empty_rpc() -> Rpc { - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: Vec::new(), - } - } - fn test_message() -> RawMessage { RawMessage { source: Some(PeerId::random()), @@ -3728,13 +3395,6 @@ mod local_test { } } - fn test_subscription() -> Subscription { - Subscription { - action: SubscriptionAction::Subscribe, - topic_hash: IdentTopic::new("TestTopic").hash(), - } - } - fn test_control() -> ControlAction { ControlAction::IHave { topic_hash: IdentTopic::new("TestTopic").hash(), @@ -3742,119 +3402,16 @@ mod local_test { } } - impl Arbitrary for Rpc { + impl Arbitrary for RpcOut { fn arbitrary(g: &mut Gen) -> Self { - let mut rpc = empty_rpc(); - - for _ in 0..g.gen_range(0..10u8) { - rpc.subscriptions.push(test_subscription()); - } - for _ in 0..g.gen_range(0..10u8) { - rpc.messages.push(test_message()); - } - for _ in 0..g.gen_range(0..10u8) { - rpc.control_msgs.push(test_control()); - } - rpc - } - } - - #[test] - /// Tests RPC message fragmentation - fn test_message_fragmentation_deterministic() { - let max_transmit_size = 500; - let config = crate::config::ConfigBuilder::default() - .max_transmit_size(max_transmit_size) - .validation_mode(ValidationMode::Permissive) - .build() - .unwrap(); - let gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, config).unwrap(); - - // Message under the limit should be fine. 
- let mut rpc = empty_rpc(); - rpc.messages.push(test_message()); - - let mut rpc_proto = rpc.clone().into_protobuf(); - let fragmented_messages = gs.fragment_message(rpc_proto.clone()).unwrap(); - assert_eq!( - fragmented_messages, - vec![rpc_proto.clone()], - "Messages under the limit shouldn't be fragmented" - ); - - // Messages over the limit should be split - - while rpc_proto.get_size() < max_transmit_size { - rpc.messages.push(test_message()); - rpc_proto = rpc.clone().into_protobuf(); - } - - let fragmented_messages = gs - .fragment_message(rpc_proto) - .expect("Should be able to fragment the messages"); - - assert!( - fragmented_messages.len() > 1, - "the message should be fragmented" - ); - - // all fragmented messages should be under the limit - for message in fragmented_messages { - assert!( - message.get_size() < max_transmit_size, - "all messages should be less than the transmission size" - ); - } - } - - #[test] - fn test_message_fragmentation() { - fn prop(rpc: Rpc) { - let max_transmit_size = 500; - let config = crate::config::ConfigBuilder::default() - .max_transmit_size(max_transmit_size) - .validation_mode(ValidationMode::Permissive) - .build() - .unwrap(); - let gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, config).unwrap(); - - let mut length_codec = unsigned_varint::codec::UviBytes::default(); - length_codec.set_max_len(max_transmit_size); - let mut codec = - crate::protocol::GossipsubCodec::new(length_codec, ValidationMode::Permissive); - - let rpc_proto = rpc.into_protobuf(); - let fragmented_messages = gs - .fragment_message(rpc_proto.clone()) - .expect("Messages must be valid"); - - if rpc_proto.get_size() < max_transmit_size { - assert_eq!( - fragmented_messages.len(), - 1, - "the message should not be fragmented" - ); - } else { - assert!( - fragmented_messages.len() > 1, - "the message should be fragmented" - ); - } - - // all fragmented messages should be under the limit - for message in fragmented_messages { - 
assert!( - message.get_size() < max_transmit_size, - "all messages should be less than the transmission size: list size {} max size{}", message.get_size(), max_transmit_size - ); - - // ensure they can all be encoded - let mut buf = bytes::BytesMut::with_capacity(message.get_size()); - codec.encode(message, &mut buf).unwrap() + match u8::arbitrary(g) % 5 { + 0 => RpcOut::Subscribe(IdentTopic::new("TestTopic").hash()), + 1 => RpcOut::Unsubscribe(IdentTopic::new("TestTopic").hash()), + 2 => RpcOut::Publish(test_message()), + 3 => RpcOut::Forward(test_message()), + 4 => RpcOut::Control(test_control()), + _ => panic!("outside range"), } } - QuickCheck::new() - .max_tests(100) - .quickcheck(prop as fn(_) -> _) } } diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index b2414fd7afc8..2bf1c90c5c88 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -21,20 +21,16 @@ // Collection of tests for the gossipsub network behaviour use super::*; -use crate::protocol::ProtocolConfig; use crate::subscription_filter::WhitelistSubscriptionFilter; use crate::transform::{DataTransform, IdentityTransform}; -use crate::types::FastMessageId; use crate::ValidationError; use crate::{ - config::Config, config::ConfigBuilder, IdentTopic as Topic, Message, TopicScoreParams, + config::Config, config::ConfigBuilder, types::Rpc, IdentTopic as Topic, TopicScoreParams, }; use async_std::net::Ipv4Addr; use byteorder::{BigEndian, ByteOrder}; use libp2p_core::{ConnectedPoint, Endpoint}; use rand::Rng; -use std::collections::hash_map::DefaultHasher; -use std::hash::{Hash, Hasher}; use std::thread::sleep; use std::time::Duration; @@ -166,7 +162,7 @@ fn inject_nodes1() -> InjectNodes fn add_peer( gs: &mut Behaviour, - topic_hashes: &Vec, + topic_hashes: &[TopicHash], outbound: bool, explicit: bool, ) -> PeerId @@ -179,7 +175,7 @@ where fn add_peer_with_addr( gs: &mut Behaviour, - topic_hashes: &Vec, 
+ topic_hashes: &[TopicHash], outbound: bool, explicit: bool, address: Multiaddr, @@ -200,7 +196,7 @@ where fn add_peer_with_addr_and_kind( gs: &mut Behaviour, - topic_hashes: &Vec, + topic_hashes: &[TopicHash], outbound: bool, explicit: bool, address: Multiaddr, @@ -272,13 +268,10 @@ where for connection_id in peer_connections.connections.clone() { active_connections = active_connections.checked_sub(1).unwrap(); - let dummy_handler = Handler::new(ProtocolConfig::default(), Duration::ZERO); - gs.on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { peer_id: *peer_id, connection_id, endpoint: &fake_endpoint, - handler: dummy_handler, remaining_established: active_connections, })); } @@ -411,26 +404,19 @@ fn test_subscribe() { let subscriptions = gs .events .iter() - .fold(vec![], |mut collected_subscriptions, e| match e { - ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref message), - .. - } => { - for s in &message.subscriptions { - if let Some(true) = s.subscribe { - collected_subscriptions.push(s.clone()) - }; + .filter(|e| { + matches!( + e, + ToSwarm::NotifyHandler { + event: HandlerIn::Message(RpcOut::Subscribe(_)), + .. } - collected_subscriptions - } - _ => collected_subscriptions, - }); + ) + }) + .count(); // we sent a subscribe to all known peers - assert!( - subscriptions.len() == 20, - "Should send a subscription to all known peers" - ); + assert_eq!(subscriptions, 20); } #[test] @@ -479,26 +465,16 @@ fn test_unsubscribe() { let subscriptions = gs .events .iter() - .fold(vec![], |mut collected_subscriptions, e| match e { + .fold(0, |collected_subscriptions, e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref message), + event: HandlerIn::Message(RpcOut::Subscribe(_)), .. 
- } => { - for s in &message.subscriptions { - if let Some(true) = s.subscribe { - collected_subscriptions.push(s.clone()) - }; - } - collected_subscriptions - } + } => collected_subscriptions + 1, _ => collected_subscriptions, }); // we sent a unsubscribe to all known peers, for two topics - assert!( - subscriptions.len() == 40, - "Should send an unsubscribe event to all known peers" - ); + assert_eq!(subscriptions, 40); // check we clean up internal structures for topic_hash in &topic_hashes { @@ -666,16 +642,13 @@ fn test_publish_without_flood_publishing() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref message), + event: HandlerIn::Message(RpcOut::Publish(message)), .. } => { - let event = proto_to_message(message); - for s in &event.messages { - collected_publish.push(s.clone()); - } + collected_publish.push(message); collected_publish } _ => collected_publish, @@ -756,16 +729,13 @@ fn test_fanout() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref message), + event: HandlerIn::Message(RpcOut::Publish(message)), .. } => { - let event = proto_to_message(message); - for s in &event.messages { - collected_publish.push(s.clone()); - } + collected_publish.push(message); collected_publish } _ => collected_publish, @@ -807,37 +777,36 @@ fn test_inject_connected() { // check that our subscriptions are sent to each of the peers // collect all the SendEvents - let send_events: Vec<_> = gs + let subscriptions = gs .events - .iter() - .filter(|e| match e { + .into_iter() + .filter_map(|e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Subscribe(topic)), + peer_id, .. 
- } => !m.subscriptions.is_empty(), - _ => false, + } => Some((peer_id, topic)), + _ => None, }) - .collect(); + .fold( + HashMap::>::new(), + |mut subs, (peer, sub)| { + let mut peer_subs = subs.remove(&peer).unwrap_or_default(); + peer_subs.push(sub.into_string()); + subs.insert(peer, peer_subs); + subs + }, + ); // check that there are two subscriptions sent to each peer - for sevent in send_events.clone() { - if let ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref m), - .. - } = sevent - { - assert!( - m.subscriptions.len() == 2, - "There should be two subscriptions sent to each peer (1 for each topic)." - ); - }; + for peer_subs in subscriptions.values() { + assert!(peer_subs.contains(&String::from("topic1"))); + assert!(peer_subs.contains(&String::from("topic2"))); + assert_eq!(peer_subs.len(), 2); } // check that there are 20 send events created - assert!( - send_events.len() == 20, - "There should be a subscription event sent to each peer." - ); + assert_eq!(subscriptions.len(), 20); // should add the new peers to `peer_topics` with an empty vec as a gossipsub node for peer in peers { @@ -1050,21 +1019,18 @@ fn test_handle_iwant_msg_cached() { gs.handle_iwant(&peers[7], vec![msg_id.clone()]); // the messages we are sending - let sent_messages = gs - .events - .iter() - .fold(vec![], |mut collected_messages, e| match e { + let sent_messages = gs.events.into_iter().fold( + Vec::::new(), + |mut collected_messages, e| match e { ToSwarm::NotifyHandler { event, .. } => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for c in &event.messages { - collected_messages.push(c.clone()) - } + if let HandlerIn::Message(RpcOut::Forward(message)) = event { + collected_messages.push(message); } collected_messages } _ => collected_messages, - }); + }, + ); assert!( sent_messages @@ -1113,15 +1079,14 @@ fn test_handle_iwant_msg_cached_shifted() { // is the message is being sent? 
let message_exists = gs.events.iter().any(|e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Forward(message)), .. } => { - let event = proto_to_message(m); - event - .messages - .iter() - .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) - .any(|msg| gs.config.message_id(&msg) == msg_id) + gs.config.message_id( + &gs.data_transform + .inbound_transform(message.clone()) + .unwrap(), + ) == msg_id } _ => false, }); @@ -1352,22 +1317,15 @@ fn count_control_msgs( .sum::() + gs.events .iter() - .map(|e| match e { + .filter(|e| match e { ToSwarm::NotifyHandler { peer_id, - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Control(action)), .. - } => { - let event = proto_to_message(m); - event - .control_msgs - .iter() - .filter(|m| filter(peer_id, m)) - .count() - } - _ => 0, + } => filter(peer_id, action), + _ => false, }) - .sum::() + .count() } fn flush_events(gs: &mut Behaviour) { @@ -1418,7 +1376,7 @@ fn test_explicit_peer_reconnects() { .gs_config(config) .create_network(); - let peer = others.get(0).unwrap(); + let peer = others.first().unwrap(); //add peer as explicit peer gs.add_explicit_peer(peer); @@ -1469,7 +1427,7 @@ fn test_handle_graft_explicit_peer() { .explicit(1) .create_network(); - let peer = peers.get(0).unwrap(); + let peer = peers.first().unwrap(); gs.handle_graft(peer, topic_hashes.clone()); @@ -1576,17 +1534,10 @@ fn do_forward_messages_to_explicit_peers() { .filter(|e| match e { ToSwarm::NotifyHandler { peer_id, - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Forward(m)), .. 
} => { - let event = proto_to_message(m); - peer_id == &peers[0] - && event - .messages - .iter() - .filter(|m| m.data == message.data) - .count() - > 0 + peer_id == &peers[0] && m.data == message.data } _ => false, }) @@ -2120,14 +2071,11 @@ fn test_flood_publish() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { event, .. } => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push(message); } collected_publish } @@ -2681,14 +2629,11 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { // the messages we are sending let sent_messages = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_messages, e| match e { ToSwarm::NotifyHandler { event, peer_id, .. } => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for c in &event.messages { - collected_messages.push((*peer_id, c.clone())) - } + if let HandlerIn::Message(RpcOut::Forward(message)) = event { + collected_messages.push((peer_id, message)); } collected_messages } @@ -2829,14 +2774,11 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { event, peer_id, .. 
} => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push((*peer_id, s.clone())); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push((peer_id, message)); } collected_publish } @@ -2886,14 +2828,11 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { event, peer_id, .. } => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push((*peer_id, s.clone())); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push((peer_id, message)); } collected_publish } @@ -3226,7 +3165,7 @@ fn test_scoring_p1() { ); } -fn random_message(seq: &mut u64, topics: &Vec) -> RawMessage { +fn random_message(seq: &mut u64, topics: &[TopicHash]) -> RawMessage { let mut rng = rand::thread_rng(); *seq += 1; RawMessage { @@ -4088,20 +4027,20 @@ fn test_scoring_p6() { //create 5 peers with the same ip let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3)); let peers = vec![ - add_peer_with_addr(&mut gs, &vec![], false, false, addr.clone()), - add_peer_with_addr(&mut gs, &vec![], false, false, addr.clone()), - add_peer_with_addr(&mut gs, &vec![], true, false, addr.clone()), - add_peer_with_addr(&mut gs, &vec![], true, false, addr.clone()), - add_peer_with_addr(&mut gs, &vec![], true, true, addr.clone()), + add_peer_with_addr(&mut gs, &[], false, false, addr.clone()), + add_peer_with_addr(&mut gs, &[], false, false, addr.clone()), + add_peer_with_addr(&mut gs, &[], true, false, addr.clone()), + add_peer_with_addr(&mut gs, &[], true, false, addr.clone()), + add_peer_with_addr(&mut gs, &[], true, true, addr.clone()), ]; //create 4 other peers with other ip let addr2 = 
Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4)); let others = vec![ - add_peer_with_addr(&mut gs, &vec![], false, false, addr2.clone()), - add_peer_with_addr(&mut gs, &vec![], false, false, addr2.clone()), - add_peer_with_addr(&mut gs, &vec![], true, false, addr2.clone()), - add_peer_with_addr(&mut gs, &vec![], true, false, addr2.clone()), + add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()), + add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()), + add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()), + add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()), ]; //no penalties yet @@ -4412,17 +4351,14 @@ fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { assert_eq!( gs.events .iter() - .map(|e| match e { + .filter(|e| matches!( + e, ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Forward(_)), .. - } => { - let event = proto_to_message(m); - event.messages.len() } - _ => 0, - }) - .sum::(), + )) + .count(), config.gossip_retransimission() as usize, "not more then gossip_retransmission many messages get sent back" ); @@ -4665,7 +4601,10 @@ fn test_limit_number_of_message_ids_inside_ihave() { #[test] fn test_iwant_penalties() { - let _ = env_logger::try_init(); + use tracing_subscriber::EnvFilter; + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = ConfigBuilder::default() .iwant_followup_time(Duration::from_secs(4)) @@ -4821,11 +4760,8 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { peer_id, event, .. 
} => { if peer_id == &p1 || peer_id == &p2 { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push(message); } } collected_publish @@ -4878,11 +4814,8 @@ fn test_do_not_use_floodsub_in_fanout() { .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { peer_id, event, .. } => { if peer_id == &p1 || peer_id == &p2 { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push(message); } } collected_publish @@ -5064,86 +4997,6 @@ fn test_public_api() { ); } -#[test] -fn test_msg_id_fn_only_called_once_with_fast_message_ids() { - struct Pointers { - slow_counter: u32, - fast_counter: u32, - } - - let mut counters = Pointers { - slow_counter: 0, - fast_counter: 0, - }; - - let counters_pointer: *mut Pointers = &mut counters; - - let counters_address = counters_pointer as u64; - - macro_rules! get_counters_pointer { - ($m: expr) => {{ - let mut address_bytes: [u8; 8] = Default::default(); - address_bytes.copy_from_slice($m.as_slice()); - let address = u64::from_be_bytes(address_bytes); - address as *mut Pointers - }}; - } - - macro_rules! 
get_counters_and_hash { - ($m: expr) => {{ - let mut hasher = DefaultHasher::new(); - $m.hash(&mut hasher); - let id = hasher.finish().to_be_bytes().into(); - (id, get_counters_pointer!($m)) - }}; - } - - let message_id_fn = |m: &Message| -> MessageId { - let (mut id, counters_pointer): (MessageId, *mut Pointers) = - get_counters_and_hash!(&m.data); - unsafe { - (*counters_pointer).slow_counter += 1; - } - id.0.reverse(); - id - }; - let fast_message_id_fn = |m: &RawMessage| -> FastMessageId { - let (id, counters_pointer) = get_counters_and_hash!(&m.data); - unsafe { - (*counters_pointer).fast_counter += 1; - } - id - }; - let config = ConfigBuilder::default() - .message_id_fn(message_id_fn) - .fast_message_id_fn(fast_message_id_fn) - .build() - .unwrap(); - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(0) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - let message = RawMessage { - source: None, - data: counters_address.to_be_bytes().to_vec(), - sequence_number: None, - topic: topic_hashes[0].clone(), - signature: None, - key: None, - validated: true, - }; - - for _ in 0..5 { - gs.handle_received_message(message.clone(), &PeerId::random()); - } - - assert_eq!(counters.fast_counter, 5); - assert_eq!(counters.slow_counter, 1); -} - #[test] fn test_subscribe_to_invalid_topic() { let t1 = Topic::new("t1"); @@ -5208,7 +5061,7 @@ fn test_subscribe_and_graft_with_negative_score() { p2, connection_id, HandlerEvent::Message { - rpc: proto_to_message(&message), + rpc: proto_to_message(&message.into_protobuf()), invalid_messages: vec![], }, ); diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index a5d31071538a..7e79912cc4a6 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ -22,8 +22,9 @@ use std::borrow::Cow; use std::sync::Arc; use std::time::Duration; +use crate::error::ConfigBuilderError; use crate::protocol::{ProtocolConfig, 
ProtocolId, FLOODSUB_PROTOCOL}; -use crate::types::{FastMessageId, Message, MessageId, PeerKind, RawMessage}; +use crate::types::{Message, MessageId, PeerKind}; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; @@ -74,11 +75,9 @@ pub struct Config { heartbeat_interval: Duration, fanout_ttl: Duration, check_explicit_peers_ticks: u64, - idle_timeout: Duration, duplicate_cache_time: Duration, validate_messages: bool, message_id_fn: Arc MessageId + Send + Sync + 'static>, - fast_message_id_fn: Option FastMessageId + Send + Sync + 'static>>, allow_self_origin: bool, do_px: bool, prune_peers: usize, @@ -183,13 +182,6 @@ impl Config { self.protocol.max_transmit_size } - /// The time a connection is maintained to a peer without being in the mesh and without - /// send/receiving a message from. Connections that idle beyond this timeout are disconnected. - /// Default is 120 seconds. - pub fn idle_timeout(&self) -> Duration { - self.idle_timeout - } - /// Duplicates are prevented by storing message id's of known messages in an LRU time cache. /// This settings sets the time period that messages are stored in the cache. Duplicates can be /// received if duplicate messages are sent at a time greater than this setting apart. The @@ -225,20 +217,6 @@ impl Config { (self.message_id_fn)(message) } - /// A user-defined optional function that computes fast ids from raw messages. This can be used - /// to avoid possibly expensive transformations from [`RawMessage`] to - /// [`Message`] for duplicates. Two semantically different messages must always - /// have different fast message ids, but it is allowed that two semantically identical messages - /// have different fast message ids as long as the message_id_fn produces the same id for them. - /// - /// The function takes a [`RawMessage`] as input and outputs a String to be - /// interpreted as the fast message id. Default is None. 
- pub fn fast_message_id(&self, message: &RawMessage) -> Option { - self.fast_message_id_fn - .as_ref() - .map(|fast_message_id_fn| fast_message_id_fn(message)) - } - /// By default, gossipsub will reject messages that are sent to us that have the same message /// source as we have specified locally. Enabling this, allows these messages and prevents /// penalizing the peer that sent us the message. Default is false. @@ -406,7 +384,6 @@ impl Default for ConfigBuilder { heartbeat_interval: Duration::from_secs(1), fanout_ttl: Duration::from_secs(60), check_explicit_peers_ticks: 300, - idle_timeout: Duration::from_secs(120), duplicate_cache_time: Duration::from_secs(60), validate_messages: false, message_id_fn: Arc::new(|message| { @@ -423,7 +400,6 @@ impl Default for ConfigBuilder { .push_str(&message.sequence_number.unwrap_or_default().to_string()); MessageId::from(source_string) }), - fast_message_id_fn: None, allow_self_origin: false, do_px: false, prune_peers: 0, // NOTE: Increasing this currently has little effect until Signed records are implemented. @@ -601,14 +577,6 @@ impl ConfigBuilder { self } - /// The time a connection is maintained to a peer without being in the mesh and without - /// send/receiving a message from. Connections that idle beyond this timeout are disconnected. - /// Default is 120 seconds. - pub fn idle_timeout(&mut self, idle_timeout: Duration) -> &mut Self { - self.config.idle_timeout = idle_timeout; - self - } - /// Duplicates are prevented by storing message id's of known messages in an LRU time cache. /// This settings sets the time period that messages are stored in the cache. Duplicates can be /// received if duplicate messages are sent at a time greater than this setting apart. The @@ -650,22 +618,6 @@ impl ConfigBuilder { self } - /// A user-defined optional function that computes fast ids from raw messages. This can be used - /// to avoid possibly expensive transformations from [`RawMessage`] to - /// [`Message`] for duplicates. 
Two semantically different messages must always - /// have different fast message ids, but it is allowed that two semantically identical messages - /// have different fast message ids as long as the message_id_fn produces the same id for them. - /// - /// The function takes a [`Message`] as input and outputs a String to be interpreted - /// as the fast message id. Default is None. - pub fn fast_message_id_fn(&mut self, fast_id_fn: F) -> &mut Self - where - F: Fn(&RawMessage) -> FastMessageId + Send + Sync + 'static, - { - self.config.fast_message_id_fn = Some(Arc::new(fast_id_fn)); - self - } - /// Enables Peer eXchange. This should be enabled in bootstrappers and other well /// connected/trusted nodes. The default is false. /// @@ -831,40 +783,34 @@ impl ConfigBuilder { } /// Constructs a [`Config`] from the given configuration and validates the settings. - pub fn build(&self) -> Result { + pub fn build(&self) -> Result { // check all constraints on config if self.config.protocol.max_transmit_size < 100 { - return Err("The maximum transmission size must be greater than 100 to permit basic control messages"); + return Err(ConfigBuilderError::MaxTransmissionSizeTooSmall); } if self.config.history_length < self.config.history_gossip { - return Err( - "The history_length must be greater than or equal to the history_gossip \ - length", - ); + return Err(ConfigBuilderError::HistoryLengthTooSmall); } if !(self.config.mesh_outbound_min <= self.config.mesh_n_low && self.config.mesh_n_low <= self.config.mesh_n && self.config.mesh_n <= self.config.mesh_n_high) { - return Err("The following inequality doesn't hold \ - mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high"); + return Err(ConfigBuilderError::MeshParametersInvalid); } if self.config.mesh_outbound_min * 2 > self.config.mesh_n { - return Err( - "The following inequality doesn't hold mesh_outbound_min <= self.config.mesh_n / 2", - ); + return Err(ConfigBuilderError::MeshOutboundInvalid); } if 
self.config.unsubscribe_backoff.as_millis() == 0 { - return Err("The unsubscribe_backoff parameter should be positive."); + return Err(ConfigBuilderError::UnsubscribeBackoffIsZero); } if self.invalid_protocol { - return Err("The provided protocol is invalid, it must start with a forward-slash"); + return Err(ConfigBuilderError::InvalidProtocol); } Ok(self.config.clone()) @@ -886,7 +832,6 @@ impl std::fmt::Debug for Config { let _ = builder.field("heartbeat_initial_delay", &self.heartbeat_initial_delay); let _ = builder.field("heartbeat_interval", &self.heartbeat_interval); let _ = builder.field("fanout_ttl", &self.fanout_ttl); - let _ = builder.field("idle_timeout", &self.idle_timeout); let _ = builder.field("duplicate_cache_time", &self.duplicate_cache_time); let _ = builder.field("validate_messages", &self.validate_messages); let _ = builder.field("allow_self_origin", &self.allow_self_origin); diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs index 26f71a346c3b..8761630467bb 100644 --- a/protocols/gossipsub/src/error.rs +++ b/protocols/gossipsub/src/error.rs @@ -120,3 +120,37 @@ impl From for PublishError { PublishError::TransformFailed(error) } } + +/// Error associated with Config building. +#[derive(Debug)] +pub enum ConfigBuilderError { + /// Maximum transmission size is too small. + MaxTransmissionSizeTooSmall, + /// History length less than history gossip length. 
+ HistoryLengthTooSmall, + /// The ineauality doesn't hold mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high + MeshParametersInvalid, + /// The inequality doesn't hold mesh_outbound_min <= self.config.mesh_n / 2 + MeshOutboundInvalid, + /// unsubscribe_backoff is zero + UnsubscribeBackoffIsZero, + /// Invalid protocol + InvalidProtocol, +} + +impl std::error::Error for ConfigBuilderError {} + +impl std::fmt::Display for ConfigBuilderError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::MaxTransmissionSizeTooSmall => { + write!(f, "Maximum transmission size is too small") + } + Self::HistoryLengthTooSmall => write!(f, "History length less than history gossip length"), + Self::MeshParametersInvalid => write!(f, "The ineauality doesn't hold mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high"), + Self::MeshOutboundInvalid => write!(f, "The inequality doesn't hold mesh_outbound_min <= self.config.mesh_n / 2"), + Self::UnsubscribeBackoffIsZero => write!(f, "unsubscribe_backoff is zero"), + Self::InvalidProtocol => write!(f, "Invalid protocol"), + } + } +} diff --git a/protocols/gossipsub/src/gossip_promises.rs b/protocols/gossipsub/src/gossip_promises.rs index 827206afe8dc..9538622c0dc7 100644 --- a/protocols/gossipsub/src/gossip_promises.rs +++ b/protocols/gossipsub/src/gossip_promises.rs @@ -23,7 +23,6 @@ use crate::MessageId; use crate::ValidationError; use instant::Instant; use libp2p_identity::PeerId; -use log::debug; use std::collections::HashMap; /// Tracks recently sent `IWANT` messages and checks if peers respond to them. @@ -85,9 +84,10 @@ impl GossipPromises { if *expires < now { let count = result.entry(*peer_id).or_insert(0); *count += 1; - debug!( - "[Penalty] The peer {} broke the promise to deliver message {} in time!", - peer_id, msg + tracing::debug!( + peer=%peer_id, + message=%msg, + "[Penalty] The peer broke the promise to deliver message in time!" 
); false } else { diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs index 8508e026cee3..e91f81776e76 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -20,7 +20,7 @@ use crate::protocol::{GossipsubCodec, ProtocolConfig}; use crate::rpc_proto::proto; -use crate::types::{PeerKind, RawMessage, Rpc}; +use crate::types::{PeerKind, RawMessage, Rpc, RpcOut}; use crate::ValidationError; use asynchronous_codec::Framed; use futures::future::Either; @@ -30,17 +30,14 @@ use instant::Instant; use libp2p_core::upgrade::DeniedUpgrade; use libp2p_swarm::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, KeepAlive, StreamUpgradeError, - SubstreamProtocol, + FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, }; use libp2p_swarm::Stream; use smallvec::SmallVec; use std::{ pin::Pin, task::{Context, Poll}, - time::Duration, }; -use void::Void; /// The event emitted by the Handler. This informs the behaviour of various events created /// by the handler. @@ -61,10 +58,11 @@ pub enum HandlerEvent { } /// A message sent from the behaviour to the handler. +#[allow(clippy::large_enum_variant)] #[derive(Debug)] pub enum HandlerIn { /// A gossipsub message to send. - Message(proto::RPC), + Message(RpcOut), /// The peer has joined the mesh. JoinedMesh, /// The peer has left the mesh. @@ -119,9 +117,6 @@ pub struct EnabledHandler { last_io_activity: Instant, - /// The amount of time we keep an idle connection alive. - idle_timeout: Duration, - /// Keeps track of whether this connection is for a peer in the mesh. This is used to make /// decisions about the keep alive state for this connection. in_mesh: bool, @@ -164,7 +159,7 @@ enum OutboundSubstreamState { impl Handler { /// Builds a new [`Handler`]. 
- pub fn new(protocol_config: ProtocolConfig, idle_timeout: Duration) -> Self { + pub fn new(protocol_config: ProtocolConfig) -> Self { Handler::Enabled(EnabledHandler { listen_protocol: protocol_config, inbound_substream: None, @@ -176,7 +171,6 @@ impl Handler { peer_kind: None, peer_kind_sent: false, last_io_activity: Instant::now(), - idle_timeout, in_mesh: false, }) } @@ -193,7 +187,7 @@ impl EnabledHandler { } // new inbound substream. Replace the current one, if it exists. - log::trace!("New inbound substream request"); + tracing::trace!("New inbound substream request"); self.inbound_substream = Some(InboundSubstreamState::WaitingInput(substream)); } @@ -226,7 +220,6 @@ impl EnabledHandler { ::OutboundProtocol, ::OutboundOpenInfo, ::ToBehaviour, - ::Error, >, > { if !self.peer_kind_sent { @@ -249,70 +242,6 @@ impl EnabledHandler { }); } - loop { - match std::mem::replace( - &mut self.inbound_substream, - Some(InboundSubstreamState::Poisoned), - ) { - // inbound idle state - Some(InboundSubstreamState::WaitingInput(mut substream)) => { - match substream.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(message))) => { - self.last_io_activity = Instant::now(); - self.inbound_substream = - Some(InboundSubstreamState::WaitingInput(substream)); - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(message)); - } - Poll::Ready(Some(Err(error))) => { - log::debug!("Failed to read from inbound stream: {error}"); - // Close this side of the stream. If the - // peer is still around, they will re-establish their - // outbound stream i.e. our inbound stream. 
- self.inbound_substream = - Some(InboundSubstreamState::Closing(substream)); - } - // peer closed the stream - Poll::Ready(None) => { - log::debug!("Inbound stream closed by remote"); - self.inbound_substream = - Some(InboundSubstreamState::Closing(substream)); - } - Poll::Pending => { - self.inbound_substream = - Some(InboundSubstreamState::WaitingInput(substream)); - break; - } - } - } - Some(InboundSubstreamState::Closing(mut substream)) => { - match Sink::poll_close(Pin::new(&mut substream), cx) { - Poll::Ready(res) => { - if let Err(e) = res { - // Don't close the connection but just drop the inbound substream. - // In case the remote has more to send, they will open up a new - // substream. - log::debug!("Inbound substream error while closing: {e}"); - } - self.inbound_substream = None; - break; - } - Poll::Pending => { - self.inbound_substream = - Some(InboundSubstreamState::Closing(substream)); - break; - } - } - } - None => { - self.inbound_substream = None; - break; - } - Some(InboundSubstreamState::Poisoned) => { - unreachable!("Error occurred during inbound stream processing") - } - } - } - // process outbound stream loop { match std::mem::replace( @@ -341,14 +270,16 @@ impl EnabledHandler { Some(OutboundSubstreamState::PendingFlush(substream)) } Err(e) => { - log::debug!("Failed to send message on outbound stream: {e}"); + tracing::debug!( + "Failed to send message on outbound stream: {e}" + ); self.outbound_substream = None; break; } } } Poll::Ready(Err(e)) => { - log::debug!("Failed to send message on outbound stream: {e}"); + tracing::debug!("Failed to send message on outbound stream: {e}"); self.outbound_substream = None; break; } @@ -367,7 +298,7 @@ impl EnabledHandler { Some(OutboundSubstreamState::WaitingOutput(substream)) } Poll::Ready(Err(e)) => { - log::debug!("Failed to flush outbound stream: {e}"); + tracing::debug!("Failed to flush outbound stream: {e}"); self.outbound_substream = None; break; } @@ -388,6 +319,70 @@ impl EnabledHandler { 
} } + loop { + match std::mem::replace( + &mut self.inbound_substream, + Some(InboundSubstreamState::Poisoned), + ) { + // inbound idle state + Some(InboundSubstreamState::WaitingInput(mut substream)) => { + match substream.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(message))) => { + self.last_io_activity = Instant::now(); + self.inbound_substream = + Some(InboundSubstreamState::WaitingInput(substream)); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(message)); + } + Poll::Ready(Some(Err(error))) => { + tracing::debug!("Failed to read from inbound stream: {error}"); + // Close this side of the stream. If the + // peer is still around, they will re-establish their + // outbound stream i.e. our inbound stream. + self.inbound_substream = + Some(InboundSubstreamState::Closing(substream)); + } + // peer closed the stream + Poll::Ready(None) => { + tracing::debug!("Inbound stream closed by remote"); + self.inbound_substream = + Some(InboundSubstreamState::Closing(substream)); + } + Poll::Pending => { + self.inbound_substream = + Some(InboundSubstreamState::WaitingInput(substream)); + break; + } + } + } + Some(InboundSubstreamState::Closing(mut substream)) => { + match Sink::poll_close(Pin::new(&mut substream), cx) { + Poll::Ready(res) => { + if let Err(e) = res { + // Don't close the connection but just drop the inbound substream. + // In case the remote has more to send, they will open up a new + // substream. 
+ tracing::debug!("Inbound substream error while closing: {e}"); + } + self.inbound_substream = None; + break; + } + Poll::Pending => { + self.inbound_substream = + Some(InboundSubstreamState::Closing(substream)); + break; + } + } + } + None => { + self.inbound_substream = None; + break; + } + Some(InboundSubstreamState::Poisoned) => { + unreachable!("Error occurred during inbound stream processing") + } + } + } + Poll::Pending } } @@ -395,7 +390,6 @@ impl EnabledHandler { impl ConnectionHandler for Handler { type FromBehaviour = HandlerIn; type ToBehaviour = HandlerEvent; - type Error = Void; type InboundOpenInfo = (); type InboundProtocol = either::Either; type OutboundOpenInfo = (); @@ -415,7 +409,7 @@ impl ConnectionHandler for Handler { fn on_behaviour_event(&mut self, message: HandlerIn) { match self { Handler::Enabled(handler) => match message { - HandlerIn::Message(m) => handler.send_queue.push(m), + HandlerIn::Message(m) => handler.send_queue.push(m.into_protobuf()), HandlerIn::JoinedMesh => { handler.in_mesh = true; } @@ -424,42 +418,21 @@ impl ConnectionHandler for Handler { } }, Handler::Disabled(_) => { - log::debug!("Handler is disabled. Dropping message {:?}", message); + tracing::debug!(?message, "Handler is disabled. 
Dropping message"); } } } - fn connection_keep_alive(&self) -> KeepAlive { - match self { - Handler::Enabled(handler) => { - if handler.in_mesh { - return KeepAlive::Yes; - } - - if let Some( - OutboundSubstreamState::PendingSend(_, _) - | OutboundSubstreamState::PendingFlush(_), - ) = handler.outbound_substream - { - return KeepAlive::Yes; - } - - KeepAlive::Until(handler.last_io_activity + handler.idle_timeout) - } - Handler::Disabled(_) => KeepAlive::No, - } + fn connection_keep_alive(&self) -> bool { + matches!(self, Handler::Enabled(h) if h.in_mesh) } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { match self { Handler::Enabled(handler) => handler.poll(cx), @@ -492,7 +465,7 @@ impl ConnectionHandler for Handler { handler.inbound_substream_attempts += 1; if handler.inbound_substream_attempts == MAX_SUBSTREAM_ATTEMPTS { - log::warn!( + tracing::warn!( "The maximum number of inbound substreams attempts has been exceeded" ); *self = Handler::Disabled(DisabledHandler::MaxSubstreamAttempts); @@ -506,7 +479,7 @@ impl ConnectionHandler for Handler { handler.outbound_substream_attempts += 1; if handler.outbound_substream_attempts == MAX_SUBSTREAM_ATTEMPTS { - log::warn!( + tracing::warn!( "The maximum number of outbound substream attempts has been exceeded" ); *self = Handler::Disabled(DisabledHandler::MaxSubstreamAttempts); @@ -529,7 +502,7 @@ impl ConnectionHandler for Handler { error: StreamUpgradeError::Timeout, .. }) => { - log::debug!("Dial upgrade error: Protocol negotiation timeout"); + tracing::debug!("Dial upgrade error: Protocol negotiation timeout"); } ConnectionEvent::DialUpgradeError(DialUpgradeError { error: StreamUpgradeError::Apply(e), @@ -540,7 +513,7 @@ impl ConnectionHandler for Handler { .. 
}) => { // The protocol is not supported - log::debug!( + tracing::debug!( "The remote peer does not support gossipsub on this connection" ); *self = Handler::Disabled(DisabledHandler::ProtocolUnsupported { @@ -551,12 +524,9 @@ impl ConnectionHandler for Handler { error: StreamUpgradeError::Io(e), .. }) => { - log::debug!("Protocol negotiation failed: {e}") + tracing::debug!("Protocol negotiation failed: {e}") } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::ListenUpgradeError(_) - | ConnectionEvent::LocalProtocolsChange(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} + _ => {} } } Handler::Disabled(_) => {} diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index e065319c4c3c..15db5eba21d6 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -89,53 +89,7 @@ //! ## Example //! -//! An example of initialising a gossipsub compatible swarm: -//! -//! ``` -//! # use libp2p_gossipsub::Event; -//! # use libp2p_core::{transport::{Transport, MemoryTransport}, Multiaddr}; -//! # use libp2p_gossipsub::MessageAuthenticity; -//! # use libp2p_identity as identity; -//! let local_key = identity::Keypair::generate_ed25519(); -//! let local_peer_id = local_key.public().to_peer_id(); -//! -//! // Set up an encrypted TCP Transport over yamux -//! // This is test transport (memory). -//! let transport = MemoryTransport::default() -//! .upgrade(libp2p_core::upgrade::Version::V1) -//! .authenticate(libp2p_noise::Config::new(&local_key).unwrap()) -//! .multiplex(libp2p_yamux::Config::default()) -//! .boxed(); -//! -//! // Create a Gossipsub topic -//! let topic = libp2p_gossipsub::IdentTopic::new("example"); -//! -//! // Set the message authenticity - How we expect to publish messages -//! // Here we expect the publisher to sign the message with their key. -//! let message_authenticity = MessageAuthenticity::Signed(local_key); -//! -//! // Create a Swarm to manage peers and events -//! let mut swarm = { -//! 
// set default parameters for gossipsub -//! let gossipsub_config = libp2p_gossipsub::Config::default(); -//! // build a gossipsub network behaviour -//! let mut gossipsub: libp2p_gossipsub::Behaviour = -//! libp2p_gossipsub::Behaviour::new(message_authenticity, gossipsub_config).unwrap(); -//! // subscribe to the topic -//! gossipsub.subscribe(&topic); -//! // create the swarm (use an executor in a real example) -//! libp2p_swarm::SwarmBuilder::without_executor( -//! transport, -//! gossipsub, -//! local_peer_id, -//! ).build() -//! }; -//! -//! // Listen on a memory transport. -//! let memory: Multiaddr = libp2p_core::multiaddr::Protocol::Memory(10).into(); -//! let addr = swarm.listen_on(memory).unwrap(); -//! println!("Listening on {:?}", addr); -//! ``` +//! For an example on how to use gossipsub, see the [chat-example](https://github.com/libp2p/rust-libp2p/tree/master/examples/chat). #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -158,7 +112,7 @@ mod types; pub use self::behaviour::{Behaviour, Event, MessageAuthenticity}; pub use self::config::{Config, ConfigBuilder, ValidationMode, Version}; -pub use self::error::{PublishError, SubscriptionError, ValidationError}; +pub use self::error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError}; pub use self::metrics::Config as MetricsConfig; pub use self::peer_score::{ score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds, @@ -171,7 +125,10 @@ pub use self::subscription_filter::{ }; pub use self::topic::{Hasher, Topic, TopicHash}; pub use self::transform::{DataTransform, IdentityTransform}; -pub use self::types::{FastMessageId, Message, MessageAcceptance, MessageId, RawMessage, Rpc}; +pub use self::types::{Message, MessageAcceptance, MessageId, RawMessage}; + +#[deprecated(note = "Will be removed from the public API.")] +pub type Rpc = self::types::Rpc; pub type IdentTopic = Topic; pub type Sha256Topic = Topic; diff --git 
a/protocols/gossipsub/src/mcache.rs b/protocols/gossipsub/src/mcache.rs index e85a5bf9c6a2..ef4a93bc936e 100644 --- a/protocols/gossipsub/src/mcache.rs +++ b/protocols/gossipsub/src/mcache.rs @@ -21,7 +21,6 @@ use crate::topic::TopicHash; use crate::types::{MessageId, RawMessage}; use libp2p_identity::PeerId; -use log::{debug, trace}; use std::collections::hash_map::Entry; use std::fmt::Debug; use std::{ @@ -87,7 +86,7 @@ impl MessageCache { entry.insert((msg, HashSet::default())); self.history[0].push(cache_entry); - trace!("Put message {:?} in mcache", message_id); + tracing::trace!(message=?message_id, "Put message in mcache"); true } } @@ -191,13 +190,13 @@ impl MessageCache { // If GossipsubConfig::validate_messages is true, the implementing // application has to ensure that Gossipsub::validate_message gets called for // each received message within the cache timeout time." - debug!( - "The message with id {} got removed from the cache without being validated.", - &entry.mid + tracing::debug!( + message=%&entry.mid, + "The message got removed from the cache without being validated." ); } } - trace!("Remove message from the cache: {}", &entry.mid); + tracing::trace!(message=%&entry.mid, "Remove message from the cache"); self.iwant_counts.remove(&entry.mid); } diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs index c6c918d6b2ae..b1ea9bfae959 100644 --- a/protocols/gossipsub/src/peer_score.rs +++ b/protocols/gossipsub/src/peer_score.rs @@ -26,7 +26,6 @@ use crate::time_cache::TimeCache; use crate::{MessageId, TopicHash}; use instant::Instant; use libp2p_identity::PeerId; -use log::{debug, trace, warn}; use std::collections::{hash_map, HashMap, HashSet}; use std::net::IpAddr; use std::time::Duration; @@ -221,11 +220,9 @@ impl PeerScore { /// Returns the score for a peer, logging metrics. This is called from the heartbeat and /// increments the metric counts for penalties. 
pub(crate) fn metric_score(&self, peer_id: &PeerId, mut metrics: Option<&mut Metrics>) -> f64 { - let peer_stats = match self.peer_stats.get(peer_id) { - Some(v) => v, - None => return 0.0, + let Some(peer_stats) = self.peer_stats.get(peer_id) else { + return 0.0; }; - let mut score = 0.0; // topic scores @@ -274,13 +271,12 @@ impl PeerScore { if let Some(metrics) = metrics.as_mut() { metrics.register_score_penalty(Penalty::MessageDeficit); } - debug!( - "[Penalty] The peer {} has a mesh message deliveries deficit of {} in topic\ - {} and will get penalized by {}", - peer_id, - deficit, - topic, - p3 * topic_params.mesh_message_deliveries_weight + tracing::debug!( + peer=%peer_id, + %topic, + %deficit, + penalty=%topic_score, + "[Penalty] The peer has a mesh deliveries deficit and will be penalized" ); } @@ -326,10 +322,11 @@ impl PeerScore { if let Some(metrics) = metrics.as_mut() { metrics.register_score_penalty(Penalty::IPColocation); } - debug!( - "[Penalty] The peer {} gets penalized because of too many peers with the ip {}. \ - The surplus is {}. 
", - peer_id, ip, surplus + tracing::debug!( + peer=%peer_id, + surplus_ip=%ip, + surplus=%surplus, + "[Penalty] The peer gets penalized because of too many peers with the same ip" ); score += p6 * self.params.ip_colocation_factor_weight; } @@ -347,9 +344,10 @@ impl PeerScore { pub(crate) fn add_penalty(&mut self, peer_id: &PeerId, count: usize) { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { - debug!( - "[Penalty] Behavioral penalty for peer {}, count = {}.", - peer_id, count + tracing::debug!( + peer=%peer_id, + %count, + "[Penalty] Behavioral penalty for peer" ); peer_stats.behaviour_penalty += count as f64; } @@ -445,7 +443,7 @@ impl PeerScore { /// Adds a new ip to a peer, if the peer is not yet known creates a new peer_stats entry for it pub(crate) fn add_ip(&mut self, peer_id: &PeerId, ip: IpAddr) { - trace!("Add ip for peer {}, ip: {}", peer_id, ip); + tracing::trace!(peer=%peer_id, %ip, "Add ip for peer"); let peer_stats = self.peer_stats.entry(*peer_id).or_default(); // Mark the peer as connected (currently the default is connected, but we don't want to @@ -462,20 +460,20 @@ impl PeerScore { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { peer_stats.known_ips.remove(ip); if let Some(peer_ids) = self.peer_ips.get_mut(ip) { - trace!("Remove ip for peer {}, ip: {}", peer_id, ip); + tracing::trace!(peer=%peer_id, %ip, "Remove ip for peer"); peer_ids.remove(peer_id); } else { - trace!( - "No entry in peer_ips for ip {} which should get removed for peer {}", - ip, - peer_id + tracing::trace!( + peer=%peer_id, + %ip, + "No entry in peer_ips for ip which should get removed for peer" ); } } else { - trace!( - "No peer_stats for peer {} which should remove the ip {}", - peer_id, - ip + tracing::trace!( + peer=%peer_id, + %ip, + "No peer_stats for peer which should remove the ip" ); } } @@ -594,7 +592,12 @@ impl PeerScore { // this should be the first delivery trace if record.status != DeliveryStatus::Unknown { - warn!("Unexpected 
delivery trace: Message from {} was first seen {}s ago and has a delivery status {:?}", from, record.first_seen.elapsed().as_secs(), record.status); + tracing::warn!( + peer=%from, + status=?record.status, + first_seen=?record.first_seen.elapsed().as_secs(), + "Unexpected delivery trace" + ); return; } @@ -611,9 +614,9 @@ impl PeerScore { /// Similar to `reject_message` except does not require the message id or reason for an invalid message. pub(crate) fn reject_invalid_message(&mut self, from: &PeerId, topic_hash: &TopicHash) { - debug!( - "[Penalty] Message from {} rejected because of ValidationError or SelfOrigin", - from + tracing::debug!( + peer=%from, + "[Penalty] Message from peer rejected because of ValidationError or SelfOrigin" ); self.mark_invalid_message_delivery(from, topic_hash); @@ -778,10 +781,11 @@ impl PeerScore { if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params) { - debug!( - "[Penalty] Peer {} delivered an invalid message in topic {} and gets penalized \ + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "[Penalty] Peer delivered an invalid message in topic and gets penalized \ for it", - peer_id, topic_hash ); topic_stats.invalid_message_deliveries += 1f64; } diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 15d2f59755a2..e9600a4d8d85 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -34,10 +34,8 @@ use futures::prelude::*; use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::StreamProtocol; -use log::{debug, warn}; use quick_protobuf::Writer; use std::pin::Pin; -use unsigned_varint::codec; use void::Void; pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:"; @@ -109,12 +107,10 @@ where type Future = Pin> + Send>>; fn upgrade_inbound(self, socket: TSocket, protocol_id: Self::Info) -> Self::Future { - let mut length_codec = 
codec::UviBytes::default(); - length_codec.set_max_len(self.max_transmit_size); Box::pin(future::ok(( Framed::new( socket, - GossipsubCodec::new(length_codec, self.validation_mode), + GossipsubCodec::new(self.max_transmit_size, self.validation_mode), ), protocol_id.kind, ))) @@ -130,12 +126,10 @@ where type Future = Pin> + Send>>; fn upgrade_outbound(self, socket: TSocket, protocol_id: Self::Info) -> Self::Future { - let mut length_codec = codec::UviBytes::default(); - length_codec.set_max_len(self.max_transmit_size); Box::pin(future::ok(( Framed::new( socket, - GossipsubCodec::new(length_codec, self.validation_mode), + GossipsubCodec::new(self.max_transmit_size, self.validation_mode), ), protocol_id.kind, ))) @@ -152,8 +146,8 @@ pub struct GossipsubCodec { } impl GossipsubCodec { - pub fn new(length_codec: codec::UviBytes, validation_mode: ValidationMode) -> GossipsubCodec { - let codec = quick_protobuf_codec::Codec::new(length_codec.max_len()); + pub fn new(max_length: usize, validation_mode: ValidationMode) -> GossipsubCodec { + let codec = quick_protobuf_codec::Codec::new(max_length); GossipsubCodec { validation_mode, codec, @@ -166,28 +160,19 @@ impl GossipsubCodec { fn verify_signature(message: &proto::Message) -> bool { use quick_protobuf::MessageWrite; - let from = match message.from.as_ref() { - Some(v) => v, - None => { - debug!("Signature verification failed: No source id given"); - return false; - } + let Some(from) = message.from.as_ref() else { + tracing::debug!("Signature verification failed: No source id given"); + return false; }; - let source = match PeerId::from_bytes(from) { - Ok(v) => v, - Err(_) => { - debug!("Signature verification failed: Invalid Peer Id"); - return false; - } + let Ok(source) = PeerId::from_bytes(from) else { + tracing::debug!("Signature verification failed: Invalid Peer Id"); + return false; }; - let signature = match message.signature.as_ref() { - Some(v) => v, - None => { - debug!("Signature verification failed: No 
signature provided"); - return false; - } + let Some(signature) = message.signature.as_ref() else { + tracing::debug!("Signature verification failed: No signature provided"); + return false; }; // If there is a key value in the protobuf, use that key otherwise the key must be @@ -197,7 +182,7 @@ impl GossipsubCodec { _ => match PublicKey::try_decode_protobuf(&source.to_bytes()[2..]) { Ok(v) => v, Err(_) => { - warn!("Signature verification failed: No valid public key supplied"); + tracing::warn!("Signature verification failed: No valid public key supplied"); return false; } }, @@ -205,7 +190,9 @@ impl GossipsubCodec { // The key must match the peer_id if source != public_key.to_peer_id() { - warn!("Signature verification failed: Public key doesn't match source peer id"); + tracing::warn!( + "Signature verification failed: Public key doesn't match source peer id" + ); return false; } @@ -225,10 +212,10 @@ impl GossipsubCodec { } impl Encoder for GossipsubCodec { - type Item = proto::RPC; + type Item<'a> = proto::RPC; type Error = quick_protobuf_codec::Error; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { self.codec.encode(item, dst) } } @@ -238,11 +225,9 @@ impl Decoder for GossipsubCodec { type Error = quick_protobuf_codec::Error; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - let rpc = match self.codec.decode(src)? { - Some(p) => p, - None => return Ok(None), + let Some(rpc) = self.codec.decode(src)? else { + return Ok(None); }; - // Store valid messages. let mut messages = Vec::with_capacity(rpc.publish.len()); // Store any invalid messages. 
@@ -276,13 +261,17 @@ impl Decoder for GossipsubCodec { } ValidationMode::Anonymous => { if message.signature.is_some() { - warn!("Signature field was non-empty and anonymous validation mode is set"); + tracing::warn!( + "Signature field was non-empty and anonymous validation mode is set" + ); invalid_kind = Some(ValidationError::SignaturePresent); } else if message.seqno.is_some() { - warn!("Sequence number was non-empty and anonymous validation mode is set"); + tracing::warn!( + "Sequence number was non-empty and anonymous validation mode is set" + ); invalid_kind = Some(ValidationError::SequenceNumberPresent); } else if message.from.is_some() { - warn!("Message dropped. Message source was non-empty and anonymous validation mode is set"); + tracing::warn!("Message dropped. Message source was non-empty and anonymous validation mode is set"); invalid_kind = Some(ValidationError::MessageSourcePresent); } } @@ -308,7 +297,7 @@ impl Decoder for GossipsubCodec { // verify message signatures if required if verify_signature && !GossipsubCodec::verify_signature(&message) { - warn!("Invalid signature for received message"); + tracing::warn!("Invalid signature for received message"); // Build the invalid message (ignoring further validation of sequence number // and source) @@ -332,10 +321,10 @@ impl Decoder for GossipsubCodec { if seq_no.is_empty() { None } else if seq_no.len() != 8 { - debug!( - "Invalid sequence number length for received message. 
SeqNo: {:?} Size: {}", - seq_no, - seq_no.len() + tracing::debug!( + sequence_number=?seq_no, + sequence_length=%seq_no.len(), + "Invalid sequence number length for received message" ); let message = RawMessage { source: None, // don't bother inform the application @@ -355,7 +344,7 @@ impl Decoder for GossipsubCodec { } } else { // sequence number was not present - debug!("Sequence number not present but expected"); + tracing::debug!("Sequence number not present but expected"); let message = RawMessage { source: None, // don't bother inform the application data: message.data.unwrap_or_default(), @@ -381,7 +370,7 @@ impl Decoder for GossipsubCodec { Ok(peer_id) => Some(peer_id), // valid peer id Err(_) => { // invalid peer id, add to invalid messages - debug!("Message source has an invalid PeerId"); + tracing::debug!("Message source has an invalid PeerId"); let message = RawMessage { source: None, // don't bother inform the application data: message.data.unwrap_or_default(), @@ -588,12 +577,12 @@ mod tests { let message = message.0; let rpc = Rpc { - messages: vec![message], + messages: vec![message.clone()], subscriptions: vec![], control_msgs: vec![], }; - let mut codec = GossipsubCodec::new(codec::UviBytes::default(), ValidationMode::Strict); + let mut codec = GossipsubCodec::new(u32::MAX as usize, ValidationMode::Strict); let mut buf = BytesMut::new(); codec.encode(rpc.into_protobuf(), &mut buf).unwrap(); let decoded_rpc = codec.decode(&mut buf).unwrap().unwrap(); @@ -602,7 +591,7 @@ mod tests { HandlerEvent::Message { mut rpc, .. 
} => { rpc.messages[0].validated = true; - assert_eq!(rpc, rpc); + assert_eq!(vec![message], rpc.messages); } _ => panic!("Must decode a message"), } diff --git a/protocols/gossipsub/src/subscription_filter.rs b/protocols/gossipsub/src/subscription_filter.rs index 9f883f12a1b7..09c323d7904a 100644 --- a/protocols/gossipsub/src/subscription_filter.rs +++ b/protocols/gossipsub/src/subscription_filter.rs @@ -20,7 +20,6 @@ use crate::types::Subscription; use crate::TopicHash; -use log::debug; use std::collections::{BTreeSet, HashMap, HashSet}; pub trait TopicSubscriptionFilter { @@ -66,7 +65,7 @@ pub trait TopicSubscriptionFilter { if self.allow_incoming_subscription(s) { true } else { - debug!("Filtered incoming subscription {:?}", s); + tracing::debug!(subscription=?s, "Filtered incoming subscription"); false } }); diff --git a/protocols/gossipsub/src/time_cache.rs b/protocols/gossipsub/src/time_cache.rs index ffc95a474f48..89fd4afee099 100644 --- a/protocols/gossipsub/src/time_cache.rs +++ b/protocols/gossipsub/src/time_cache.rs @@ -93,12 +93,6 @@ impl<'a, K: 'a, V: 'a> Entry<'a, K, V> where K: Eq + std::hash::Hash + Clone, { - pub(crate) fn or_insert_with V>(self, default: F) -> &'a mut V { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => entry.insert(default()), - } - } pub(crate) fn or_default(self) -> &'a mut V where V: Default, @@ -159,10 +153,6 @@ where pub(crate) fn contains_key(&self, key: &Key) -> bool { self.map.contains_key(key) } - - pub(crate) fn get(&self, key: &Key) -> Option<&Value> { - self.map.get(key).map(|e| &e.element) - } } pub(crate) struct DuplicateCache(TimeCache); diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index f18656354541..d1b92ff0ba88 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -43,49 +43,33 @@ pub enum MessageAcceptance { Ignore, } -/// Macro for declaring message id types -macro_rules! 
declare_message_id_type { - ($name: ident, $name_string: expr) => { - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - #[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] - pub struct $name(pub Vec); - - impl $name { - pub fn new(value: &[u8]) -> Self { - Self(value.to_vec()) - } - } +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct MessageId(pub Vec); - impl>> From for $name { - fn from(value: T) -> Self { - Self(value.into()) - } - } - - impl std::fmt::Display for $name { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", hex_fmt::HexFmt(&self.0)) - } - } +impl MessageId { + pub fn new(value: &[u8]) -> Self { + Self(value.to_vec()) + } +} - impl std::fmt::Debug for $name { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}({})", $name_string, hex_fmt::HexFmt(&self.0)) - } - } - }; +impl>> From for MessageId { + fn from(value: T) -> Self { + Self(value.into()) + } } -// A type for gossipsub message ids. -declare_message_id_type!(MessageId, "MessageId"); +impl std::fmt::Display for MessageId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", hex_fmt::HexFmt(&self.0)) + } +} -// A type for gossipsub fast messsage ids, not to confuse with "real" message ids. -// -// A fast-message-id is an optional message_id that can be used to filter duplicates quickly. On -// high intensive networks with lots of messages, where the message_id is based on the result of -// decompressed traffic, it is beneficial to specify a `fast-message-id` that can identify and -// filter duplicates quickly without performing the overhead of decompression. 
-declare_message_id_type!(FastMessageId, "FastMessageId"); +impl std::fmt::Debug for MessageId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "MessageId({})", hex_fmt::HexFmt(&self.0)) + } +} #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct PeerConnections { @@ -148,6 +132,19 @@ impl RawMessage { } } +impl From for proto::Message { + fn from(raw: RawMessage) -> Self { + proto::Message { + from: raw.source.map(|m| m.to_bytes()), + data: Some(raw.data), + seqno: raw.sequence_number.map(|s| s.to_be_bytes().to_vec()), + topic: TopicHash::into_string(raw.topic), + signature: raw.signature, + key: raw.key, + } + } +} + /// The message sent to the user after a [`RawMessage`] has been transformed by a /// [`crate::DataTransform`]. #[derive(Clone, PartialEq, Eq, Hash)] @@ -236,6 +233,130 @@ pub enum ControlAction { }, } +/// A Gossipsub RPC message sent. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum RpcOut { + /// Publish a Gossipsub message on network. + Publish(RawMessage), + /// Forward a Gossipsub message to the network. + Forward(RawMessage), + /// Subscribe a topic. + Subscribe(TopicHash), + /// Unsubscribe a topic. + Unsubscribe(TopicHash), + /// List of Gossipsub control messages. + Control(ControlAction), +} + +impl RpcOut { + /// Converts the GossipsubRPC into its protobuf format. + // A convenience function to avoid explicitly specifying types. + pub fn into_protobuf(self) -> proto::RPC { + self.into() + } +} + +impl From for proto::RPC { + /// Converts the RPC into protobuf format. 
+ fn from(rpc: RpcOut) -> Self { + match rpc { + RpcOut::Publish(message) => proto::RPC { + subscriptions: Vec::new(), + publish: vec![message.into()], + control: None, + }, + RpcOut::Forward(message) => proto::RPC { + publish: vec![message.into()], + subscriptions: Vec::new(), + control: None, + }, + RpcOut::Subscribe(topic) => proto::RPC { + publish: Vec::new(), + subscriptions: vec![proto::SubOpts { + subscribe: Some(true), + topic_id: Some(topic.into_string()), + }], + control: None, + }, + RpcOut::Unsubscribe(topic) => proto::RPC { + publish: Vec::new(), + subscriptions: vec![proto::SubOpts { + subscribe: Some(false), + topic_id: Some(topic.into_string()), + }], + control: None, + }, + RpcOut::Control(ControlAction::IHave { + topic_hash, + message_ids, + }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![proto::ControlIHave { + topic_id: Some(topic_hash.into_string()), + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }], + iwant: vec![], + graft: vec![], + prune: vec![], + }), + }, + RpcOut::Control(ControlAction::IWant { message_ids }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![proto::ControlIWant { + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }], + graft: vec![], + prune: vec![], + }), + }, + RpcOut::Control(ControlAction::Graft { topic_hash }) => proto::RPC { + publish: Vec::new(), + subscriptions: vec![], + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![proto::ControlGraft { + topic_id: Some(topic_hash.into_string()), + }], + prune: vec![], + }), + }, + RpcOut::Control(ControlAction::Prune { + topic_hash, + peers, + backoff, + }) => { + proto::RPC { + publish: Vec::new(), + subscriptions: vec![], + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![], + prune: 
vec![proto::ControlPrune { + topic_id: Some(topic_hash.into_string()), + peers: peers + .into_iter() + .map(|info| proto::PeerInfo { + peer_id: info.peer_id.map(|id| id.to_bytes()), + // TODO, see https://github.com/libp2p/specs/pull/217 + signed_peer_record: None, + }) + .collect(), + backoff, + }], + }), + } + } + } + } +} + /// An RPC received/sent. #[derive(Clone, PartialEq, Eq, Hash)] pub struct Rpc { diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs index e4e4c90d7686..c8876428b4e9 100644 --- a/protocols/gossipsub/tests/smoke.rs +++ b/protocols/gossipsub/tests/smoke.rs @@ -25,11 +25,10 @@ use libp2p_gossipsub as gossipsub; use libp2p_gossipsub::{MessageAuthenticity, ValidationMode}; use libp2p_swarm::Swarm; use libp2p_swarm_test::SwarmExt as _; -use log::debug; use quickcheck::{QuickCheck, TestResult}; use rand::{seq::SliceRandom, SeedableRng}; use std::{task::Poll, time::Duration}; - +use tracing_subscriber::EnvFilter; struct Graph { nodes: SelectAll>, } @@ -122,21 +121,23 @@ async fn build_node() -> Swarm { .unwrap(); gossipsub::Behaviour::new(MessageAuthenticity::Author(peer_id), config).unwrap() }); - swarm.listen().await; + swarm.listen().with_memory_addr_external().await; swarm } #[test] fn multi_hop_propagation() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(num_nodes: u8, seed: u64) -> TestResult { if !(2..=50).contains(&num_nodes) { return TestResult::discard(); } - debug!("number nodes: {:?}, seed: {:?}", num_nodes, seed); + tracing::debug!(number_of_nodes=%num_nodes, seed=%seed); async_std::task::block_on(async move { let mut graph = Graph::new_connected(num_nodes as usize, seed).await; diff --git a/protocols/identify/CHANGELOG.md b/protocols/identify/CHANGELOG.md index 0e8812b1b420..83984448d073 100644 --- a/protocols/identify/CHANGELOG.md +++ b/protocols/identify/CHANGELOG.md @@ -1,4 +1,28 @@ -## 0.43.1 - 
unreleased +## 0.44.2 + +- Emit `ToSwarm::NewExternalAddrOfPeer` for all external addresses of remote peers. + For this to work, the address cache must be enabled via `identify::Config::with_cache_size`. + The default is 0, i.e. disabled. + See [PR 4371](https://github.com/libp2p/rust-libp2p/pull/4371). + +## 0.44.1 + +- Ensure `Multiaddr` handled and returned by `Behaviour` are `/p2p` terminated. + See [PR 4596](https://github.com/libp2p/rust-libp2p/pull/4596). + +## 0.44.0 + +- Add `Info` to the `libp2p-identify::Event::Pushed` to report pushed info. + See [PR 4527](https://github.com/libp2p/rust-libp2p/pull/4527). +- Remove deprecated `initial_delay`. + Identify requests are always sent instantly after the connection has been established. + See [PR 4735](https://github.com/libp2p/rust-libp2p/pull/4735). +- Don't repeatedly report the same observed address as a `NewExternalAddrCandidate`. + Instead, only report each observed address once per connection. + This allows users to probabilistically deem an address as external if it gets reported as a candidate repeatedly. + See [PR 4721](https://github.com/libp2p/rust-libp2p/pull/4721). + +## 0.43.1 - Handle partial push messages. Previously, push messages with partial information were ignored.
diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index 43421811c5dc..2fb51d876278 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-identify" edition = "2021" rust-version = { workspace = true } description = "Nodes identifcation protocol for libp2p" -version = "0.43.1" +version = "0.44.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,27 +11,27 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -asynchronous-codec = "0.6" -futures = "0.3.28" +asynchronous-codec = { workspace = true } +futures = "0.3.30" futures-timer = "3.0.2" futures-bounded = { workspace = true } libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" -lru = "0.12.0" +lru = "0.12.1" quick-protobuf-codec = { workspace = true } quick-protobuf = "0.8" -smallvec = "1.11.1" +smallvec = "1.12.0" thiserror = "1.0" +tracing = "0.1.37" void = "1.0" either = "1.9.0" [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } -env_logger = "0.10" libp2p-swarm-test = { path = "../../swarm-test" } libp2p-swarm = { workspace = true, features = ["macros"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/identify/src/behaviour.rs b/protocols/identify/src/behaviour.rs index f572b937d386..43bddb52fe7f 100644 --- a/protocols/identify/src/behaviour.rs +++ b/protocols/identify/src/behaviour.rs @@ -26,14 +26,14 @@ use libp2p_identity::PublicKey; use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; use libp2p_swarm::{ ConnectionDenied, DialError, ExternalAddresses, ListenAddresses, NetworkBehaviour, - NotifyHandler, PollParameters, StreamUpgradeError, THandlerInEvent, ToSwarm, + NotifyHandler, PeerAddresses, StreamUpgradeError, THandlerInEvent, ToSwarm, }; use libp2p_swarm::{ConnectionId, THandler, THandlerOutEvent}; -use lru::LruCache; + +use std::collections::hash_map::Entry; use std::num::NonZeroUsize; use std::{ collections::{HashMap, HashSet, VecDeque}, - iter::FromIterator, task::Context, task::Poll, time::Duration, @@ -48,6 +48,10 @@ pub struct Behaviour { config: Config, /// For each peer we're connected to, the observed address to send back to it. connected: HashMap>, + + /// The address a remote observed for us. + our_observed_addresses: HashMap, + /// Pending events to be emitted when polled. events: VecDeque>, /// The addresses of all peers that we have discovered. @@ -71,14 +75,6 @@ pub struct Config { /// /// Defaults to `rust-libp2p/`. pub agent_version: String, - /// The initial delay before the first identification request - /// is sent to a remote on a newly established connection. - /// - /// Defaults to 0ms. - #[deprecated(note = "The `initial_delay` is no longer necessary and will be - completely removed since a remote should be able to instantly - answer to an identify request")] - pub initial_delay: Duration, /// The interval at which identification requests are sent to /// the remote on established connections after the first request, /// i.e. the delay between identification requests. 
@@ -106,13 +102,11 @@ pub struct Config { impl Config { /// Creates a new configuration for the identify [`Behaviour`] that /// advertises the given protocol version and public key. - #[allow(deprecated)] pub fn new(protocol_version: String, local_public_key: PublicKey) -> Self { Self { protocol_version, agent_version: format!("rust-libp2p/{}", env!("CARGO_PKG_VERSION")), local_public_key, - initial_delay: Duration::from_millis(0), interval: Duration::from_secs(5 * 60), push_listen_addr_updates: false, cache_size: 100, @@ -125,17 +119,6 @@ impl Config { self } - /// Configures the initial delay before the first identification - /// request is sent on a newly established connection to a peer. - #[deprecated(note = "The `initial_delay` is no longer necessary and will be - completely removed since a remote should be able to instantly - answer to an identify request thus also this setter will be removed")] - #[allow(deprecated)] - pub fn with_initial_delay(mut self, d: Duration) -> Self { - self.initial_delay = d; - self - } - /// Configures the interval at which identification requests are /// sent to peers after the initial request. 
pub fn with_interval(mut self, d: Duration) -> Self { @@ -169,6 +152,7 @@ impl Behaviour { Self { config, connected: HashMap::new(), + our_observed_addresses: Default::default(), events: VecDeque::new(), discovered_peers, listen_addresses: Default::default(), @@ -183,7 +167,7 @@ impl Behaviour { { for p in peers { if !self.connected.contains_key(&p) { - log::debug!("Not pushing to {p} because we are not connected"); + tracing::debug!(peer=%p, "Not pushing to peer because we are not connected"); continue; } @@ -215,9 +199,9 @@ impl Behaviour { .or_default() .insert(conn, addr); - if let Some(entry) = self.discovered_peers.get_mut(&peer_id) { + if let Some(cache) = self.discovered_peers.0.as_mut() { for addr in failed_addresses { - entry.remove(addr); + cache.remove(&peer_id, addr); } } } @@ -235,7 +219,6 @@ impl NetworkBehaviour for Behaviour { type ConnectionHandler = Handler; type ToSwarm = Event; - #[allow(deprecated)] fn handle_established_inbound_connection( &mut self, _: ConnectionId, @@ -244,7 +227,6 @@ impl NetworkBehaviour for Behaviour { remote_addr: &Multiaddr, ) -> Result, ConnectionDenied> { Ok(Handler::new( - self.config.initial_delay, self.config.interval, peer, self.config.local_public_key.clone(), @@ -255,7 +237,6 @@ impl NetworkBehaviour for Behaviour { )) } - #[allow(deprecated)] fn handle_established_outbound_connection( &mut self, _: ConnectionId, @@ -264,7 +245,6 @@ impl NetworkBehaviour for Behaviour { _: Endpoint, ) -> Result, ConnectionDenied> { Ok(Handler::new( - self.config.initial_delay, self.config.interval, peer, self.config.local_public_key.clone(), @@ -278,7 +258,7 @@ impl NetworkBehaviour for Behaviour { fn on_connection_handler_event( &mut self, peer_id: PeerId, - _: ConnectionId, + id: ConnectionId, event: THandlerOutEvent, ) { match event { @@ -287,23 +267,53 @@ impl NetworkBehaviour for Behaviour { info.listen_addrs .retain(|addr| multiaddr_matches_peer_id(addr, &peer_id)); - // Replace existing addresses to prevent other peer 
from filling up our memory. - self.discovered_peers - .put(peer_id, info.listen_addrs.iter().cloned()); - let observed = info.observed_addr.clone(); self.events - .push_back(ToSwarm::GenerateEvent(Event::Received { peer_id, info })); - self.events - .push_back(ToSwarm::NewExternalAddrCandidate(observed)); + .push_back(ToSwarm::GenerateEvent(Event::Received { + peer_id, + info: info.clone(), + })); + + if let Some(ref mut discovered_peers) = self.discovered_peers.0 { + for address in &info.listen_addrs { + if discovered_peers.add(peer_id, address.clone()) { + self.events.push_back(ToSwarm::NewExternalAddrOfPeer { + peer_id, + address: address.clone(), + }); + } + } + } + + match self.our_observed_addresses.entry(id) { + Entry::Vacant(not_yet_observed) => { + not_yet_observed.insert(observed.clone()); + self.events + .push_back(ToSwarm::NewExternalAddrCandidate(observed)); + } + Entry::Occupied(already_observed) if already_observed.get() == &observed => { + // No-op, we already observed this address. 
+ } + Entry::Occupied(mut already_observed) => { + tracing::info!( + old_address=%already_observed.get(), + new_address=%observed, + "Our observed address on connection {id} changed", + ); + + *already_observed.get_mut() = observed.clone(); + self.events + .push_back(ToSwarm::NewExternalAddrCandidate(observed)); + } + } } handler::Event::Identification => { self.events .push_back(ToSwarm::GenerateEvent(Event::Sent { peer_id })); } - handler::Event::IdentificationPushed => { + handler::Event::IdentificationPushed(info) => { self.events - .push_back(ToSwarm::GenerateEvent(Event::Pushed { peer_id })); + .push_back(ToSwarm::GenerateEvent(Event::Pushed { peer_id, info })); } handler::Event::IdentificationError(error) => { self.events @@ -312,11 +322,8 @@ impl NetworkBehaviour for Behaviour { } } - fn poll( - &mut self, - _cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event); } @@ -339,7 +346,7 @@ impl NetworkBehaviour for Behaviour { Ok(self.discovered_peers.get(&peer)) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { let listen_addr_changed = self.listen_addresses.on_swarm_event(&event); let external_addr_changed = self.external_addresses.on_swarm_event(&event); @@ -385,26 +392,19 @@ impl NetworkBehaviour for Behaviour { } else if let Some(addrs) = self.connected.get_mut(&peer_id) { addrs.remove(&connection_id); } + + self.our_observed_addresses.remove(&connection_id); } FromSwarm::DialFailure(DialFailure { peer_id, error, .. 
}) => { - if let Some(entry) = peer_id.and_then(|id| self.discovered_peers.get_mut(&id)) { - if let DialError::Transport(errors) = error { - for (addr, _error) in errors { - entry.remove(addr); - } + if let (Some(peer_id), Some(cache), DialError::Transport(errors)) = + (peer_id, self.discovered_peers.0.as_mut(), error) + { + for (addr, _error) in errors { + cache.remove(&peer_id, addr); } } } - FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) => {} - FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } } @@ -431,6 +431,9 @@ pub enum Event { Pushed { /// The peer that the information has been sent to. peer_id: PeerId, + /// The full Info struct we pushed to the remote peer. Clients must + /// do some diff'ing to know what has changed since the last push. + info: Info, }, /// Error while attempting to identify the remote. 
Error { @@ -451,7 +454,7 @@ fn multiaddr_matches_peer_id(addr: &Multiaddr, peer_id: &PeerId) -> bool { true } -struct PeerCache(Option>>); +struct PeerCache(Option); impl PeerCache { fn disabled() -> Self { @@ -459,33 +462,15 @@ impl PeerCache { } fn enabled(size: NonZeroUsize) -> Self { - Self(Some(LruCache::new(size))) - } - - fn get_mut(&mut self, peer: &PeerId) -> Option<&mut HashSet> { - self.0.as_mut()?.get_mut(peer) - } - - fn put(&mut self, peer: PeerId, addresses: impl Iterator) { - let cache = match self.0.as_mut() { - None => return, - Some(cache) => cache, - }; - - cache.put(peer, HashSet::from_iter(addresses)); + Self(Some(PeerAddresses::new(size))) } fn get(&mut self, peer: &PeerId) -> Vec { - let cache = match self.0.as_mut() { - None => return Vec::new(), - Some(cache) => cache, - }; - - cache - .get(peer) - .cloned() - .map(Vec::from_iter) - .unwrap_or_default() + if let Some(cache) = self.0.as_mut() { + cache.get(peer).collect() + } else { + Vec::new() + } } } diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs index 50b9882f2c5d..f9b77e0b63a0 100644 --- a/protocols/identify/src/handler.rs +++ b/protocols/identify/src/handler.rs @@ -33,13 +33,13 @@ use libp2p_swarm::handler::{ ProtocolSupport, }; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, KeepAlive, StreamProtocol, StreamUpgradeError, + ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, SubstreamProtocol, SupportedProtocols, }; -use log::{warn, Level}; use smallvec::SmallVec; use std::collections::HashSet; -use std::{io, task::Context, task::Poll, time::Duration}; +use std::{task::Context, task::Poll, time::Duration}; +use tracing::Level; const STREAM_TIMEOUT: Duration = Duration::from_secs(60); const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; @@ -57,7 +57,6 @@ pub struct Handler { Either, ReadyUpgrade>, (), Event, - io::Error, >; 4], >, @@ -110,16 +109,14 @@ pub enum Event { /// We replied to an 
identification request from the remote. Identification, /// We actively pushed our identification information to the remote. - IdentificationPushed, + IdentificationPushed(Info), /// Failed to identify the remote, or to reply to an identification request. IdentificationError(StreamUpgradeError), } impl Handler { /// Creates a new `Handler`. - #[allow(clippy::too_many_arguments)] pub fn new( - initial_delay: Duration, interval: Duration, remote_peer_id: PeerId, public_key: PublicKey, @@ -135,7 +132,7 @@ impl Handler { STREAM_TIMEOUT, MAX_CONCURRENT_STREAMS_PER_CONNECTION, ), - trigger_next_identify: Delay::new(initial_delay), + trigger_next_identify: Delay::new(Duration::ZERO), exchanged_one_periodic_identify: false, interval, public_key, @@ -169,7 +166,7 @@ impl Handler { ) .is_err() { - warn!("Dropping inbound stream because we are at capacity"); + tracing::warn!("Dropping inbound stream because we are at capacity"); } else { self.exchanged_one_periodic_identify = true; } @@ -180,7 +177,9 @@ impl Handler { .try_push(protocol::recv_push(stream).map_ok(Success::ReceivedIdentifyPush)) .is_err() { - warn!("Dropping inbound identify push stream because we are at capacity"); + tracing::warn!( + "Dropping inbound identify push stream because we are at capacity" + ); } } } @@ -202,7 +201,7 @@ impl Handler { .try_push(protocol::recv_identify(stream).map_ok(Success::ReceivedIdentify)) .is_err() { - warn!("Dropping outbound identify stream because we are at capacity"); + tracing::warn!("Dropping outbound identify stream because we are at capacity"); } } future::Either::Right(stream) => { @@ -211,11 +210,13 @@ impl Handler { if self .active_streams .try_push( - protocol::send_identify(stream, info).map_ok(|_| Success::SentIdentifyPush), + protocol::send_identify(stream, info).map_ok(Success::SentIdentifyPush), ) .is_err() { - warn!("Dropping outbound identify push stream because we are at capacity"); + tracing::warn!( + "Dropping outbound identify push stream because we are 
at capacity" + ); } } } @@ -280,7 +281,6 @@ impl Handler { impl ConnectionHandler for Handler { type FromBehaviour = InEvent; type ToBehaviour = Event; - type Error = io::Error; type InboundProtocol = SelectUpgrade, ReadyUpgrade>; type OutboundProtocol = Either, ReadyUpgrade>; @@ -314,20 +314,11 @@ impl ConnectionHandler for Handler { } } - fn connection_keep_alive(&self) -> KeepAlive { - if !self.active_streams.is_empty() { - return KeepAlive::Yes; - } - - KeepAlive::No - } - + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { if let Some(event) = self.events.pop() { return Poll::Ready(event); } @@ -352,9 +343,9 @@ impl ConnectionHandler for Handler { remote_info, ))); } - Poll::Ready(Ok(Ok(Success::SentIdentifyPush))) => { + Poll::Ready(Ok(Ok(Success::SentIdentifyPush(info)))) => { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::IdentificationPushed, + Event::IdentificationPushed(info), )); } Poll::Ready(Ok(Ok(Success::SentIdentify))) => { @@ -412,22 +403,21 @@ impl ConnectionHandler for Handler { )); self.trigger_next_identify.reset(self.interval); } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::ListenUpgradeError(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} ConnectionEvent::LocalProtocolsChange(change) => { - let before = log::log_enabled!(Level::Debug) + let before = tracing::enabled!(Level::DEBUG) .then(|| self.local_protocols_to_string()) .unwrap_or_default(); let protocols_changed = self.local_supported_protocols.on_protocols_change(change); - let after = log::log_enabled!(Level::Debug) + let after = tracing::enabled!(Level::DEBUG) .then(|| self.local_protocols_to_string()) .unwrap_or_default(); if protocols_changed && self.exchanged_one_periodic_identify { - log::debug!( - "Supported listen protocols changed from [{before}] to [{after}], pushing to {}", - self.remote_peer_id 
+ tracing::debug!( + peer=%self.remote_peer_id, + %before, + %after, + "Supported listen protocols changed, pushing to peer" ); self.events @@ -439,6 +429,7 @@ impl ConnectionHandler for Handler { }); } } + _ => {} } } } @@ -446,6 +437,6 @@ impl ConnectionHandler for Handler { enum Success { SentIdentify, ReceivedIdentify(Info), - SentIdentifyPush, + SentIdentifyPush(Info), ReceivedIdentifyPush(PushInfo), } diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index 5e2891e04e40..c6b22b00c0ab 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -25,7 +25,6 @@ use libp2p_core::{multiaddr, Multiaddr}; use libp2p_identity as identity; use libp2p_identity::PublicKey; use libp2p_swarm::StreamProtocol; -use log::{debug, trace}; use std::convert::TryFrom; use std::io; use thiserror::Error; @@ -90,27 +89,23 @@ pub struct PushInfo { pub observed_addr: Option, } -pub(crate) async fn send_identify(io: T, info: Info) -> Result<(), UpgradeError> +pub(crate) async fn send_identify(io: T, info: Info) -> Result where T: AsyncWrite + Unpin, { - trace!("Sending: {:?}", info); + tracing::trace!("Sending: {:?}", info); - let listen_addrs = info - .listen_addrs - .into_iter() - .map(|addr| addr.to_vec()) - .collect(); + let listen_addrs = info.listen_addrs.iter().map(|addr| addr.to_vec()).collect(); let pubkey_bytes = info.public_key.encode_protobuf(); let message = proto::Identify { - agentVersion: Some(info.agent_version), - protocolVersion: Some(info.protocol_version), + agentVersion: Some(info.agent_version.clone()), + protocolVersion: Some(info.protocol_version.clone()), publicKey: Some(pubkey_bytes), listenAddrs: listen_addrs, observedAddr: Some(info.observed_addr.to_vec()), - protocols: info.protocols.into_iter().map(|p| p.to_string()).collect(), + protocols: info.protocols.iter().map(|p| p.to_string()).collect(), }; let mut framed_io = FramedWrite::new( @@ -121,7 +116,7 @@ where 
framed_io.send(message).await?; framed_io.close().await?; - Ok(()) + Ok(info) } pub(crate) async fn recv_push(socket: T) -> Result @@ -130,7 +125,7 @@ where { let info = recv(socket).await?.try_into()?; - trace!("Received {:?}", info); + tracing::trace!(?info, "Received"); Ok(info) } @@ -141,7 +136,7 @@ where { let info = recv(socket).await?.try_into()?; - trace!("Received {:?}", info); + tracing::trace!(?info, "Received"); Ok(info) } @@ -172,7 +167,7 @@ fn parse_listen_addrs(listen_addrs: Vec>) -> Vec { .filter_map(|bytes| match Multiaddr::try_from(bytes) { Ok(a) => Some(a), Err(e) => { - debug!("Unable to parse multiaddr: {e:?}"); + tracing::debug!("Unable to parse multiaddr: {e:?}"); None } }) @@ -185,7 +180,7 @@ fn parse_protocols(protocols: Vec) -> Vec { .filter_map(|p| match StreamProtocol::try_from_owned(p) { Ok(p) => Some(p), Err(e) => { - debug!("Received invalid protocol from peer: {e}"); + tracing::debug!("Received invalid protocol from peer: {e}"); None } }) @@ -196,7 +191,7 @@ fn parse_public_key(public_key: Option>) -> Option { public_key.and_then(|key| match PublicKey::try_decode_protobuf(&key) { Ok(k) => Some(k), Err(e) => { - debug!("Unable to decode public key: {e:?}"); + tracing::debug!("Unable to decode public key: {e:?}"); None } }) @@ -206,7 +201,7 @@ fn parse_observed_addr(observed_addr: Option>) -> Option { observed_addr.and_then(|bytes| match Multiaddr::try_from(bytes) { Ok(a) => Some(a), Err(e) => { - debug!("Unable to parse observed multiaddr: {e:?}"); + tracing::debug!("Unable to parse observed multiaddr: {e:?}"); None } }) diff --git a/protocols/identify/tests/smoke.rs b/protocols/identify/tests/smoke.rs index c1926b4125f5..dd92d10979a7 100644 --- a/protocols/identify/tests/smoke.rs +++ b/protocols/identify/tests/smoke.rs @@ -1,12 +1,18 @@ +use futures::StreamExt; use libp2p_core::multiaddr::Protocol; use libp2p_identify as identify; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; +use std::collections::HashSet; 
use std::iter; +use std::time::{Duration, Instant}; +use tracing_subscriber::EnvFilter; #[async_std::test] async fn periodic_identify() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new( @@ -24,7 +30,8 @@ async fn periodic_identify() { }); let swarm2_peer_id = *swarm2.local_peer_id(); - let (swarm1_memory_listen, swarm1_tcp_listen_addr) = swarm1.listen().await; + let (swarm1_memory_listen, swarm1_tcp_listen_addr) = + swarm1.listen().with_memory_addr_external().await; let (swarm2_memory_listen, swarm2_tcp_listen_addr) = swarm2.listen().await; swarm2.connect(&mut swarm1).await; @@ -77,10 +84,155 @@ async fn periodic_identify() { other => panic!("Unexpected events: {other:?}"), } } +#[async_std::test] +async fn only_emits_address_candidate_once_per_connection() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let mut swarm1 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("a".to_string(), identity.public()) + .with_agent_version("b".to_string()) + .with_interval(Duration::from_secs(1)), + ) + }); + let mut swarm2 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("c".to_string(), identity.public()) + .with_agent_version("d".to_string()), + ) + }); + + swarm2.listen().with_memory_addr_external().await; + swarm1.connect(&mut swarm2).await; + + async_std::task::spawn(swarm2.loop_on_next()); + + let swarm_events = futures::stream::poll_fn(|cx| swarm1.poll_next_unpin(cx)) + .take(8) + .collect::>() + .await; + + let infos = swarm_events + .iter() + .filter_map(|e| match e { + SwarmEvent::Behaviour(identify::Event::Received { info, .. 
}) => Some(info.clone()), + _ => None, + }) + .collect::>(); + + assert!( + infos.len() > 1, + "should exchange identify payload more than once" + ); + + let varying_observed_addresses = infos + .iter() + .map(|i| i.observed_addr.clone()) + .collect::>(); + assert_eq!( + varying_observed_addresses.len(), + 1, + "Observed address should not vary on persistent connection" + ); + + let external_address_candidates = swarm_events + .iter() + .filter_map(|e| match e { + SwarmEvent::NewExternalAddrCandidate { address } => Some(address.clone()), + _ => None, + }) + .collect::>(); + + assert_eq!( + external_address_candidates.len(), + 1, + "To only have one external address candidate" + ); + assert_eq!( + &external_address_candidates[0], + varying_observed_addresses.iter().next().unwrap() + ); +} + +#[async_std::test] +async fn emits_unique_listen_addresses() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let mut swarm1 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("a".to_string(), identity.public()) + .with_agent_version("b".to_string()) + .with_interval(Duration::from_secs(1)) + .with_cache_size(10), + ) + }); + let mut swarm2 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("c".to_string(), identity.public()) + .with_agent_version("d".to_string()), + ) + }); + + let (swarm2_mem_listen_addr, swarm2_tcp_listen_addr) = + swarm2.listen().with_memory_addr_external().await; + let swarm2_peer_id = *swarm2.local_peer_id(); + swarm1.connect(&mut swarm2).await; + + async_std::task::spawn(swarm2.loop_on_next()); + + let swarm_events = futures::stream::poll_fn(|cx| swarm1.poll_next_unpin(cx)) + .take(8) + .collect::>() + .await; + + let infos = swarm_events + .iter() + .filter_map(|e| match e { + SwarmEvent::Behaviour(identify::Event::Received { info, .. 
}) => Some(info.clone()), + _ => None, + }) + .collect::>(); + + assert!( + infos.len() > 1, + "should exchange identify payload more than once" + ); + + let listen_addrs = infos + .iter() + .map(|i| i.listen_addrs.clone()) + .collect::>(); + + for addrs in listen_addrs { + assert_eq!(addrs.len(), 2); + assert!(addrs.contains(&swarm2_mem_listen_addr)); + assert!(addrs.contains(&swarm2_tcp_listen_addr)); + } + + let reported_addrs = swarm_events + .iter() + .filter_map(|e| match e { + SwarmEvent::NewExternalAddrOfPeer { peer_id, address } => { + Some((*peer_id, address.clone())) + } + _ => None, + }) + .collect::>(); + + assert_eq!(reported_addrs.len(), 2, "To have two addresses of remote"); + assert!(reported_addrs.contains(&(swarm2_peer_id, swarm2_mem_listen_addr))); + assert!(reported_addrs.contains(&(swarm2_peer_id, swarm2_tcp_listen_addr))); +} #[async_std::test] async fn identify_push() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new(identify::Config::new("a".to_string(), identity.public())) @@ -92,7 +244,7 @@ async fn identify_push() { ) }); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; // First, let the periodic identify do its thing. 
@@ -130,7 +282,9 @@ async fn identify_push() { #[async_std::test] async fn discover_peer_after_disconnect() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new(identify::Config::new("a".to_string(), identity.public())) @@ -142,7 +296,7 @@ async fn discover_peer_after_disconnect() { ) }); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; let swarm1_peer_id = *swarm1.local_peer_id(); @@ -178,3 +332,43 @@ async fn discover_peer_after_disconnect() { assert_eq!(connected_peer, swarm1_peer_id); } + +#[async_std::test] +async fn configured_interval_starts_after_first_identify() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let identify_interval = Duration::from_secs(5); + + let mut swarm1 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("a".to_string(), identity.public()) + .with_interval(identify_interval), + ) + }); + let mut swarm2 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("a".to_string(), identity.public()) + .with_agent_version("b".to_string()), + ) + }); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + async_std::task::spawn(swarm2.loop_on_next()); + + let start = Instant::now(); + + // Wait until we identified. + swarm1 + .wait(|event| { + matches!(event, SwarmEvent::Behaviour(identify::Event::Sent { .. 
})).then_some(()) + }) + .await; + + let time_to_first_identify = Instant::now().duration_since(start); + + assert!(time_to_first_identify < identify_interval) +} diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md index 7e90c78371f6..f7baee2d288b 100644 --- a/protocols/kad/CHANGELOG.md +++ b/protocols/kad/CHANGELOG.md @@ -1,8 +1,60 @@ -## 0.44.6 - unreleased -- Rename `Kademlia` symbols to follow naming convention. +## 0.45.4 + +- Make it mandatory to provide protocol names when creating a `kad::Config`. + Deprecate `kad::Config::default()`, replaced by `kad::Config::new(StreamProtocol)`. + See [PR 5122](https://github.com/libp2p/rust-libp2p/pull/5122). + +## 0.45.3 + +- The progress of the close query iterator shall be decided by ANY of the new peers. + See [PR 4932](https://github.com/libp2p/rust-libp2p/pull/4932). + +## 0.45.2 + +- Ensure `Multiaddr` handled and returned by `Behaviour` are `/p2p` terminated. + See [PR 4596](https://github.com/libp2p/rust-libp2p/pull/4596). + +## 0.45.1 + +- Fix a bug where calling `Behaviour::remove_address` with an address not in the peer's bucket would remove the peer from the routing table if the bucket has only one address left. + See [PR 4816](https://github.com/libp2p/rust-libp2p/pull/4816) +- Add `std::fmt::Display` implementation on `QueryId`. + See [PR 4814](https://github.com/libp2p/rust-libp2p/pull/4814). + +## 0.45.0 + +- Remove deprecated `kad::Config::set_connection_idle_timeout` in favor of `SwarmBuilder::idle_connection_timeout`. + See [PR 4659](https://github.com/libp2p/rust-libp2p/pull/4659). +- Emit `ModeChanged` event whenever we automatically reconfigure the mode. + See [PR 4503](https://github.com/libp2p/rust-libp2p/pull/4503). +- Make previously "deprecated" `record` module private. + See [PR 4035](https://github.com/libp2p/rust-libp2p/pull/4035). +- Expose hashed bytes of KBucketKey. + See [PR 4698](https://github.com/libp2p/rust-libp2p/pull/4698). 
+- Remove previously deprecated type-aliases. + Users should follow the convention of importing the `libp2p::kad` module and referring to symbols as `kad::Behaviour` etc. + See [PR 4733](https://github.com/libp2p/rust-libp2p/pull/4733). + +## 0.44.6 + +- Rename `Kademlia` symbols to follow naming convention. See [PR 4547]. +- Fix a bug where we didn't detect a remote peer moving into client-state. + See [PR 4639](https://github.com/libp2p/rust-libp2p/pull/4639). +- Re-export `NodeStatus`. + See [PR 4645]. +- Deprecate `kad::Config::set_connection_idle_timeout` in favor of `SwarmBuilder::idle_connection_timeout`. + See [PR 4675]. [PR 4547]: https://github.com/libp2p/rust-libp2p/pull/4547 +[PR 4645]: https://github.com/libp2p/rust-libp2p/pull/4645 +[PR 4675]: https://github.com/libp2p/rust-libp2p/pull/4675 + + ## 0.44.5 - Migrate to `quick-protobuf-codec` crate for codec logic. diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index f97f4ba03adb..bde0d5f7c845 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-kad" edition = "2021" rust-version = { workspace = true } description = "Kademlia protocol for libp2p" -version = "0.44.6" +version = "0.45.4" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -15,28 +15,27 @@ arrayvec = "0.7.4" bytes = "1" either = "1.9" fnv = "1.0" -asynchronous-codec = "0.6" -futures = "0.3.28" -log = "0.4" +asynchronous-codec = { workspace = true } +futures = "0.3.30" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } +futures-bounded = { workspace = true } quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } -libp2p-identity = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } rand = "0.8" sha2 = "0.10.8" -smallvec = "1.11.1" +smallvec = "1.12.0" uint = "0.9" -unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } void = "1.0" futures-timer 
= "3.0.2" instant = "0.1.12" serde = { version = "1.0", optional = true, features = ["derive"] } thiserror = "1" +tracing = "0.1.37" [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } -env_logger = "0.10.0" futures-timer = "3.0" libp2p-identify = { path = "../identify" } libp2p-noise = { workspace = true } @@ -44,6 +43,7 @@ libp2p-swarm = { path = "../../swarm", features = ["macros"] } libp2p-swarm-test = { path = "../../swarm-test" } libp2p-yamux = { workspace = true } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [features] serde = ["dep:serde", "bytes/serde"] diff --git a/protocols/kad/src/addresses.rs b/protocols/kad/src/addresses.rs index 3c2af4173fde..0b3dc71e649f 100644 --- a/protocols/kad/src/addresses.rs +++ b/protocols/kad/src/addresses.rs @@ -23,6 +23,7 @@ use smallvec::SmallVec; use std::fmt; /// A non-empty list of (unique) addresses of a peer in the routing table. +/// Every address must be a fully-qualified /p2p address. #[derive(Clone)] pub struct Addresses { addrs: SmallVec<[Multiaddr; 6]>, @@ -67,7 +68,7 @@ impl Addresses { /// otherwise unreachable. 
#[allow(clippy::result_unit_err)] pub fn remove(&mut self, addr: &Multiaddr) -> Result<(), ()> { - if self.addrs.len() == 1 { + if self.addrs.len() == 1 && self.addrs[0] == *addr { return Err(()); } @@ -113,3 +114,76 @@ impl fmt::Debug for Addresses { f.debug_list().entries(self.addrs.iter()).finish() } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn given_one_address_when_removing_different_one_returns_ok() { + let mut addresses = make_addresses([tcp_addr(1234)]); + + let result = addresses.remove(&tcp_addr(4321)); + + assert!(result.is_ok()); + assert_eq!( + addresses.into_vec(), + vec![tcp_addr(1234)], + "`Addresses` to not change because we tried to remove a non-present address" + ); + } + + #[test] + fn given_one_address_when_removing_correct_one_returns_err() { + let mut addresses = make_addresses([tcp_addr(1234)]); + + let result = addresses.remove(&tcp_addr(1234)); + + assert!(result.is_err()); + assert_eq!( + addresses.into_vec(), + vec![tcp_addr(1234)], + "`Addresses` to not be empty because it would have been the last address to be removed" + ); + } + + #[test] + fn given_many_addresses_when_removing_different_one_does_not_remove_and_returns_ok() { + let mut addresses = make_addresses([tcp_addr(1234), tcp_addr(4321)]); + + let result = addresses.remove(&tcp_addr(5678)); + + assert!(result.is_ok()); + assert_eq!( + addresses.into_vec(), + vec![tcp_addr(1234), tcp_addr(4321)], + "`Addresses` to not change because we tried to remove a non-present address" + ); + } + + #[test] + fn given_many_addresses_when_removing_correct_one_removes_and_returns_ok() { + let mut addresses = make_addresses([tcp_addr(1234), tcp_addr(4321)]); + + let result = addresses.remove(&tcp_addr(1234)); + + assert!(result.is_ok()); + assert_eq!( + addresses.into_vec(), + vec![tcp_addr(4321)], + "`Addresses to no longer contain address was present and then removed`" + ); + } + + /// Helper function to easily initialize Addresses struct with multiple addresses. 
+ fn make_addresses(addresses: impl IntoIterator) -> Addresses { + Addresses { + addrs: SmallVec::from_iter(addresses), + } + } + + /// Helper function to create a tcp Multiaddr with a specific port + fn tcp_addr(port: u16) -> Multiaddr { + format!("/ip4/127.0.0.1/tcp/{port}").parse().unwrap() + } +} diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index 262962cbd1ff..b237fe11dda7 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -24,16 +24,16 @@ mod test; use crate::addresses::Addresses; use crate::handler::{Handler, HandlerEvent, HandlerIn, RequestId}; -use crate::jobs::*; use crate::kbucket::{self, Distance, KBucketsTable, NodeStatus}; use crate::protocol::{ConnectionType, KadPeer, ProtocolConfig}; use crate::query::{Query, QueryConfig, QueryId, QueryPool, QueryPoolState}; -use crate::record_priv::{ +use crate::record::{ self, store::{self, RecordStore}, ProviderRecord, Record, }; use crate::K_VALUE; +use crate::{jobs::*, protocol}; use fnv::{FnvHashMap, FnvHashSet}; use instant::Instant; use libp2p_core::{ConnectedPoint, Endpoint, Multiaddr}; @@ -43,11 +43,10 @@ use libp2p_swarm::behaviour::{ }; use libp2p_swarm::{ dial_opts::{self, DialOpts}, - ConnectionDenied, ConnectionId, DialError, ExternalAddresses, ListenAddresses, - NetworkBehaviour, NotifyHandler, PollParameters, StreamProtocol, THandler, THandlerInEvent, + ConnectionDenied, ConnectionHandler, ConnectionId, DialError, ExternalAddresses, + ListenAddresses, NetworkBehaviour, NotifyHandler, StreamProtocol, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use log::{debug, info, warn}; use smallvec::SmallVec; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::fmt; @@ -56,6 +55,7 @@ use std::task::{Context, Poll, Waker}; use std::time::Duration; use std::vec; use thiserror::Error; +use tracing::Level; pub use crate::query::QueryStats; @@ -96,9 +96,6 @@ pub struct Behaviour { /// The TTL of provider records. 
provider_record_ttl: Option, - /// How long to keep connections alive when they're idle. - connection_idle_timeout: Duration, - /// Queued events to return when the behaviour is being polled. queued_events: VecDeque>, @@ -151,7 +148,7 @@ pub enum BucketInserts { /// This can be used for e.g. signature verification or validating /// the accompanying [`Key`]. /// -/// [`Key`]: crate::record_priv::Key +/// [`Key`]: crate::record::Key #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum StoreInserts { /// Whenever a (provider) record is received, @@ -182,27 +179,16 @@ pub struct Config { record_filtering: StoreInserts, provider_record_ttl: Option, provider_publication_interval: Option, - connection_idle_timeout: Duration, kbucket_inserts: BucketInserts, caching: Caching, } impl Default for Config { + /// Returns the default configuration. + /// + /// Deprecated: use `Config::new` instead. fn default() -> Self { - Config { - kbucket_pending_timeout: Duration::from_secs(60), - query_config: QueryConfig::default(), - protocol_config: Default::default(), - record_ttl: Some(Duration::from_secs(36 * 60 * 60)), - record_replication_interval: Some(Duration::from_secs(60 * 60)), - record_publication_interval: Some(Duration::from_secs(24 * 60 * 60)), - record_filtering: StoreInserts::Unfiltered, - provider_publication_interval: Some(Duration::from_secs(12 * 60 * 60)), - provider_record_ttl: Some(Duration::from_secs(24 * 60 * 60)), - connection_idle_timeout: Duration::from_secs(10), - kbucket_inserts: BucketInserts::OnConnected, - caching: Caching::Enabled { max_peers: 1 }, - } + Self::new(protocol::DEFAULT_PROTO_NAME) } } @@ -222,6 +208,30 @@ pub enum Caching { } impl Config { + /// Builds a new `Config` with the given protocol name. 
+ pub fn new(protocol_name: StreamProtocol) -> Self { + Config { + kbucket_pending_timeout: Duration::from_secs(60), + query_config: QueryConfig::default(), + protocol_config: ProtocolConfig::new(protocol_name), + record_ttl: Some(Duration::from_secs(36 * 60 * 60)), + record_replication_interval: Some(Duration::from_secs(60 * 60)), + record_publication_interval: Some(Duration::from_secs(24 * 60 * 60)), + record_filtering: StoreInserts::Unfiltered, + provider_publication_interval: Some(Duration::from_secs(12 * 60 * 60)), + provider_record_ttl: Some(Duration::from_secs(24 * 60 * 60)), + kbucket_inserts: BucketInserts::OnConnected, + caching: Caching::Enabled { max_peers: 1 }, + } + } + + /// Returns the default configuration. + #[deprecated(note = "Use `Config::new` instead")] + #[allow(clippy::should_implement_trait)] + pub fn default() -> Self { + Default::default() + } + /// Sets custom protocol names. /// /// Kademlia nodes only communicate with other nodes using the same protocol @@ -231,6 +241,8 @@ impl Config { /// More than one protocol name can be supplied. In this case the node will /// be able to talk to other nodes supporting any of the provided names. /// Multiple names must be used with caution to avoid network partitioning. + #[deprecated(note = "Use `Config::new` instead")] + #[allow(deprecated)] pub fn set_protocol_names(&mut self, names: Vec) -> &mut Self { self.protocol_config.set_protocol_names(names); self @@ -371,12 +383,6 @@ impl Config { self } - /// Sets the amount of time to keep connections alive when they're idle. - pub fn set_connection_idle_timeout(&mut self, duration: Duration) -> &mut Self { - self.connection_idle_timeout = duration; - self - } - /// Modifies the maximum allowed size of individual Kademlia packets. 
/// /// It might be necessary to increase this value if trying to put large @@ -453,7 +459,6 @@ where put_record_job, record_ttl: config.record_ttl, provider_record_ttl: config.provider_record_ttl, - connection_idle_timeout: config.connection_idle_timeout, external_addresses: Default::default(), local_peer_id: id, connections: Default::default(), @@ -525,9 +530,13 @@ where /// If the routing table has been updated as a result of this operation, /// a [`Event::RoutingUpdated`] event is emitted. pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) -> RoutingUpdate { + // ensuring address is a fully-qualified /p2p multiaddr + let Ok(address) = address.with_p2p(*peer) else { + return RoutingUpdate::Failed; + }; let key = kbucket::Key::from(*peer); match self.kbuckets.entry(&key) { - kbucket::Entry::Present(mut entry, _) => { + Some(kbucket::Entry::Present(mut entry, _)) => { if entry.value().insert(address) { self.queued_events .push_back(ToSwarm::GenerateEvent(Event::RoutingUpdated { @@ -544,11 +553,11 @@ where } RoutingUpdate::Success } - kbucket::Entry::Pending(mut entry, _) => { + Some(kbucket::Entry::Pending(mut entry, _)) => { entry.value().insert(address); RoutingUpdate::Pending } - kbucket::Entry::Absent(entry) => { + Some(kbucket::Entry::Absent(entry)) => { let addresses = Addresses::new(address); let status = if self.connected_peers.contains(peer) { NodeStatus::Connected @@ -573,7 +582,7 @@ where RoutingUpdate::Success } kbucket::InsertResult::Full => { - debug!("Bucket full. Peer not added to routing table: {}", peer); + tracing::debug!(%peer, "Bucket full. 
Peer not added to routing table"); RoutingUpdate::Failed } kbucket::InsertResult::Pending { disconnected } => { @@ -586,7 +595,7 @@ where } } } - kbucket::Entry::SelfEntry => RoutingUpdate::Failed, + None => RoutingUpdate::Failed, } } @@ -605,8 +614,9 @@ where peer: &PeerId, address: &Multiaddr, ) -> Option, Addresses>> { + let address = &address.to_owned().with_p2p(*peer).ok()?; let key = kbucket::Key::from(*peer); - match self.kbuckets.entry(&key) { + match self.kbuckets.entry(&key)? { kbucket::Entry::Present(mut entry, _) => { if entry.value().remove(address).is_err() { Some(entry.remove()) // it is the last address, thus remove the peer. @@ -621,7 +631,7 @@ where None } } - kbucket::Entry::Absent(..) | kbucket::Entry::SelfEntry => None, + kbucket::Entry::Absent(..) => None, } } @@ -634,10 +644,10 @@ where peer: &PeerId, ) -> Option, Addresses>> { let key = kbucket::Key::from(*peer); - match self.kbuckets.entry(&key) { + match self.kbuckets.entry(&key)? { kbucket::Entry::Present(entry, _) => Some(entry.remove()), kbucket::Entry::Pending(entry, _) => Some(entry.remove()), - kbucket::Entry::Absent(..) | kbucket::Entry::SelfEntry => None, + kbucket::Entry::Absent(..) => None, } } @@ -692,7 +702,7 @@ where /// /// The result of this operation is delivered in a /// [`Event::OutboundQueryProgressed{QueryResult::GetRecord}`]. - pub fn get_record(&mut self, key: record_priv::Key) -> QueryId { + pub fn get_record(&mut self, key: record::Key) -> QueryId { let record = if let Some(record) = self.store.get(&key) { if record.is_expired(Instant::now()) { self.store.remove(&key); @@ -842,7 +852,7 @@ where /// This is a _local_ operation. However, it also has the effect that /// the record will no longer be periodically re-published, allowing the /// record to eventually expire throughout the DHT. 
- pub fn remove_record(&mut self, key: &record_priv::Key) { + pub fn remove_record(&mut self, key: &record::Key) { if let Some(r) = self.store.get(key) { if r.publisher.as_ref() == Some(self.kbuckets.local_key().preimage()) { self.store.remove(key) @@ -912,7 +922,7 @@ where /// /// The results of the (repeated) provider announcements sent by this node are /// reported via [`Event::OutboundQueryProgressed{QueryResult::StartProviding}`]. - pub fn start_providing(&mut self, key: record_priv::Key) -> Result { + pub fn start_providing(&mut self, key: record::Key) -> Result { // Note: We store our own provider records locally without local addresses // to avoid redundant storage and outdated addresses. Instead these are // acquired on demand when returning a `ProviderRecord` for the local node. @@ -940,7 +950,7 @@ where /// /// This is a local operation. The local node will still be considered as a /// provider for the key by other nodes until these provider records expire. - pub fn stop_providing(&mut self, key: &record_priv::Key) { + pub fn stop_providing(&mut self, key: &record::Key) { self.store .remove_provider(key, self.kbuckets.local_key().preimage()); } @@ -949,7 +959,7 @@ where /// /// The result of this operation is delivered in a /// reported via [`Event::OutboundQueryProgressed{QueryResult::GetProviders}`]. 
- pub fn get_providers(&mut self, key: record_priv::Key) -> QueryId { + pub fn get_providers(&mut self, key: record::Key) -> QueryId { let providers: HashSet<_> = self .store .providers(&key) @@ -1024,7 +1034,7 @@ where let num_connections = self.connections.len(); - log::debug!( + tracing::debug!( "Re-configuring {} established connection{}", num_connections, if num_connections > 1 { "s" } else { "" } @@ -1045,9 +1055,11 @@ where } fn determine_mode_from_external_addresses(&mut self) { + let old_mode = self.mode; + self.mode = match (self.external_addresses.as_slice(), self.mode) { ([], Mode::Server) => { - log::debug!("Switching to client-mode because we no longer have any confirmed external addresses"); + tracing::debug!("Switching to client-mode because we no longer have any confirmed external addresses"); Mode::Client } @@ -1057,11 +1069,11 @@ where Mode::Client } (confirmed_external_addresses, Mode::Client) => { - if log::log_enabled!(log::Level::Debug) { + if tracing::enabled!(Level::DEBUG) { let confirmed_external_addresses = to_comma_separated_list(confirmed_external_addresses); - log::debug!("Switching to server-mode assuming that one of [{confirmed_external_addresses}] is externally reachable"); + tracing::debug!("Switching to server-mode assuming that one of [{confirmed_external_addresses}] is externally reachable"); } Mode::Server @@ -1079,6 +1091,13 @@ where }; self.reconfigure_mode(); + + if old_mode != self.mode { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::ModeChanged { + new_mode: self.mode, + })); + } } /// Processes discovered peers from a successful request in an iterative `Query`. 
@@ -1089,13 +1108,13 @@ where let local_id = self.kbuckets.local_key().preimage(); let others_iter = peers.filter(|p| &p.node_id != local_id); if let Some(query) = self.queries.get_mut(query_id) { - log::trace!("Request to {:?} in query {:?} succeeded.", source, query_id); + tracing::trace!(peer=%source, query=?query_id, "Request to peer in query succeeded"); for peer in others_iter.clone() { - log::trace!( - "Peer {:?} reported by {:?} in query {:?}.", - peer, - source, - query_id + tracing::trace!( + ?peer, + %source, + query=?query_id, + "Peer reported by source in query" ); let addrs = peer.multiaddrs.iter().cloned().collect(); query.inner.addresses.insert(peer.node_id, addrs); @@ -1125,7 +1144,7 @@ where } /// Collects all peers who are known to be providers of the value for a given `Multihash`. - fn provider_peers(&mut self, key: &record_priv::Key, source: &PeerId) -> Vec { + fn provider_peers(&mut self, key: &record::Key, source: &PeerId) -> Vec { let kbuckets = &mut self.kbuckets; let connected = &mut self.connected_peers; let listen_addresses = &self.listen_addresses; @@ -1162,7 +1181,8 @@ where let key = kbucket::Key::from(node_id); kbuckets .entry(&key) - .view() + .as_mut() + .and_then(|e| e.view()) .map(|e| e.node.value.clone().into_vec()) } } else { @@ -1182,7 +1202,7 @@ where } /// Starts an iterative `ADD_PROVIDER` query for the given key. 
- fn start_add_provider(&mut self, key: record_priv::Key, context: AddProviderContext) { + fn start_add_provider(&mut self, key: record::Key, context: AddProviderContext) { let info = QueryInfo::AddProvider { context, key: key.clone(), @@ -1218,7 +1238,7 @@ where ) { let key = kbucket::Key::from(peer); match self.kbuckets.entry(&key) { - kbucket::Entry::Present(mut entry, old_status) => { + Some(kbucket::Entry::Present(mut entry, old_status)) => { if old_status != new_status { entry.update(new_status) } @@ -1241,7 +1261,7 @@ where } } - kbucket::Entry::Pending(mut entry, old_status) => { + Some(kbucket::Entry::Pending(mut entry, old_status)) => { if let Some(address) = address { entry.value().insert(address); } @@ -1250,7 +1270,7 @@ where } } - kbucket::Entry::Absent(entry) => { + Some(kbucket::Entry::Absent(entry)) => { // Only connected nodes with a known address are newly inserted. if new_status != NodeStatus::Connected { return; @@ -1285,7 +1305,10 @@ where self.queued_events.push_back(ToSwarm::GenerateEvent(event)); } kbucket::InsertResult::Full => { - debug!("Bucket full. Peer not added to routing table: {}", peer); + tracing::debug!( + %peer, + "Bucket full. Peer not added to routing table" + ); let address = addresses.first().clone(); self.queued_events.push_back(ToSwarm::GenerateEvent( Event::RoutablePeer { peer, address }, @@ -1322,7 +1345,7 @@ where /// Handles a finished (i.e. successful) query. 
fn query_finished(&mut self, q: Query) -> Option { let query_id = q.id(); - log::trace!("Query {:?} finished.", query_id); + tracing::trace!(query=?query_id, "Query finished"); let result = q.into_result(); match result.inner.info { QueryInfo::Bootstrap { @@ -1522,7 +1545,7 @@ where get_closest_peers_stats, }, } => { - let mk_result = |key: record_priv::Key| { + let mk_result = |key: record::Key| { if success.len() >= quorum.get() { Ok(PutRecordOk { key }) } else { @@ -1549,7 +1572,7 @@ where step: ProgressStep::first_and_last(), }), PutRecordContext::Replicate => { - debug!("Record replicated: {:?}", record.key); + tracing::debug!(record=?record.key, "Record replicated"); None } } @@ -1560,7 +1583,7 @@ where /// Handles a query that timed out. fn query_timeout(&mut self, query: Query) -> Option { let query_id = query.id(); - log::trace!("Query {:?} timed out.", query_id); + tracing::trace!(query=?query_id, "Query timed out"); let result = query.into_result(); match result.inner.info { QueryInfo::Bootstrap { @@ -1658,11 +1681,14 @@ where }), PutRecordContext::Replicate => match phase { PutRecordPhase::GetClosestPeers => { - warn!("Locating closest peers for replication failed: {:?}", err); + tracing::warn!( + "Locating closest peers for replication failed: {:?}", + err + ); None } PutRecordPhase::PutRecord { .. 
} => { - debug!("Replicating record failed: {:?}", err); + tracing::debug!("Replicating record failed: {:?}", err); None } }, @@ -1762,9 +1788,9 @@ where match self.record_filtering { StoreInserts::Unfiltered => match self.store.put(record.clone()) { Ok(()) => { - debug!( - "Record stored: {:?}; {} bytes", - record.key, + tracing::debug!( + record=?record.key, + "Record stored: {} bytes", record.value.len() ); self.queued_events.push_back(ToSwarm::GenerateEvent( @@ -1778,7 +1804,7 @@ where )); } Err(e) => { - info!("Record not stored: {:?}", e); + tracing::info!("Record not stored: {:?}", e); self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), @@ -1820,7 +1846,7 @@ where } /// Processes a provider record received from a peer. - fn provider_received(&mut self, key: record_priv::Key, provider: KadPeer) { + fn provider_received(&mut self, key: record::Key, provider: KadPeer) { if &provider.node_id != self.kbuckets.local_key().preimage() { let record = ProviderRecord { key, @@ -1831,7 +1857,7 @@ where match self.record_filtering { StoreInserts::Unfiltered => { if let Err(e) = self.store.add_provider(record) { - info!("Provider record not stored: {:?}", e); + tracing::info!("Provider record not stored: {:?}", e); return; } @@ -1855,16 +1881,17 @@ where fn address_failed(&mut self, peer_id: PeerId, address: &Multiaddr) { let key = kbucket::Key::from(peer_id); - if let Some(addrs) = self.kbuckets.entry(&key).value() { + if let Some(addrs) = self.kbuckets.entry(&key).as_mut().and_then(|e| e.value()) { // TODO: Ideally, the address should only be removed if the error can // be classified as "permanent" but since `err` is currently a borrowed // trait object without a `'static` bound, even downcasting for inspection // of the error is not possible (and also not truly desirable or ergonomic). // The error passed in should rather be a dedicated enum. 
if addrs.remove(address).is_ok() { - debug!( - "Address '{}' removed from peer '{}' due to error.", - address, peer_id + tracing::debug!( + peer=%peer_id, + %address, + "Address removed from peer due to error." ); } else { // Despite apparently having no reachable address (any longer), @@ -1876,10 +1903,11 @@ where // into the same bucket. This is handled transparently by the // `KBucketsTable` and takes effect through `KBucketsTable::take_applied_pending` // within `Behaviour::poll`. - debug!( - "Last remaining address '{}' of peer '{}' is unreachable.", - address, peer_id, - ) + tracing::debug!( + peer=%peer_id, + %address, + "Last remaining address of peer is unreachable." + ); } } @@ -1903,29 +1931,8 @@ where self.address_failed(peer_id, addr); } - // When a connection is established, we don't know yet whether the - // remote supports the configured protocol name. Only once a connection - // handler reports [`HandlerEvent::ProtocolConfirmed`] do we - // update the local routing table. - // Peer's first connection. if other_established == 0 { - // Queue events for sending pending RPCs to the connected peer. - // There can be only one pending RPC for a particular peer and query per definition. - for (peer_id, event) in self.queries.iter_mut().filter_map(|q| { - q.inner - .pending_rpcs - .iter() - .position(|(p, _)| p == &peer_id) - .map(|p| q.inner.pending_rpcs.remove(p)) - }) { - self.queued_events.push_back(ToSwarm::NotifyHandler { - peer_id, - event, - handler: NotifyHandler::Any, - }); - } - self.connected_peers.insert(peer_id); } } @@ -1942,24 +1949,34 @@ where let (old, new) = (old.get_remote_address(), new.get_remote_address()); // Update routing table. 
- if let Some(addrs) = self.kbuckets.entry(&kbucket::Key::from(peer)).value() { + if let Some(addrs) = self + .kbuckets + .entry(&kbucket::Key::from(peer)) + .as_mut() + .and_then(|e| e.value()) + { if addrs.replace(old, new) { - debug!( - "Address '{}' replaced with '{}' for peer '{}'.", - old, new, peer + tracing::debug!( + %peer, + old_address=%old, + new_address=%new, + "Old address replaced with new address for peer." ); } else { - debug!( - "Address '{}' not replaced with '{}' for peer '{}' as old address wasn't \ - present.", - old, new, peer + tracing::debug!( + %peer, + old_address=%old, + new_address=%new, + "Old address not replaced with new address for peer as old address wasn't present.", ); } } else { - debug!( - "Address '{}' not replaced with '{}' for peer '{}' as peer is not present in the \ - routing table.", - old, new, peer + tracing::debug!( + %peer, + old_address=%old, + new_address=%new, + "Old address not replaced with new address for peer as peer is not present in the \ + routing table." ); } @@ -1989,11 +2006,7 @@ where } fn on_dial_failure(&mut self, DialFailure { peer_id, error, .. }: DialFailure) { - let peer_id = match peer_id { - Some(id) => id, - // Not interested in dial failures to unknown peers. - None => return, - }; + let Some(peer_id) = peer_id else { return }; match error { DialError::LocalPeerId { .. } @@ -2013,7 +2026,9 @@ where } } DialError::DialPeerConditionFalse( - dial_opts::PeerCondition::Disconnected | dial_opts::PeerCondition::NotDialing, + dial_opts::PeerCondition::Disconnected + | dial_opts::PeerCondition::NotDialing + | dial_opts::PeerCondition::DisconnectedAndNotDialing, ) => { // We might (still) be connected, or about to be connected, thus do not report the // failure to the queries. @@ -2031,7 +2046,7 @@ where remaining_established, connection_id, .. 
- }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { self.connections.remove(&connection_id); @@ -2043,6 +2058,27 @@ where self.connected_peers.remove(&peer_id); } } + + /// Preloads a new [`Handler`] with requests that are waiting to be sent to the newly connected peer. + fn preload_new_handler( + &mut self, + handler: &mut Handler, + connection_id: ConnectionId, + peer: PeerId, + ) { + self.connections.insert(connection_id, peer); + // Queue events for sending pending RPCs to the connected peer. + // There can be only one pending RPC for a particular peer and query per definition. + for (_peer_id, event) in self.queries.iter_mut().filter_map(|q| { + q.inner + .pending_rpcs + .iter() + .position(|(p, _)| p == &peer) + .map(|p| q.inner.pending_rpcs.remove(p)) + }) { + handler.on_behaviour_event(event) + } + } } /// Exponentially decrease the given duration (base 2). @@ -2068,16 +2104,16 @@ where local_addr: local_addr.clone(), send_back_addr: remote_addr.clone(), }; - self.connections.insert(connection_id, peer); - Ok(Handler::new( + let mut handler = Handler::new( self.protocol_config.clone(), - self.connection_idle_timeout, connected_point, peer, self.mode, - connection_id, - )) + ); + self.preload_new_handler(&mut handler, connection_id, peer); + + Ok(handler) } fn handle_established_outbound_connection( @@ -2091,16 +2127,16 @@ where address: addr.clone(), role_override, }; - self.connections.insert(connection_id, peer); - Ok(Handler::new( + let mut handler = Handler::new( self.protocol_config.clone(), - self.connection_idle_timeout, connected_point, peer, self.mode, - connection_id, - )) + ); + self.preload_new_handler(&mut handler, connection_id, peer); + + Ok(handler) } fn handle_pending_outbound_connection( @@ -2119,7 +2155,7 @@ where // the addresses of that peer in the k-buckets. 
let key = kbucket::Key::from(peer_id); let mut peer_addrs = - if let kbucket::Entry::Present(mut entry, _) = self.kbuckets.entry(&key) { + if let Some(kbucket::Entry::Present(mut entry, _)) = self.kbuckets.entry(&key) { let addrs = entry.value().iter().cloned().collect::>(); debug_assert!(!addrs.is_empty(), "Empty peer addresses in routing table."); addrs @@ -2252,12 +2288,11 @@ where } } } - HandlerEvent::QueryError { query_id, error } => { - log::debug!( - "Request to {:?} in query {:?} failed with {:?}", - source, - query_id, + tracing::debug!( + peer=%source, + query=?query_id, + "Request to peer in query failed with {:?}", error ); // If the query to which the error relates is still active, @@ -2345,7 +2380,7 @@ where *step = step.next(); } else { - log::trace!("Record with key {:?} not found at {}", key, source); + tracing::trace!(record=?key, %source, "Record not found at source"); if let Caching::Enabled { max_peers } = self.caching { let source_key = kbucket::Key::from(source); let target_key = kbucket::Key::from(key.clone()); @@ -2386,13 +2421,13 @@ where let peers = success.clone(); let finished = query.try_finish(peers.iter()); if !finished { - debug!( - "PutRecord query ({:?}) reached quorum ({}/{}) with response \ - from peer {} but could not yet finish.", - query_id, + tracing::debug!( + peer=%source, + query=?query_id, + "PutRecord query reached quorum ({}/{}) with response \ + from peer but could not yet finish.", peers.len(), quorum, - source, ); } } @@ -2402,10 +2437,10 @@ where }; } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - _: &mut impl PollParameters, ) -> Poll>> { let now = Instant::now(); @@ -2526,7 +2561,7 @@ where } } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { self.listen_addresses.on_swarm_event(&event); let external_addresses_changed = self.external_addresses.on_swarm_event(&event); @@ 
-2543,15 +2578,7 @@ where } FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure), FromSwarm::AddressChange(address_change) => self.on_address_change(address_change), - FromSwarm::ExpiredListenAddr(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } } @@ -2666,6 +2693,12 @@ pub enum Event { /// See [`Behaviour::kbucket`] for insight into the contents of /// the k-bucket of `peer`. PendingRoutablePeer { peer: PeerId, address: Multiaddr }, + + /// This peer's mode has been updated automatically. + /// + /// This happens in response to an external + /// address being added or removed. + ModeChanged { new_mode: Mode }, } /// Information about progress events. @@ -2786,22 +2819,22 @@ pub enum GetRecordOk { pub enum GetRecordError { #[error("the record was not found")] NotFound { - key: record_priv::Key, + key: record::Key, closest_peers: Vec, }, #[error("the quorum failed; needed {quorum} peers")] QuorumFailed { - key: record_priv::Key, + key: record::Key, records: Vec, quorum: NonZeroUsize, }, #[error("the request timed out")] - Timeout { key: record_priv::Key }, + Timeout { key: record::Key }, } impl GetRecordError { /// Gets the key of the record for which the operation failed. - pub fn key(&self) -> &record_priv::Key { + pub fn key(&self) -> &record::Key { match self { GetRecordError::QuorumFailed { key, .. } => key, GetRecordError::Timeout { key, .. } => key, @@ -2811,7 +2844,7 @@ impl GetRecordError { /// Extracts the key of the record for which the operation failed, /// consuming the error. - pub fn into_key(self) -> record_priv::Key { + pub fn into_key(self) -> record::Key { match self { GetRecordError::QuorumFailed { key, .. } => key, GetRecordError::Timeout { key, .. 
} => key, @@ -2826,7 +2859,7 @@ pub type PutRecordResult = Result; /// The successful result of [`Behaviour::put_record`]. #[derive(Debug, Clone)] pub struct PutRecordOk { - pub key: record_priv::Key, + pub key: record::Key, } /// The error result of [`Behaviour::put_record`]. @@ -2834,14 +2867,14 @@ pub struct PutRecordOk { pub enum PutRecordError { #[error("the quorum failed; needed {quorum} peers")] QuorumFailed { - key: record_priv::Key, + key: record::Key, /// [`PeerId`]s of the peers the record was successfully stored on. success: Vec, quorum: NonZeroUsize, }, #[error("the request timed out")] Timeout { - key: record_priv::Key, + key: record::Key, /// [`PeerId`]s of the peers the record was successfully stored on. success: Vec, quorum: NonZeroUsize, @@ -2850,7 +2883,7 @@ pub enum PutRecordError { impl PutRecordError { /// Gets the key of the record for which the operation failed. - pub fn key(&self) -> &record_priv::Key { + pub fn key(&self) -> &record::Key { match self { PutRecordError::QuorumFailed { key, .. } => key, PutRecordError::Timeout { key, .. } => key, @@ -2859,7 +2892,7 @@ impl PutRecordError { /// Extracts the key of the record for which the operation failed, /// consuming the error. - pub fn into_key(self) -> record_priv::Key { + pub fn into_key(self) -> record::Key { match self { PutRecordError::QuorumFailed { key, .. } => key, PutRecordError::Timeout { key, .. } => key, @@ -2928,7 +2961,7 @@ pub type GetProvidersResult = Result; #[derive(Debug, Clone)] pub enum GetProvidersOk { FoundProviders { - key: record_priv::Key, + key: record::Key, /// The new set of providers discovered. providers: HashSet, }, @@ -2942,14 +2975,14 @@ pub enum GetProvidersOk { pub enum GetProvidersError { #[error("the request timed out")] Timeout { - key: record_priv::Key, + key: record::Key, closest_peers: Vec, }, } impl GetProvidersError { /// Gets the key for which the operation failed. 
- pub fn key(&self) -> &record_priv::Key { + pub fn key(&self) -> &record::Key { match self { GetProvidersError::Timeout { key, .. } => key, } @@ -2957,7 +2990,7 @@ impl GetProvidersError { /// Extracts the key for which the operation failed, /// consuming the error. - pub fn into_key(self) -> record_priv::Key { + pub fn into_key(self) -> record::Key { match self { GetProvidersError::Timeout { key, .. } => key, } @@ -2970,26 +3003,26 @@ pub type AddProviderResult = Result; /// The successful result of publishing a provider record. #[derive(Debug, Clone)] pub struct AddProviderOk { - pub key: record_priv::Key, + pub key: record::Key, } /// The possible errors when publishing a provider record. #[derive(Debug, Clone, Error)] pub enum AddProviderError { #[error("the request timed out")] - Timeout { key: record_priv::Key }, + Timeout { key: record::Key }, } impl AddProviderError { /// Gets the key for which the operation failed. - pub fn key(&self) -> &record_priv::Key { + pub fn key(&self) -> &record::Key { match self { AddProviderError::Timeout { key, .. } => key, } } /// Extracts the key for which the operation failed, - pub fn into_key(self) -> record_priv::Key { + pub fn into_key(self) -> record::Key { match self { AddProviderError::Timeout { key, .. } => key, } @@ -3088,7 +3121,7 @@ pub enum QueryInfo { /// A (repeated) query initiated by [`Behaviour::get_providers`]. GetProviders { /// The key for which to search for providers. - key: record_priv::Key, + key: record::Key, /// The number of providers found so far. providers_found: usize, /// Current index of events. @@ -3098,7 +3131,7 @@ pub enum QueryInfo { /// A (repeated) query initiated by [`Behaviour::start_providing`]. AddProvider { /// The record key. - key: record_priv::Key, + key: record::Key, /// The current phase of the query. phase: AddProviderPhase, /// The execution context of the query. @@ -3119,7 +3152,7 @@ pub enum QueryInfo { /// A (repeated) query initiated by [`Behaviour::get_record`]. 
GetRecord { /// The key to look for. - key: record_priv::Key, + key: record::Key, /// Current index of events. step: ProgressStep, /// Did we find at least one record? @@ -3163,6 +3196,7 @@ impl QueryInfo { multiaddrs: external_addresses.clone(), connection_ty: crate::protocol::ConnectionType::Connected, }, + query_id, }, }, QueryInfo::GetRecord { key, .. } => HandlerIn::GetRecord { diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index f85208ee817a..20378bb6a3f9 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -23,8 +23,8 @@ use super::*; use crate::kbucket::Distance; -use crate::record_priv::{store::MemoryStore, Key}; -use crate::{K_VALUE, SHA_256_MH}; +use crate::record::{store::MemoryStore, Key}; +use crate::{K_VALUE, PROTOCOL_NAME, SHA_256_MH}; use futures::{executor::block_on, future::poll_fn, prelude::*}; use futures_timer::Delay; use libp2p_core::{ @@ -37,7 +37,8 @@ use libp2p_core::{ use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_noise as noise; -use libp2p_swarm::{ConnectionId, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p_swarm::behaviour::ConnectionEstablished; +use libp2p_swarm::{self as swarm, ConnectionId, Swarm, SwarmEvent}; use libp2p_yamux as yamux; use quickcheck::*; use rand::{random, rngs::StdRng, thread_rng, Rng, SeedableRng}; @@ -67,7 +68,13 @@ fn build_node_with_config(cfg: Config) -> (Multiaddr, TestSwarm) { let store = MemoryStore::new(local_id); let behaviour = Behaviour::with_config(local_id, store, cfg); - let mut swarm = SwarmBuilder::without_executor(transport, behaviour, local_id).build(); + let mut swarm = Swarm::new( + transport, + behaviour, + local_id, + swarm::Config::with_async_std_executor() + .with_idle_connection_timeout(Duration::from_secs(5)), + ); let address: Multiaddr = Protocol::Memory(random::()).into(); swarm.listen_on(address.clone()).unwrap(); @@ -166,7 +173,7 @@ fn bootstrap() { // or smaller than K_VALUE. 
let num_group = rng.gen_range(1..(num_total % K_VALUE.get()) + 2); - let mut cfg = Config::default(); + let mut cfg = Config::new(PROTOCOL_NAME); if rng.gen() { cfg.disjoint_query_paths(true); } @@ -314,7 +321,9 @@ fn query_iter() { #[test] fn unresponsive_not_returned_direct() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); // Build one node. It contains fake addresses to non-existing nodes. We ask it to find a // random peer. We make sure that no fake address is returned. @@ -442,7 +451,7 @@ fn get_record_not_found() { .map(|(_addr, swarm)| swarm) .collect::>(); - let target_key = record_priv::Key::from(random_multihash()); + let target_key = record::Key::from(random_multihash()); let qid = swarms[0].behaviour_mut().get_record(target_key.clone()); block_on(poll_fn(move |ctx| { @@ -489,7 +498,7 @@ fn put_record() { // At least 4 nodes, 1 under test + 3 bootnodes. let num_total = usize::max(4, replication_factor.get() * 2); - let mut config = Config::default(); + let mut config = Config::new(PROTOCOL_NAME); config.set_replication_factor(replication_factor); if rng.gen() { config.disjoint_query_paths(true); @@ -851,14 +860,14 @@ fn get_record_many() { /// network where X is equal to the configured replication factor. #[test] fn add_provider() { - fn prop(keys: Vec, seed: Seed) { + fn prop(keys: Vec, seed: Seed) { let mut rng = StdRng::from_seed(seed.0); let replication_factor = NonZeroUsize::new(rng.gen_range(1..(K_VALUE.get() / 2) + 1)).unwrap(); // At least 4 nodes, 1 under test + 3 bootnodes. let num_total = usize::max(4, replication_factor.get() * 2); - let mut config = Config::default(); + let mut config = Config::new(PROTOCOL_NAME); config.set_replication_factor(replication_factor); if rng.gen() { config.disjoint_query_paths(true); @@ -1054,6 +1063,7 @@ fn exceed_jobs_max_queries() { result: QueryResult::GetClosestPeers(Ok(r)), .. 
}) => break assert!(r.peers.is_empty()), + SwarmEvent::Behaviour(Event::ModeChanged { .. }) => {} SwarmEvent::Behaviour(e) => panic!("Unexpected event: {e:?}"), _ => {} } @@ -1073,14 +1083,14 @@ fn exp_decr_expiration_overflow() { } // Right shifting a u64 by >63 results in a panic. - prop_no_panic(Config::default().record_ttl.unwrap(), 64); + prop_no_panic(Config::new(PROTOCOL_NAME).record_ttl.unwrap(), 64); quickcheck(prop_no_panic as fn(_, _)) } #[test] fn disjoint_query_does_not_finish_before_all_paths_did() { - let mut config = Config::default(); + let mut config = Config::new(PROTOCOL_NAME); config.disjoint_query_paths(true); // I.e. setting the amount disjoint paths to be explored to 2. config.set_parallelism(NonZeroUsize::new(2).unwrap()); @@ -1228,7 +1238,7 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { /// the routing table with `BucketInserts::Manual`. #[test] fn manual_bucket_inserts() { - let mut cfg = Config::default(); + let mut cfg = Config::new(PROTOCOL_NAME); cfg.set_kbucket_inserts(BucketInserts::Manual); // 1 -> 2 -> [3 -> ...] let mut swarms = build_connected_nodes_with_config(3, 1, cfg); @@ -1364,7 +1374,7 @@ fn network_behaviour_on_address_change() { #[test] fn get_providers_single() { - fn prop(key: record_priv::Key) { + fn prop(key: record::Key) { let (_, mut single_swarm) = build_node(); single_swarm .behaviour_mut() @@ -1377,6 +1387,7 @@ fn get_providers_single() { result: QueryResult::StartProviding(Ok(_)), .. }) => {} + SwarmEvent::Behaviour(Event::ModeChanged { .. }) => {} SwarmEvent::Behaviour(e) => panic!("Unexpected event: {e:?}"), _ => {} } @@ -1417,7 +1428,7 @@ fn get_providers_single() { } fn get_providers_limit() { - fn prop(key: record_priv::Key) { + fn prop(key: record::Key) { let mut swarms = build_nodes(3); // Let first peer know of second peer and second peer know of third peer. 
diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index 0df4da6bdc71..5e7c2e21b8b6 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -22,29 +22,25 @@ use crate::behaviour::Mode; use crate::protocol::{ KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg, ProtocolConfig, }; -use crate::record_priv::{self, Record}; +use crate::record::{self, Record}; use crate::QueryId; use either::Either; +use futures::channel::oneshot; use futures::prelude::*; use futures::stream::SelectAll; -use instant::Instant; use libp2p_core::{upgrade, ConnectedPoint}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, -}; +use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound}; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionId, KeepAlive, Stream, StreamUpgradeError, - SubstreamProtocol, SupportedProtocols, + ConnectionHandler, ConnectionHandlerEvent, Stream, StreamUpgradeError, SubstreamProtocol, + SupportedProtocols, }; -use log::trace; use std::collections::VecDeque; use std::task::Waker; -use std::{ - error, fmt, io, marker::PhantomData, pin::Pin, task::Context, task::Poll, time::Duration, -}; +use std::time::Duration; +use std::{error, fmt, io, marker::PhantomData, pin::Pin, task::Context, task::Poll}; -const MAX_NUM_SUBSTREAMS: usize = 32; +const MAX_NUM_STREAMS: usize = 32; /// Protocol handler that manages substreams for the Kademlia protocol /// on a single connection with a peer. @@ -60,28 +56,24 @@ pub struct Handler { /// In client mode, we don't accept inbound substreams. mode: Mode, - /// Time after which we close an idle connection. - idle_timeout: Duration, - /// Next unique ID of a connection. next_connec_unique_id: UniqueConnecId, - /// List of active outbound substreams with the state they are in. 
- outbound_substreams: SelectAll, + /// List of active outbound streams. + outbound_substreams: + futures_bounded::FuturesTupleSet>, QueryId>, - /// Number of outbound streams being upgraded right now. - num_requested_outbound_streams: usize, + /// Contains one [`oneshot::Sender`] per outbound stream that we have requested. + pending_streams: + VecDeque, StreamUpgradeError>>>, /// List of outbound substreams that are waiting to become active next. /// Contains the request we want to send, and the user data if we expect an answer. - pending_messages: VecDeque<(KadRequestMsg, Option)>, + pending_messages: VecDeque<(KadRequestMsg, QueryId)>, /// List of active inbound substreams with the state they are in. inbound_substreams: SelectAll, - /// Until when to keep the connection alive. - keep_alive: KeepAlive, - /// The connected endpoint of the connection that the handler /// is associated with. endpoint: ConnectedPoint, @@ -90,47 +82,19 @@ pub struct Handler { remote_peer_id: PeerId, /// The current state of protocol confirmation. - protocol_status: ProtocolStatus, + protocol_status: Option, remote_supported_protocols: SupportedProtocols, - - /// The ID of this connection. - connection_id: ConnectionId, } /// The states of protocol confirmation that a connection /// handler transitions through. -#[derive(Copy, Clone)] -enum ProtocolStatus { - /// It is as yet unknown whether the remote supports the - /// configured protocol name. - Unknown, - /// The configured protocol name has been confirmed by the remote - /// but has not yet been reported to the `Kademlia` behaviour. - Confirmed, - /// The configured protocol name(s) are not or no longer supported by the remote. - NotSupported, - /// The configured protocol has been confirmed by the remote - /// and the confirmation reported to the `Kademlia` behaviour. - Reported, -} - -/// State of an active outbound substream. -enum OutboundSubstreamState { - /// Waiting to send a message to the remote. 
- PendingSend(KadOutStreamSink, KadRequestMsg, Option), - /// Waiting to flush the substream so that the data arrives to the remote. - PendingFlush(KadOutStreamSink, Option), - /// Waiting for an answer back from the remote. - // TODO: add timeout - WaitingAnswer(KadOutStreamSink, QueryId), - /// An error happened on the substream and we should report the error to the user. - ReportError(HandlerQueryErr, QueryId), - /// The substream is being closed. - Closing(KadOutStreamSink), - /// The substream is complete and will not perform any more work. - Done, - Poisoned, +#[derive(Debug, Copy, Clone, PartialEq)] +struct ProtocolStatus { + /// Whether the remote node supports one of our kademlia protocols. + supported: bool, + /// Whether we reported the state to the behaviour. + reported: bool, } /// State of an active inbound substream. @@ -242,7 +206,7 @@ pub enum HandlerEvent { /// this key. GetProvidersReq { /// The key for which providers are requested. - key: record_priv::Key, + key: record::Key, /// Identifier of the request. Needs to be passed back when answering. request_id: RequestId, }, @@ -268,7 +232,7 @@ pub enum HandlerEvent { /// The peer announced itself as a provider of a key. AddProvider { /// The key for which the peer is a provider of the associated value. - key: record_priv::Key, + key: record::Key, /// The peer that is the provider of the value for `key`. provider: KadPeer, }, @@ -276,7 +240,7 @@ pub enum HandlerEvent { /// Request to get a value from the dht records GetRecord { /// Key for which we should look in the dht - key: record_priv::Key, + key: record::Key, /// Identifier of the request. Needs to be passed back when answering. request_id: RequestId, }, @@ -301,7 +265,7 @@ pub enum HandlerEvent { /// Response to a request to store a record. PutRecordRes { /// The key of the stored record. - key: record_priv::Key, + key: record::Key, /// The value of the stored record. value: Vec, /// The user data passed to the `PutValue`. 
@@ -312,8 +276,6 @@ pub enum HandlerEvent { /// Error that can happen when requesting an RPC query. #[derive(Debug)] pub enum HandlerQueryErr { - /// Error while trying to perform the query. - Upgrade(StreamUpgradeError), /// Received an answer that doesn't correspond to the request. UnexpectedMessage, /// I/O error in the substream. @@ -323,9 +285,6 @@ pub enum HandlerQueryErr { impl fmt::Display for HandlerQueryErr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - HandlerQueryErr::Upgrade(err) => { - write!(f, "Error while performing Kademlia query: {err}") - } HandlerQueryErr::UnexpectedMessage => { write!( f, @@ -342,19 +301,12 @@ impl fmt::Display for HandlerQueryErr { impl error::Error for HandlerQueryErr { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { - HandlerQueryErr::Upgrade(err) => Some(err), HandlerQueryErr::UnexpectedMessage => None, HandlerQueryErr::Io(err) => Some(err), } } } -impl From> for HandlerQueryErr { - fn from(err: StreamUpgradeError) -> Self { - HandlerQueryErr::Upgrade(err) - } -} - /// Event to send to the handler. #[derive(Debug)] pub enum HandlerIn { @@ -375,7 +327,7 @@ pub enum HandlerIn { FindNodeReq { /// Identifier of the node. key: Vec, - /// Custom user data. Passed back in the out event when the results arrive. + /// ID of the query that generated this request. query_id: QueryId, }, @@ -393,8 +345,8 @@ pub enum HandlerIn { /// this key. GetProvidersReq { /// Identifier being searched. - key: record_priv::Key, - /// Custom user data. Passed back in the out event when the results arrive. + key: record::Key, + /// ID of the query that generated this request. query_id: QueryId, }, @@ -416,16 +368,18 @@ pub enum HandlerIn { /// succeeded. AddProvider { /// Key for which we should add providers. - key: record_priv::Key, + key: record::Key, /// Known provider for this key. provider: KadPeer, + /// ID of the query that generated this request. 
+ query_id: QueryId, }, /// Request to retrieve a record from the DHT. GetRecord { /// The key of the record. - key: record_priv::Key, - /// Custom data. Passed back in the out event when the results arrive. + key: record::Key, + /// ID of the query that generated this request. query_id: QueryId, }, @@ -442,14 +396,14 @@ pub enum HandlerIn { /// Put a value into the dht records. PutRecord { record: Record, - /// Custom data. Passed back in the out event when the results arrive. + /// ID of the query that generated this request. query_id: QueryId, }, /// Response to a `PutRecord`. PutRecordRes { /// Key of the value that was put. - key: record_priv::Key, + key: record::Key, /// Value that was put. value: Vec, /// Identifier of the request that was made by the remote. @@ -472,66 +426,67 @@ struct UniqueConnecId(u64); impl Handler { pub fn new( protocol_config: ProtocolConfig, - idle_timeout: Duration, endpoint: ConnectedPoint, remote_peer_id: PeerId, mode: Mode, - connection_id: ConnectionId, ) -> Self { match &endpoint { ConnectedPoint::Dialer { .. } => { - log::debug!( - "Operating in {mode}-mode on new outbound connection to {remote_peer_id}" + tracing::debug!( + peer=%remote_peer_id, + mode=%mode, + "New outbound connection" ); } ConnectedPoint::Listener { .. 
} => { - log::debug!( - "Operating in {mode}-mode on new inbound connection to {remote_peer_id}" + tracing::debug!( + peer=%remote_peer_id, + mode=%mode, + "New inbound connection" ); } } - let keep_alive = KeepAlive::Until(Instant::now() + idle_timeout); - Handler { protocol_config, mode, - idle_timeout, endpoint, remote_peer_id, next_connec_unique_id: UniqueConnecId(0), inbound_substreams: Default::default(), - outbound_substreams: Default::default(), - num_requested_outbound_streams: 0, + outbound_substreams: futures_bounded::FuturesTupleSet::new( + Duration::from_secs(10), + MAX_NUM_STREAMS, + ), + pending_streams: Default::default(), pending_messages: Default::default(), - keep_alive, - protocol_status: ProtocolStatus::Unknown, + protocol_status: None, remote_supported_protocols: Default::default(), - connection_id, } } fn on_fully_negotiated_outbound( &mut self, - FullyNegotiatedOutbound { protocol, info: () }: FullyNegotiatedOutbound< + FullyNegotiatedOutbound { + protocol: stream, + info: (), + }: FullyNegotiatedOutbound< ::OutboundProtocol, ::OutboundOpenInfo, >, ) { - if let Some((msg, query_id)) = self.pending_messages.pop_front() { - self.outbound_substreams - .push(OutboundSubstreamState::PendingSend(protocol, msg, query_id)); - } else { - debug_assert!(false, "Requested outbound stream without message") + if let Some(sender) = self.pending_streams.pop_front() { + let _ = sender.send(Ok(stream)); } - self.num_requested_outbound_streams -= 1; - - if let ProtocolStatus::Unknown = self.protocol_status { + if self.protocol_status.is_none() { // Upon the first successfully negotiated substream, we know that the // remote is configured with the same protocol name and we want // the behaviour to add this peer to the routing table, if possible. 
- self.protocol_status = ProtocolStatus::Confirmed; + self.protocol_status = Some(ProtocolStatus { + supported: true, + reported: false, + }); } } @@ -549,14 +504,17 @@ impl Handler { future::Either::Right(p) => void::unreachable(p), }; - if let ProtocolStatus::Unknown = self.protocol_status { + if self.protocol_status.is_none() { // Upon the first successfully negotiated substream, we know that the // remote is configured with the same protocol name and we want // the behaviour to add this peer to the routing table, if possible. - self.protocol_status = ProtocolStatus::Confirmed; + self.protocol_status = Some(ProtocolStatus { + supported: true, + reported: false, + }); } - if self.inbound_substreams.len() == MAX_NUM_SUBSTREAMS { + if self.inbound_substreams.len() == MAX_NUM_STREAMS { if let Some(s) = self.inbound_substreams.iter_mut().find(|s| { matches!( s, @@ -565,16 +523,16 @@ impl Handler { ) }) { *s = InboundSubstreamState::Cancelled; - log::debug!( - "New inbound substream to {:?} exceeds inbound substream limit. \ - Removed older substream waiting to be reused.", - self.remote_peer_id, + tracing::debug!( + peer=?self.remote_peer_id, + "New inbound substream to peer exceeds inbound substream limit. \ + Removed older substream waiting to be reused." ) } else { - log::warn!( - "New inbound substream to {:?} exceeds inbound substream limit. \ - No older substream waiting to be reused. Dropping new substream.", - self.remote_peer_id, + tracing::warn!( + peer=?self.remote_peer_id, + "New inbound substream to peer exceeds inbound substream limit. \ + No older substream waiting to be reused. Dropping new substream." ); return; } @@ -590,31 +548,52 @@ impl Handler { }); } - fn on_dial_upgrade_error( - &mut self, - DialUpgradeError { - info: (), error, .. 
- }: DialUpgradeError< - ::OutboundOpenInfo, - ::OutboundProtocol, - >, - ) { - // TODO: cache the fact that the remote doesn't support kademlia at all, so that we don't - // continue trying + /// Takes the given [`KadRequestMsg`] and composes it into an outbound request-response protocol handshake using a [`oneshot::channel`]. + fn queue_new_stream(&mut self, id: QueryId, msg: KadRequestMsg) { + let (sender, receiver) = oneshot::channel(); + + self.pending_streams.push_back(sender); + let result = self.outbound_substreams.try_push( + async move { + let mut stream = receiver + .await + .map_err(|_| io::Error::from(io::ErrorKind::BrokenPipe))? + .map_err(|e| match e { + StreamUpgradeError::Timeout => io::ErrorKind::TimedOut.into(), + StreamUpgradeError::Apply(e) => e, + StreamUpgradeError::NegotiationFailed => io::Error::new( + io::ErrorKind::ConnectionRefused, + "protocol not supported", + ), + StreamUpgradeError::Io(e) => e, + })?; + + let has_answer = !matches!(msg, KadRequestMsg::AddProvider { .. }); + + stream.send(msg).await?; + stream.close().await?; + + if !has_answer { + return Ok(None); + } - if let Some((_, Some(query_id))) = self.pending_messages.pop_front() { - self.outbound_substreams - .push(OutboundSubstreamState::ReportError(error.into(), query_id)); - } + let msg = stream.next().await.ok_or(io::ErrorKind::UnexpectedEof)??; + + Ok(Some(msg)) + }, + id, + ); - self.num_requested_outbound_streams -= 1; + debug_assert!( + result.is_ok(), + "Expected to not create more streams than allowed" + ); } } impl ConnectionHandler for Handler { type FromBehaviour = HandlerIn; type ToBehaviour = HandlerEvent; - type Error = io::Error; // TODO: better error type? 
type InboundProtocol = Either; type OutboundProtocol = ProtocolConfig; type OutboundOpenInfo = (); @@ -645,7 +624,7 @@ impl ConnectionHandler for Handler { } HandlerIn::FindNodeReq { key, query_id } => { let msg = KadRequestMsg::FindNode { key }; - self.pending_messages.push_back((msg, Some(query_id))); + self.pending_messages.push_back((msg, query_id)); } HandlerIn::FindNodeRes { closer_peers, @@ -653,7 +632,7 @@ impl ConnectionHandler for Handler { } => self.answer_pending_request(request_id, KadResponseMsg::FindNode { closer_peers }), HandlerIn::GetProvidersReq { key, query_id } => { let msg = KadRequestMsg::GetProviders { key }; - self.pending_messages.push_back((msg, Some(query_id))); + self.pending_messages.push_back((msg, query_id)); } HandlerIn::GetProvidersRes { closer_peers, @@ -666,17 +645,21 @@ impl ConnectionHandler for Handler { provider_peers, }, ), - HandlerIn::AddProvider { key, provider } => { + HandlerIn::AddProvider { + key, + provider, + query_id, + } => { let msg = KadRequestMsg::AddProvider { key, provider }; - self.pending_messages.push_back((msg, None)); + self.pending_messages.push_back((msg, query_id)); } HandlerIn::GetRecord { key, query_id } => { let msg = KadRequestMsg::GetValue { key }; - self.pending_messages.push_back((msg, Some(query_id))); + self.pending_messages.push_back((msg, query_id)); } HandlerIn::PutRecord { record, query_id } => { let msg = KadRequestMsg::PutValue { record }; - self.pending_messages.push_back((msg, Some(query_id))); + self.pending_messages.push_back((msg, query_id)); } HandlerIn::GetRecordRes { record, @@ -703,12 +686,18 @@ impl ConnectionHandler for Handler { match &self.endpoint { ConnectedPoint::Dialer { .. } => { - log::debug!( - "Now operating in {new_mode}-mode on outbound connection with {peer}" + tracing::debug!( + %peer, + mode=%new_mode, + "Changed mode on outbound connection" ) } ConnectedPoint::Listener { local_addr, .. 
} => { - log::debug!("Now operating in {new_mode}-mode on inbound connection with {peer} assuming that one of our external addresses routes to {local_addr}") + tracing::debug!( + %peer, + mode=%new_mode, + local_address=%local_addr, + "Changed mode on inbound connection assuming that one of our external addresses routes to the local address") } } @@ -717,60 +706,75 @@ impl ConnectionHandler for Handler { } } - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive - } - + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { - if let ProtocolStatus::Confirmed = self.protocol_status { - self.protocol_status = ProtocolStatus::Reported; - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - HandlerEvent::ProtocolConfirmed { - endpoint: self.endpoint.clone(), - }, - )); - } + loop { + match &mut self.protocol_status { + Some(status) if !status.reported => { + status.reported = true; + let event = if status.supported { + HandlerEvent::ProtocolConfirmed { + endpoint: self.endpoint.clone(), + } + } else { + HandlerEvent::ProtocolNotSupported { + endpoint: self.endpoint.clone(), + } + }; - if let Poll::Ready(Some(event)) = self.outbound_substreams.poll_next_unpin(cx) { - return Poll::Ready(event); - } + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); + } + _ => {} + } - if let Poll::Ready(Some(event)) = self.inbound_substreams.poll_next_unpin(cx) { - return Poll::Ready(event); - } + match self.outbound_substreams.poll_unpin(cx) { + Poll::Ready((Ok(Ok(Some(response))), query_id)) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + process_kad_response(response, query_id), + )) + } + Poll::Ready((Ok(Ok(None)), _)) => { + continue; + } + Poll::Ready((Ok(Err(e)), query_id)) => { + return 
Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::QueryError { + error: HandlerQueryErr::Io(e), + query_id, + }, + )) + } + Poll::Ready((Err(_timeout), query_id)) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::QueryError { + error: HandlerQueryErr::Io(io::ErrorKind::TimedOut.into()), + query_id, + }, + )) + } + Poll::Pending => {} + } - let num_in_progress_outbound_substreams = - self.outbound_substreams.len() + self.num_requested_outbound_streams; - if num_in_progress_outbound_substreams < MAX_NUM_SUBSTREAMS - && self.num_requested_outbound_streams < self.pending_messages.len() - { - self.num_requested_outbound_streams += 1; - return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(self.protocol_config.clone(), ()), - }); - } + if let Poll::Ready(Some(event)) = self.inbound_substreams.poll_next_unpin(cx) { + return Poll::Ready(event); + } - let no_streams = self.outbound_substreams.is_empty() && self.inbound_substreams.is_empty(); - self.keep_alive = match (no_streams, self.keep_alive) { - // No open streams. Preserve the existing idle timeout. - (true, k @ KeepAlive::Until(_)) => k, - // No open streams. Set idle timeout. - (true, _) => KeepAlive::Until(Instant::now() + self.idle_timeout), - // Keep alive for open streams. 
- (false, _) => KeepAlive::Yes, - }; + if self.outbound_substreams.len() < MAX_NUM_STREAMS { + if let Some((msg, id)) = self.pending_messages.pop_front() { + self.queue_new_stream(id, msg); + return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(self.protocol_config.clone(), ()), + }); + } + } - Poll::Pending + return Poll::Pending; + } } fn on_connection_event( @@ -789,12 +793,11 @@ impl ConnectionHandler for Handler { ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => { self.on_fully_negotiated_inbound(fully_negotiated_inbound) } - ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { - self.on_dial_upgrade_error(dial_upgrade_error) + ConnectionEvent::DialUpgradeError(ev) => { + if let Some(sender) = self.pending_streams.pop_front() { + let _ = sender.send(Err(ev.error)); + } } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::ListenUpgradeError(_) - | ConnectionEvent::LocalProtocolsChange(_) => {} ConnectionEvent::RemoteProtocolsChange(change) => { let dirty = self.remote_supported_protocols.on_protocols_change(change); @@ -804,34 +807,50 @@ impl ConnectionHandler for Handler { .iter() .any(|p| self.protocol_config.protocol_names().contains(p)); - match (remote_supports_our_kademlia_protocols, self.protocol_status) { - (true, ProtocolStatus::Confirmed | ProtocolStatus::Reported) => {} - (true, _) => { - log::debug!( - "Remote {} now supports our kademlia protocol on connection {}", - self.remote_peer_id, - self.connection_id, - ); - - self.protocol_status = ProtocolStatus::Confirmed; - } - (false, ProtocolStatus::Confirmed | ProtocolStatus::Reported) => { - log::debug!( - "Remote {} no longer supports our kademlia protocol on connection {}", - self.remote_peer_id, - self.connection_id, - ); - - self.protocol_status = ProtocolStatus::NotSupported; - } - (false, _) => {} - } + self.protocol_status = Some(compute_new_protocol_status( + remote_supports_our_kademlia_protocols, + 
self.protocol_status, + )) } } + _ => {} } } } +fn compute_new_protocol_status( + now_supported: bool, + current_status: Option, +) -> ProtocolStatus { + let current_status = match current_status { + None => { + return ProtocolStatus { + supported: now_supported, + reported: false, + } + } + Some(current) => current, + }; + + if now_supported == current_status.supported { + return ProtocolStatus { + supported: now_supported, + reported: true, + }; + } + + if now_supported { + tracing::debug!("Remote now supports our kademlia protocol"); + } else { + tracing::debug!("Remote no longer supports our kademlia protocol"); + } + + ProtocolStatus { + supported: now_supported, + reported: false, + } +} + impl Handler { fn answer_pending_request(&mut self, request_id: RequestId, mut msg: KadResponseMsg) { for state in self.inbound_substreams.iter_mut() { @@ -847,140 +866,8 @@ impl Handler { } } -impl futures::Stream for OutboundSubstreamState { - type Item = ConnectionHandlerEvent; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.get_mut(); - - loop { - match std::mem::replace(this, OutboundSubstreamState::Poisoned) { - OutboundSubstreamState::PendingSend(mut substream, msg, query_id) => { - match substream.poll_ready_unpin(cx) { - Poll::Ready(Ok(())) => match substream.start_send_unpin(msg) { - Ok(()) => { - *this = OutboundSubstreamState::PendingFlush(substream, query_id); - } - Err(error) => { - *this = OutboundSubstreamState::Done; - let event = query_id.map(|query_id| { - ConnectionHandlerEvent::NotifyBehaviour( - HandlerEvent::QueryError { - error: HandlerQueryErr::Io(error), - query_id, - }, - ) - }); - - return Poll::Ready(event); - } - }, - Poll::Pending => { - *this = OutboundSubstreamState::PendingSend(substream, msg, query_id); - return Poll::Pending; - } - Poll::Ready(Err(error)) => { - *this = OutboundSubstreamState::Done; - let event = query_id.map(|query_id| { - 
ConnectionHandlerEvent::NotifyBehaviour(HandlerEvent::QueryError { - error: HandlerQueryErr::Io(error), - query_id, - }) - }); - - return Poll::Ready(event); - } - } - } - OutboundSubstreamState::PendingFlush(mut substream, query_id) => { - match substream.poll_flush_unpin(cx) { - Poll::Ready(Ok(())) => { - if let Some(query_id) = query_id { - *this = OutboundSubstreamState::WaitingAnswer(substream, query_id); - } else { - *this = OutboundSubstreamState::Closing(substream); - } - } - Poll::Pending => { - *this = OutboundSubstreamState::PendingFlush(substream, query_id); - return Poll::Pending; - } - Poll::Ready(Err(error)) => { - *this = OutboundSubstreamState::Done; - let event = query_id.map(|query_id| { - ConnectionHandlerEvent::NotifyBehaviour(HandlerEvent::QueryError { - error: HandlerQueryErr::Io(error), - query_id, - }) - }); - - return Poll::Ready(event); - } - } - } - OutboundSubstreamState::WaitingAnswer(mut substream, query_id) => { - match substream.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(msg))) => { - *this = OutboundSubstreamState::Closing(substream); - let event = process_kad_response(msg, query_id); - - return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( - event, - ))); - } - Poll::Pending => { - *this = OutboundSubstreamState::WaitingAnswer(substream, query_id); - return Poll::Pending; - } - Poll::Ready(Some(Err(error))) => { - *this = OutboundSubstreamState::Done; - let event = HandlerEvent::QueryError { - error: HandlerQueryErr::Io(error), - query_id, - }; - - return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( - event, - ))); - } - Poll::Ready(None) => { - *this = OutboundSubstreamState::Done; - let event = HandlerEvent::QueryError { - error: HandlerQueryErr::Io(io::ErrorKind::UnexpectedEof.into()), - query_id, - }; - - return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( - event, - ))); - } - } - } - OutboundSubstreamState::ReportError(error, query_id) => { - *this = OutboundSubstreamState::Done; - let 
event = HandlerEvent::QueryError { error, query_id }; - - return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour(event))); - } - OutboundSubstreamState::Closing(mut stream) => match stream.poll_close_unpin(cx) { - Poll::Ready(Ok(())) | Poll::Ready(Err(_)) => return Poll::Ready(None), - Poll::Pending => { - *this = OutboundSubstreamState::Closing(stream); - return Poll::Pending; - } - }, - OutboundSubstreamState::Done => { - *this = OutboundSubstreamState::Done; - return Poll::Ready(None); - } - OutboundSubstreamState::Poisoned => unreachable!(), - } - } - } -} - impl futures::Stream for InboundSubstreamState { - type Item = ConnectionHandlerEvent; + type Item = ConnectionHandlerEvent; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -998,7 +885,7 @@ impl futures::Stream for InboundSubstreamState { mut substream, } => match substream.poll_next_unpin(cx) { Poll::Ready(Some(Ok(KadRequestMsg::Ping))) => { - log::warn!("Kademlia PING messages are unsupported"); + tracing::warn!("Kademlia PING messages are unsupported"); *this = InboundSubstreamState::Closing(substream); } @@ -1072,7 +959,7 @@ impl futures::Stream for InboundSubstreamState { return Poll::Ready(None); } Poll::Ready(Some(Err(e))) => { - trace!("Inbound substream error: {:?}", e); + tracing::trace!("Inbound substream error: {:?}", e); return Poll::Ready(None); } }, @@ -1168,3 +1055,48 @@ fn process_kad_response(event: KadResponseMsg, query_id: QueryId) -> HandlerEven }, } } + +#[cfg(test)] +mod tests { + use super::*; + use quickcheck::{Arbitrary, Gen}; + use tracing_subscriber::EnvFilter; + + impl Arbitrary for ProtocolStatus { + fn arbitrary(g: &mut Gen) -> Self { + Self { + supported: bool::arbitrary(g), + reported: bool::arbitrary(g), + } + } + } + + #[test] + fn compute_next_protocol_status_test() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + fn prop(now_supported: bool, current: 
Option) { + let new = compute_new_protocol_status(now_supported, current); + + match current { + None => { + assert!(!new.reported); + assert_eq!(new.supported, now_supported); + } + Some(current) => { + if current.supported == now_supported { + assert!(new.reported); + } else { + assert!(!new.reported); + } + + assert_eq!(new.supported, now_supported); + } + } + } + + quickcheck::quickcheck(prop as fn(_, _)) + } +} diff --git a/protocols/kad/src/jobs.rs b/protocols/kad/src/jobs.rs index af07076018eb..f1631ed6ad1a 100644 --- a/protocols/kad/src/jobs.rs +++ b/protocols/kad/src/jobs.rs @@ -61,7 +61,7 @@ //! > to the size of all stored records. As a job runs, the records are moved //! > out of the job to the consumer, where they can be dropped after being sent. -use crate::record_priv::{self, store::RecordStore, ProviderRecord, Record}; +use crate::record::{self, store::RecordStore, ProviderRecord, Record}; use futures::prelude::*; use futures_timer::Delay; use instant::Instant; @@ -87,6 +87,7 @@ struct PeriodicJob { } impl PeriodicJob { + #[cfg(test)] fn is_running(&self) -> bool { match self.state { PeriodicJobState::Running(..) => true, @@ -96,6 +97,7 @@ impl PeriodicJob { /// Cuts short the remaining delay, if the job is currently waiting /// for the delay to expire. + #[cfg(test)] fn asap(&mut self) { if let PeriodicJobState::Waiting(delay, deadline) = &mut self.state { let new_deadline = Instant::now().checked_sub(Duration::from_secs(1)).unwrap(); @@ -132,7 +134,7 @@ pub(crate) struct PutRecordJob { next_publish: Option, publish_interval: Option, record_ttl: Option, - skipped: HashSet, + skipped: HashSet, inner: PeriodicJob>, } @@ -164,11 +166,12 @@ impl PutRecordJob { /// Adds the key of a record that is ignored on the current or /// next run of the job. - pub(crate) fn skip(&mut self, key: record_priv::Key) { + pub(crate) fn skip(&mut self, key: record::Key) { self.skipped.insert(key); } /// Checks whether the job is currently running. 
+ #[cfg(test)] pub(crate) fn is_running(&self) -> bool { self.inner.is_running() } @@ -177,6 +180,7 @@ impl PutRecordJob { /// for the delay to expire. /// /// The job is guaranteed to run on the next invocation of `poll`. + #[cfg(test)] pub(crate) fn asap(&mut self, publish: bool) { if publish { self.next_publish = Some(Instant::now().checked_sub(Duration::from_secs(1)).unwrap()) @@ -273,6 +277,7 @@ impl AddProviderJob { } /// Checks whether the job is currently running. + #[cfg(test)] pub(crate) fn is_running(&self) -> bool { self.inner.is_running() } @@ -281,6 +286,7 @@ impl AddProviderJob { /// for the delay to expire. /// /// The job is guaranteed to run on the next invocation of `poll`. + #[cfg(test)] pub(crate) fn asap(&mut self) { self.inner.asap() } @@ -330,7 +336,7 @@ impl AddProviderJob { #[cfg(test)] mod tests { use super::*; - use crate::record_priv::store::MemoryStore; + use crate::record::store::MemoryStore; use futures::{executor::block_on, future::poll_fn}; use quickcheck::*; use rand::Rng; diff --git a/protocols/kad/src/kbucket.rs b/protocols/kad/src/kbucket.rs index 08dd3ed45607..a0d272c31f11 100644 --- a/protocols/kad/src/kbucket.rs +++ b/protocols/kad/src/kbucket.rs @@ -72,6 +72,7 @@ mod entry; #[allow(clippy::assign_op_pattern)] mod key; +pub use bucket::NodeStatus; pub use entry::*; use arrayvec::{self, ArrayVec}; @@ -171,17 +172,16 @@ where /// Returns an `Entry` for the given key, representing the state of the entry /// in the routing table. - pub(crate) fn entry<'a>(&'a mut self, key: &'a TKey) -> Entry<'a, TKey, TVal> { - let index = BucketIndex::new(&self.local_key.as_ref().distance(key)); - if let Some(i) = index { - let bucket = &mut self.buckets[i.get()]; - if let Some(applied) = bucket.apply_pending() { - self.applied_pending.push_back(applied) - } - Entry::new(bucket, key) - } else { - Entry::SelfEntry + /// + /// Returns `None` in case the key points to the local node. 
+ pub(crate) fn entry<'a>(&'a mut self, key: &'a TKey) -> Option> { + let index = BucketIndex::new(&self.local_key.as_ref().distance(key))?; + + let bucket = &mut self.buckets[index.get()]; + if let Some(applied) = bucket.apply_pending() { + self.applied_pending.push_back(applied) } + Some(Entry::new(bucket, key)) } /// Returns an iterator over all buckets. @@ -626,7 +626,7 @@ mod tests { let other_id = Key::from(PeerId::random()); let mut table = KBucketsTable::<_, ()>::new(local_key, Duration::from_secs(5)); - if let Entry::Absent(entry) = table.entry(&other_id) { + if let Some(Entry::Absent(entry)) = table.entry(&other_id) { match entry.insert((), NodeStatus::Connected) { InsertResult::Inserted => (), _ => panic!(), @@ -644,10 +644,8 @@ mod tests { fn entry_self() { let local_key = Key::from(PeerId::random()); let mut table = KBucketsTable::<_, ()>::new(local_key.clone(), Duration::from_secs(5)); - match table.entry(&local_key) { - Entry::SelfEntry => (), - _ => panic!(), - } + + assert!(table.entry(&local_key).is_none()) } #[test] @@ -660,7 +658,7 @@ mod tests { break; } let key = Key::from(PeerId::random()); - if let Entry::Absent(e) = table.entry(&key) { + if let Some(Entry::Absent(e)) = table.entry(&key) { match e.insert((), NodeStatus::Connected) { InsertResult::Inserted => count += 1, _ => continue, @@ -693,10 +691,10 @@ mod tests { let full_bucket_index; loop { let key = Key::from(PeerId::random()); - if let Entry::Absent(e) = table.entry(&key) { + if let Some(Entry::Absent(e)) = table.entry(&key) { match e.insert((), NodeStatus::Disconnected) { InsertResult::Full => { - if let Entry::Absent(e) = table.entry(&key) { + if let Some(Entry::Absent(e)) = table.entry(&key) { match e.insert((), NodeStatus::Connected) { InsertResult::Pending { disconnected } => { expected_applied = AppliedPending { @@ -731,12 +729,12 @@ mod tests { full_bucket.pending_mut().unwrap().set_ready_at(elapsed); match table.entry(&expected_applied.inserted.key) { - Entry::Present(_, 
NodeStatus::Connected) => {} + Some(Entry::Present(_, NodeStatus::Connected)) => {} x => panic!("Unexpected entry: {x:?}"), } match table.entry(&expected_applied.evicted.as_ref().unwrap().key) { - Entry::Absent(_) => {} + Some(Entry::Absent(_)) => {} x => panic!("Unexpected entry: {x:?}"), } diff --git a/protocols/kad/src/kbucket/bucket.rs b/protocols/kad/src/kbucket/bucket.rs index bd0c5903a4ad..d70161919e14 100644 --- a/protocols/kad/src/kbucket/bucket.rs +++ b/protocols/kad/src/kbucket/bucket.rs @@ -54,10 +54,6 @@ pub enum NodeStatus { } impl PendingNode { - pub(crate) fn key(&self) -> &TKey { - &self.node.key - } - pub(crate) fn status(&self) -> NodeStatus { self.status } @@ -70,6 +66,7 @@ impl PendingNode { Instant::now() >= self.replace } + #[cfg(test)] pub(crate) fn set_ready_at(&mut self, t: Instant) { self.replace = t; } @@ -191,11 +188,6 @@ where .filter(|p| p.node.key.as_ref() == key.as_ref()) } - /// Returns a reference to a node in the bucket. - pub(crate) fn get(&self, key: &TKey) -> Option<&Node> { - self.position(key).map(|p| &self.nodes[p.0]) - } - /// Returns an iterator over the nodes in the bucket, together with their status. pub(crate) fn iter(&self) -> impl Iterator, NodeStatus)> { self.nodes @@ -398,22 +390,19 @@ where } } - /// Checks whether the given position refers to a connected node. - pub(crate) fn is_connected(&self, pos: Position) -> bool { - self.status(pos) == NodeStatus::Connected - } - /// Gets the number of entries currently in the bucket. pub(crate) fn num_entries(&self) -> usize { self.nodes.len() } /// Gets the number of entries in the bucket that are considered connected. + #[cfg(test)] pub(crate) fn num_connected(&self) -> usize { self.first_connected_pos.map_or(0, |i| self.nodes.len() - i) } /// Gets the number of entries in the bucket that are considered disconnected. 
+ #[cfg(test)] pub(crate) fn num_disconnected(&self) -> usize { self.nodes.len() - self.num_connected() } diff --git a/protocols/kad/src/kbucket/entry.rs b/protocols/kad/src/kbucket/entry.rs index 0794ace4202d..02c90fdfcc78 100644 --- a/protocols/kad/src/kbucket/entry.rs +++ b/protocols/kad/src/kbucket/entry.rs @@ -81,8 +81,6 @@ pub(crate) enum Entry<'a, TPeerId, TVal> { Pending(PendingEntry<'a, TPeerId, TVal>, NodeStatus), /// The entry is absent and may be inserted. Absent(AbsentEntry<'a, TPeerId, TVal>), - /// The entry represents the local node. - SelfEntry, } /// The internal representation of the different states of an `Entry`, @@ -135,20 +133,6 @@ where } } - /// Returns the key of the entry. - /// - /// Returns `None` if the `Key` used to construct this `Entry` is not a valid - /// key for an entry in a bucket, which is the case for the `local_key` of - /// the `KBucketsTable` referring to the local node. - pub(crate) fn key(&self) -> Option<&TKey> { - match self { - Entry::Present(entry, _) => Some(entry.key()), - Entry::Pending(entry, _) => Some(entry.key()), - Entry::Absent(entry) => Some(entry.key()), - Entry::SelfEntry => None, - } - } - /// Returns the value associated with the entry. /// /// Returns `None` if the entry is absent from any bucket or refers to the @@ -158,7 +142,6 @@ where Entry::Present(entry, _) => Some(entry.value()), Entry::Pending(entry, _) => Some(entry.value()), Entry::Absent(_) => None, - Entry::SelfEntry => None, } } } @@ -175,11 +158,6 @@ where PresentEntry(EntryRef { bucket, key }) } - /// Returns the key of the entry. - pub(crate) fn key(&self) -> &TKey { - self.0.key - } - /// Returns the value associated with the key. pub(crate) fn value(&mut self) -> &mut TVal { &mut self @@ -218,11 +196,6 @@ where PendingEntry(EntryRef { bucket, key }) } - /// Returns the key of the entry. - pub(crate) fn key(&self) -> &TKey { - self.0.key - } - /// Returns the value associated with the key. 
pub(crate) fn value(&mut self) -> &mut TVal { self.0 @@ -262,11 +235,6 @@ where AbsentEntry(EntryRef { bucket, key }) } - /// Returns the key of the entry. - pub(crate) fn key(&self) -> &TKey { - self.0.key - } - /// Attempts to insert the entry into a bucket. pub(crate) fn insert(self, value: TVal, status: NodeStatus) -> InsertResult { self.0.bucket.insert( diff --git a/protocols/kad/src/kbucket/key.rs b/protocols/kad/src/kbucket/key.rs index 15cedaaaa325..bc5d6a537509 100644 --- a/protocols/kad/src/kbucket/key.rs +++ b/protocols/kad/src/kbucket/key.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::record_priv; +use crate::record; use libp2p_core::multihash::Multihash; use libp2p_identity::PeerId; use sha2::digest::generic_array::{typenum::U32, GenericArray}; @@ -77,6 +77,11 @@ impl Key { self.bytes.distance(other) } + /// Exposing the hashed bytes. + pub fn hashed_bytes(&self) -> &[u8] { + &self.bytes.0 + } + /// Returns the uniquely determined key with the given distance to `self`. /// /// This implements the following equivalence: @@ -113,8 +118,8 @@ impl From> for Key> { } } -impl From for Key { - fn from(k: record_priv::Key) -> Self { +impl From for Key { + fn from(k: record::Key) -> Self { Key::new(k) } } diff --git a/protocols/kad/src/lib.rs b/protocols/kad/src/lib.rs index dd9f7f56f300..519b67f9d7a9 100644 --- a/protocols/kad/src/lib.rs +++ b/protocols/kad/src/lib.rs @@ -33,19 +33,8 @@ //! existing nodes in the kademlia network cannot obtain the listen addresses //! of nodes querying them, and thus will not be able to add them to their routing table. 
-// TODO: we allow dead_code for now because this library contains a lot of unused code that will -// be useful later for record store -#![allow(dead_code)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod record_priv; -#[deprecated( - note = "The `record` module will be made private in the future and should not be depended on." -)] -pub mod record { - pub use super::record_priv::*; -} - mod addresses; mod behaviour; mod handler; @@ -53,6 +42,7 @@ mod jobs; mod kbucket; mod protocol; mod query; +mod record; mod proto { #![allow(unreachable_pub)] @@ -75,10 +65,12 @@ pub use behaviour::{ pub use behaviour::{ Behaviour, BucketInserts, Caching, Config, Event, ProgressStep, Quorum, StoreInserts, }; -pub use kbucket::{Distance as KBucketDistance, EntryView, KBucketRef, Key as KBucketKey}; +pub use kbucket::{ + Distance as KBucketDistance, EntryView, KBucketRef, Key as KBucketKey, NodeStatus, +}; pub use protocol::ConnectionType; pub use query::QueryId; -pub use record_priv::{store, Key as RecordKey, ProviderRecord, Record}; +pub use record::{store, Key as RecordKey, ProviderRecord, Record}; use libp2p_swarm::StreamProtocol; use std::num::NonZeroUsize; @@ -114,30 +106,3 @@ pub const PROTOCOL_NAME: StreamProtocol = protocol::DEFAULT_PROTO_NAME; /// Constant shared across tests for the [`Multihash`](libp2p_core::multihash::Multihash) type. #[cfg(test)] const SHA_256_MH: u64 = 0x12; - -#[deprecated(note = "Import the `kad` module instead and refer to this type as `kad::Behaviour`.")] -pub type Kademlia = Behaviour; - -#[deprecated( - note = "Import the `kad` module instead and refer to this type as `kad::BucketInserts`." -)] -pub type KademliaBucketInserts = BucketInserts; - -#[deprecated( - note = "Import the `kad` module instead and refer to this type as `kad::StoreInserts`." 
-)] -pub type KademliaStoreInserts = StoreInserts; - -#[deprecated(note = "Import the `kad` module instead and refer to this type as `kad::Config`.")] -pub type KademliaConfig = Config; - -#[deprecated(note = "Import the `kad` module instead and refer to this type as `kad::Caching`.")] -pub type KademliaCaching = Caching; - -#[deprecated(note = "Import the `kad` module instead and refer to this type as `kad::Event`.")] -pub type KademliaEvent = Event; - -#[deprecated( - note = "Import the `kad` module instead and refer to this type as `kad::ConnectionType`." -)] -pub type KadConnectionType = ConnectionType; diff --git a/protocols/kad/src/protocol.rs b/protocols/kad/src/protocol.rs index e6341ee4f211..5abe2089852a 100644 --- a/protocols/kad/src/protocol.rs +++ b/protocols/kad/src/protocol.rs @@ -27,7 +27,7 @@ //! is used to send messages to remote peers. use crate::proto; -use crate::record_priv::{self, Record}; +use crate::record::{self, Record}; use asynchronous_codec::{Decoder, Encoder, Framed}; use bytes::BytesMut; use futures::prelude::*; @@ -39,6 +39,7 @@ use libp2p_swarm::StreamProtocol; use std::marker::PhantomData; use std::{convert::TryFrom, time::Duration}; use std::{io, iter}; +use tracing::debug; /// The protocol name used for negotiating with multistream-select. 
pub(crate) const DEFAULT_PROTO_NAME: StreamProtocol = StreamProtocol::new("/ipfs/kad/1.0.0"); @@ -103,11 +104,12 @@ impl TryFrom for KadPeer { let mut addrs = Vec::with_capacity(peer.addrs.len()); for addr in peer.addrs.into_iter() { - match Multiaddr::try_from(addr) { - Ok(a) => addrs.push(a), - Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); + match Multiaddr::try_from(addr).map(|addr| addr.with_p2p(node_id)) { + Ok(Ok(a)) => addrs.push(a), + Ok(Err(a)) => { + debug!("Unable to parse multiaddr: {a} is not compatible with {node_id}") } + Err(e) => debug!("Unable to parse multiaddr: {e}"), }; } @@ -142,6 +144,21 @@ pub struct ProtocolConfig { } impl ProtocolConfig { + /// Builds a new `ProtocolConfig` with the given protocol name. + pub fn new(protocol_name: StreamProtocol) -> Self { + ProtocolConfig { + protocol_names: vec![protocol_name], + max_packet_size: DEFAULT_MAX_PACKET_SIZE, + } + } + + /// Returns the default configuration. + #[deprecated(note = "Use `ProtocolConfig::new` instead")] + #[allow(clippy::should_implement_trait)] + pub fn default() -> Self { + Default::default() + } + /// Returns the configured protocol name. pub fn protocol_names(&self) -> &[StreamProtocol] { &self.protocol_names @@ -149,6 +166,7 @@ impl ProtocolConfig { /// Modifies the protocol names used on the wire. Can be used to create incompatibilities /// between networks on purpose. + #[deprecated(note = "Use `ProtocolConfig::new` instead")] pub fn set_protocol_names(&mut self, names: Vec) { self.protocol_names = names; } @@ -160,6 +178,9 @@ impl ProtocolConfig { } impl Default for ProtocolConfig { + /// Returns the default configuration. + /// + /// Deprecated: use `ProtocolConfig::new` instead. 
fn default() -> Self { ProtocolConfig { protocol_names: iter::once(DEFAULT_PROTO_NAME).collect(), @@ -193,9 +214,9 @@ impl Codec { impl, B> Encoder for Codec { type Error = io::Error; - type Item = A; + type Item<'a> = A; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { Ok(self.codec.encode(item.into(), dst)?) } } @@ -260,13 +281,13 @@ pub enum KadRequestMsg { /// this key. GetProviders { /// Identifier being searched. - key: record_priv::Key, + key: record::Key, }, /// Indicates that this list of providers is known for this key. AddProvider { /// Key for which we should add providers. - key: record_priv::Key, + key: record::Key, /// Known provider for this key. provider: KadPeer, }, @@ -274,7 +295,7 @@ pub enum KadRequestMsg { /// Request to get a value from the dht records. GetValue { /// The key we are searching for. - key: record_priv::Key, + key: record::Key, }, /// Request to put a value into the dht records. @@ -312,7 +333,7 @@ pub enum KadResponseMsg { /// Response to a `PutValue`. PutValue { /// The key of the record. - key: record_priv::Key, + key: record::Key, /// Value of the record. 
value: Vec, }, @@ -441,11 +462,11 @@ fn proto_to_req_msg(message: proto::Message) -> Result Ok(KadRequestMsg::PutValue { record }) } proto::MessageType::GET_VALUE => Ok(KadRequestMsg::GetValue { - key: record_priv::Key::from(message.key), + key: record::Key::from(message.key), }), proto::MessageType::FIND_NODE => Ok(KadRequestMsg::FindNode { key: message.key }), proto::MessageType::GET_PROVIDERS => Ok(KadRequestMsg::GetProviders { - key: record_priv::Key::from(message.key), + key: record::Key::from(message.key), }), proto::MessageType::ADD_PROVIDER => { // TODO: for now we don't parse the peer properly, so it is possible that we get @@ -457,7 +478,7 @@ fn proto_to_req_msg(message: proto::Message) -> Result .find_map(|peer| KadPeer::try_from(peer).ok()); if let Some(provider) = provider { - let key = record_priv::Key::from(message.key); + let key = record::Key::from(message.key); Ok(KadRequestMsg::AddProvider { key, provider }) } else { Err(invalid_data("AddProvider message with no valid peer.")) @@ -521,7 +542,7 @@ fn proto_to_resp_msg(message: proto::Message) -> Result { - let key = record_priv::Key::from(message.key); + let key = record::Key::from(message.key); let rec = message .record .ok_or_else(|| invalid_data("received PutValue message with no record"))?; @@ -539,7 +560,7 @@ fn proto_to_resp_msg(message: proto::Message) -> Result Result { - let key = record_priv::Key::from(record.key); + let key = record::Key::from(record.key); let value = record.value; let publisher = if !record.publisher.is_empty() { @@ -596,10 +617,34 @@ where mod tests { use super::*; + #[test] + fn append_p2p() { + let peer_id = PeerId::random(); + let multiaddr = "/ip6/2001:db8::/tcp/1234".parse::().unwrap(); + + let payload = proto::Peer { + id: peer_id.to_bytes(), + addrs: vec![multiaddr.to_vec()], + connection: proto::ConnectionType::CAN_CONNECT, + }; + + let peer = KadPeer::try_from(payload).unwrap(); + + assert_eq!(peer.multiaddrs, vec![multiaddr.with_p2p(peer_id).unwrap()]) + } + 
#[test] fn skip_invalid_multiaddr() { - let valid_multiaddr: Multiaddr = "/ip6/2001:db8::/tcp/1234".parse().unwrap(); - let valid_multiaddr_bytes = valid_multiaddr.to_vec(); + let peer_id = PeerId::random(); + let multiaddr = "/ip6/2001:db8::/tcp/1234".parse::().unwrap(); + + let valid_multiaddr = multiaddr.clone().with_p2p(peer_id).unwrap(); + + let multiaddr_with_incorrect_peer_id = { + let other_peer_id = PeerId::random(); + assert_ne!(peer_id, other_peer_id); + multiaddr.with_p2p(other_peer_id).unwrap() + }; let invalid_multiaddr = { let a = vec![255; 8]; @@ -608,12 +653,16 @@ mod tests { }; let payload = proto::Peer { - id: PeerId::random().to_bytes(), - addrs: vec![valid_multiaddr_bytes, invalid_multiaddr], + id: peer_id.to_bytes(), + addrs: vec![ + valid_multiaddr.to_vec(), + multiaddr_with_incorrect_peer_id.to_vec(), + invalid_multiaddr, + ], connection: proto::ConnectionType::CAN_CONNECT, }; - let peer = KadPeer::try_from(payload).expect("not to fail"); + let peer = KadPeer::try_from(payload).unwrap(); assert_eq!(peer.multiaddrs, vec![valid_multiaddr]) } diff --git a/protocols/kad/src/query.rs b/protocols/kad/src/query.rs index 6cc158619f57..bb240d5864a2 100644 --- a/protocols/kad/src/query.rs +++ b/protocols/kad/src/query.rs @@ -225,6 +225,12 @@ impl QueryPool { #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct QueryId(usize); +impl std::fmt::Display for QueryId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + /// The configuration for queries in a `QueryPool`. #[derive(Debug, Clone)] pub(crate) struct QueryConfig { @@ -326,15 +332,6 @@ impl Query { } } - /// Checks whether the query is currently waiting for a result from `peer`. 
- pub(crate) fn is_waiting(&self, peer: &PeerId) -> bool { - match &self.peer_iter { - QueryPeerIter::Closest(iter) => iter.is_waiting(peer), - QueryPeerIter::ClosestDisjoint(iter) => iter.is_waiting(peer), - QueryPeerIter::Fixed(iter) => iter.is_waiting(peer), - } - } - /// Advances the state of the underlying peer iterator. fn next(&mut self, now: Instant) -> PeersIterState<'_> { let state = match &mut self.peer_iter { diff --git a/protocols/kad/src/query/peers/closest.rs b/protocols/kad/src/query/peers/closest.rs index a9011803e73e..dc913f1bbcae 100644 --- a/protocols/kad/src/query/peers/closest.rs +++ b/protocols/kad/src/query/peers/closest.rs @@ -175,10 +175,14 @@ impl ClosestPeersIter { }, } - let num_closest = self.closest_peers.len(); - let mut progress = false; - // Incorporate the reported closer peers into the iterator. + // + // The iterator makes progress if: + // 1, the iterator did not yet accumulate enough closest peers. + // OR + // 2, any of the new peers is closer to the target than any peer seen so far + // (i.e. is the first entry after being incorporated) + let mut progress = self.closest_peers.len() < self.config.num_results.get(); for peer in closer_peers { let key = peer.into(); let distance = self.target.distance(&key); @@ -187,11 +191,8 @@ impl ClosestPeersIter { state: PeerState::NotContacted, }; self.closest_peers.entry(distance).or_insert(peer); - // The iterator makes progress if the new peer is either closer to the target - // than any peer seen so far (i.e. is the first entry), or the iterator did - // not yet accumulate enough closest peers. - progress = self.closest_peers.keys().next() == Some(&distance) - || num_closest < self.config.num_results.get(); + + progress = self.closest_peers.keys().next() == Some(&distance) || progress; } // Update the iterator state. 
@@ -788,6 +789,7 @@ mod tests { QuickCheck::new().tests(10).quickcheck(prop as fn(_)) } + #[test] fn stalled_at_capacity() { fn prop(mut iter: ClosestPeersIter) { iter.state = State::Stalled; diff --git a/protocols/kad/src/query/peers/closest/disjoint.rs b/protocols/kad/src/query/peers/closest/disjoint.rs index 3906b65b0af5..68721f93d7cd 100644 --- a/protocols/kad/src/query/peers/closest/disjoint.rs +++ b/protocols/kad/src/query/peers/closest/disjoint.rs @@ -31,7 +31,6 @@ use std::{ /// Wraps around a set of [`ClosestPeersIter`], enforcing a disjoint discovery /// path per configured parallelism according to the S/Kademlia paper. pub(crate) struct ClosestDisjointPeersIter { - config: ClosestPeersIterConfig, target: KeyBytes, /// The set of wrapped [`ClosestPeersIter`]. @@ -51,6 +50,7 @@ pub(crate) struct ClosestDisjointPeersIter { impl ClosestDisjointPeersIter { /// Creates a new iterator with a default configuration. + #[cfg(test)] pub(crate) fn new(target: KeyBytes, known_closest_peers: I) -> Self where I: IntoIterator>, @@ -88,7 +88,6 @@ impl ClosestDisjointPeersIter { let iters_len = iters.len(); ClosestDisjointPeersIter { - config, target: target.into(), iters, iter_order: (0..iters_len) @@ -190,10 +189,6 @@ impl ClosestDisjointPeersIter { updated } - pub(crate) fn is_waiting(&self, peer: &PeerId) -> bool { - self.iters.iter().any(|i| i.is_waiting(peer)) - } - pub(crate) fn next(&mut self, now: Instant) -> PeersIterState<'_> { let mut state = None; @@ -411,9 +406,8 @@ impl>> Iterator for ResultIter { .iter_mut() // Find the iterator with the next closest peer. 
.fold(Option::<&mut Peekable<_>>::None, |iter_a, iter_b| { - let iter_a = match iter_a { - Some(iter_a) => iter_a, - None => return Some(iter_b), + let Some(iter_a) = iter_a else { + return Some(iter_b); }; match (iter_a.peek(), iter_b.peek()) { diff --git a/protocols/kad/src/query/peers/fixed.rs b/protocols/kad/src/query/peers/fixed.rs index 1169feee87f4..50a969380a3a 100644 --- a/protocols/kad/src/query/peers/fixed.rs +++ b/protocols/kad/src/query/peers/fixed.rs @@ -115,10 +115,6 @@ impl FixedPeersIter { false } - pub(crate) fn is_waiting(&self, peer: &PeerId) -> bool { - self.peers.get(peer) == Some(&PeerState::Waiting) - } - pub(crate) fn finish(&mut self) { if let State::Waiting { .. } = self.state { self.state = State::Finished diff --git a/protocols/kad/src/record_priv.rs b/protocols/kad/src/record.rs similarity index 100% rename from protocols/kad/src/record_priv.rs rename to protocols/kad/src/record.rs diff --git a/protocols/kad/src/record_priv/store.rs b/protocols/kad/src/record/store.rs similarity index 100% rename from protocols/kad/src/record_priv/store.rs rename to protocols/kad/src/record/store.rs diff --git a/protocols/kad/src/record_priv/store/memory.rs b/protocols/kad/src/record/store/memory.rs similarity index 100% rename from protocols/kad/src/record_priv/store/memory.rs rename to protocols/kad/src/record/store/memory.rs diff --git a/protocols/kad/tests/client_mode.rs b/protocols/kad/tests/client_mode.rs index bc162ff6a010..6aceeb27263e 100644 --- a/protocols/kad/tests/client_mode.rs +++ b/protocols/kad/tests/client_mode.rs @@ -2,51 +2,56 @@ use libp2p_identify as identify; use libp2p_identity as identity; use libp2p_kad::store::MemoryStore; use libp2p_kad::{Behaviour, Config, Event, Mode}; -use libp2p_swarm::Swarm; +use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; +use tracing_subscriber::EnvFilter; +use Event::*; +use MyBehaviourEvent::*; #[async_std::test] async fn server_gets_added_to_routing_table_by_client() { - let 
_ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut client = Swarm::new_ephemeral(MyBehaviour::new); let mut server = Swarm::new_ephemeral(MyBehaviour::new); - server.listen().await; + server.listen().with_memory_addr_external().await; client.connect(&mut server).await; let server_peer_id = *server.local_peer_id(); + async_std::task::spawn(server.loop_on_next()); - match libp2p_swarm_test::drive(&mut client, &mut server).await { - ( - [MyBehaviourEvent::Identify(_), MyBehaviourEvent::Identify(_), MyBehaviourEvent::Kad(Event::RoutingUpdated { peer, .. })], - [MyBehaviourEvent::Identify(_), MyBehaviourEvent::Identify(_)], - ) => { - assert_eq!(peer, server_peer_id) - } - other => panic!("Unexpected events: {other:?}"), - } + let peer = client + .wait(|e| match e { + SwarmEvent::Behaviour(Kad(RoutingUpdated { peer, .. })) => Some(peer), + _ => None, + }) + .await; + + assert_eq!(peer, server_peer_id); } #[async_std::test] async fn two_servers_add_each_other_to_routing_table() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut server1 = Swarm::new_ephemeral(MyBehaviour::new); let mut server2 = Swarm::new_ephemeral(MyBehaviour::new); - server2.listen().await; + server2.listen().with_memory_addr_external().await; server1.connect(&mut server2).await; let server1_peer_id = *server1.local_peer_id(); let server2_peer_id = *server2.local_peer_id(); - use Event::*; - use MyBehaviourEvent::*; - match libp2p_swarm_test::drive(&mut server1, &mut server2).await { ( - [Identify(_), Identify(_), Kad(RoutingUpdated { peer: peer1, .. })], + [Identify(_), Identify(_), Kad(RoutingUpdated { peer: peer1, .. })] + | [Identify(_), Kad(RoutingUpdated { peer: peer1, .. 
}), Identify(_)], [Identify(_), Identify(_)], ) => { assert_eq!(peer1, server2_peer_id); @@ -54,27 +59,26 @@ async fn two_servers_add_each_other_to_routing_table() { other => panic!("Unexpected events: {other:?}"), } - server1.listen().await; + server1.listen().with_memory_addr_external().await; server2.connect(&mut server1).await; - match libp2p_swarm_test::drive(&mut server2, &mut server1).await { - ( - [Identify(_), Kad(RoutingUpdated { peer: peer2, .. }), Identify(_)], - [Identify(_), Identify(_)], - ) - | ( - [Identify(_), Identify(_), Kad(RoutingUpdated { peer: peer2, .. })], - [Identify(_), Identify(_)], - ) => { - assert_eq!(peer2, server1_peer_id); - } - other => panic!("Unexpected events: {other:?}"), - } + async_std::task::spawn(server1.loop_on_next()); + + let peer = server2 + .wait(|e| match e { + SwarmEvent::Behaviour(Kad(RoutingUpdated { peer, .. })) => Some(peer), + _ => None, + }) + .await; + + assert_eq!(peer, server1_peer_id); } #[async_std::test] async fn adding_an_external_addresses_activates_server_mode_on_existing_connections() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut client = Swarm::new_ephemeral(MyBehaviour::new); let mut server = Swarm::new_ephemeral(MyBehaviour::new); @@ -82,29 +86,24 @@ async fn adding_an_external_addresses_activates_server_mode_on_existing_connecti let (memory_addr, _) = server.listen().await; - // Remove memory address to simulate a server that doesn't know its external address. - server.remove_external_address(&memory_addr); client.dial(memory_addr.clone()).unwrap(); - use MyBehaviourEvent::*; - // Do the usual identify send/receive dance. 
match libp2p_swarm_test::drive(&mut client, &mut server).await { ([Identify(_), Identify(_)], [Identify(_), Identify(_)]) => {} other => panic!("Unexpected events: {other:?}"), } - use Event::*; - // Server learns its external address (this could be through AutoNAT or some other mechanism). server.add_external_address(memory_addr); - // The server reconfigured its connection to the client to be in server mode, pushes that information to client which as a result updates its routing table. + // The server reconfigured its connection to the client to be in server mode, pushes that information to client which as a result updates its routing table and triggers a mode change to Mode::Server. match libp2p_swarm_test::drive(&mut client, &mut server).await { ( [Identify(identify::Event::Received { .. }), Kad(RoutingUpdated { peer: peer1, .. })], - [Identify(identify::Event::Pushed { .. })], + [Kad(ModeChanged { new_mode }), Identify(identify::Event::Pushed { .. })], ) => { + assert_eq!(new_mode, Mode::Server); assert_eq!(peer1, server_peer_id); } other => panic!("Unexpected events: {other:?}"), @@ -113,46 +112,52 @@ async fn adding_an_external_addresses_activates_server_mode_on_existing_connecti #[async_std::test] async fn set_client_to_server_mode() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut client = Swarm::new_ephemeral(MyBehaviour::new); client.behaviour_mut().kad.set_mode(Some(Mode::Client)); let mut server = Swarm::new_ephemeral(MyBehaviour::new); - server.listen().await; + server.listen().with_memory_addr_external().await; client.connect(&mut server).await; let server_peer_id = *server.local_peer_id(); - match libp2p_swarm_test::drive(&mut client, &mut server).await { - ( - [MyBehaviourEvent::Identify(_), MyBehaviourEvent::Identify(_), MyBehaviourEvent::Kad(Event::RoutingUpdated { peer, .. 
})], - [MyBehaviourEvent::Identify(_), MyBehaviourEvent::Identify(identify::Event::Received { info, .. })], - ) => { - assert_eq!(peer, server_peer_id); - assert!(info - .protocols - .iter() - .all(|proto| libp2p_kad::PROTOCOL_NAME.ne(proto))) - } - other => panic!("Unexpected events: {other:?}"), - } + let client_event = client.wait(|e| match e { + SwarmEvent::Behaviour(Kad(RoutingUpdated { peer, .. })) => Some(peer), + _ => None, + }); + let server_event = server.wait(|e| match e { + SwarmEvent::Behaviour(Identify(identify::Event::Received { info, .. })) => Some(info), + _ => None, + }); + + let (peer, info) = futures::future::join(client_event, server_event).await; + + assert_eq!(peer, server_peer_id); + assert!(info + .protocols + .iter() + .all(|proto| libp2p_kad::PROTOCOL_NAME.ne(proto))); client.behaviour_mut().kad.set_mode(Some(Mode::Server)); - match libp2p_swarm_test::drive(&mut client, &mut server).await { - ( - [MyBehaviourEvent::Identify(_)], - [MyBehaviourEvent::Identify(identify::Event::Received { info, .. }), MyBehaviourEvent::Kad(_)], - ) => { - assert!(info - .protocols - .iter() - .any(|proto| libp2p_kad::PROTOCOL_NAME.eq(proto))) - } - other => panic!("Unexpected events: {other:?}"), - } + async_std::task::spawn(client.loop_on_next()); + + let info = server + .wait(|e| match e { + SwarmEvent::Behaviour(Identify(identify::Event::Received { info, .. 
})) => Some(info), + _ => None, + }) + .await; + + assert!(info + .protocols + .iter() + .any(|proto| libp2p_kad::PROTOCOL_NAME.eq(proto))); } #[derive(libp2p_swarm::NetworkBehaviour)] @@ -174,7 +179,7 @@ impl MyBehaviour { kad: Behaviour::with_config( local_peer_id, MemoryStore::new(local_peer_id), - Config::default(), + Config::new(libp2p_kad::PROTOCOL_NAME), ), } } diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index 3a287e9031a0..cfd02232b077 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,8 +1,20 @@ -## 0.44.0 +## 0.45.1 + +- Ensure `Multiaddr` handled and returned by `Behaviour` are `/p2p` terminated. + See [PR 4596](https://github.com/libp2p/rust-libp2p/pull/4596). +- Fix a bug in the `Behaviour::poll` method causing missed mdns packets. + See [PR 4861](https://github.com/libp2p/rust-libp2p/pull/4861). + +## 0.45.0 + +- Don't perform IO in `Behaviour::poll`. + See [PR 4623](https://github.com/libp2p/rust-libp2p/pull/4623). + +## 0.44.0 - Change `mdns::Event` to hold `Vec` and remove `DiscoveredAddrsIter` and `ExpiredAddrsIter`. See [PR 3621]. - + - Raise MSRV to 1.65. See [PR 3715]. - Remove deprecated `Mdns` prefixed items. See [PR 3699]. 
diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index da2c0c932410..ef273781b780 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-mdns" edition = "2021" rust-version = { workspace = true } -version = "0.44.0" +version = "0.45.1" description = "Implementation of the libp2p mDNS discovery method" authors = ["Parity Technologies "] license = "MIT" @@ -11,34 +11,35 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -async-io = { version = "1.13.0", optional = true } -data-encoding = "2.4.0" -futures = "0.3.28" -if-watch = "3.0.1" +async-std = { version = "1.12.0", optional = true } +async-io = { version = "2.3.1", optional = true } +data-encoding = "2.5.0" +futures = "0.3.30" +if-watch = "3.2.0" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" rand = "0.8.3" -smallvec = "1.11.1" -socket2 = { version = "0.5.4", features = ["all"] } -tokio = { version = "1.32", default-features = false, features = ["net", "time"], optional = true} -trust-dns-proto = { version = "0.23.0", default-features = false, features = ["mdns"] } +smallvec = "1.12.0" +socket2 = { version = "0.5.5", features = ["all"] } +tokio = { version = "1.36", default-features = false, features = ["net", "time"], optional = true} +tracing = "0.1.37" +hickory-proto = { version = "0.24.0", default-features = false, features = ["mdns"] } void = "1.0.2" [features] tokio = ["dep:tokio", "if-watch/tokio"] -async-io = ["dep:async-io", "if-watch/smol"] +async-io = ["dep:async-io", "dep:async-std", "if-watch/smol"] [dev-dependencies] async-std = { version = "1.9.0", features = ["attributes"] } -env_logger = "0.10.0" libp2p-noise = { workspace = true } libp2p-swarm = { workspace = true, features = ["tokio", "async-std"] } libp2p-tcp = { workspace = true, features = ["tokio", "async-io"] } libp2p-yamux = { 
workspace = true } -tokio = { version = "1.32", default-features = false, features = ["macros", "rt", "rt-multi-thread", "time"] } +tokio = { version = "1.36", default-features = false, features = ["macros", "rt", "rt-multi-thread", "time"] } libp2p-swarm-test = { path = "../../swarm-test" } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[test]] name = "use-async-std" diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index bc102f832df9..4e3533f26ab3 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -25,17 +25,20 @@ mod timer; use self::iface::InterfaceState; use crate::behaviour::{socket::AsyncSocket, timer::Builder}; use crate::Config; -use futures::Stream; +use futures::channel::mpsc; +use futures::{Stream, StreamExt}; use if_watch::IfEvent; use libp2p_core::{Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::behaviour::FromSwarm; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, PollParameters, - THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + dummy, ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }; use smallvec::SmallVec; use std::collections::hash_map::{Entry, HashMap}; +use std::future::Future; +use std::sync::{Arc, RwLock}; use std::{cmp, fmt, io, net::IpAddr, pin::Pin, task::Context, task::Poll, time::Instant}; /// An abstraction to allow for compatibility with various async runtimes. @@ -47,16 +50,27 @@ pub trait Provider: 'static { /// The IfWatcher type. type Watcher: Stream> + fmt::Debug + Unpin; + type TaskHandle: Abort; + /// Create a new instance of the `IfWatcher` type. fn new_watcher() -> Result; + + fn spawn(task: impl Future + Send + 'static) -> Self::TaskHandle; +} + +#[allow(unreachable_pub)] // Not re-exported. 
+pub trait Abort { + fn abort(self); } /// The type of a [`Behaviour`] using the `async-io` implementation. #[cfg(feature = "async-io")] pub mod async_io { use super::Provider; - use crate::behaviour::{socket::asio::AsyncUdpSocket, timer::asio::AsyncTimer}; + use crate::behaviour::{socket::asio::AsyncUdpSocket, timer::asio::AsyncTimer, Abort}; + use async_std::task::JoinHandle; use if_watch::smol::IfWatcher; + use std::future::Future; #[doc(hidden)] pub enum AsyncIo {} @@ -65,10 +79,21 @@ pub mod async_io { type Socket = AsyncUdpSocket; type Timer = AsyncTimer; type Watcher = IfWatcher; + type TaskHandle = JoinHandle<()>; fn new_watcher() -> Result { IfWatcher::new() } + + fn spawn(task: impl Future + Send + 'static) -> JoinHandle<()> { + async_std::task::spawn(task) + } + } + + impl Abort for JoinHandle<()> { + fn abort(self) { + async_std::task::spawn(self.cancel()); + } } pub type Behaviour = super::Behaviour; @@ -78,8 +103,10 @@ pub mod async_io { #[cfg(feature = "tokio")] pub mod tokio { use super::Provider; - use crate::behaviour::{socket::tokio::TokioUdpSocket, timer::tokio::TokioTimer}; + use crate::behaviour::{socket::tokio::TokioUdpSocket, timer::tokio::TokioTimer, Abort}; use if_watch::tokio::IfWatcher; + use std::future::Future; + use tokio::task::JoinHandle; #[doc(hidden)] pub enum Tokio {} @@ -88,10 +115,21 @@ pub mod tokio { type Socket = TokioUdpSocket; type Timer = TokioTimer; type Watcher = IfWatcher; + type TaskHandle = JoinHandle<()>; fn new_watcher() -> Result { IfWatcher::new() } + + fn spawn(task: impl Future + Send + 'static) -> Self::TaskHandle { + tokio::spawn(task) + } + } + + impl Abort for JoinHandle<()> { + fn abort(self) { + JoinHandle::abort(&self) + } } pub type Behaviour = super::Behaviour; @@ -110,8 +148,11 @@ where /// Iface watcher. if_watch: P::Watcher, - /// Mdns interface states. - iface_states: HashMap>, + /// Handles to tasks running the mDNS queries. 
+ if_tasks: HashMap, + + query_response_receiver: mpsc::Receiver<(PeerId, Multiaddr, Instant)>, + query_response_sender: mpsc::Sender<(PeerId, Multiaddr, Instant)>, /// List of nodes that we have discovered, the address, and when their TTL expires. /// @@ -124,7 +165,11 @@ where /// `None` if `discovered_nodes` is empty. closest_expiration: Option, - listen_addresses: ListenAddresses, + /// The current set of listen addresses. + /// + /// This is shared across all interface tasks using an [`RwLock`]. + /// The [`Behaviour`] updates this upon new [`FromSwarm`] events where as [`InterfaceState`]s read from it to answer inbound mDNS queries. + listen_addresses: Arc>, local_peer_id: PeerId, } @@ -135,10 +180,14 @@ where { /// Builds a new `Mdns` behaviour. pub fn new(config: Config, local_peer_id: PeerId) -> io::Result { + let (tx, rx) = mpsc::channel(10); // Chosen arbitrarily. + Ok(Self { config, if_watch: P::new_watcher()?, - iface_states: Default::default(), + if_tasks: Default::default(), + query_response_receiver: rx, + query_response_sender: tx, discovered_nodes: Default::default(), closest_expiration: Default::default(), listen_addresses: Default::default(), @@ -147,6 +196,7 @@ where } /// Returns true if the given `PeerId` is in the list of nodes discovered through mDNS. + #[deprecated(note = "Use `discovered_nodes` iterator instead.")] pub fn has_node(&self, peer_id: &PeerId) -> bool { self.discovered_nodes().any(|p| p == peer_id) } @@ -157,6 +207,7 @@ where } /// Expires a node before the ttl. + #[deprecated(note = "Unused API. 
Will be removed in the next release.")] pub fn expire_node(&mut self, peer_id: &PeerId) { let now = Instant::now(); for (peer, _addr, expires) in &mut self.discovered_nodes { @@ -224,35 +275,17 @@ where void::unreachable(ev) } - fn on_swarm_event(&mut self, event: FromSwarm) { - self.listen_addresses.on_swarm_event(&event); - - match event { - FromSwarm::NewListener(_) => { - log::trace!("waking interface state because listening address changed"); - for iface in self.iface_states.values_mut() { - iface.fire_timer(); - } - } - FromSwarm::ConnectionClosed(_) - | FromSwarm::ConnectionEstablished(_) - | FromSwarm::DialFailure(_) - | FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} - } + fn on_swarm_event(&mut self, event: FromSwarm) { + self.listen_addresses + .write() + .unwrap_or_else(|e| e.into_inner()) + .on_swarm_event(&event); } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - _: &mut impl PollParameters, ) -> Poll>> { // Poll ifwatch. 
while let Poll::Ready(Some(event)) = Pin::new(&mut self.if_watch).poll_next(cx) { @@ -267,43 +300,52 @@ where { continue; } - if let Entry::Vacant(e) = self.iface_states.entry(addr) { - match InterfaceState::new(addr, self.config.clone(), self.local_peer_id) { + if let Entry::Vacant(e) = self.if_tasks.entry(addr) { + match InterfaceState::::new( + addr, + self.config.clone(), + self.local_peer_id, + self.listen_addresses.clone(), + self.query_response_sender.clone(), + ) { Ok(iface_state) => { - e.insert(iface_state); + e.insert(P::spawn(iface_state)); + } + Err(err) => { + tracing::error!("failed to create `InterfaceState`: {}", err) } - Err(err) => log::error!("failed to create `InterfaceState`: {}", err), } } } Ok(IfEvent::Down(inet)) => { - if self.iface_states.contains_key(&inet.addr()) { - log::info!("dropping instance {}", inet.addr()); - self.iface_states.remove(&inet.addr()); + if let Some(handle) = self.if_tasks.remove(&inet.addr()) { + tracing::info!(instance=%inet.addr(), "dropping instance"); + + handle.abort(); } } - Err(err) => log::error!("if watch returned an error: {}", err), + Err(err) => tracing::error!("if watch returned an error: {}", err), } } // Emit discovered event. 
let mut discovered = Vec::new(); - for iface_state in self.iface_states.values_mut() { - while let Poll::Ready((peer, addr, expiration)) = - iface_state.poll(cx, &self.listen_addresses) + + while let Poll::Ready(Some((peer, addr, expiration))) = + self.query_response_receiver.poll_next_unpin(cx) + { + if let Some((_, _, cur_expires)) = self + .discovered_nodes + .iter_mut() + .find(|(p, a, _)| *p == peer && *a == addr) { - if let Some((_, _, cur_expires)) = self - .discovered_nodes - .iter_mut() - .find(|(p, a, _)| *p == peer && *a == addr) - { - *cur_expires = cmp::max(*cur_expires, expiration); - } else { - log::info!("discovered: {} {}", peer, addr); - self.discovered_nodes.push((peer, addr.clone(), expiration)); - discovered.push((peer, addr)); - } + *cur_expires = cmp::max(*cur_expires, expiration); + } else { + tracing::info!(%peer, address=%addr, "discovered peer on address"); + self.discovered_nodes.push((peer, addr.clone(), expiration)); + discovered.push((peer, addr)); } } + if !discovered.is_empty() { let event = Event::Discovered(discovered); return Poll::Ready(ToSwarm::GenerateEvent(event)); @@ -314,7 +356,7 @@ where let mut expired = Vec::new(); self.discovered_nodes.retain(|(peer, addr, expiration)| { if *expiration <= now { - log::info!("expired: {} {}", peer, addr); + tracing::info!(%peer, address=%addr, "expired peer on address"); expired.push((*peer, addr.clone())); return false; } diff --git a/protocols/mdns/src/behaviour/iface.rs b/protocols/mdns/src/behaviour/iface.rs index 54d6c657380d..9302065cde2a 100644 --- a/protocols/mdns/src/behaviour/iface.rs +++ b/protocols/mdns/src/behaviour/iface.rs @@ -25,10 +25,14 @@ use self::dns::{build_query, build_query_response, build_service_discovery_respo use self::query::MdnsPacket; use crate::behaviour::{socket::AsyncSocket, timer::Builder}; use crate::Config; +use futures::channel::mpsc; +use futures::{SinkExt, StreamExt}; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use 
libp2p_swarm::ListenAddresses; use socket2::{Domain, Socket, Type}; +use std::future::Future; +use std::sync::{Arc, RwLock}; use std::{ collections::VecDeque, io, @@ -72,6 +76,11 @@ pub(crate) struct InterfaceState { recv_socket: U, /// Send socket. send_socket: U, + + listen_addresses: Arc>, + + query_response_sender: mpsc::Sender<(PeerId, Multiaddr, Instant)>, + /// Buffer used for receiving data from the main socket. /// RFC6762 discourages packets larger than the interface MTU, but allows sizes of up to 9000 /// bytes, if it can be ensured that all participating devices can handle such large packets. @@ -101,8 +110,14 @@ where T: Builder + futures::Stream, { /// Builds a new [`InterfaceState`]. - pub(crate) fn new(addr: IpAddr, config: Config, local_peer_id: PeerId) -> io::Result { - log::info!("creating instance on iface {}", addr); + pub(crate) fn new( + addr: IpAddr, + config: Config, + local_peer_id: PeerId, + listen_addresses: Arc>, + query_response_sender: mpsc::Sender<(PeerId, Multiaddr, Instant)>, + ) -> io::Result { + tracing::info!(address=%addr, "creating instance on iface address"); let recv_socket = match addr { IpAddr::V4(addr) => { let socket = Socket::new(Domain::IPV4, Type::DGRAM, Some(socket2::Protocol::UDP))?; @@ -154,6 +169,8 @@ where addr, recv_socket, send_socket, + listen_addresses, + query_response_sender, recv_buffer: [0; 4096], send_buffer: Default::default(), discovered: Default::default(), @@ -167,124 +184,146 @@ where } pub(crate) fn reset_timer(&mut self) { - log::trace!("reset timer on {:#?} {:#?}", self.addr, self.probe_state); + tracing::trace!(address=%self.addr, probe_state=?self.probe_state, "reset timer"); let interval = *self.probe_state.interval(); self.timeout = T::interval(interval); } - pub(crate) fn fire_timer(&mut self) { - self.timeout = T::interval_at(Instant::now(), INITIAL_TIMEOUT_INTERVAL); + fn mdns_socket(&self) -> SocketAddr { + SocketAddr::new(self.multicast_addr, 5353) } +} + +impl Future for InterfaceState 
+where + U: AsyncSocket, + T: Builder + futures::Stream, +{ + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); - pub(crate) fn poll( - &mut self, - cx: &mut Context, - listen_addresses: &ListenAddresses, - ) -> Poll<(PeerId, Multiaddr, Instant)> { loop { // 1st priority: Low latency: Create packet ASAP after timeout. - if Pin::new(&mut self.timeout).poll_next(cx).is_ready() { - log::trace!("sending query on iface {}", self.addr); - self.send_buffer.push_back(build_query()); - log::trace!("tick on {:#?} {:#?}", self.addr, self.probe_state); + if this.timeout.poll_next_unpin(cx).is_ready() { + tracing::trace!(address=%this.addr, "sending query on iface"); + this.send_buffer.push_back(build_query()); + tracing::trace!(address=%this.addr, probe_state=?this.probe_state, "tick"); // Stop to probe when the initial interval reach the query interval - if let ProbeState::Probing(interval) = self.probe_state { + if let ProbeState::Probing(interval) = this.probe_state { let interval = interval * 2; - self.probe_state = if interval >= self.query_interval { - ProbeState::Finished(self.query_interval) + this.probe_state = if interval >= this.query_interval { + ProbeState::Finished(this.query_interval) } else { ProbeState::Probing(interval) }; } - self.reset_timer(); + this.reset_timer(); } // 2nd priority: Keep local buffers small: Send packets to remote. 
- if let Some(packet) = self.send_buffer.pop_front() { - match Pin::new(&mut self.send_socket).poll_write( - cx, - &packet, - SocketAddr::new(self.multicast_addr, 5353), - ) { + if let Some(packet) = this.send_buffer.pop_front() { + match this.send_socket.poll_write(cx, &packet, this.mdns_socket()) { Poll::Ready(Ok(_)) => { - log::trace!("sent packet on iface {}", self.addr); + tracing::trace!(address=%this.addr, "sent packet on iface address"); continue; } Poll::Ready(Err(err)) => { - log::error!("error sending packet on iface {} {}", self.addr, err); + tracing::error!(address=%this.addr, "error sending packet on iface address {}", err); continue; } Poll::Pending => { - self.send_buffer.push_front(packet); + this.send_buffer.push_front(packet); } } } // 3rd priority: Keep local buffers small: Return discovered addresses. - if let Some(discovered) = self.discovered.pop_front() { - return Poll::Ready(discovered); + if this.query_response_sender.poll_ready_unpin(cx).is_ready() { + if let Some(discovered) = this.discovered.pop_front() { + match this.query_response_sender.try_send(discovered) { + Ok(()) => {} + Err(e) if e.is_disconnected() => { + return Poll::Ready(()); + } + Err(e) => { + this.discovered.push_front(e.into_inner()); + } + } + + continue; + } } // 4th priority: Remote work: Answer incoming requests. 
- match Pin::new(&mut self.recv_socket) - .poll_read(cx, &mut self.recv_buffer) - .map_ok(|(len, from)| MdnsPacket::new_from_bytes(&self.recv_buffer[..len], from)) + match this + .recv_socket + .poll_read(cx, &mut this.recv_buffer) + .map_ok(|(len, from)| MdnsPacket::new_from_bytes(&this.recv_buffer[..len], from)) { Poll::Ready(Ok(Ok(Some(MdnsPacket::Query(query))))) => { - log::trace!( - "received query from {} on {}", - query.remote_addr(), - self.addr + tracing::trace!( + address=%this.addr, + remote_address=%query.remote_addr(), + "received query from remote address on address" ); - self.send_buffer.extend(build_query_response( + this.send_buffer.extend(build_query_response( query.query_id(), - self.local_peer_id, - listen_addresses.iter(), - self.ttl, + this.local_peer_id, + this.listen_addresses + .read() + .unwrap_or_else(|e| e.into_inner()) + .iter(), + this.ttl, )); continue; } Poll::Ready(Ok(Ok(Some(MdnsPacket::Response(response))))) => { - log::trace!( - "received response from {} on {}", - response.remote_addr(), - self.addr + tracing::trace!( + address=%this.addr, + remote_address=%response.remote_addr(), + "received response from remote address on address" ); - self.discovered - .extend(response.extract_discovered(Instant::now(), self.local_peer_id)); + this.discovered + .extend(response.extract_discovered(Instant::now(), this.local_peer_id)); // Stop probing when we have a valid response - if !self.discovered.is_empty() { - self.probe_state = ProbeState::Finished(self.query_interval); - self.reset_timer(); + if !this.discovered.is_empty() { + this.probe_state = ProbeState::Finished(this.query_interval); + this.reset_timer(); } continue; } Poll::Ready(Ok(Ok(Some(MdnsPacket::ServiceDiscovery(disc))))) => { - log::trace!( - "received service discovery from {} on {}", - disc.remote_addr(), - self.addr + tracing::trace!( + address=%this.addr, + remote_address=%disc.remote_addr(), + "received service discovery from remote address on address" ); - 
self.send_buffer - .push_back(build_service_discovery_response(disc.query_id(), self.ttl)); + this.send_buffer + .push_back(build_service_discovery_response(disc.query_id(), this.ttl)); continue; } Poll::Ready(Err(err)) if err.kind() == std::io::ErrorKind::WouldBlock => { // No more bytes available on the socket to read + continue; } Poll::Ready(Err(err)) => { - log::error!("failed reading datagram: {}", err); + tracing::error!("failed reading datagram: {}", err); + return Poll::Ready(()); } Poll::Ready(Ok(Err(err))) => { - log::debug!("Parsing mdns packet failed: {:?}", err); + tracing::debug!("Parsing mdns packet failed: {:?}", err); + continue; } - Poll::Ready(Ok(Ok(None))) | Poll::Pending => {} + Poll::Ready(Ok(Ok(None))) => continue, + Poll::Pending => {} } return Poll::Pending; diff --git a/protocols/mdns/src/behaviour/iface/dns.rs b/protocols/mdns/src/behaviour/iface/dns.rs index 6a10497e69f2..6cc5550dbe50 100644 --- a/protocols/mdns/src/behaviour/iface/dns.rs +++ b/protocols/mdns/src/behaviour/iface/dns.rs @@ -134,7 +134,7 @@ pub(crate) fn build_query_response<'a>( records.push(txt_record); } Err(e) => { - log::warn!("Excluding address {} from response: {:?}", addr, e); + tracing::warn!(address=%addr, "Excluding address from response: {:?}", e); } } @@ -395,9 +395,9 @@ impl error::Error for MdnsResponseError {} #[cfg(test)] mod tests { use super::*; + use hickory_proto::op::Message; use libp2p_identity as identity; use std::time::Duration; - use trust_dns_proto::op::Message; #[test] fn build_query_correct() { diff --git a/protocols/mdns/src/behaviour/iface/query.rs b/protocols/mdns/src/behaviour/iface/query.rs index 0185028f6ff3..eeb699fca6ba 100644 --- a/protocols/mdns/src/behaviour/iface/query.rs +++ b/protocols/mdns/src/behaviour/iface/query.rs @@ -20,6 +20,10 @@ use super::dns; use crate::{META_QUERY_SERVICE_FQDN, SERVICE_NAME_FQDN}; +use hickory_proto::{ + op::Message, + rr::{Name, RData}, +}; use libp2p_core::{ address_translation, 
multiaddr::{Multiaddr, Protocol}, @@ -27,10 +31,6 @@ use libp2p_core::{ use libp2p_identity::PeerId; use std::time::Instant; use std::{fmt, net::SocketAddr, str, time::Duration}; -use trust_dns_proto::{ - op::Message, - rr::{Name, RData}, -}; /// A valid mDNS packet received by the service. #[derive(Debug)] @@ -47,7 +47,7 @@ impl MdnsPacket { pub(crate) fn new_from_bytes( buf: &[u8], from: SocketAddr, - ) -> Result, trust_dns_proto::error::ProtoError> { + ) -> Result, hickory_proto::error::ProtoError> { let packet = Message::from_vec(buf)?; if packet.query().is_none() { @@ -156,9 +156,8 @@ impl MdnsResponse { return None; } - let record_value = match record.data() { - Some(RData::PTR(record)) => record, - _ => return None, + let RData::PTR(record_value) = record.data()? else { + return None; }; MdnsPeer::new(packet, record_value, record.ttl()) @@ -181,6 +180,7 @@ impl MdnsResponse { peer.addresses().iter().filter_map(move |address| { let new_addr = address_translation(address, &observed)?; + let new_addr = new_addr.with_p2p(*peer.id()).ok()?; Some((*peer.id(), new_addr, new_expiration)) }) @@ -247,21 +247,14 @@ impl MdnsPeer { .flat_map(|txt| txt.iter()) .filter_map(|txt| { // TODO: wrong, txt can be multiple character strings - let addr = match dns::decode_character_string(txt) { - Ok(a) => a, - Err(_) => return None, - }; + let addr = dns::decode_character_string(txt).ok()?; + if !addr.starts_with(b"dnsaddr=") { return None; } - let addr = match str::from_utf8(&addr[8..]) { - Ok(a) => a, - Err(_) => return None, - }; - let mut addr = match addr.parse::() { - Ok(a) => a, - Err(_) => return None, - }; + + let mut addr = str::from_utf8(&addr[8..]).ok()?.parse::().ok()?; + match addr.pop() { Some(Protocol::P2p(peer_id)) => { if let Some(pid) = &my_peer_id { @@ -344,9 +337,8 @@ mod tests { if record.name().to_utf8() != SERVICE_NAME_FQDN { return None; } - let record_value = match record.data() { - Some(RData::PTR(record)) => record, - _ => return None, + let 
Some(RData::PTR(record_value)) = record.data() else { + return None; }; Some(record_value) }) diff --git a/protocols/mdns/tests/use-async-std.rs b/protocols/mdns/tests/use-async-std.rs index 6d45d92cdd96..549f70978af1 100644 --- a/protocols/mdns/tests/use-async-std.rs +++ b/protocols/mdns/tests/use-async-std.rs @@ -24,17 +24,22 @@ use libp2p_mdns::{async_io::Behaviour, Config}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[async_std::test] async fn test_discovery_async_std_ipv4() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); run_discovery_test(Config::default()).await } #[async_std::test] async fn test_discovery_async_std_ipv6() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { enable_ipv6: true, @@ -45,7 +50,9 @@ async fn test_discovery_async_std_ipv6() { #[async_std::test] async fn test_expired_async_std() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { ttl: Duration::from_secs(1), @@ -78,7 +85,9 @@ async fn test_expired_async_std() { #[async_std::test] async fn test_no_expiration_on_close_async_std() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { ttl: Duration::from_secs(120), query_interval: Duration::from_secs(10), diff --git a/protocols/mdns/tests/use-tokio.rs b/protocols/mdns/tests/use-tokio.rs index 50d6be0c00f3..cf0d9f4bed4d 100644 --- a/protocols/mdns/tests/use-tokio.rs +++ b/protocols/mdns/tests/use-tokio.rs @@ -22,17 +22,22 @@ use libp2p_mdns::{tokio::Behaviour, Config, Event}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use 
std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn test_discovery_tokio_ipv4() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); run_discovery_test(Config::default()).await } #[tokio::test] async fn test_discovery_tokio_ipv6() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { enable_ipv6: true, @@ -43,7 +48,9 @@ async fn test_discovery_tokio_ipv6() { #[tokio::test] async fn test_expired_tokio() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { ttl: Duration::from_secs(1), diff --git a/protocols/perf/CHANGELOG.md b/protocols/perf/CHANGELOG.md index e46a94e981ac..4e448d7f44a0 100644 --- a/protocols/perf/CHANGELOG.md +++ b/protocols/perf/CHANGELOG.md @@ -1,4 +1,10 @@ -## 0.2.0 +## 0.3.0 + +- Continuously measure on single connection (iperf-style). + See https://github.com/libp2p/test-plans/issues/261 for high level overview. + See [PR 4382](https://github.com/libp2p/rust-libp2p/pull/4382). + +## 0.2.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/protocols/perf/Cargo.toml b/protocols/perf/Cargo.toml index 50ca36a99e24..68b1088a42c2 100644 --- a/protocols/perf/Cargo.toml +++ b/protocols/perf/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-perf" edition = "2021" rust-version = { workspace = true } description = "libp2p perf protocol implementation" -version = "0.2.0" +version = "0.3.0" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,25 +12,26 @@ categories = ["network-programming", "asynchronous"] [dependencies] anyhow = "1" -async-trait = "0.1" -clap = { version = "4.3.23", features = ["derive"] } -env_logger = "0.10.0" -futures = "0.3.28" +clap = { version = "4.4.16", features = ["derive"] } +futures = "0.3.30" +futures-bounded = { workspace = true } +futures-timer = "3.0" instant = "0.1.12" +libp2p = { workspace = true, features = ["tokio", "tcp", "quic", "tls", "yamux", "dns"] } libp2p-core = { workspace = true } libp2p-dns = { workspace = true, features = ["tokio"] } -libp2p-identity = { workspace = true } -libp2p-tls = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } libp2p-quic = { workspace = true, features = ["tokio"] } -libp2p-request-response = { workspace = true } libp2p-swarm = { workspace = true, features = ["macros", "tokio"] } libp2p-tcp = { workspace = true, features = ["tokio"] } +libp2p-tls = { workspace = true } libp2p-yamux = { workspace = true } -log = "0.4" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" -tokio = { version = "1.32.0", features = ["full"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +tokio = { version = "1.36", default-features = false, features = ["macros", "rt", "rt-multi-thread"] } void = "1" [dev-dependencies] diff --git a/protocols/perf/src/bin/perf.rs b/protocols/perf/src/bin/perf.rs index 4205cc3843bd..9ac8f0a6cde2 100644 --- a/protocols/perf/src/bin/perf.rs +++ 
b/protocols/perf/src/bin/perf.rs @@ -22,18 +22,16 @@ use std::{net::SocketAddr, str::FromStr}; use anyhow::{bail, Result}; use clap::Parser; -use futures::FutureExt; -use futures::{future::Either, StreamExt}; +use futures::StreamExt; use instant::{Duration, Instant}; -use libp2p_core::{ - multiaddr::Protocol, muxing::StreamMuxerBox, transport::OrTransport, upgrade, Multiaddr, - Transport as _, -}; -use libp2p_identity::PeerId; -use libp2p_perf::{Run, RunDuration, RunParams}; -use libp2p_swarm::{NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}; -use log::{error, info}; +use libp2p::core::{multiaddr::Protocol, upgrade, Multiaddr}; +use libp2p::identity::PeerId; +use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p::SwarmBuilder; +use libp2p_perf::{client, server}; +use libp2p_perf::{Final, Intermediate, Run, RunParams, RunUpdate}; use serde::{Deserialize, Serialize}; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p perf client")] @@ -73,9 +71,9 @@ impl FromStr for Transport { #[tokio::main] async fn main() -> Result<()> { - env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")) - .format_timestamp_millis() - .init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opts = Opts::parse(); match opts { @@ -123,20 +121,20 @@ async fn server(server_address: SocketAddr) -> Result<()> { loop { match swarm.next().await.unwrap() { SwarmEvent::NewListenAddr { address, .. } => { - info!("Listening on {address}"); + tracing::info!(%address, "Listening on address"); } SwarmEvent::IncomingConnection { .. } => {} e @ SwarmEvent::IncomingConnectionError { .. } => { - error!("{e:?}"); + tracing::error!("{e:?}"); } SwarmEvent::ConnectionEstablished { peer_id, endpoint, .. 
} => { - info!("Established connection to {:?} via {:?}", peer_id, endpoint); + tracing::info!(peer=%peer_id, ?endpoint, "Established new connection"); } SwarmEvent::ConnectionClosed { .. } => {} - SwarmEvent::Behaviour(()) => { - info!("Finished run",) + SwarmEvent::Behaviour(server::Event { .. }) => { + tracing::info!("Finished run",) } e => panic!("{e:?}"), } @@ -163,266 +161,70 @@ async fn client( .with(Protocol::Udp(server_address.port())) .with(Protocol::QuicV1), }; - - let benchmarks = if upload_bytes.is_some() { - vec![custom( - server_address, - RunParams { - to_send: upload_bytes.unwrap(), - to_receive: download_bytes.unwrap(), - }, - ) - .boxed()] - } else { - vec![ - latency(server_address.clone()).boxed(), - throughput(server_address.clone()).boxed(), - requests_per_second(server_address.clone()).boxed(), - sequential_connections_per_second(server_address.clone()).boxed(), - ] - }; - - tokio::spawn(async move { - for benchmark in benchmarks { - benchmark.await?; - } - - anyhow::Ok(()) - }) - .await??; - - Ok(()) -} - -async fn custom(server_address: Multiaddr, params: RunParams) -> Result<()> { - info!("start benchmark: custom"); - let mut swarm = swarm().await?; - - let start = Instant::now(); - - let server_peer_id = connect(&mut swarm, server_address.clone()).await?; - - perf(&mut swarm, server_peer_id, params).await?; - - #[derive(Serialize, Deserialize)] - #[serde(rename_all = "camelCase")] - struct CustomResult { - latency: f64, - } - - println!( - "{}", - serde_json::to_string(&CustomResult { - latency: start.elapsed().as_secs_f64(), - }) - .unwrap() - ); - - Ok(()) -} - -async fn latency(server_address: Multiaddr) -> Result<()> { - info!("start benchmark: round-trip-time latency"); - let mut swarm = swarm().await?; - - let server_peer_id = connect(&mut swarm, server_address.clone()).await?; - - let mut rounds = 0; - let start = Instant::now(); - let mut latencies = Vec::new(); - - loop { - if start.elapsed() > Duration::from_secs(30) { - break; 
- } - - let start = Instant::now(); - - perf( - &mut swarm, - server_peer_id, - RunParams { - to_send: 1, - to_receive: 1, - }, - ) - .await?; - - latencies.push(start.elapsed().as_secs_f64()); - rounds += 1; - } - - latencies.sort_by(|a, b| a.partial_cmp(b).unwrap()); - - info!( - "Finished: {rounds} pings in {:.4}s", - start.elapsed().as_secs_f64() - ); - info!("- {:.4} s median", percentile(&latencies, 0.50),); - info!("- {:.4} s 95th percentile\n", percentile(&latencies, 0.95),); - Ok(()) -} - -fn percentile(values: &[V], percentile: f64) -> V { - let n: usize = (values.len() as f64 * percentile).ceil() as usize - 1; - values[n] -} - -async fn throughput(server_address: Multiaddr) -> Result<()> { - info!("start benchmark: single connection single channel throughput"); - let mut swarm = swarm().await?; - - let server_peer_id = connect(&mut swarm, server_address.clone()).await?; - let params = RunParams { - to_send: 10 * 1024 * 1024, - to_receive: 10 * 1024 * 1024, + to_send: upload_bytes.unwrap(), + to_receive: download_bytes.unwrap(), }; - - perf(&mut swarm, server_peer_id, params).await?; - - Ok(()) -} - -async fn requests_per_second(server_address: Multiaddr) -> Result<()> { - info!("start benchmark: single connection parallel requests per second"); let mut swarm = swarm().await?; - let server_peer_id = connect(&mut swarm, server_address.clone()).await?; - - let num = 1_000; - let to_send = 1; - let to_receive = 1; - - for _ in 0..num { - swarm.behaviour_mut().perf( - server_peer_id, - RunParams { - to_send, - to_receive, - }, - )?; - } - - let mut finished = 0; - let start = Instant::now(); - - loop { - match swarm.next().await.unwrap() { - SwarmEvent::Behaviour(libp2p_perf::client::Event { - id: _, - result: Ok(_), - }) => { - finished += 1; - - if finished == num { - break; - } - } - e => panic!("{e:?}"), - } - } - - let duration = start.elapsed().as_secs_f64(); - let requests_per_second = num as f64 / duration; - - info!( - "Finished: sent {num} {to_send} 
bytes requests with {to_receive} bytes response each within {duration:.2} s", - ); - info!("- {requests_per_second:.2} req/s\n"); - - Ok(()) -} - -async fn sequential_connections_per_second(server_address: Multiaddr) -> Result<()> { - info!("start benchmark: sequential connections with single request per second"); - let mut rounds = 0; - let to_send = 1; - let to_receive = 1; - let start = Instant::now(); - - let mut latency_connection_establishment = Vec::new(); - let mut latency_connection_establishment_plus_request = Vec::new(); - - loop { - if start.elapsed() > Duration::from_secs(30) { - break; - } - - let mut swarm = swarm().await?; + tokio::spawn(async move { + tracing::info!("start benchmark: custom"); let start = Instant::now(); let server_peer_id = connect(&mut swarm, server_address.clone()).await?; - latency_connection_establishment.push(start.elapsed().as_secs_f64()); - - perf( - &mut swarm, - server_peer_id, - RunParams { - to_send, - to_receive, - }, - ) - .await?; - - latency_connection_establishment_plus_request.push(start.elapsed().as_secs_f64()); - rounds += 1; - } - - let duration = start.elapsed().as_secs_f64(); - - latency_connection_establishment.sort_by(|a, b| a.partial_cmp(b).unwrap()); - latency_connection_establishment_plus_request.sort_by(|a, b| a.partial_cmp(b).unwrap()); - - let connection_establishment_95th = percentile(&latency_connection_establishment, 0.95); - let connection_establishment_plus_request_95th = - percentile(&latency_connection_establishment_plus_request, 0.95); - - info!( - "Finished: established {rounds} connections with one {to_send} bytes request and one {to_receive} bytes response within {duration:.2} s", + perf(&mut swarm, server_peer_id, params).await?; + + println!( + "{}", + serde_json::to_string(&BenchmarkResult { + upload_bytes: params.to_send, + download_bytes: params.to_receive, + r#type: "final".to_string(), + time_seconds: start.elapsed().as_secs_f64(), + }) + .unwrap() ); - info!("- 
{connection_establishment_95th:.4} s 95th percentile connection establishment"); - info!("- {connection_establishment_plus_request_95th:.4} s 95th percentile connection establishment + one request"); + + anyhow::Ok(()) + }) + .await??; Ok(()) } -async fn swarm() -> Result> { - let local_key = libp2p_identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - - let transport = { - let tcp = libp2p_tcp::tokio::Transport::new(libp2p_tcp::Config::default().nodelay(true)) - .upgrade(upgrade::Version::V1Lazy) - .authenticate(libp2p_tls::Config::new(&local_key)?) - .multiplex(libp2p_yamux::Config::default()); - - let quic = { - let mut config = libp2p_quic::Config::new(&local_key); - config.support_draft_29 = true; - libp2p_quic::tokio::Transport::new(config) - }; - - let dns = libp2p_dns::tokio::Transport::system(OrTransport::new(quic, tcp))?; +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +struct BenchmarkResult { + r#type: String, + time_seconds: f64, + upload_bytes: usize, + download_bytes: usize, +} - dns.map(|either_output, _| match either_output { - Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), +async fn swarm() -> Result> { + let swarm = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + libp2p_tcp::Config::default().nodelay(true), + libp2p_tls::Config::new, + libp2p_yamux::Config::default, + )? + .with_quic() + .with_dns()? + .with_behaviour(|_| B::default())? 
+ .with_swarm_config(|cfg| { + cfg.with_substream_upgrade_protocol_override(upgrade::Version::V1Lazy) + .with_idle_connection_timeout(Duration::from_secs(60 * 5)) }) - .boxed() - }; + .build(); - Ok( - SwarmBuilder::with_tokio_executor(transport, Default::default(), local_peer_id) - .substream_upgrade_protocol_override(upgrade::Version::V1Lazy) - .build(), - ) + Ok(swarm) } async fn connect( - swarm: &mut Swarm, + swarm: &mut Swarm, server_address: Multiaddr, ) -> Result { let start = Instant::now(); @@ -439,27 +241,54 @@ async fn connect( let duration = start.elapsed(); let duration_seconds = duration.as_secs_f64(); - info!("established connection in {duration_seconds:.4} s"); + tracing::info!(elapsed_time=%format!("{duration_seconds:.4} s")); Ok(server_peer_id) } async fn perf( - swarm: &mut Swarm, + swarm: &mut Swarm, server_peer_id: PeerId, params: RunParams, -) -> Result { +) -> Result { swarm.behaviour_mut().perf(server_peer_id, params)?; - let duration = match swarm.next().await.unwrap() { - SwarmEvent::Behaviour(libp2p_perf::client::Event { - id: _, - result: Ok(duration), - }) => duration, - e => panic!("{e:?}"), + let duration = loop { + match swarm.next().await.unwrap() { + SwarmEvent::Behaviour(client::Event { + id: _, + result: Ok(RunUpdate::Intermediate(progressed)), + }) => { + tracing::info!("{progressed}"); + + let Intermediate { + duration, + sent, + received, + } = progressed; + + println!( + "{}", + serde_json::to_string(&BenchmarkResult { + r#type: "intermediate".to_string(), + time_seconds: duration.as_secs_f64(), + upload_bytes: sent, + download_bytes: received, + }) + .unwrap() + ); + } + SwarmEvent::Behaviour(client::Event { + id: _, + result: Ok(RunUpdate::Final(Final { duration })), + }) => break duration, + e => panic!("{e:?}"), + }; }; - info!("{}", Run { params, duration }); + let run = Run { params, duration }; + + tracing::info!("{run}"); - Ok(duration) + Ok(run) } diff --git a/protocols/perf/src/client.rs 
b/protocols/perf/src/client.rs index 93c2086a49e9..c4614e979db7 100644 --- a/protocols/perf/src/client.rs +++ b/protocols/perf/src/client.rs @@ -18,232 +18,32 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use instant::Duration; +mod behaviour; +mod handler; -use std::{ - collections::HashSet, - task::{Context, Poll}, -}; +use std::sync::atomic::{AtomicUsize, Ordering}; -use libp2p_core::Multiaddr; -use libp2p_identity::PeerId; -use libp2p_request_response as request_response; -use libp2p_swarm::{ - derive_prelude::ConnectionEstablished, ConnectionClosed, ConnectionId, FromSwarm, - NetworkBehaviour, PollParameters, THandlerInEvent, THandlerOutEvent, ToSwarm, -}; +pub use behaviour::{Behaviour, Event}; +use libp2p_swarm::StreamUpgradeError; +use void::Void; -use crate::{protocol::Response, RunDuration, RunParams}; +static NEXT_RUN_ID: AtomicUsize = AtomicUsize::new(1); /// Connection identifier. -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct RunId(request_response::RequestId); +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct RunId(usize); -impl From for RunId { - fn from(value: request_response::RequestId) -> Self { - Self(value) - } -} - -#[derive(Debug)] -pub struct Event { - pub id: RunId, - pub result: Result, -} - -pub struct Behaviour { - connected: HashSet, - request_response: request_response::Behaviour, -} - -impl Default for Behaviour { - fn default() -> Self { - let mut req_resp_config = request_response::Config::default(); - req_resp_config.set_connection_keep_alive(Duration::from_secs(60 * 5)); - req_resp_config.set_request_timeout(Duration::from_secs(60 * 5)); - Self { - connected: Default::default(), - request_response: request_response::Behaviour::new( - std::iter::once(( - crate::PROTOCOL_NAME, - request_response::ProtocolSupport::Outbound, - )), - req_resp_config, - ), - } - } -} - -impl Behaviour { - pub fn new() -> Self { - Self::default() - 
} - - pub fn perf(&mut self, server: PeerId, params: RunParams) -> Result { - if !self.connected.contains(&server) { - return Err(PerfError::NotConnected); - } - - let id = self.request_response.send_request(&server, params).into(); - - Ok(id) +impl RunId { + /// Returns the next available [`RunId`]. + pub(crate) fn next() -> Self { + Self(NEXT_RUN_ID.fetch_add(1, Ordering::SeqCst)) } } #[derive(thiserror::Error, Debug)] -pub enum PerfError { - #[error("Not connected to peer")] - NotConnected, -} - -impl NetworkBehaviour for Behaviour { - type ConnectionHandler = - as NetworkBehaviour>::ConnectionHandler; - type ToSwarm = Event; - - fn handle_pending_outbound_connection( - &mut self, - connection_id: ConnectionId, - maybe_peer: Option, - addresses: &[Multiaddr], - effective_role: libp2p_core::Endpoint, - ) -> Result, libp2p_swarm::ConnectionDenied> { - self.request_response.handle_pending_outbound_connection( - connection_id, - maybe_peer, - addresses, - effective_role, - ) - } - - fn handle_established_outbound_connection( - &mut self, - connection_id: ConnectionId, - peer: PeerId, - addr: &Multiaddr, - role_override: libp2p_core::Endpoint, - ) -> Result, libp2p_swarm::ConnectionDenied> { - self.request_response - .handle_established_outbound_connection(connection_id, peer, addr, role_override) - } - - fn handle_pending_inbound_connection( - &mut self, - connection_id: ConnectionId, - local_addr: &Multiaddr, - remote_addr: &Multiaddr, - ) -> Result<(), libp2p_swarm::ConnectionDenied> { - self.request_response.handle_pending_inbound_connection( - connection_id, - local_addr, - remote_addr, - ) - } - - fn handle_established_inbound_connection( - &mut self, - connection_id: ConnectionId, - peer: PeerId, - local_addr: &Multiaddr, - remote_addr: &Multiaddr, - ) -> Result, libp2p_swarm::ConnectionDenied> { - self.request_response.handle_established_inbound_connection( - connection_id, - peer, - local_addr, - remote_addr, - ) - } - - fn on_swarm_event(&mut self, event: 
FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, .. }) => { - self.connected.insert(peer_id); - } - FromSwarm::ConnectionClosed(ConnectionClosed { - peer_id, - connection_id: _, - endpoint: _, - handler: _, - remaining_established, - }) => { - if remaining_established == 0 { - assert!(self.connected.remove(&peer_id)); - } - } - FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrConfirmed(_) - | FromSwarm::ExternalAddrExpired(_) => {} - }; - - self.request_response.on_swarm_event(event); - } - - fn on_connection_handler_event( - &mut self, - peer_id: PeerId, - connection_id: ConnectionId, - event: THandlerOutEvent, - ) { - self.request_response - .on_connection_handler_event(peer_id, connection_id, event); - } - - fn poll( - &mut self, - cx: &mut Context<'_>, - params: &mut impl PollParameters, - ) -> Poll>> { - self.request_response.poll(cx, params).map(|to_swarm| { - to_swarm.map_out(|m| match m { - request_response::Event::Message { - peer: _, - message: - request_response::Message::Response { - request_id, - response: Response::Receiver(run_duration), - }, - } => Event { - id: request_id.into(), - result: Ok(run_duration), - }, - request_response::Event::Message { - peer: _, - message: - request_response::Message::Response { - response: Response::Sender(_), - .. - }, - } => unreachable!(), - request_response::Event::Message { - peer: _, - message: request_response::Message::Request { .. 
}, - } => { - unreachable!() - } - request_response::Event::OutboundFailure { - peer: _, - request_id, - error, - } => Event { - id: request_id.into(), - result: Err(error), - }, - request_response::Event::InboundFailure { - peer: _, - request_id: _, - error: _, - } => unreachable!(), - request_response::Event::ResponseSent { .. } => unreachable!(), - }) - }) - } +pub enum RunError { + #[error(transparent)] + Upgrade(#[from] StreamUpgradeError), + #[error("Failed to execute perf run: {0}")] + Io(#[from] std::io::Error), } diff --git a/protocols/perf/src/client/behaviour.rs b/protocols/perf/src/client/behaviour.rs index 912f6d5bb9ec..880bcdd9c831 100644 --- a/protocols/perf/src/client/behaviour.rs +++ b/protocols/perf/src/client/behaviour.rs @@ -29,19 +29,18 @@ use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::{ derive_prelude::ConnectionEstablished, ConnectionClosed, ConnectionId, FromSwarm, - NetworkBehaviour, NotifyHandler, PollParameters, StreamUpgradeError, THandlerInEvent, - THandlerOutEvent, ToSwarm, + NetworkBehaviour, NotifyHandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use void::Void; -use crate::client::handler::Handler; +use crate::RunParams; +use crate::{client::handler::Handler, RunUpdate}; -use super::{RunId, RunParams, RunStats}; +use super::{RunError, RunId}; #[derive(Debug)] pub struct Event { pub id: RunId, - pub result: Result>, + pub result: Result, } #[derive(Default)] @@ -57,9 +56,9 @@ impl Behaviour { Self::default() } - pub fn perf(&mut self, server: PeerId, params: RunParams) -> Result { + pub fn perf(&mut self, server: PeerId, params: RunParams) -> Result { if !self.connected.contains(&server) { - return Err(PerfError::NotConnected); + return Err(NotConnected {}); } let id = RunId::next(); @@ -75,9 +74,12 @@ impl Behaviour { } #[derive(thiserror::Error, Debug)] -pub enum PerfError { - #[error("Not connected to peer")] - NotConnected, +pub struct NotConnected(); + +impl std::fmt::Display for NotConnected { 
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "not connected to peer") + } } impl NetworkBehaviour for Behaviour { @@ -104,7 +106,7 @@ impl NetworkBehaviour for Behaviour { Ok(Handler::default()) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, .. }) => { self.connected.insert(peer_id); @@ -113,24 +115,13 @@ impl NetworkBehaviour for Behaviour { peer_id, connection_id: _, endpoint: _, - handler: _, remaining_established, }) => { if remaining_established == 0 { assert!(self.connected.remove(&peer_id)); } } - FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } @@ -144,11 +135,8 @@ impl NetworkBehaviour for Behaviour { .push_back(ToSwarm::GenerateEvent(Event { id, result })); } - fn poll( - &mut self, - _cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); } diff --git a/protocols/perf/src/client/handler.rs b/protocols/perf/src/client/handler.rs index 8a6df43d1981..2a2c5499fc26 100644 --- a/protocols/perf/src/client/handler.rs +++ b/protocols/perf/src/client/handler.rs @@ -21,22 +21,23 @@ use std::{ collections::VecDeque, task::{Context, Poll}, - time::{Duration, Instant}, }; -use futures::{future::BoxFuture, stream::FuturesUnordered, FutureExt, StreamExt, TryFutureExt}; +use futures::{ + stream::{BoxStream, SelectAll}, + 
StreamExt, +}; use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; use libp2p_swarm::{ handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, }, - ConnectionHandler, ConnectionHandlerEvent, KeepAlive, StreamProtocol, StreamUpgradeError, - SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; -use void::Void; -use super::{RunId, RunParams, RunStats}; +use crate::client::{RunError, RunId}; +use crate::{RunParams, RunUpdate}; #[derive(Debug)] pub struct Command { @@ -47,7 +48,7 @@ pub struct Command { #[derive(Debug)] pub struct Event { pub(crate) id: RunId, - pub(crate) result: Result>, + pub(crate) result: Result, } pub struct Handler { @@ -57,15 +58,12 @@ pub struct Handler { ::OutboundProtocol, ::OutboundOpenInfo, ::ToBehaviour, - ::Error, >, >, requested_streams: VecDeque, - outbound: FuturesUnordered>>, - - keep_alive: KeepAlive, + outbound: SelectAll)>>, } impl Handler { @@ -74,7 +72,6 @@ impl Handler { queued_events: Default::default(), requested_streams: Default::default(), outbound: Default::default(), - keep_alive: KeepAlive::Yes, } } } @@ -88,7 +85,6 @@ impl Default for Handler { impl ConnectionHandler for Handler { type FromBehaviour = Command; type ToBehaviour = Event; - type Error = Void; type InboundProtocol = DeniedUpgrade; type OutboundProtocol = ReadyUpgrade; type OutboundOpenInfo = (); @@ -129,10 +125,7 @@ impl ConnectionHandler for Handler { .expect("opened a stream without a pending command"); self.outbound.push( crate::protocol::send_receive(params, protocol) - .map_ok(move |timers| Event { - id, - result: Ok(RunStats { params, timers }), - }) + .map(move |result| (id, result)) .boxed(), ); } @@ -148,54 +141,32 @@ impl ConnectionHandler for Handler { self.queued_events .push_back(ConnectionHandlerEvent::NotifyBehaviour(Event { id, - result: Err(error), + result: Err(error.into()), })); } 
ConnectionEvent::ListenUpgradeError(ListenUpgradeError { info: (), error }) => { void::unreachable(error) } + _ => {} } } - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive - } - + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { - // Return queued events. if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); } - while let Poll::Ready(Some(result)) = self.outbound.poll_next_unpin(cx) { - match result { - Ok(event) => return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)), - Err(e) => { - panic!("{e:?}") - } - } - } - - if self.outbound.is_empty() { - match self.keep_alive { - KeepAlive::Yes => { - self.keep_alive = KeepAlive::Until(Instant::now() + Duration::from_secs(10)); - } - KeepAlive::Until(_) => {} - KeepAlive::No => panic!("Handler never sets KeepAlive::No."), - } - } else { - self.keep_alive = KeepAlive::Yes + if let Poll::Ready(Some((id, result))) = self.outbound.poll_next_unpin(cx) { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event { + id, + result: result.map_err(Into::into), + })); } Poll::Pending diff --git a/protocols/perf/src/lib.rs b/protocols/perf/src/lib.rs index b2b12244341f..f9db96aa9d9a 100644 --- a/protocols/perf/src/lib.rs +++ b/protocols/perf/src/lib.rs @@ -34,6 +34,46 @@ mod protocol; pub mod server; pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/perf/1.0.0"); +const RUN_TIMEOUT: Duration = Duration::from_secs(5 * 60); +const MAX_PARALLEL_RUNS_PER_CONNECTION: usize = 1_000; + +#[derive(Debug, Clone, Copy)] +pub enum RunUpdate { + Intermediate(Intermediate), + Final(Final), +} + +#[derive(Debug, Clone, Copy)] +pub struct Intermediate { + pub duration: Duration, + pub sent: usize, + pub received: usize, +} + +impl 
Display for Intermediate { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let Intermediate { + duration, + sent, + received, + } = self; + write!( + f, + "{:4} s uploaded {} downloaded {} ({})", + duration.as_secs_f64(), + format_bytes(*sent), + format_bytes(*received), + format_bandwidth(*duration, sent + received), + )?; + + Ok(()) + } +} + +#[derive(Debug, Clone, Copy)] +pub struct Final { + pub duration: RunDuration, +} /// Parameters for a single run, i.e. one stream, sending and receiving data. /// @@ -52,48 +92,49 @@ pub struct RunDuration { pub download: Duration, } +#[derive(Debug, Clone, Copy)] pub struct Run { pub params: RunParams, pub duration: RunDuration, } +const KILO: f64 = 1024.0; +const MEGA: f64 = KILO * 1024.0; +const GIGA: f64 = MEGA * 1024.0; + +fn format_bytes(bytes: usize) -> String { + let bytes = bytes as f64; + if bytes >= GIGA { + format!("{:.2} GiB", bytes / GIGA) + } else if bytes >= MEGA { + format!("{:.2} MiB", bytes / MEGA) + } else if bytes >= KILO { + format!("{:.2} KiB", bytes / KILO) + } else { + format!("{} B", bytes) + } +} + +fn format_bandwidth(duration: Duration, bytes: usize) -> String { + const KILO: f64 = 1024.0; + const MEGA: f64 = KILO * 1024.0; + const GIGA: f64 = MEGA * 1024.0; + + let bandwidth = (bytes as f64 * 8.0) / duration.as_secs_f64(); + + if bandwidth >= GIGA { + format!("{:.2} Gbit/s", bandwidth / GIGA) + } else if bandwidth >= MEGA { + format!("{:.2} Mbit/s", bandwidth / MEGA) + } else if bandwidth >= KILO { + format!("{:.2} Kbit/s", bandwidth / KILO) + } else { + format!("{:.2} bit/s", bandwidth) + } +} + impl Display for Run { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - const KILO: f64 = 1024.0; - const MEGA: f64 = KILO * 1024.0; - const GIGA: f64 = MEGA * 1024.0; - - fn format_bytes(bytes: usize) -> String { - let bytes = bytes as f64; - if bytes >= GIGA { - format!("{:.2} GiB", bytes / GIGA) - } else if bytes >= MEGA { - format!("{:.2} MiB", bytes / 
MEGA) - } else if bytes >= KILO { - format!("{:.2} KiB", bytes / KILO) - } else { - format!("{} B", bytes) - } - } - - fn format_bandwidth(duration: Duration, bytes: usize) -> String { - const KILO: f64 = 1024.0; - const MEGA: f64 = KILO * 1024.0; - const GIGA: f64 = MEGA * 1024.0; - - let bandwidth = (bytes as f64 * 8.0) / duration.as_secs_f64(); - - if bandwidth >= GIGA { - format!("{:.2} Gbit/s", bandwidth / GIGA) - } else if bandwidth >= MEGA { - format!("{:.2} Mbit/s", bandwidth / MEGA) - } else if bandwidth >= KILO { - format!("{:.2} Kbit/s", bandwidth / KILO) - } else { - format!("{:.2} bit/s", bandwidth) - } - } - let Run { params: RunParams { to_send, diff --git a/protocols/perf/src/protocol.rs b/protocols/perf/src/protocol.rs index 4b454fb88b78..d2d65b423030 100644 --- a/protocols/perf/src/protocol.rs +++ b/protocols/perf/src/protocol.rs @@ -18,184 +18,184 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use async_trait::async_trait; -use futures::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; +use futures_timer::Delay; use instant::Instant; -use libp2p_request_response as request_response; -use libp2p_swarm::StreamProtocol; -use std::io; - -use crate::{RunDuration, RunParams}; - -const BUF: [u8; 65536] = [0; 64 << 10]; - -#[derive(Debug)] -pub enum Response { - Sender(usize), - Receiver(RunDuration), -} - -#[derive(Default)] -pub struct Codec { - to_receive: Option, - - write_start: Option, - read_start: Option, - read_done: Option, +use std::time::Duration; + +use futures::{ + future::{select, Either}, + AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt, Stream, StreamExt, +}; + +use crate::{Final, Intermediate, Run, RunDuration, RunParams, RunUpdate}; + +const BUF: [u8; 1024] = [0; 1024]; +const REPORT_INTERVAL: Duration = Duration::from_secs(1); + +pub(crate) fn send_receive( + params: RunParams, + stream: S, +) -> impl Stream> { + // Use a channel to simulate a 
generator. `send_receive_inner` can `yield` events through the + // channel. + let (sender, receiver) = futures::channel::mpsc::channel(0); + let receiver = receiver.fuse(); + let inner = send_receive_inner(params, stream, sender).fuse(); + + futures::stream::select( + receiver.map(|progressed| Ok(RunUpdate::Intermediate(progressed))), + inner + .map(|finished| finished.map(RunUpdate::Final)) + .into_stream(), + ) } -impl Clone for Codec { - fn clone(&self) -> Self { - Default::default() +async fn send_receive_inner( + params: RunParams, + mut stream: S, + mut progress: futures::channel::mpsc::Sender, +) -> Result { + let mut delay = Delay::new(REPORT_INTERVAL); + + let RunParams { + to_send, + to_receive, + } = params; + + let mut receive_buf = vec![0; 1024]; + let to_receive_bytes = (to_receive as u64).to_be_bytes(); + stream.write_all(&to_receive_bytes).await?; + + let write_start = Instant::now(); + let mut intermittant_start = Instant::now(); + let mut sent = 0; + let mut intermittent_sent = 0; + + while sent < to_send { + let n = std::cmp::min(to_send - sent, BUF.len()); + let buf = &BUF[..n]; + + let mut write = stream.write(buf); + sent += loop { + match select(&mut delay, &mut write).await { + Either::Left((_, _)) => { + delay.reset(REPORT_INTERVAL); + progress + .send(Intermediate { + duration: intermittant_start.elapsed(), + sent: sent - intermittent_sent, + received: 0, + }) + .await + .expect("receiver not to be dropped"); + intermittant_start = Instant::now(); + intermittent_sent = sent; + } + Either::Right((n, _)) => break n?, + } + } } -} -#[async_trait] -impl request_response::Codec for Codec { - /// The type of protocol(s) or protocol versions being negotiated. - type Protocol = StreamProtocol; - /// The type of inbound and outbound requests. - type Request = RunParams; - /// The type of inbound and outbound responses. - type Response = Response; - - /// Reads a request from the given I/O stream according to the - /// negotiated protocol. 
- async fn read_request(&mut self, _: &Self::Protocol, io: &mut T) -> io::Result - where - T: AsyncRead + Unpin + Send, - { - let mut receive_buf = vec![0; 64 << 10]; - - let to_send = { - let mut buf = [0; 8]; - io.read_exact(&mut buf).await?; - - u64::from_be_bytes(buf) as usize - }; - - let mut received = 0; - loop { - let n = io.read(&mut receive_buf).await?; - received += n; - if n == 0 { - break; + loop { + match select(&mut delay, stream.close()).await { + Either::Left((_, _)) => { + delay.reset(REPORT_INTERVAL); + progress + .send(Intermediate { + duration: intermittant_start.elapsed(), + sent: sent - intermittent_sent, + received: 0, + }) + .await + .expect("receiver not to be dropped"); + intermittant_start = Instant::now(); + intermittent_sent = sent; } + Either::Right((Ok(_), _)) => break, + Either::Right((Err(e), _)) => return Err(e), } - - Ok(RunParams { - to_receive: received, - to_send, - }) } - /// Reads a response from the given I/O stream according to the - /// negotiated protocol. - async fn read_response( - &mut self, - _: &Self::Protocol, - io: &mut T, - ) -> io::Result - where - T: AsyncRead + Unpin + Send, - { - assert!(self.write_start.is_some()); - assert_eq!(self.read_start, None); - assert_eq!(self.read_done, None); - - self.read_start = Some(Instant::now()); - - let mut receive_buf = vec![0; 64 << 10]; - - let mut received = 0; - loop { - let n = io.read(&mut receive_buf).await?; - received += n; - // Make sure to wait for the remote to close the stream. Otherwise with `to_receive` of `0` - // one does not measure the full round-trip of the previous write. 
- if n == 0 { - break; + let write_done = Instant::now(); + let mut received = 0; + let mut intermittend_received = 0; + + while received < to_receive { + let mut read = stream.read(&mut receive_buf); + received += loop { + match select(&mut delay, &mut read).await { + Either::Left((_, _)) => { + delay.reset(REPORT_INTERVAL); + progress + .send(Intermediate { + duration: intermittant_start.elapsed(), + sent: sent - intermittent_sent, + received: received - intermittend_received, + }) + .await + .expect("receiver not to be dropped"); + intermittant_start = Instant::now(); + intermittent_sent = sent; + intermittend_received = received; + } + Either::Right((n, _)) => break n?, } } + } - self.read_done = Some(Instant::now()); + let read_done = Instant::now(); - assert_eq!(received, self.to_receive.unwrap()); + Ok(Final { + duration: RunDuration { + upload: write_done.duration_since(write_start), + download: read_done.duration_since(write_done), + }, + }) +} - Ok(Response::Receiver(RunDuration { - upload: self - .read_start - .unwrap() - .duration_since(self.write_start.unwrap()), - download: self - .read_done - .unwrap() - .duration_since(self.read_start.unwrap()), - })) +pub(crate) async fn receive_send( + mut stream: S, +) -> Result { + let to_send = { + let mut buf = [0; 8]; + stream.read_exact(&mut buf).await?; + + u64::from_be_bytes(buf) as usize + }; + + let read_start = Instant::now(); + + let mut receive_buf = vec![0; 1024]; + let mut received = 0; + loop { + let n = stream.read(&mut receive_buf).await?; + received += n; + if n == 0 { + break; + } } - /// Writes a request to the given I/O stream according to the - /// negotiated protocol. 
- async fn write_request( - &mut self, - _: &Self::Protocol, - io: &mut T, - req: Self::Request, - ) -> io::Result<()> - where - T: AsyncWrite + Unpin + Send, - { - assert_eq!(self.to_receive, None); - assert_eq!(self.write_start, None); - assert_eq!(self.read_start, None); - assert_eq!(self.read_done, None); - - self.write_start = Some(Instant::now()); - - let RunParams { - to_send, - to_receive, - } = req; - - self.to_receive = Some(to_receive); - - io.write_all(&(to_receive as u64).to_be_bytes()).await?; - - let mut sent = 0; - while sent < to_send { - let n = std::cmp::min(to_send - sent, BUF.len()); - let buf = &BUF[..n]; - - sent += io.write(buf).await?; - } + let read_done = Instant::now(); - Ok(()) + let mut sent = 0; + while sent < to_send { + let n = std::cmp::min(to_send - sent, BUF.len()); + let buf = &BUF[..n]; + + sent += stream.write(buf).await?; } - /// Writes a response to the given I/O stream according to the - /// negotiated protocol. - async fn write_response( - &mut self, - _: &Self::Protocol, - io: &mut T, - response: Self::Response, - ) -> io::Result<()> - where - T: AsyncWrite + Unpin + Send, - { - let to_send = match response { - Response::Sender(to_send) => to_send, - Response::Receiver(_) => unreachable!(), - }; - - let mut sent = 0; - while sent < to_send { - let n = std::cmp::min(to_send - sent, BUF.len()); - let buf = &BUF[..n]; - - sent += io.write(buf).await?; - } + stream.close().await?; + let write_done = Instant::now(); - Ok(()) - } + Ok(Run { + params: RunParams { + to_send: sent, + to_receive: received, + }, + duration: RunDuration { + upload: write_done.duration_since(read_done), + download: read_done.duration_since(read_start), + }, + }) } diff --git a/protocols/perf/src/server.rs b/protocols/perf/src/server.rs index 79f77c746503..9671b43878bc 100644 --- a/protocols/perf/src/server.rs +++ b/protocols/perf/src/server.rs @@ -18,150 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE 
SOFTWARE. -use std::task::{Context, Poll}; +mod behaviour; +mod handler; -use instant::Duration; -use libp2p_core::Multiaddr; -use libp2p_identity::PeerId; -use libp2p_request_response as request_response; -use libp2p_swarm::{ - ConnectionId, FromSwarm, NetworkBehaviour, PollParameters, THandlerInEvent, THandlerOutEvent, - ToSwarm, -}; - -use crate::protocol::Response; - -pub struct Behaviour { - request_response: request_response::Behaviour, -} - -impl Default for Behaviour { - fn default() -> Self { - let mut req_resp_config = request_response::Config::default(); - req_resp_config.set_connection_keep_alive(Duration::from_secs(60 * 5)); - req_resp_config.set_request_timeout(Duration::from_secs(60 * 5)); - - Self { - request_response: request_response::Behaviour::new( - std::iter::once(( - crate::PROTOCOL_NAME, - request_response::ProtocolSupport::Inbound, - )), - req_resp_config, - ), - } - } -} - -impl Behaviour { - pub fn new() -> Self { - Self::default() - } -} - -impl NetworkBehaviour for Behaviour { - type ConnectionHandler = - as NetworkBehaviour>::ConnectionHandler; - type ToSwarm = (); - - fn handle_pending_outbound_connection( - &mut self, - connection_id: ConnectionId, - maybe_peer: Option, - addresses: &[Multiaddr], - effective_role: libp2p_core::Endpoint, - ) -> Result, libp2p_swarm::ConnectionDenied> { - self.request_response.handle_pending_outbound_connection( - connection_id, - maybe_peer, - addresses, - effective_role, - ) - } - - fn handle_established_outbound_connection( - &mut self, - connection_id: ConnectionId, - peer: PeerId, - addr: &Multiaddr, - role_override: libp2p_core::Endpoint, - ) -> Result, libp2p_swarm::ConnectionDenied> { - self.request_response - .handle_established_outbound_connection(connection_id, peer, addr, role_override) - } - - fn handle_pending_inbound_connection( - &mut self, - connection_id: ConnectionId, - local_addr: &Multiaddr, - remote_addr: &Multiaddr, - ) -> Result<(), libp2p_swarm::ConnectionDenied> { - 
self.request_response.handle_pending_inbound_connection( - connection_id, - local_addr, - remote_addr, - ) - } - - fn handle_established_inbound_connection( - &mut self, - connection_id: ConnectionId, - peer: PeerId, - local_addr: &Multiaddr, - remote_addr: &Multiaddr, - ) -> Result, libp2p_swarm::ConnectionDenied> { - self.request_response.handle_established_inbound_connection( - connection_id, - peer, - local_addr, - remote_addr, - ) - } - - fn on_swarm_event(&mut self, event: FromSwarm) { - self.request_response.on_swarm_event(event); - } - - fn on_connection_handler_event( - &mut self, - peer_id: PeerId, - connection_id: ConnectionId, - event: THandlerOutEvent, - ) { - self.request_response - .on_connection_handler_event(peer_id, connection_id, event); - } - - fn poll( - &mut self, - cx: &mut Context<'_>, - params: &mut impl PollParameters, - ) -> Poll>> { - self.request_response.poll(cx, params).map(|to_swarm| { - to_swarm.map_out(|m| match m { - request_response::Event::Message { - peer: _, - message: request_response::Message::Response { .. }, - } => { - unreachable!() - } - request_response::Event::Message { - peer: _, - message: - request_response::Message::Request { - request_id: _, - request, - channel, - }, - } => { - let _ = self - .request_response - .send_response(channel, Response::Sender(request.to_send)); - } - request_response::Event::OutboundFailure { .. } => unreachable!(), - request_response::Event::InboundFailure { .. } => {} - request_response::Event::ResponseSent { .. 
} => {} - }) - }) - } -} +pub use behaviour::{Behaviour, Event}; diff --git a/protocols/perf/src/server/behaviour.rs b/protocols/perf/src/server/behaviour.rs index b15cb70110d8..da24d7636065 100644 --- a/protocols/perf/src/server/behaviour.rs +++ b/protocols/perf/src/server/behaviour.rs @@ -27,18 +27,16 @@ use std::{ use libp2p_identity::PeerId; use libp2p_swarm::{ - ConnectionId, FromSwarm, NetworkBehaviour, PollParameters, THandlerInEvent, THandlerOutEvent, - ToSwarm, + ConnectionId, FromSwarm, NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use crate::server::handler::Handler; - -use super::RunStats; +use crate::Run; #[derive(Debug)] pub struct Event { pub remote_peer_id: PeerId, - pub stats: RunStats, + pub stats: Run, } #[derive(Default)] @@ -77,23 +75,7 @@ impl NetworkBehaviour for Behaviour { Ok(Handler::default()) } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) => {} - FromSwarm::ConnectionClosed(_) => {} - FromSwarm::AddressChange(_) => {} - FromSwarm::DialFailure(_) => {} - FromSwarm::ListenFailure(_) => {} - FromSwarm::NewListener(_) => {} - FromSwarm::NewListenAddr(_) => {} - FromSwarm::ExpiredListenAddr(_) => {} - FromSwarm::ListenerError(_) => {} - FromSwarm::ListenerClosed(_) => {} - FromSwarm::NewExternalAddrCandidate(_) => {} - FromSwarm::ExternalAddrExpired(_) => {} - FromSwarm::ExternalAddrConfirmed(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} fn on_connection_handler_event( &mut self, @@ -107,11 +89,8 @@ impl NetworkBehaviour for Behaviour { })) } - fn poll( - &mut self, - _cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); } diff --git a/protocols/perf/src/server/handler.rs 
b/protocols/perf/src/server/handler.rs index e8f7b72e605e..ddfe8f881e5b 100644 --- a/protocols/perf/src/server/handler.rs +++ b/protocols/perf/src/server/handler.rs @@ -18,40 +18,38 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::{ - task::{Context, Poll}, - time::{Duration, Instant}, -}; +use std::task::{Context, Poll}; -use futures::{future::BoxFuture, stream::FuturesUnordered, FutureExt, StreamExt}; +use futures::FutureExt; use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; use libp2p_swarm::{ handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, }, - ConnectionHandler, ConnectionHandlerEvent, KeepAlive, StreamProtocol, SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; -use log::error; +use tracing::error; use void::Void; -use super::RunStats; +use crate::Run; #[derive(Debug)] pub struct Event { - pub stats: RunStats, + pub stats: Run, } pub struct Handler { - inbound: FuturesUnordered>>, - keep_alive: KeepAlive, + inbound: futures_bounded::FuturesSet>, } impl Handler { pub fn new() -> Self { Self { - inbound: Default::default(), - keep_alive: KeepAlive::Yes, + inbound: futures_bounded::FuturesSet::new( + crate::RUN_TIMEOUT, + crate::MAX_PARALLEL_RUNS_PER_CONNECTION, + ), } } } @@ -65,7 +63,6 @@ impl Default for Handler { impl ConnectionHandler for Handler { type FromBehaviour = Void; type ToBehaviour = Event; - type Error = Void; type InboundProtocol = ReadyUpgrade; type OutboundProtocol = DeniedUpgrade; type OutboundOpenInfo = Void; @@ -93,8 +90,13 @@ impl ConnectionHandler for Handler { protocol, info: _, }) => { - self.inbound - .push(crate::protocol::receive_send(protocol).boxed()); + if self + .inbound + .try_push(crate::protocol::receive_send(protocol).boxed()) + .is_err() + { + tracing::warn!("Dropping inbound stream because we are at capacity"); + } } 
ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { info, .. }) => { void::unreachable(info) @@ -109,47 +111,34 @@ impl ConnectionHandler for Handler { ConnectionEvent::ListenUpgradeError(ListenUpgradeError { info: (), error }) => { void::unreachable(error) } + _ => {} } } - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive - } - + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { - while let Poll::Ready(Some(result)) = self.inbound.poll_next_unpin(cx) { - match result { - Ok(stats) => { + loop { + match self.inbound.poll_unpin(cx) { + Poll::Ready(Ok(Ok(stats))) => { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event { stats })) } - Err(e) => { - error!("{e:?}") + Poll::Ready(Ok(Err(e))) => { + error!("{e:?}"); + continue; } - } - } - - if self.inbound.is_empty() { - match self.keep_alive { - KeepAlive::Yes => { - self.keep_alive = KeepAlive::Until(Instant::now() + Duration::from_secs(10)); + Poll::Ready(Err(e @ futures_bounded::Timeout { .. })) => { + error!("inbound perf request timed out: {e}"); + continue; } - KeepAlive::Until(_) => {} - KeepAlive::No => panic!("Handler never sets KeepAlive::No."), + Poll::Pending => {} } - } else { - self.keep_alive = KeepAlive::Yes - } - Poll::Pending + return Poll::Pending; + } } } diff --git a/protocols/perf/tests/lib.rs b/protocols/perf/tests/lib.rs index af5bc2c35a25..017d475befd4 100644 --- a/protocols/perf/tests/lib.rs +++ b/protocols/perf/tests/lib.rs @@ -18,22 +18,28 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use libp2p_perf::{client, server, RunParams}; +use libp2p_perf::{ + client::{self}, + server, RunParams, +}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn perf() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut server = Swarm::new_ephemeral(|_| server::Behaviour::new()); let server_peer_id = *server.local_peer_id(); let mut client = Swarm::new_ephemeral(|_| client::Behaviour::new()); - server.listen().await; + server.listen().with_memory_addr_external().await; client.connect(&mut server).await; - tokio::spawn(server.loop_on_next()); + tokio::task::spawn(server.loop_on_next()); client .behaviour_mut() diff --git a/protocols/ping/CHANGELOG.md b/protocols/ping/CHANGELOG.md index db68d3751e7d..33e0139b9965 100644 --- a/protocols/ping/CHANGELOG.md +++ b/protocols/ping/CHANGELOG.md @@ -1,3 +1,6 @@ +## 0.44.0 + + ## 0.43.1 - Honor ping interval in case of errors. 
diff --git a/protocols/ping/Cargo.toml b/protocols/ping/Cargo.toml index a15407b28401..a4d9259e9aaa 100644 --- a/protocols/ping/Cargo.toml +++ b/protocols/ping/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-ping" edition = "2021" rust-version = { workspace = true } description = "Ping protocol for libp2p" -version = "0.43.1" +version = "0.44.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,22 +12,22 @@ categories = ["network-programming", "asynchronous"] [dependencies] either = "1.9.0" -futures = "0.3.28" +futures = "0.3.30" futures-timer = "3.0.2" instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" rand = "0.8" +tracing = "0.1.37" void = "1.0" [dev-dependencies] async-std = "1.6.2" -env_logger = "0.10.0" libp2p-swarm = { workspace = true, features = ["macros"] } libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs index 522663196e6f..5e6fc2cd2cf7 100644 --- a/protocols/ping/src/handler.rs +++ b/protocols/ping/src/handler.rs @@ -23,13 +23,12 @@ use futures::future::{BoxFuture, Either}; use futures::prelude::*; use futures_timer::Delay; use libp2p_core::upgrade::ReadyUpgrade; -use libp2p_identity::PeerId; use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, }; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, KeepAlive, Stream, StreamProtocol, - StreamUpgradeError, SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, + SubstreamProtocol, }; use std::collections::VecDeque; use std::{ @@ -147,8 +146,6 @@ pub struct Handler { inbound: Option, /// Tracks the state of our handler. state: State, - /// The peer we are connected to. - peer: PeerId, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -166,9 +163,8 @@ enum State { impl Handler { /// Builds a new [`Handler`] with the given configuration. 
- pub fn new(config: Config, peer: PeerId) -> Self { + pub fn new(config: Config) -> Self { Handler { - peer, config, interval: Delay::new(Duration::new(0, 0)), pending_errors: VecDeque::with_capacity(2), @@ -213,7 +209,6 @@ impl Handler { impl ConnectionHandler for Handler { type FromBehaviour = Void; type ToBehaviour = Result; - type Error = Void; type InboundProtocol = ReadyUpgrade; type OutboundProtocol = ReadyUpgrade; type OutboundOpenInfo = (); @@ -225,21 +220,12 @@ impl ConnectionHandler for Handler { fn on_behaviour_event(&mut self, _: Void) {} - fn connection_keep_alive(&self) -> KeepAlive { - KeepAlive::No - } - + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent< - ReadyUpgrade, - (), - Result, - Self::Error, - >, - > { + ) -> Poll, (), Result>> + { match self.state { State::Inactive { reported: true } => { return Poll::Pending; // nothing to do on this connection @@ -258,11 +244,11 @@ impl ConnectionHandler for Handler { match fut.poll_unpin(cx) { Poll::Pending => {} Poll::Ready(Err(e)) => { - log::debug!("Inbound ping error: {:?}", e); + tracing::debug!("Inbound ping error: {:?}", e); self.inbound = None; } Poll::Ready(Ok(stream)) => { - log::trace!("answered inbound ping from {}", self.peer); + tracing::trace!("answered inbound ping from peer"); // A ping from a remote peer has been answered, wait for the next. self.inbound = Some(protocol::recv_ping(stream).boxed()); @@ -273,7 +259,7 @@ impl ConnectionHandler for Handler { loop { // Check for outbound ping failures. 
if let Some(error) = self.pending_errors.pop_back() { - log::debug!("Ping failure: {:?}", error); + tracing::debug!("Ping failure: {:?}", error); self.failures += 1; @@ -295,8 +281,7 @@ impl ConnectionHandler for Handler { break; } Poll::Ready(Ok((stream, rtt))) => { - log::debug!("latency to {} is {}ms", self.peer, rtt.as_millis()); - + tracing::debug!(?rtt, "ping succeeded"); self.failures = 0; self.interval.reset(self.config.interval); self.outbound = Some(OutboundState::Idle(stream)); @@ -349,15 +334,17 @@ impl ConnectionHandler for Handler { ) { match event { ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { - protocol: stream, + protocol: mut stream, .. }) => { + stream.ignore_for_keep_alive(); self.inbound = Some(protocol::recv_ping(stream).boxed()); } ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { - protocol: stream, + protocol: mut stream, .. }) => { + stream.ignore_for_keep_alive(); self.outbound = Some(OutboundState::Ping( send_ping(stream, self.config.timeout).boxed(), )); @@ -365,10 +352,7 @@ impl ConnectionHandler for Handler { ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { self.on_dial_upgrade_error(dial_upgrade_error) } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::ListenUpgradeError(_) - | ConnectionEvent::LocalProtocolsChange(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} + _ => {} } } } diff --git a/protocols/ping/src/lib.rs b/protocols/ping/src/lib.rs index d1c4a2facaf0..5eaa6d4952a0 100644 --- a/protocols/ping/src/lib.rs +++ b/protocols/ping/src/lib.rs @@ -54,8 +54,8 @@ use handler::Handler; use libp2p_core::{Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ - behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, PollParameters, - THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::time::Duration; 
use std::{ @@ -111,21 +111,21 @@ impl NetworkBehaviour for Behaviour { fn handle_established_inbound_connection( &mut self, _: ConnectionId, - peer: PeerId, + _: PeerId, _: &Multiaddr, _: &Multiaddr, ) -> Result, ConnectionDenied> { - Ok(Handler::new(self.config.clone(), peer)) + Ok(Handler::new(self.config.clone())) } fn handle_established_outbound_connection( &mut self, _: ConnectionId, - peer: PeerId, + _: PeerId, _: &Multiaddr, _: Endpoint, ) -> Result, ConnectionDenied> { - Ok(Handler::new(self.config.clone(), peer)) + Ok(Handler::new(self.config.clone())) } fn on_connection_handler_event( @@ -141,11 +141,8 @@ impl NetworkBehaviour for Behaviour { }) } - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(e) = self.events.pop_back() { Poll::Ready(ToSwarm::GenerateEvent(e)) } else { @@ -153,21 +150,5 @@ impl NetworkBehaviour for Behaviour { } } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} } diff --git a/protocols/ping/tests/ping.rs b/protocols/ping/tests/ping.rs index 946a2daadb61..3ca469f16a84 100644 --- a/protocols/ping/tests/ping.rs +++ b/protocols/ping/tests/ping.rs @@ -36,7 +36,7 @@ fn ping_pong() { let mut swarm2 = Swarm::new_ephemeral(|_| ping::Behaviour::new(cfg.clone())); async_std::task::block_on(async { - swarm1.listen().await; + 
swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; for _ in 0..count.get() { @@ -67,7 +67,7 @@ fn unsupported_doesnt_fail() { let mut swarm2 = Swarm::new_ephemeral(|_| ping::Behaviour::new(ping::Config::new())); let result = async_std::task::block_on(async { - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; let swarm1_peer_id = *swarm1.local_peer_id(); async_std::task::spawn(swarm1.loop_on_next()); diff --git a/protocols/relay/CHANGELOG.md b/protocols/relay/CHANGELOG.md index 6af89e25d712..aaade5e48f9e 100644 --- a/protocols/relay/CHANGELOG.md +++ b/protocols/relay/CHANGELOG.md @@ -1,3 +1,34 @@ +## 0.17.1 + +- Automatically register relayed addresses as external addresses. + See [PR 4809](https://github.com/libp2p/rust-libp2p/pull/4809). +- Fix an error where performing too many reservations at once could lead to inconsistent internal state. + See [PR 4841](https://github.com/libp2p/rust-libp2p/pull/4841). + +## 0.17.0 +- Don't close connections on protocol failures within the relay-server. + To achieve this, error handling was restructured: + - `libp2p::relay::outbound::stop::FatalUpgradeError` has been removed. + - `libp2p::relay::outbound::stop::{Error, ProtocolViolation}` have been introduced. + - Several variants of `libp2p::relay::Event` have been deprecated. + + See [PR 4718](https://github.com/libp2p/rust-libp2p/pull/4718). +- Fix a rare race condition when making a reservation on a relay that could lead to a failed reservation. + See [PR 4747](https://github.com/libp2p/rust-libp2p/pull/4747). +- Propagate errors of relay client to the listener / dialer. + A failed reservation will now appear as `SwarmEvent::ListenerClosed` with the `ListenerId` of the corresponding `Swarm::listen_on` call. + A failed circuit request will now appear as `SwarmEvent::OutgoingConnectionError` with the `ConnectionId` of the corresponding `Swarm::dial` call. 
+ Lastly, a failed reservation or circuit request will **no longer** close the underlying relay connection. + As a result, we remove the following enum variants: + - `relay::client::Event::ReservationReqFailed` + - `relay::client::Event::OutboundCircuitReqFailed` + - `relay::client::Event::InboundCircuitReqDenied` + - `relay::client::Event::InboundCircuitReqDenyFailed` + + See [PR 4745](https://github.com/libp2p/rust-libp2p/pull/4745). + +## 0.16.2 + ## 0.16.1 - Export `RateLimiter` type. diff --git a/protocols/relay/Cargo.toml b/protocols/relay/Cargo.toml index 03799a8c77cf..94b9deb1a645 100644 --- a/protocols/relay/Cargo.toml +++ b/protocols/relay/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-relay" edition = "2021" rust-version = { workspace = true } description = "Communications relaying for libp2p" -version = "0.16.1" +version = "0.17.1" authors = ["Parity Technologies ", "Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,31 +11,34 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -asynchronous-codec = "0.6" +asynchronous-codec = { workspace = true } bytes = "1" either = "1.9.0" -futures = "0.3.28" +futures = "0.3.30" futures-timer = "3" futures-bounded = { workspace = true } instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8.4" static_assertions = "1" thiserror = "1.0" +tracing = "0.1.37" void = "1" [dev-dependencies] -env_logger = "0.10.0" +libp2p-identity = { workspace = true, features = ["rand"] } libp2p-ping = { workspace = true } libp2p-plaintext = { workspace = true } libp2p-swarm = { workspace = true, features = ["macros", "async-std"] } +libp2p-swarm-test = { workspace = true } libp2p-yamux = { workspace = true } quickcheck = { workspace = true } +tracing-subscriber = { version 
= "0.3", features = ["env-filter"] } + # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/relay/src/behaviour.rs b/protocols/relay/src/behaviour.rs index 9e49b9cea086..df8443e83595 100644 --- a/protocols/relay/src/behaviour.rs +++ b/protocols/relay/src/behaviour.rs @@ -34,7 +34,7 @@ use libp2p_identity::PeerId; use libp2p_swarm::behaviour::{ConnectionClosed, FromSwarm}; use libp2p_swarm::{ dummy, ConnectionDenied, ConnectionId, ExternalAddresses, NetworkBehaviour, NotifyHandler, - PollParameters, StreamUpgradeError, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::collections::{hash_map, HashMap, HashSet, VecDeque}; use std::num::NonZeroU32; @@ -169,16 +169,22 @@ pub enum Event { renewed: bool, }, /// Accepting an inbound reservation request failed. + #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." + )] ReservationReqAcceptFailed { src_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An inbound reservation request has been denied. ReservationReqDenied { src_peer_id: PeerId }, /// Denying an inbound reservation request has failed. + #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." + )] ReservationReqDenyFailed { src_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An inbound reservation has timed out. ReservationTimedOut { src_peer_id: PeerId }, @@ -188,27 +194,36 @@ pub enum Event { dst_peer_id: PeerId, }, /// Denying an inbound circuit request failed. + #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." 
+ )] CircuitReqDenyFailed { src_peer_id: PeerId, dst_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, - /// An inbound cirucit request has been accepted. + /// An inbound circuit request has been accepted. CircuitReqAccepted { src_peer_id: PeerId, dst_peer_id: PeerId, }, - /// An outbound connect for an inbound cirucit request failed. + /// An outbound connect for an inbound circuit request failed. + #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." + )] CircuitReqOutboundConnectFailed { src_peer_id: PeerId, dst_peer_id: PeerId, - error: StreamUpgradeError, + error: outbound_stop::Error, }, /// Accepting an inbound circuit request failed. + #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." + )] CircuitReqAcceptFailed { src_peer_id: PeerId, dst_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An inbound circuit has closed. CircuitClosed { @@ -252,7 +267,7 @@ impl Behaviour { peer_id, connection_id, .. 
- }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { if let hash_map::Entry::Occupied(mut peer) = self.reservations.entry(peer_id) { peer.get_mut().remove(&connection_id); @@ -332,25 +347,11 @@ impl NetworkBehaviour for Behaviour { ))) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { self.external_addresses.on_swarm_event(&event); - match event { - FromSwarm::ConnectionClosed(connection_closed) => { - self.on_connection_closed(connection_closed) - } - FromSwarm::ConnectionEstablished(_) - | FromSwarm::DialFailure(_) - | FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} + if let FromSwarm::ConnectionClosed(connection_closed) = event { + self.on_connection_closed(connection_closed) } } @@ -455,6 +456,7 @@ impl NetworkBehaviour for Behaviour { )); } handler::Event::ReservationReqAcceptFailed { error } => { + #[allow(deprecated)] self.queued_actions.push_back(ToSwarm::GenerateEvent( Event::ReservationReqAcceptFailed { src_peer_id: event_source, @@ -470,6 +472,7 @@ impl NetworkBehaviour for Behaviour { )); } handler::Event::ReservationReqDenyFailed { error } => { + #[allow(deprecated)] self.queued_actions.push_back(ToSwarm::GenerateEvent( Event::ReservationReqDenyFailed { src_peer_id: event_source, @@ -592,6 +595,7 @@ impl NetworkBehaviour for Behaviour { self.circuits.remove(circuit_id); } + #[allow(deprecated)] self.queued_actions.push_back(ToSwarm::GenerateEvent( Event::CircuitReqDenyFailed { src_peer_id: event_source, @@ -605,7 +609,6 @@ impl NetworkBehaviour for Behaviour { src_peer_id, src_connection_id, inbound_circuit_req, - dst_handler_notifier, dst_stream, dst_pending_data, } => { @@ -616,7 
+619,6 @@ impl NetworkBehaviour for Behaviour { circuit_id, dst_peer_id: event_source, inbound_circuit_req, - dst_handler_notifier, dst_stream, dst_pending_data, }), @@ -639,6 +641,7 @@ impl NetworkBehaviour for Behaviour { status, }), }); + #[allow(deprecated)] self.queued_actions.push_back(ToSwarm::GenerateEvent( Event::CircuitReqOutboundConnectFailed { src_peer_id, @@ -664,6 +667,7 @@ impl NetworkBehaviour for Behaviour { error, } => { self.circuits.remove(circuit_id); + #[allow(deprecated)] self.queued_actions.push_back(ToSwarm::GenerateEvent( Event::CircuitReqAcceptFailed { src_peer_id: event_source, @@ -689,11 +693,8 @@ impl NetworkBehaviour for Behaviour { } } - fn poll( - &mut self, - _cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(to_swarm) = self.queued_actions.pop_front() { return Poll::Ready(to_swarm); } diff --git a/protocols/relay/src/behaviour/handler.rs b/protocols/relay/src/behaviour/handler.rs index 895228e807b8..958c6a9b9062 100644 --- a/protocols/relay/src/behaviour/handler.rs +++ b/protocols/relay/src/behaviour/handler.rs @@ -24,7 +24,6 @@ use crate::protocol::{inbound_hop, outbound_stop}; use crate::{proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; use bytes::Bytes; use either::Either; -use futures::channel::oneshot::{self, Canceled}; use futures::future::{BoxFuture, FutureExt, TryFutureExt}; use futures::io::AsyncWriteExt; use futures::stream::{FuturesUnordered, StreamExt}; @@ -37,13 +36,13 @@ use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, }; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionId, KeepAlive, Stream, StreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, ConnectionId, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; -use 
std::collections::VecDeque; -use std::fmt; +use std::collections::{HashMap, VecDeque}; use std::task::{Context, Poll}; use std::time::Duration; +use std::{fmt, io}; const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; const STREAM_TIMEOUT: Duration = Duration::from_secs(60); @@ -79,7 +78,6 @@ pub enum In { circuit_id: CircuitId, dst_peer_id: PeerId, inbound_circuit_req: inbound_hop::CircuitReq, - dst_handler_notifier: oneshot::Sender<()>, dst_stream: Stream, dst_pending_data: Bytes, }, @@ -126,7 +124,6 @@ impl fmt::Debug for In { circuit_id, inbound_circuit_req: _, dst_peer_id, - dst_handler_notifier: _, dst_stream: _, dst_pending_data: _, } => f @@ -154,11 +151,11 @@ pub enum Event { renewed: bool, }, /// Accepting an inbound reservation request failed. - ReservationReqAcceptFailed { error: inbound_hop::UpgradeError }, + ReservationReqAcceptFailed { error: inbound_hop::Error }, /// An inbound reservation request has been denied. ReservationReqDenied {}, /// Denying an inbound reservation request has failed. - ReservationReqDenyFailed { error: inbound_hop::UpgradeError }, + ReservationReqDenyFailed { error: inbound_hop::Error }, /// An inbound reservation has timed out. ReservationTimedOut {}, /// An inbound circuit request has been received. @@ -175,7 +172,7 @@ pub enum Event { CircuitReqDenyFailed { circuit_id: Option, dst_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An inbound circuit request has been accepted. CircuitReqAccepted { @@ -186,7 +183,7 @@ pub enum Event { CircuitReqAcceptFailed { circuit_id: CircuitId, dst_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An outbound substream for an inbound circuit request has been /// negotiated. 
@@ -195,7 +192,6 @@ pub enum Event { src_peer_id: PeerId, src_connection_id: ConnectionId, inbound_circuit_req: inbound_hop::CircuitReq, - dst_handler_notifier: oneshot::Sender<()>, dst_stream: Stream, dst_pending_data: Bytes, }, @@ -206,7 +202,7 @@ pub enum Event { src_connection_id: ConnectionId, inbound_circuit_req: inbound_hop::CircuitReq, status: proto::Status, - error: StreamUpgradeError, + error: outbound_stop::Error, }, /// An inbound circuit has closed. CircuitClosed { @@ -292,7 +288,6 @@ impl fmt::Debug for Event { src_peer_id, src_connection_id, inbound_circuit_req: _, - dst_handler_notifier: _, dst_stream: _, dst_pending_data: _, } => f @@ -344,19 +339,11 @@ pub struct Handler { ::OutboundProtocol, ::OutboundOpenInfo, ::ToBehaviour, - ::Error, >, >, - /// A pending fatal error that results in the connection being closed. - pending_error: Option< - StreamUpgradeError< - Either, - >, - >, - - /// Until when to keep the connection alive. - keep_alive: KeepAlive, + /// The point in time when this connection started idleing. + idle_at: Option, /// Future handling inbound reservation request. reservation_request_future: Option, @@ -364,98 +351,91 @@ pub struct Handler { active_reservation: Option, /// Futures accepting an inbound circuit request. - circuit_accept_futures: - Futures>, + circuit_accept_futures: Futures>, /// Futures denying an inbound circuit request. - circuit_deny_futures: Futures<( - Option, - PeerId, - Result<(), inbound_hop::UpgradeError>, - )>, - /// Tracks substreams lend out to other [`Handler`]s. - /// - /// Contains a [`futures::future::Future`] for each lend out substream that - /// resolves once the substream is dropped. - /// - /// Once all substreams are dropped and this handler has no other work, - /// [`KeepAlive::Until`] can be set, allowing the connection to be closed - /// eventually. 
- alive_lend_out_substreams: FuturesUnordered>, + circuit_deny_futures: Futures<(Option, PeerId, Result<(), inbound_hop::Error>)>, /// Futures relaying data for circuit between two peers. circuits: Futures<(CircuitId, PeerId, Result<(), std::io::Error>)>, - pending_connect_requests: VecDeque, - - workers: futures_bounded::FuturesSet< - Either< - Result< - Either, - inbound_hop::FatalUpgradeError, - >, - Result< - Result, - outbound_stop::FatalUpgradeError, - >, - >, + /// We issue a stream upgrade for each [`PendingConnect`] request. + pending_connect_requests: VecDeque, + + /// A `CONNECT` request is in flight for these circuits. + active_connect_requests: HashMap, + + inbound_workers: futures_bounded::FuturesSet< + Result, inbound_hop::Error>, + >, + outbound_workers: futures_bounded::FuturesMap< + CircuitId, + Result, >, } impl Handler { pub fn new(config: Config, endpoint: ConnectedPoint) -> Handler { Handler { - workers: futures_bounded::FuturesSet::new( + inbound_workers: futures_bounded::FuturesSet::new( + STREAM_TIMEOUT, + MAX_CONCURRENT_STREAMS_PER_CONNECTION, + ), + outbound_workers: futures_bounded::FuturesMap::new( STREAM_TIMEOUT, MAX_CONCURRENT_STREAMS_PER_CONNECTION, ), endpoint, config, queued_events: Default::default(), - pending_error: Default::default(), + idle_at: None, reservation_request_future: Default::default(), circuit_accept_futures: Default::default(), circuit_deny_futures: Default::default(), - alive_lend_out_substreams: Default::default(), circuits: Default::default(), active_reservation: Default::default(), - keep_alive: KeepAlive::Yes, pending_connect_requests: Default::default(), + active_connect_requests: Default::default(), } } fn on_fully_negotiated_inbound(&mut self, stream: Stream) { if self - .workers - .try_push( - inbound_hop::handle_inbound_request( - stream, - self.config.reservation_duration, - self.config.max_circuit_duration, - self.config.max_circuit_bytes, - ) - .map(Either::Left), - ) + .inbound_workers + 
.try_push(inbound_hop::handle_inbound_request( + stream, + self.config.reservation_duration, + self.config.max_circuit_duration, + self.config.max_circuit_bytes, + )) .is_err() { - log::warn!("Dropping inbound stream because we are at capacity") + tracing::warn!("Dropping inbound stream because we are at capacity") } } fn on_fully_negotiated_outbound(&mut self, stream: Stream) { - let stop_command = self + let connect = self .pending_connect_requests .pop_front() .expect("opened a stream without a pending stop command"); - let (tx, rx) = oneshot::channel(); - self.alive_lend_out_substreams.push(rx); - if self - .workers - .try_push(outbound_stop::connect(stream, stop_command, tx).map(Either::Right)) + .outbound_workers + .try_push( + connect.circuit_id, + outbound_stop::connect( + stream, + connect.src_peer_id, + connect.max_circuit_duration, + connect.max_circuit_bytes, + ), + ) .is_err() { - log::warn!("Dropping outbound stream because we are at capacity") + tracing::warn!("Dropping outbound stream because we are at capacity") } + + self.active_connect_requests + .insert(connect.circuit_id, connect); } fn on_dial_upgrade_error( @@ -465,21 +445,10 @@ impl Handler { ::OutboundProtocol, >, ) { - let (non_fatal_error, status) = match error { - StreamUpgradeError::Timeout => ( - StreamUpgradeError::Timeout, - proto::Status::CONNECTION_FAILED, - ), - StreamUpgradeError::NegotiationFailed => { - // The remote has previously done a reservation. Doing a reservation but not - // supporting the stop protocol is pointless, thus disconnecting. 
- self.pending_error = Some(StreamUpgradeError::NegotiationFailed); - return; - } - StreamUpgradeError::Io(e) => { - self.pending_error = Some(StreamUpgradeError::Io(e)); - return; - } + let error = match error { + StreamUpgradeError::Timeout => outbound_stop::Error::Io(io::ErrorKind::TimedOut.into()), + StreamUpgradeError::NegotiationFailed => outbound_stop::Error::Unsupported, + StreamUpgradeError::Io(e) => outbound_stop::Error::Io(e), StreamUpgradeError::Apply(v) => void::unreachable(v), }; @@ -495,16 +464,16 @@ impl Handler { src_peer_id: stop_command.src_peer_id, src_connection_id: stop_command.src_connection_id, inbound_circuit_req: stop_command.inbound_circuit_req, - status, - error: non_fatal_error, + status: proto::Status::CONNECTION_FAILED, + error, }, )); } } enum ReservationRequestFuture { - Accepting(BoxFuture<'static, Result<(), inbound_hop::UpgradeError>>), - Denying(BoxFuture<'static, Result<(), inbound_hop::UpgradeError>>), + Accepting(BoxFuture<'static, Result<(), inbound_hop::Error>>), + Denying(BoxFuture<'static, Result<(), inbound_hop::Error>>), } type Futures = FuturesUnordered>; @@ -512,9 +481,6 @@ type Futures = FuturesUnordered>; impl ConnectionHandler for Handler { type FromBehaviour = In; type ToBehaviour = Event; - type Error = StreamUpgradeError< - Either, - >; type InboundProtocol = ReadyUpgrade; type InboundOpenInfo = (); type OutboundProtocol = ReadyUpgrade; @@ -537,7 +503,7 @@ impl ConnectionHandler for Handler { )) .is_some() { - log::warn!("Dropping existing deny/accept future in favor of new one.") + tracing::warn!("Dropping existing deny/accept future in favor of new one") } } In::DenyReservationReq { @@ -551,7 +517,7 @@ impl ConnectionHandler for Handler { )) .is_some() { - log::warn!("Dropping existing deny/accept future in favor of new one.") + tracing::warn!("Dropping existing deny/accept future in favor of new one") } } In::NegotiateOutboundConnect { @@ -560,14 +526,13 @@ impl ConnectionHandler for Handler { src_peer_id, 
src_connection_id, } => { - self.pending_connect_requests - .push_back(outbound_stop::PendingConnect::new( - circuit_id, - inbound_circuit_req, - src_peer_id, - src_connection_id, - &self.config, - )); + self.pending_connect_requests.push_back(PendingConnect::new( + circuit_id, + inbound_circuit_req, + src_peer_id, + src_connection_id, + &self.config, + )); self.queued_events .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol: SubstreamProtocol::new(ReadyUpgrade::new(STOP_PROTOCOL_NAME), ()), @@ -591,7 +556,6 @@ impl ConnectionHandler for Handler { circuit_id, dst_peer_id, inbound_circuit_req, - dst_handler_notifier, dst_stream, dst_pending_data, } => { @@ -604,7 +568,6 @@ impl ConnectionHandler for Handler { src_stream, src_pending_data, dst_peer_id, - dst_handler_notifier, dst_stream, dst_pending_data, }) @@ -615,27 +578,21 @@ impl ConnectionHandler for Handler { } } - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive + fn connection_keep_alive(&self) -> bool { + let Some(idle_at) = self.idle_at else { + return true; + }; + + Instant::now().duration_since(idle_at) <= Duration::from_secs(10) } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { - // Check for a pending (fatal) error. - if let Some(err) = self.pending_error.take() { - // The handler will not be polled again by the `Swarm`. - return Poll::Ready(ConnectionHandlerEvent::Close(err)); - } - // Return queued events. 
if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); @@ -667,62 +624,92 @@ impl ConnectionHandler for Handler { } } - // Process protocol requests - match self.workers.poll_unpin(cx) { - Poll::Ready(Ok(Either::Left(Ok(Either::Left(inbound_reservation_req))))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::ReservationReqReceived { - inbound_reservation_req, - endpoint: self.endpoint.clone(), - renewed: self.active_reservation.is_some(), - }, - )); - } - Poll::Ready(Ok(Either::Left(Ok(Either::Right(inbound_circuit_req))))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::CircuitReqReceived { - inbound_circuit_req, - endpoint: self.endpoint.clone(), - }, - )); + // Process inbound protocol workers + loop { + match self.inbound_workers.poll_unpin(cx) { + Poll::Ready(Ok(Ok(Either::Left(inbound_reservation_req)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::ReservationReqReceived { + inbound_reservation_req, + endpoint: self.endpoint.clone(), + renewed: self.active_reservation.is_some(), + }, + )); + } + Poll::Ready(Ok(Ok(Either::Right(inbound_circuit_req)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::CircuitReqReceived { + inbound_circuit_req, + endpoint: self.endpoint.clone(), + }, + )); + } + Poll::Ready(Err(e)) => { + tracing::debug!("Inbound stream operation timed out: {e}"); + continue; + } + Poll::Ready(Ok(Err(e))) => { + tracing::debug!("Inbound stream operation failed: {e}"); + continue; + } + Poll::Pending => { + break; + } } - Poll::Ready(Ok(Either::Right(Ok(Ok(circuit))))) => { + } + + // Process outbound protocol workers + match self.outbound_workers.poll_unpin(cx) { + Poll::Ready((id, Ok(Ok(circuit)))) => { + let connect = self + .active_connect_requests + .remove(&id) + .expect("must have pending connect"); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( Event::OutboundConnectNegotiated { - 
circuit_id: circuit.circuit_id, - src_peer_id: circuit.src_peer_id, - src_connection_id: circuit.src_connection_id, - inbound_circuit_req: circuit.inbound_circuit_req, - dst_handler_notifier: circuit.dst_handler_notifier, + circuit_id: id, + src_peer_id: connect.src_peer_id, + src_connection_id: connect.src_connection_id, + inbound_circuit_req: connect.inbound_circuit_req, dst_stream: circuit.dst_stream, dst_pending_data: circuit.dst_pending_data, }, )); } - Poll::Ready(Ok(Either::Right(Ok(Err(circuit_failed))))) => { + Poll::Ready((id, Ok(Err(error)))) => { + let connect = self + .active_connect_requests + .remove(&id) + .expect("must have pending connect"); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( Event::OutboundConnectNegotiationFailed { - circuit_id: circuit_failed.circuit_id, - src_peer_id: circuit_failed.src_peer_id, - src_connection_id: circuit_failed.src_connection_id, - inbound_circuit_req: circuit_failed.inbound_circuit_req, - status: circuit_failed.status, - error: circuit_failed.error, + circuit_id: connect.circuit_id, + src_peer_id: connect.src_peer_id, + src_connection_id: connect.src_connection_id, + inbound_circuit_req: connect.inbound_circuit_req, + status: error.to_status(), + error, }, )); } - Poll::Ready(Err(futures_bounded::Timeout { .. })) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Timeout)); - } - Poll::Ready(Ok(Either::Left(Err(e)))) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Apply( - Either::Left(e), - ))); - } - Poll::Ready(Ok(Either::Right(Err(e)))) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Apply( - Either::Right(e), - ))); + Poll::Ready((id, Err(futures_bounded::Timeout { .. 
}))) => { + let connect = self + .active_connect_requests + .remove(&id) + .expect("must have pending connect"); + + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectNegotiationFailed { + circuit_id: connect.circuit_id, + src_peer_id: connect.src_peer_id, + src_connection_id: connect.src_connection_id, + inbound_circuit_req: connect.inbound_circuit_req, + status: proto::Status::CONNECTION_FAILED, // Best fit? + error: outbound_stop::Error::Io(io::ErrorKind::TimedOut.into()), + }, + )); } Poll::Pending => {} } @@ -761,7 +748,6 @@ impl ConnectionHandler for Handler { mut src_stream, src_pending_data, dst_peer_id, - dst_handler_notifier, mut dst_stream, dst_pending_data, } = parts; @@ -785,8 +771,6 @@ impl ConnectionHandler for Handler { ) .await?; - // Inform destination handler that the stream to the destination is dropped. - drop(dst_handler_notifier); Ok(()) } .map(move |r| (circuit_id, dst_peer_id, r)) @@ -870,28 +854,13 @@ impl ConnectionHandler for Handler { None => {} } - // Check lend out substreams. - while let Poll::Ready(Some(Err(Canceled))) = - self.alive_lend_out_substreams.poll_next_unpin(cx) - {} - // Check keep alive status. 
- if self.reservation_request_future.is_none() - && self.circuit_accept_futures.is_empty() - && self.circuit_deny_futures.is_empty() - && self.alive_lend_out_substreams.is_empty() - && self.circuits.is_empty() - && self.active_reservation.is_none() - { - match self.keep_alive { - KeepAlive::Yes => { - self.keep_alive = KeepAlive::Until(Instant::now() + Duration::from_secs(10)); - } - KeepAlive::Until(_) => {} - KeepAlive::No => panic!("Handler never sets KeepAlive::No."), + if self.active_reservation.is_none() { + if self.idle_at.is_none() { + self.idle_at = Some(Instant::now()); } } else { - self.keep_alive = KeepAlive::Yes; + self.idle_at = None; } Poll::Pending @@ -922,10 +891,7 @@ impl ConnectionHandler for Handler { ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { self.on_dial_upgrade_error(dial_upgrade_error); } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::ListenUpgradeError(_) - | ConnectionEvent::LocalProtocolsChange(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} + _ => {} } } } @@ -935,7 +901,35 @@ struct CircuitParts { src_stream: Stream, src_pending_data: Bytes, dst_peer_id: PeerId, - dst_handler_notifier: oneshot::Sender<()>, dst_stream: Stream, dst_pending_data: Bytes, } + +/// Holds everything we know about a to-be-issued `CONNECT` request to a peer. 
+struct PendingConnect { + circuit_id: CircuitId, + inbound_circuit_req: inbound_hop::CircuitReq, + src_peer_id: PeerId, + src_connection_id: ConnectionId, + max_circuit_duration: Duration, + max_circuit_bytes: u64, +} + +impl PendingConnect { + fn new( + circuit_id: CircuitId, + inbound_circuit_req: inbound_hop::CircuitReq, + src_peer_id: PeerId, + src_connection_id: ConnectionId, + config: &Config, + ) -> Self { + Self { + circuit_id, + inbound_circuit_req, + src_peer_id, + src_connection_id, + max_circuit_duration: config.max_circuit_duration, + max_circuit_bytes: config.max_circuit_bytes, + } + } +} diff --git a/protocols/relay/src/behaviour/rate_limiter.rs b/protocols/relay/src/behaviour/rate_limiter.rs index a4a127e1253d..9c4f67d04a86 100644 --- a/protocols/relay/src/behaviour/rate_limiter.rs +++ b/protocols/relay/src/behaviour/rate_limiter.rs @@ -124,7 +124,7 @@ impl GenericRateLimiter { // Note when used with a high number of buckets: This loop refills all the to-be-refilled // buckets at once, thus potentially delaying the parent call to `try_next`. loop { - match self.refill_schedule.get(0) { + match self.refill_schedule.front() { // Only continue if (a) there is a bucket and (b) the bucket has not already been // refilled recently. Some((last_refill, _)) if now.duration_since(*last_refill) >= self.interval => {} diff --git a/protocols/relay/src/lib.rs b/protocols/relay/src/lib.rs index 39ccd539838b..eca3578d5995 100644 --- a/protocols/relay/src/lib.rs +++ b/protocols/relay/src/lib.rs @@ -45,20 +45,20 @@ pub use protocol::{HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; /// Types related to the relay protocol inbound. 
pub mod inbound { pub mod hop { - pub use crate::protocol::inbound_hop::FatalUpgradeError; - } - pub mod stop { - pub use crate::protocol::inbound_stop::FatalUpgradeError; + #[deprecated(note = "Renamed to `Error`.")] + pub type FatalUpgradeError = Error; + + pub use crate::protocol::inbound_hop::Error; } } /// Types related to the relay protocol outbound. pub mod outbound { pub mod hop { - pub use crate::protocol::outbound_hop::FatalUpgradeError; + pub use crate::protocol::outbound_hop::{ConnectError, ProtocolViolation, ReserveError}; } pub mod stop { - pub use crate::protocol::outbound_stop::FatalUpgradeError; + pub use crate::protocol::outbound_stop::{Error, ProtocolViolation}; } } diff --git a/protocols/relay/src/priv_client.rs b/protocols/relay/src/priv_client.rs index d4f0c07cae35..e414852ef819 100644 --- a/protocols/relay/src/priv_client.rs +++ b/protocols/relay/src/priv_client.rs @@ -25,23 +25,22 @@ pub(crate) mod transport; use crate::multiaddr_ext::MultiaddrExt; use crate::priv_client::handler::Handler; -use crate::protocol::{self, inbound_stop, outbound_hop}; +use crate::protocol::{self, inbound_stop}; use bytes::Bytes; use either::Either; use futures::channel::mpsc::Receiver; -use futures::channel::oneshot; use futures::future::{BoxFuture, FutureExt}; use futures::io::{AsyncRead, AsyncWrite}; use futures::ready; use futures::stream::StreamExt; +use libp2p_core::multiaddr::Protocol; use libp2p_core::{Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}; use libp2p_swarm::dial_opts::DialOpts; use libp2p_swarm::{ dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure, NetworkBehaviour, - NotifyHandler, PollParameters, Stream, StreamUpgradeError, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, + NotifyHandler, Stream, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::collections::{hash_map, HashMap, VecDeque}; use std::io::{Error, 
ErrorKind, IoSlice}; @@ -60,32 +59,21 @@ pub enum Event { renewal: bool, limit: Option, }, - ReservationReqFailed { - relay_peer_id: PeerId, - /// Indicates whether the request replaces an existing reservation. - renewal: bool, - error: StreamUpgradeError, - }, OutboundCircuitEstablished { relay_peer_id: PeerId, limit: Option, }, - OutboundCircuitReqFailed { - relay_peer_id: PeerId, - error: StreamUpgradeError, - }, /// An inbound circuit has been established. InboundCircuitEstablished { src_peer_id: PeerId, limit: Option, }, - /// An inbound circuit request has been denied. - InboundCircuitReqDenied { src_peer_id: PeerId }, - /// Denying an inbound circuit request failed. - InboundCircuitReqDenyFailed { - src_peer_id: PeerId, - error: inbound_stop::UpgradeError, - }, +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +enum ReservationStatus { + Pending, + Confirmed, } /// [`NetworkBehaviour`] implementation of the relay client @@ -98,6 +86,11 @@ pub struct Behaviour { /// connection. directly_connected_peers: HashMap>, + /// Stores the address of a pending or confirmed reservation. + /// + /// This is indexed by the [`ConnectionId`] to a relay server and the address is the `/p2p-circuit` address we reserved on it. + reservation_addresses: HashMap, + /// Queue of actions to return when polled. queued_actions: VecDeque>>, @@ -111,6 +104,7 @@ pub fn new(local_peer_id: PeerId) -> (Transport, Behaviour) { local_peer_id, from_transport, directly_connected_peers: Default::default(), + reservation_addresses: Default::default(), queued_actions: Default::default(), pending_handler_commands: Default::default(), }; @@ -125,7 +119,7 @@ impl Behaviour { connection_id, endpoint, .. 
- }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { if !endpoint.is_relayed() { match self.directly_connected_peers.entry(peer_id) { @@ -145,6 +139,12 @@ impl Behaviour { unreachable!("`on_connection_closed` for unconnected peer.") } }; + if let Some((addr, ReservationStatus::Confirmed)) = + self.reservation_addresses.remove(&connection_id) + { + self.queued_actions + .push_back(ToSwarm::ExternalAddrExpired(addr)); + } } } } @@ -192,7 +192,7 @@ impl NetworkBehaviour for Behaviour { Ok(Either::Left(handler)) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, @@ -219,25 +219,17 @@ impl NetworkBehaviour for Behaviour { self.on_connection_closed(connection_closed) } FromSwarm::DialFailure(DialFailure { connection_id, .. }) => { + self.reservation_addresses.remove(&connection_id); self.pending_handler_commands.remove(&connection_id); } - FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } fn on_connection_handler_event( &mut self, event_source: PeerId, - _connection: ConnectionId, + connection: ConnectionId, handler_event: THandlerOutEvent, ) { let handler_event = match handler_event { @@ -247,47 +239,41 @@ impl NetworkBehaviour for Behaviour { let event = match handler_event { handler::Event::ReservationReqAccepted { renewal, limit } => { + let (addr, status) = self + .reservation_addresses + .get_mut(&connection) + .expect("Relay connection exist"); + + if !renewal && *status == ReservationStatus::Pending { + *status = ReservationStatus::Confirmed; + self.queued_actions + 
.push_back(ToSwarm::ExternalAddrConfirmed(addr.clone())); + } + Event::ReservationReqAccepted { relay_peer_id: event_source, renewal, limit, } } - handler::Event::ReservationReqFailed { renewal, error } => { - Event::ReservationReqFailed { - relay_peer_id: event_source, - renewal, - error, - } - } handler::Event::OutboundCircuitEstablished { limit } => { Event::OutboundCircuitEstablished { relay_peer_id: event_source, limit, } } - handler::Event::OutboundCircuitReqFailed { error } => Event::OutboundCircuitReqFailed { - relay_peer_id: event_source, - error, - }, handler::Event::InboundCircuitEstablished { src_peer_id, limit } => { Event::InboundCircuitEstablished { src_peer_id, limit } } - handler::Event::InboundCircuitReqDenied { src_peer_id } => { - Event::InboundCircuitReqDenied { src_peer_id } - } - handler::Event::InboundCircuitReqDenyFailed { src_peer_id, error } => { - Event::InboundCircuitReqDenyFailed { src_peer_id, error } - } }; - self.queued_actions.push_back(ToSwarm::GenerateEvent(event)) + self.queued_actions.push_back(ToSwarm::GenerateEvent(event)); } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - _poll_parameters: &mut impl PollParameters, ) -> Poll>> { if let Some(action) = self.queued_actions.pop_front() { return Poll::Ready(action); @@ -302,20 +288,44 @@ impl NetworkBehaviour for Behaviour { match self .directly_connected_peers .get(&relay_peer_id) - .and_then(|cs| cs.get(0)) + .and_then(|cs| cs.first()) { - Some(connection_id) => ToSwarm::NotifyHandler { - peer_id: relay_peer_id, - handler: NotifyHandler::One(*connection_id), - event: Either::Left(handler::In::Reserve { to_listener }), - }, + Some(connection_id) => { + self.reservation_addresses.insert( + *connection_id, + ( + relay_addr + .with(Protocol::P2p(relay_peer_id)) + .with(Protocol::P2pCircuit) + .with(Protocol::P2p(self.local_peer_id)), + ReservationStatus::Pending, + ), + ); + + 
ToSwarm::NotifyHandler { + peer_id: relay_peer_id, + handler: NotifyHandler::One(*connection_id), + event: Either::Left(handler::In::Reserve { to_listener }), + } + } None => { let opts = DialOpts::peer_id(relay_peer_id) - .addresses(vec![relay_addr]) + .addresses(vec![relay_addr.clone()]) .extend_addresses_through_behaviour() .build(); let relayed_connection_id = opts.connection_id(); + self.reservation_addresses.insert( + relayed_connection_id, + ( + relay_addr + .with(Protocol::P2p(relay_peer_id)) + .with(Protocol::P2pCircuit) + .with(Protocol::P2p(self.local_peer_id)), + ReservationStatus::Pending, + ), + ); + self.pending_handler_commands .insert(relayed_connection_id, handler::In::Reserve { to_listener }); ToSwarm::Dial { opts } @@ -332,13 +342,13 @@ impl NetworkBehaviour for Behaviour { match self .directly_connected_peers .get(&relay_peer_id) - .and_then(|cs| cs.get(0)) + .and_then(|cs| cs.first()) { Some(connection_id) => ToSwarm::NotifyHandler { peer_id: relay_peer_id, handler: NotifyHandler::One(*connection_id), event: Either::Left(handler::In::EstablishCircuit { - send_back, + to_dial: send_back, dst_peer_id, }), }, @@ -352,7 +362,7 @@ impl NetworkBehaviour for Behaviour { self.pending_handler_commands.insert( connection_id, handler::In::EstablishCircuit { - send_back, + to_dial: send_back, dst_peer_id, }, ); @@ -387,22 +397,13 @@ pub(crate) enum ConnectionState { Operational { read_buffer: Bytes, substream: Stream, - /// "Drop notifier" pattern to signal to the transport that the connection has been dropped. - /// - /// This is flagged as "dead-code" by the compiler because we never read from it here. - /// However, it is actual use is to trigger the `Canceled` error in the `Transport` when this `Sender` is dropped. 
- #[allow(dead_code)] - drop_notifier: oneshot::Sender, }, } impl Unpin for ConnectionState {} impl ConnectionState { - pub(crate) fn new_inbound( - circuit: inbound_stop::Circuit, - drop_notifier: oneshot::Sender, - ) -> Self { + pub(crate) fn new_inbound(circuit: inbound_stop::Circuit) -> Self { ConnectionState::InboundAccepting { accept: async { let (substream, read_buffer) = circuit @@ -412,22 +413,16 @@ impl ConnectionState { Ok(ConnectionState::Operational { read_buffer, substream, - drop_notifier, }) } .boxed(), } } - pub(crate) fn new_outbound( - substream: Stream, - read_buffer: Bytes, - drop_notifier: oneshot::Sender, - ) -> Self { + pub(crate) fn new_outbound(substream: Stream, read_buffer: Bytes) -> Self { ConnectionState::Operational { substream, read_buffer, - drop_notifier, } } } diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index 25488ac3041c..662d63cc742a 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -18,34 +18,29 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use crate::client::Connection; use crate::priv_client::transport; +use crate::priv_client::transport::ToListenerMsg; use crate::protocol::{self, inbound_stop, outbound_hop}; -use crate::{proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; -use either::Either; +use crate::{priv_client, proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; +use futures::channel::mpsc::Sender; use futures::channel::{mpsc, oneshot}; -use futures::future::{BoxFuture, FutureExt}; -use futures::sink::SinkExt; -use futures::stream::{FuturesUnordered, StreamExt}; -use futures::TryFutureExt; -use futures_bounded::{PushError, Timeout}; +use futures::future::FutureExt; use futures_timer::Delay; -use instant::Instant; use libp2p_core::multiaddr::Protocol; use libp2p_core::upgrade::ReadyUpgrade; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, -}; +use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound}; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, KeepAlive, StreamProtocol, StreamUpgradeError, + ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; -use log::debug; use std::collections::VecDeque; -use std::fmt; use std::task::{Context, Poll}; use std::time::Duration; +use std::{fmt, io}; +use void::Void; /// The maximum number of circuits being denied concurrently. 
/// @@ -62,7 +57,7 @@ pub enum In { }, EstablishCircuit { dst_peer_id: PeerId, - send_back: oneshot::Sender>, + to_dial: oneshot::Sender>, }, } @@ -72,7 +67,7 @@ impl fmt::Debug for In { In::Reserve { to_listener: _ } => f.debug_struct("In::Reserve").finish(), In::EstablishCircuit { dst_peer_id, - send_back: _, + to_dial: _, } => f .debug_struct("In::EstablishCircuit") .field("dst_peer_id", dst_peer_id) @@ -88,42 +83,19 @@ pub enum Event { renewal: bool, limit: Option, }, - ReservationReqFailed { - /// Indicates whether the request replaces an existing reservation. - renewal: bool, - error: StreamUpgradeError, - }, /// An outbound circuit has been established. OutboundCircuitEstablished { limit: Option }, - OutboundCircuitReqFailed { - error: StreamUpgradeError, - }, /// An inbound circuit has been established. InboundCircuitEstablished { src_peer_id: PeerId, limit: Option, }, - /// An inbound circuit request has been denied. - InboundCircuitReqDenied { src_peer_id: PeerId }, - /// Denying an inbound circuit request failed. - InboundCircuitReqDenyFailed { - src_peer_id: PeerId, - error: inbound_stop::UpgradeError, - }, } pub struct Handler { local_peer_id: PeerId, remote_peer_id: PeerId, remote_addr: Multiaddr, - /// A pending fatal error that results in the connection being closed. - pending_error: Option< - StreamUpgradeError< - Either, - >, - >, - /// Until when to keep the connection alive. - keep_alive: KeepAlive, /// Queue of events to return when polled. 
queued_events: VecDeque< @@ -131,43 +103,28 @@ pub struct Handler { ::OutboundProtocol, ::OutboundOpenInfo, ::ToBehaviour, - ::Error, >, >, - wait_for_outbound_stream: VecDeque, - outbound_circuits: futures_bounded::FuturesSet< - Result< - Either< - Result, - Result, outbound_hop::CircuitFailedReason>, - >, - outbound_hop::FatalUpgradeError, - >, + pending_streams: VecDeque>>>, + + inflight_reserve_requests: futures_bounded::FuturesTupleSet< + Result, + mpsc::Sender, >, - reservation: Reservation, + inflight_outbound_connect_requests: futures_bounded::FuturesTupleSet< + Result, + oneshot::Sender>, + >, - /// Tracks substreams lent out to the transport. - /// - /// Contains a [`futures::future::Future`] for each lend out substream that - /// resolves once the substream is dropped. - /// - /// Once all substreams are dropped and this handler has no other work, - /// [`KeepAlive::Until`] can be set, allowing the connection to be closed - /// eventually. - alive_lend_out_substreams: FuturesUnordered>, - - open_circuit_futs: - futures_bounded::FuturesSet>, - - circuit_deny_futs: futures_bounded::FuturesMap>, - - /// Futures that try to send errors to the transport. - /// - /// We may drop errors if this handler ends up in a terminal state (by returning - /// [`ConnectionHandlerEvent::Close`]). 
- send_error_futs: FuturesUnordered>, + inflight_inbound_circuit_requests: + futures_bounded::FuturesSet>, + + inflight_outbound_circuit_deny_requests: + futures_bounded::FuturesSet>, + + reservation: Reservation, } impl Handler { @@ -177,110 +134,95 @@ impl Handler { remote_peer_id, remote_addr, queued_events: Default::default(), - pending_error: Default::default(), - wait_for_outbound_stream: Default::default(), - outbound_circuits: futures_bounded::FuturesSet::new( + pending_streams: Default::default(), + inflight_reserve_requests: futures_bounded::FuturesTupleSet::new( STREAM_TIMEOUT, MAX_CONCURRENT_STREAMS_PER_CONNECTION, ), - reservation: Reservation::None, - alive_lend_out_substreams: Default::default(), - open_circuit_futs: futures_bounded::FuturesSet::new( + inflight_inbound_circuit_requests: futures_bounded::FuturesSet::new( STREAM_TIMEOUT, MAX_CONCURRENT_STREAMS_PER_CONNECTION, ), - circuit_deny_futs: futures_bounded::FuturesMap::new( + inflight_outbound_connect_requests: futures_bounded::FuturesTupleSet::new( + STREAM_TIMEOUT, + MAX_CONCURRENT_STREAMS_PER_CONNECTION, + ), + inflight_outbound_circuit_deny_requests: futures_bounded::FuturesSet::new( DENYING_CIRCUIT_TIMEOUT, MAX_NUMBER_DENYING_CIRCUIT, ), - send_error_futs: Default::default(), - keep_alive: KeepAlive::Yes, + reservation: Reservation::None, } } - fn on_dial_upgrade_error( - &mut self, - DialUpgradeError { error, .. 
}: DialUpgradeError< - ::OutboundOpenInfo, - ::OutboundProtocol, - >, - ) { - let outbound_info = self.wait_for_outbound_stream.pop_front().expect( - "got a stream error without a pending connection command or a reserve listener", - ); - match outbound_info { - outbound_hop::OutboundStreamInfo::Reserve(mut to_listener) => { - let non_fatal_error = match error { - StreamUpgradeError::Timeout => StreamUpgradeError::Timeout, - StreamUpgradeError::NegotiationFailed => StreamUpgradeError::NegotiationFailed, - StreamUpgradeError::Io(e) => { - self.pending_error = Some(StreamUpgradeError::Io(e)); - return; - } - StreamUpgradeError::Apply(v) => void::unreachable(v), - }; - - if self.pending_error.is_none() { - self.send_error_futs.push( - async move { - let _ = to_listener - .send(transport::ToListenerMsg::Reservation(Err(()))) - .await; - } - .boxed(), - ); - } else { - // Fatal error occurred, thus handler is closing as quickly as possible. - // Transport is notified through dropping `to_listener`. 
- } + fn insert_to_deny_futs(&mut self, circuit: inbound_stop::Circuit) { + let src_peer_id = circuit.src_peer_id(); - let renewal = self.reservation.failed(); + if self + .inflight_outbound_circuit_deny_requests + .try_push(circuit.deny(proto::Status::NO_RESERVATION)) + .is_err() + { + tracing::warn!( + peer=%src_peer_id, + "Dropping existing inbound circuit request to be denied from peer in favor of new one" + ) + } + } - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::ReservationReqFailed { - renewal, - error: non_fatal_error, - }, - )); - } - outbound_hop::OutboundStreamInfo::CircuitConnection(cmd) => { - let non_fatal_error = match error { - StreamUpgradeError::Timeout => StreamUpgradeError::Timeout, - StreamUpgradeError::NegotiationFailed => StreamUpgradeError::NegotiationFailed, - StreamUpgradeError::Io(e) => { - self.pending_error = Some(StreamUpgradeError::Io(e)); - return; - } - StreamUpgradeError::Apply(v) => void::unreachable(v), - }; + fn make_new_reservation(&mut self, to_listener: Sender) { + let (sender, receiver) = oneshot::channel(); + + self.pending_streams.push_back(sender); + self.queued_events + .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), + }); + let result = self.inflight_reserve_requests.try_push( + async move { + let stream = receiver + .await + .map_err(|_| io::Error::from(io::ErrorKind::BrokenPipe))? 
+ .map_err(into_reserve_error)?; - let _ = cmd.send_back.send(Err(())); + let reservation = outbound_hop::make_reservation(stream).await?; - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::OutboundCircuitReqFailed { - error: non_fatal_error, - }, - )); - } + Ok(reservation) + }, + to_listener, + ); + + if result.is_err() { + tracing::warn!("Dropping in-flight reservation request because we are at capacity"); } } - fn insert_to_deny_futs(&mut self, circuit: inbound_stop::Circuit) { - let src_peer_id = circuit.src_peer_id(); + fn establish_new_circuit( + &mut self, + to_dial: oneshot::Sender>, + dst_peer_id: PeerId, + ) { + let (sender, receiver) = oneshot::channel(); - match self.circuit_deny_futs.try_push( - src_peer_id, - circuit.deny(proto::Status::NO_RESERVATION), - ) { - Err(PushError::BeyondCapacity(_)) => log::warn!( - "Dropping inbound circuit request to be denied from {src_peer_id} due to exceeding limit." - ), - Err(PushError::ReplacedFuture(_)) => log::warn!( - "Dropping existing inbound circuit request to be denied from {src_peer_id} in favor of new one." - ), - Ok(()) => {} + self.pending_streams.push_back(sender); + self.queued_events + .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), + }); + let result = self.inflight_outbound_connect_requests.try_push( + async move { + let stream = receiver + .await + .map_err(|_| io::Error::from(io::ErrorKind::BrokenPipe))? 
+ .map_err(into_connect_error)?; + + outbound_hop::open_circuit(stream, dst_peer_id).await + }, + to_dial, + ); + + if result.is_err() { + tracing::warn!("Dropping in-flight connect request because we are at capacity") } } } @@ -288,9 +230,6 @@ impl Handler { impl ConnectionHandler for Handler { type FromBehaviour = In; type ToBehaviour = Event; - type Error = StreamUpgradeError< - Either, - >; type InboundProtocol = ReadyUpgrade; type InboundOpenInfo = (); type OutboundProtocol = ReadyUpgrade; @@ -303,60 +242,39 @@ impl ConnectionHandler for Handler { fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { match event { In::Reserve { to_listener } => { - self.wait_for_outbound_stream - .push_back(outbound_hop::OutboundStreamInfo::Reserve(to_listener)); - self.queued_events - .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), - }); + self.make_new_reservation(to_listener); } In::EstablishCircuit { - send_back, + to_dial, dst_peer_id, } => { - self.wait_for_outbound_stream.push_back( - outbound_hop::OutboundStreamInfo::CircuitConnection( - outbound_hop::Command::new(dst_peer_id, send_back), - ), - ); - self.queued_events - .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), - }); + self.establish_new_circuit(to_dial, dst_peer_id); } } } - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive + fn connection_keep_alive(&self) -> bool { + self.reservation.is_some() } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { - // Check for a pending (fatal) error. 
- if let Some(err) = self.pending_error.take() { - // The handler will not be polled again by the `Swarm`. - return Poll::Ready(ConnectionHandlerEvent::Close(err)); - } - - // Inbound circuits loop { - match self.outbound_circuits.poll_unpin(cx) { - Poll::Ready(Ok(Ok(Either::Left(Ok(outbound_hop::Reservation { - renewal_timeout, - addrs, - limit, + // Reservations + match self.inflight_reserve_requests.poll_unpin(cx) { + Poll::Ready(( + Ok(Ok(outbound_hop::Reservation { + renewal_timeout, + addrs, + limit, + })), to_listener, - }))))) => { + )) => { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( self.reservation.accepted( renewal_timeout, @@ -365,65 +283,89 @@ impl ConnectionHandler for Handler { self.local_peer_id, limit, ), - )) - } - Poll::Ready(Ok(Ok(Either::Right(Ok(Some(outbound_hop::Circuit { limit })))))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::OutboundCircuitEstablished { limit }, )); } - Poll::Ready(Ok(Ok(Either::Right(Ok(None))))) => continue, - Poll::Ready(Ok(Ok(Either::Right(Err(e))))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::OutboundCircuitReqFailed { - error: StreamUpgradeError::Apply(e), - }, - )); + Poll::Ready((Ok(Err(error)), mut to_listener)) => { + if let Err(e) = + to_listener.try_send(transport::ToListenerMsg::Reservation(Err(error))) + { + tracing::debug!("Unable to send error to listener: {}", e.into_send_error()) + } + self.reservation.failed(); + continue; + } + Poll::Ready((Err(futures_bounded::Timeout { .. 
}), mut to_listener)) => { + if let Err(e) = + to_listener.try_send(transport::ToListenerMsg::Reservation(Err( + outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()), + ))) + { + tracing::debug!("Unable to send error to listener: {}", e.into_send_error()) + } + self.reservation.failed(); + continue; } - Poll::Ready(Ok(Ok(Either::Left(Err(e))))) => { - let renewal = self.reservation.failed(); + Poll::Pending => {} + } + + // Circuits + match self.inflight_outbound_connect_requests.poll_unpin(cx) { + Poll::Ready(( + Ok(Ok(outbound_hop::Circuit { + limit, + read_buffer, + stream, + })), + to_dialer, + )) => { + if to_dialer + .send(Ok(priv_client::Connection { + state: priv_client::ConnectionState::new_outbound(stream, read_buffer), + })) + .is_err() + { + tracing::debug!( + "Dropping newly established circuit because the listener is gone" + ); + continue; + } + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::ReservationReqFailed { - renewal, - error: StreamUpgradeError::Apply(e), - }, + Event::OutboundCircuitEstablished { limit }, )); } - Poll::Ready(Ok(Err(e))) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Apply( - Either::Right(e), - ))) + Poll::Ready((Ok(Err(error)), to_dialer)) => { + let _ = to_dialer.send(Err(error)); + continue; } - Poll::Ready(Err(Timeout { .. })) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Timeout)); + Poll::Ready((Err(futures_bounded::Timeout { .. }), to_dialer)) => { + if to_dialer + .send(Err(outbound_hop::ConnectError::Io( + io::ErrorKind::TimedOut.into(), + ))) + .is_err() + { + tracing::debug!("Unable to send error to dialer") + } + self.reservation.failed(); + continue; } - Poll::Pending => break, + Poll::Pending => {} } - } - // Return queued events. 
- if let Some(event) = self.queued_events.pop_front() { - return Poll::Ready(event); - } - - if let Poll::Ready(worker_res) = self.open_circuit_futs.poll_unpin(cx) { - let res = match worker_res { - Ok(r) => r, - Err(Timeout { .. }) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Timeout)); - } - }; + // Return queued events. + if let Some(event) = self.queued_events.pop_front() { + return Poll::Ready(event); + } - match res { - Ok(circuit) => match &mut self.reservation { + match self.inflight_inbound_circuit_requests.poll_unpin(cx) { + Poll::Ready(Ok(Ok(circuit))) => match &mut self.reservation { Reservation::Accepted { pending_msgs, .. } | Reservation::Renewing { pending_msgs, .. } => { let src_peer_id = circuit.src_peer_id(); let limit = circuit.limit(); - let (tx, rx) = oneshot::channel(); - self.alive_lend_out_substreams.push(rx); - let connection = super::ConnectionState::new_inbound(circuit, tx); + let connection = super::ConnectionState::new_inbound(circuit); pending_msgs.push_back( transport::ToListenerMsg::IncomingRelayedConnection { @@ -439,72 +381,41 @@ impl ConnectionHandler for Handler { } Reservation::None => { self.insert_to_deny_futs(circuit); + continue; } }, - Err(e) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Apply( - Either::Left(e), - ))); + Poll::Ready(Ok(Err(e))) => { + tracing::debug!("An inbound circuit request failed: {e}"); + continue; } + Poll::Ready(Err(e)) => { + tracing::debug!("An inbound circuit request timed out: {e}"); + continue; + } + Poll::Pending => {} } - } - - if let Poll::Ready(Some(to_listener)) = self.reservation.poll(cx) { - self.wait_for_outbound_stream - .push_back(outbound_hop::OutboundStreamInfo::Reserve(to_listener)); - - return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), - }); - } - - // Deny incoming circuit requests. 
- match self.circuit_deny_futs.poll_unpin(cx) { - Poll::Ready((src_peer_id, Ok(Ok(())))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::InboundCircuitReqDenied { src_peer_id }, - )); - } - Poll::Ready((src_peer_id, Ok(Err(error)))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::InboundCircuitReqDenyFailed { src_peer_id, error }, - )); - } - Poll::Ready((src_peer_id, Err(Timeout { .. }))) => { - log::warn!("Dropping inbound circuit request to be denied from {:?} due to exceeding limit.", src_peer_id); - } - Poll::Pending => {} - } - - // Send errors to transport. - while let Poll::Ready(Some(())) = self.send_error_futs.poll_next_unpin(cx) {} - // Check status of lend out substreams. - loop { - match self.alive_lend_out_substreams.poll_next_unpin(cx) { - Poll::Ready(Some(Err(oneshot::Canceled))) => {} - Poll::Ready(Some(Ok(v))) => void::unreachable(v), - Poll::Ready(None) | Poll::Pending => break, + if let Poll::Ready(Some(to_listener)) = self.reservation.poll(cx) { + self.make_new_reservation(to_listener); + continue; } - } - // Update keep-alive handling. - if matches!(self.reservation, Reservation::None) - && self.alive_lend_out_substreams.is_empty() - && self.circuit_deny_futs.is_empty() - { - match self.keep_alive { - KeepAlive::Yes => { - self.keep_alive = KeepAlive::Until(Instant::now() + Duration::from_secs(10)); + // Deny incoming circuit requests. + match self.inflight_outbound_circuit_deny_requests.poll_unpin(cx) { + Poll::Ready(Ok(Ok(()))) => continue, + Poll::Ready(Ok(Err(error))) => { + tracing::debug!("Denying inbound circuit failed: {error}"); + continue; + } + Poll::Ready(Err(futures_bounded::Timeout { .. 
})) => { + tracing::debug!("Denying inbound circuit timed out"); + continue; } - KeepAlive::Until(_) => {} - KeepAlive::No => panic!("Handler never sets KeepAlive::No."), + Poll::Pending => {} } - } else { - self.keep_alive = KeepAlive::Yes; - } - Poll::Pending + return Poll::Pending; + } } fn on_connection_event( @@ -522,64 +433,25 @@ impl ConnectionHandler for Handler { .. }) => { if self - .open_circuit_futs + .inflight_inbound_circuit_requests .try_push(inbound_stop::handle_open_circuit(stream)) .is_err() { - log::warn!("Dropping inbound stream because we are at capacity") + tracing::warn!("Dropping inbound stream because we are at capacity") } } - ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { - protocol: stream, - .. - }) => { - let outbound_info = self.wait_for_outbound_stream.pop_front().expect( - "opened a stream without a pending connection command or a reserve listener", - ); - match outbound_info { - outbound_hop::OutboundStreamInfo::Reserve(to_listener) => { - if self - .outbound_circuits - .try_push( - outbound_hop::handle_reserve_message_response(stream, to_listener) - .map_ok(Either::Left), - ) - .is_err() - { - log::warn!("Dropping outbound stream because we are at capacity") - } - } - outbound_hop::OutboundStreamInfo::CircuitConnection(cmd) => { - let (tx, rx) = oneshot::channel(); - self.alive_lend_out_substreams.push(rx); - - if self - .outbound_circuits - .try_push( - outbound_hop::handle_connection_message_response( - stream, - self.remote_peer_id, - cmd, - tx, - ) - .map_ok(Either::Right), - ) - .is_err() - { - log::warn!("Dropping outbound stream because we are at capacity") - } - } + ConnectionEvent::FullyNegotiatedOutbound(ev) => { + if let Some(next) = self.pending_streams.pop_front() { + let _ = next.send(Ok(ev.protocol)); } } - ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { - void::unreachable(listen_upgrade_error.error) - } - ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { - 
self.on_dial_upgrade_error(dial_upgrade_error) + ConnectionEvent::ListenUpgradeError(ev) => void::unreachable(ev.error), + ConnectionEvent::DialUpgradeError(ev) => { + if let Some(next) = self.pending_streams.pop_front() { + let _ = next.send(Err(ev.error)); + } } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::LocalProtocolsChange(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} + _ => {} } } } @@ -636,18 +508,13 @@ impl Reservation { Event::ReservationReqAccepted { renewal, limit } } - /// Marks the current reservation as failed. - /// - /// Returns whether the reservation request was a renewal. - fn failed(&mut self) -> bool { - let renewal = matches!( - self, - Reservation::Accepted { .. } | Reservation::Renewing { .. } - ); + fn is_some(&self) -> bool { + matches!(self, Self::Accepted { .. } | Self::Renewing { .. }) + } + /// Marks the current reservation as failed. + fn failed(&mut self) { *self = Reservation::None; - - renewal } fn forward_messages_to_transport_listener(&mut self, cx: &mut Context<'_>) { @@ -663,12 +530,12 @@ impl Reservation { if let Err(e) = to_listener .start_send(pending_msgs.pop_front().expect("Called !is_empty().")) { - debug!("Failed to sent pending message to listener: {:?}", e); + tracing::debug!("Failed to sent pending message to listener: {:?}", e); *self = Reservation::None; } } Poll::Ready(Err(e)) => { - debug!("Channel to listener failed: {:?}", e); + tracing::debug!("Channel to listener failed: {:?}", e); *self = Reservation::None; } Poll::Pending => {} @@ -710,3 +577,25 @@ impl Reservation { poll_val } } + +fn into_reserve_error(e: StreamUpgradeError) -> outbound_hop::ReserveError { + match e { + StreamUpgradeError::Timeout => { + outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()) + } + StreamUpgradeError::Apply(never) => void::unreachable(never), + StreamUpgradeError::NegotiationFailed => outbound_hop::ReserveError::Unsupported, + StreamUpgradeError::Io(e) => outbound_hop::ReserveError::Io(e), + } 
+} + +fn into_connect_error(e: StreamUpgradeError) -> outbound_hop::ConnectError { + match e { + StreamUpgradeError::Timeout => { + outbound_hop::ConnectError::Io(io::ErrorKind::TimedOut.into()) + } + StreamUpgradeError::Apply(never) => void::unreachable(never), + StreamUpgradeError::NegotiationFailed => outbound_hop::ConnectError::Unsupported, + StreamUpgradeError::Io(e) => outbound_hop::ConnectError::Io(e), + } +} diff --git a/protocols/relay/src/priv_client/transport.rs b/protocols/relay/src/priv_client/transport.rs index 41114d0cdd51..7147f0b5e55f 100644 --- a/protocols/relay/src/priv_client/transport.rs +++ b/protocols/relay/src/priv_client/transport.rs @@ -21,6 +21,8 @@ use crate::multiaddr_ext::MultiaddrExt; use crate::priv_client::Connection; +use crate::protocol::outbound_hop; +use crate::protocol::outbound_hop::{ConnectError, ReserveError}; use crate::RequestId; use futures::channel::mpsc; use futures::channel::oneshot; @@ -97,7 +99,7 @@ pub struct Transport { impl Transport { pub(crate) fn new() -> (Self, mpsc::Receiver) { - let (to_behaviour, from_transport) = mpsc::channel(0); + let (to_behaviour, from_transport) = mpsc::channel(1000); let transport = Transport { to_behaviour, pending_to_behaviour: VecDeque::new(), @@ -189,7 +191,8 @@ impl libp2p_core::Transport for Transport { send_back: tx, }) .await?; - let stream = rx.await?.map_err(|()| Error::Connect)?; + let stream = rx.await??; + Ok(stream) } .boxed()) @@ -342,13 +345,10 @@ impl Stream for Listener { return Poll::Ready(None); } - let msg = match ready!(self.from_behaviour.poll_next_unpin(cx)) { - Some(msg) => msg, - None => { - // Sender of `from_behaviour` has been dropped, signaling listener to close. - self.close(Ok(())); - continue; - } + let Some(msg) = ready!(self.from_behaviour.poll_next_unpin(cx)) else { + // Sender of `from_behaviour` has been dropped, signaling listener to close. 
+ self.close(Ok(())); + continue; }; match msg { @@ -381,7 +381,7 @@ impl Stream for Listener { send_back_addr: Protocol::P2p(src_peer_id).into(), }) } - ToListenerMsg::Reservation(Err(())) => self.close(Err(Error::Reservation)), + ToListenerMsg::Reservation(Err(e)) => self.close(Err(Error::Reservation(e))), }; } } @@ -409,9 +409,9 @@ pub enum Error { #[error("One of the provided multiaddresses is malformed.")] MalformedMultiaddr, #[error("Failed to get Reservation.")] - Reservation, + Reservation(#[from] ReserveError), #[error("Failed to connect to destination.")] - Connect, + Connect(#[from] ConnectError), } impl From for TransportError { @@ -431,7 +431,7 @@ pub(crate) enum TransportToBehaviourMsg { relay_peer_id: PeerId, dst_addr: Option, dst_peer_id: PeerId, - send_back: oneshot::Sender>, + send_back: oneshot::Sender>, }, /// Listen for incoming relayed connections via relay node. ListenReq { @@ -443,7 +443,7 @@ pub(crate) enum TransportToBehaviourMsg { #[allow(clippy::large_enum_variant)] pub enum ToListenerMsg { - Reservation(Result), + Reservation(Result), IncomingRelayedConnection { stream: Connection, src_peer_id: PeerId, diff --git a/protocols/relay/src/protocol/inbound_hop.rs b/protocols/relay/src/protocol/inbound_hop.rs index b44d29e42ce0..41fe2675dce1 100644 --- a/protocols/relay/src/protocol/inbound_hop.rs +++ b/protocols/relay/src/protocol/inbound_hop.rs @@ -35,25 +35,11 @@ use crate::proto::message_v2::pb::mod_HopMessage::Type; use crate::protocol::MAX_MESSAGE_SIZE; #[derive(Debug, Error)] -pub enum UpgradeError { - #[error("Fatal")] - Fatal(#[from] FatalUpgradeError), -} - -impl From for UpgradeError { - fn from(error: quick_protobuf_codec::Error) -> Self { - Self::Fatal(error.into()) - } -} - -#[derive(Debug, Error)] -pub enum FatalUpgradeError { +pub enum Error { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), #[error("Stream closed")] StreamClosed, - #[error("Failed to parse response type field.")] - ParseTypeField, 
#[error("Failed to parse peer id.")] ParsePeerId, #[error("Expected 'peer' field to be set.")] @@ -70,9 +56,9 @@ pub struct ReservationReq { } impl ReservationReq { - pub async fn accept(self, addrs: Vec) -> Result<(), FatalUpgradeError> { + pub async fn accept(self, addrs: Vec) -> Result<(), Error> { if addrs.is_empty() { - log::debug!( + tracing::debug!( "Accepting relay reservation without providing external addresses of local node. \ Thus the remote node might not be able to advertise its relayed address." ) @@ -104,7 +90,7 @@ impl ReservationReq { self.send(msg).await } - pub async fn deny(self, status: proto::Status) -> Result<(), FatalUpgradeError> { + pub async fn deny(self, status: proto::Status) -> Result<(), Error> { let msg = proto::HopMessage { type_pb: proto::HopMessageType::STATUS, peer: None, @@ -116,7 +102,7 @@ impl ReservationReq { self.send(msg).await } - async fn send(mut self, msg: proto::HopMessage) -> Result<(), FatalUpgradeError> { + async fn send(mut self, msg: proto::HopMessage) -> Result<(), Error> { self.substream.send(msg).await?; self.substream.flush().await?; self.substream.close().await?; @@ -135,7 +121,7 @@ impl CircuitReq { self.dst } - pub async fn accept(mut self) -> Result<(Stream, Bytes), FatalUpgradeError> { + pub async fn accept(mut self) -> Result<(Stream, Bytes), Error> { let msg = proto::HopMessage { type_pb: proto::HopMessageType::STATUS, peer: None, @@ -160,7 +146,7 @@ impl CircuitReq { Ok((io, read_buffer.freeze())) } - pub async fn deny(mut self, status: proto::Status) -> Result<(), FatalUpgradeError> { + pub async fn deny(mut self, status: proto::Status) -> Result<(), Error> { let msg = proto::HopMessage { type_pb: proto::HopMessageType::STATUS, peer: None, @@ -185,13 +171,13 @@ pub(crate) async fn handle_inbound_request( reservation_duration: Duration, max_circuit_duration: Duration, max_circuit_bytes: u64, -) -> Result, FatalUpgradeError> { +) -> Result, Error> { let mut substream = Framed::new(io, 
quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); let res = substream.next().await; if let None | Some(Err(_)) = res { - return Err(FatalUpgradeError::StreamClosed); + return Err(Error::StreamClosed); } let proto::HopMessage { @@ -212,17 +198,14 @@ pub(crate) async fn handle_inbound_request( Type::CONNECT => { let peer_id_res = match peer { Some(r) => PeerId::from_bytes(&r.id), - None => return Err(FatalUpgradeError::MissingPeer), + None => return Err(Error::MissingPeer), }; - let dst = match peer_id_res { - Ok(res) => res, - Err(_) => return Err(FatalUpgradeError::ParsePeerId), - }; + let dst = peer_id_res.map_err(|_| Error::ParsePeerId)?; Either::Right(CircuitReq { dst, substream }) } - Type::STATUS => return Err(FatalUpgradeError::UnexpectedTypeStatus), + Type::STATUS => return Err(Error::UnexpectedTypeStatus), }; Ok(req) diff --git a/protocols/relay/src/protocol/inbound_stop.rs b/protocols/relay/src/protocol/inbound_stop.rs index caaeee9cc533..b698a5ff769b 100644 --- a/protocols/relay/src/protocol/inbound_stop.rs +++ b/protocols/relay/src/protocol/inbound_stop.rs @@ -25,9 +25,10 @@ use bytes::Bytes; use futures::prelude::*; use libp2p_identity::PeerId; use libp2p_swarm::Stream; +use std::io; use thiserror::Error; -pub(crate) async fn handle_open_circuit(io: Stream) -> Result { +pub(crate) async fn handle_open_circuit(io: Stream) -> Result { let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); let proto::StopMessage { @@ -38,42 +39,42 @@ pub(crate) async fn handle_open_circuit(io: Stream) -> Result { - let src_peer_id = PeerId::from_bytes(&peer.ok_or(FatalUpgradeError::MissingPeer)?.id) - .map_err(|_| FatalUpgradeError::ParsePeerId)?; + let src_peer_id = PeerId::from_bytes(&peer.ok_or(ProtocolViolation::MissingPeer)?.id) + .map_err(|_| ProtocolViolation::ParsePeerId)?; Ok(Circuit { substream, src_peer_id, limit: limit.map(Into::into), }) } - proto::StopMessageType::STATUS => Err(FatalUpgradeError::UnexpectedTypeStatus), + 
proto::StopMessageType::STATUS => { + Err(Error::Protocol(ProtocolViolation::UnexpectedTypeStatus)) + } } } #[derive(Debug, Error)] -pub enum UpgradeError { - #[error("Fatal")] - Fatal(#[from] FatalUpgradeError), +pub(crate) enum Error { + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), + #[error("IO error")] + Io(#[from] io::Error), } -impl From for UpgradeError { +impl From for Error { fn from(error: quick_protobuf_codec::Error) -> Self { - Self::Fatal(error.into()) + Self::Protocol(ProtocolViolation::Codec(error)) } } #[derive(Debug, Error)] -pub enum FatalUpgradeError { +pub(crate) enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, - #[error("Failed to parse response type field.")] - ParseTypeField, #[error("Failed to parse peer id.")] ParsePeerId, #[error("Expected 'peer' field to be set.")] @@ -97,7 +98,7 @@ impl Circuit { self.limit } - pub(crate) async fn accept(mut self) -> Result<(Stream, Bytes), UpgradeError> { + pub(crate) async fn accept(mut self) -> Result<(Stream, Bytes), Error> { let msg = proto::StopMessage { type_pb: proto::StopMessageType::STATUS, peer: None, @@ -121,7 +122,7 @@ impl Circuit { Ok((io, read_buffer.freeze())) } - pub(crate) async fn deny(mut self, status: proto::Status) -> Result<(), UpgradeError> { + pub(crate) async fn deny(mut self, status: proto::Status) -> Result<(), Error> { let msg = proto::StopMessage { type_pb: proto::StopMessageType::STATUS, peer: None, @@ -129,10 +130,12 @@ impl Circuit { status: Some(status), }; - self.send(msg).await.map_err(Into::into) + self.send(msg).await?; + + Ok(()) } - async fn send(&mut self, msg: proto::StopMessage) -> Result<(), quick_protobuf_codec::Error> { + async fn send(&mut self, msg: proto::StopMessage) -> Result<(), Error> { self.substream.send(msg).await?; self.substream.flush().await?; diff --git a/protocols/relay/src/protocol/outbound_hop.rs 
b/protocols/relay/src/protocol/outbound_hop.rs index adad0e23711d..3ae824be167c 100644 --- a/protocols/relay/src/protocol/outbound_hop.rs +++ b/protocols/relay/src/protocol/outbound_hop.rs @@ -18,26 +18,24 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::io; use std::time::{Duration, SystemTime}; use asynchronous_codec::{Framed, FramedParts}; -use futures::channel::{mpsc, oneshot}; +use bytes::Bytes; use futures::prelude::*; use futures_timer::Delay; -use log::debug; use thiserror::Error; -use void::Void; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::Stream; -use crate::priv_client::transport; use crate::protocol::{Limit, MAX_MESSAGE_SIZE}; -use crate::{priv_client, proto}; +use crate::{proto, HOP_PROTOCOL_NAME}; #[derive(Debug, Error)] -pub enum CircuitFailedReason { +pub enum ConnectError { #[error("Remote reported resource limit exceeded.")] ResourceLimitExceeded, #[error("Relay failed to connect to destination.")] @@ -46,22 +44,32 @@ pub enum CircuitFailedReason { NoReservation, #[error("Remote denied permission.")] PermissionDenied, + #[error("Remote does not support the `{HOP_PROTOCOL_NAME}` protocol")] + Unsupported, + #[error("IO error")] + Io(#[from] io::Error), + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), } #[derive(Debug, Error)] -pub enum ReservationFailedReason { +pub enum ReserveError { #[error("Reservation refused.")] Refused, #[error("Remote reported resource limit exceeded.")] ResourceLimitExceeded, + #[error("Remote does not support the `{HOP_PROTOCOL_NAME}` protocol")] + Unsupported, + #[error("IO error")] + Io(#[from] io::Error), + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), } #[derive(Debug, Error)] -pub enum FatalUpgradeError { +pub enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, #[error("Expected 'status' field to be 
set.")] MissingStatusField, #[error("Expected 'reservation' field to be set.")] @@ -72,33 +80,39 @@ pub enum FatalUpgradeError { InvalidReservationExpiration, #[error("Invalid addresses in reservation.")] InvalidReservationAddrs, - #[error("Failed to parse response type field.")] - ParseTypeField, #[error("Unexpected message type 'connect'")] UnexpectedTypeConnect, #[error("Unexpected message type 'reserve'")] UnexpectedTypeReserve, - #[error("Failed to parse response type field.")] - ParseStatusField, #[error("Unexpected message status '{0:?}'")] UnexpectedStatus(proto::Status), } +impl From for ConnectError { + fn from(e: quick_protobuf_codec::Error) -> Self { + ConnectError::Protocol(ProtocolViolation::Codec(e)) + } +} + +impl From for ReserveError { + fn from(e: quick_protobuf_codec::Error) -> Self { + ReserveError::Protocol(ProtocolViolation::Codec(e)) + } +} + pub(crate) struct Reservation { pub(crate) renewal_timeout: Delay, pub(crate) addrs: Vec, pub(crate) limit: Option, - pub(crate) to_listener: mpsc::Sender, } pub(crate) struct Circuit { + pub(crate) stream: Stream, + pub(crate) read_buffer: Bytes, pub(crate) limit: Option, } -pub(crate) async fn handle_reserve_message_response( - protocol: Stream, - to_listener: mpsc::Sender, -) -> Result, FatalUpgradeError> { +pub(crate) async fn make_reservation(stream: Stream) -> Result { let msg = proto::HopMessage { type_pb: proto::HopMessageType::RESERVE, peer: None, @@ -106,10 +120,12 @@ pub(crate) async fn handle_reserve_message_response( limit: None, status: None, }; - let mut substream = Framed::new(protocol, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); + let mut substream = Framed::new(stream, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); substream.send(msg).await?; + substream.close().await?; + let proto::HopMessage { type_pb, peer: _, @@ -119,35 +135,47 @@ pub(crate) async fn handle_reserve_message_response( } = substream .next() .await - .ok_or(FatalUpgradeError::StreamClosed)??; + 
.ok_or(ReserveError::Io(io::ErrorKind::UnexpectedEof.into()))??; match type_pb { proto::HopMessageType::CONNECT => { - return Err(FatalUpgradeError::UnexpectedTypeConnect); + return Err(ReserveError::Protocol( + ProtocolViolation::UnexpectedTypeConnect, + )); } proto::HopMessageType::RESERVE => { - return Err(FatalUpgradeError::UnexpectedTypeReserve); + return Err(ReserveError::Protocol( + ProtocolViolation::UnexpectedTypeReserve, + )); } proto::HopMessageType::STATUS => {} } let limit = limit.map(Into::into); - match status.ok_or(FatalUpgradeError::MissingStatusField)? { + match status.ok_or(ProtocolViolation::MissingStatusField)? { proto::Status::OK => {} proto::Status::RESERVATION_REFUSED => { - return Ok(Err(ReservationFailedReason::Refused)); + return Err(ReserveError::Refused); } proto::Status::RESOURCE_LIMIT_EXCEEDED => { - return Ok(Err(ReservationFailedReason::ResourceLimitExceeded)); + return Err(ReserveError::ResourceLimitExceeded); + } + s => { + return Err(ReserveError::Protocol(ProtocolViolation::UnexpectedStatus( + s, + ))) } - s => return Err(FatalUpgradeError::UnexpectedStatus(s)), } - let reservation = reservation.ok_or(FatalUpgradeError::MissingReservationField)?; + let reservation = reservation.ok_or(ReserveError::Protocol( + ProtocolViolation::MissingReservationField, + ))?; if reservation.addrs.is_empty() { - return Err(FatalUpgradeError::NoAddressesInReservation); + return Err(ReserveError::Protocol( + ProtocolViolation::NoAddressesInReservation, + )); } let addrs = reservation @@ -155,7 +183,7 @@ pub(crate) async fn handle_reserve_message_response( .into_iter() .map(|b| Multiaddr::try_from(b.to_vec())) .collect::, _>>() - .map_err(|_| FatalUpgradeError::InvalidReservationAddrs)?; + .map_err(|_| ReserveError::Protocol(ProtocolViolation::InvalidReservationAddrs))?; let renewal_timeout = reservation .expire @@ -169,28 +197,25 @@ pub(crate) async fn handle_reserve_message_response( .and_then(|duration| duration.checked_sub(duration / 4)) 
.map(Duration::from_secs) .map(Delay::new) - .ok_or(FatalUpgradeError::InvalidReservationExpiration)?; - - substream.close().await?; + .ok_or(ReserveError::Protocol( + ProtocolViolation::InvalidReservationExpiration, + ))?; - Ok(Ok(Reservation { + Ok(Reservation { renewal_timeout, addrs, limit, - to_listener, - })) + }) } -pub(crate) async fn handle_connection_message_response( +pub(crate) async fn open_circuit( protocol: Stream, - remote_peer_id: PeerId, - con_command: Command, - tx: oneshot::Sender, -) -> Result, CircuitFailedReason>, FatalUpgradeError> { + dst_peer_id: PeerId, +) -> Result { let msg = proto::HopMessage { type_pb: proto::HopMessageType::CONNECT, peer: Some(proto::Peer { - id: con_command.dst_peer_id.to_bytes(), + id: dst_peer_id.to_bytes(), addrs: vec![], }), reservation: None, @@ -200,9 +225,7 @@ pub(crate) async fn handle_connection_message_response( let mut substream = Framed::new(protocol, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); - if substream.send(msg).await.is_err() { - return Err(FatalUpgradeError::StreamClosed); - } + substream.send(msg).await?; let proto::HopMessage { type_pb, @@ -210,17 +233,21 @@ pub(crate) async fn handle_connection_message_response( reservation: _, limit, status, - } = match substream.next().await { - Some(Ok(r)) => r, - _ => return Err(FatalUpgradeError::StreamClosed), - }; + } = substream + .next() + .await + .ok_or(ConnectError::Io(io::ErrorKind::UnexpectedEof.into()))??; match type_pb { proto::HopMessageType::CONNECT => { - return Err(FatalUpgradeError::UnexpectedTypeConnect); + return Err(ConnectError::Protocol( + ProtocolViolation::UnexpectedTypeConnect, + )); } proto::HopMessageType::RESERVE => { - return Err(FatalUpgradeError::UnexpectedTypeReserve); + return Err(ConnectError::Protocol( + ProtocolViolation::UnexpectedTypeReserve, + )); } proto::HopMessageType::STATUS => {} } @@ -228,22 +255,26 @@ pub(crate) async fn handle_connection_message_response( match status { Some(proto::Status::OK) => {} 
Some(proto::Status::RESOURCE_LIMIT_EXCEEDED) => { - return Ok(Err(CircuitFailedReason::ResourceLimitExceeded)); + return Err(ConnectError::ResourceLimitExceeded); } Some(proto::Status::CONNECTION_FAILED) => { - return Ok(Err(CircuitFailedReason::ConnectionFailed)); + return Err(ConnectError::ConnectionFailed); } Some(proto::Status::NO_RESERVATION) => { - return Ok(Err(CircuitFailedReason::NoReservation)); + return Err(ConnectError::NoReservation); } Some(proto::Status::PERMISSION_DENIED) => { - return Ok(Err(CircuitFailedReason::PermissionDenied)); + return Err(ConnectError::PermissionDenied); } Some(s) => { - return Err(FatalUpgradeError::UnexpectedStatus(s)); + return Err(ConnectError::Protocol(ProtocolViolation::UnexpectedStatus( + s, + ))); } None => { - return Err(FatalUpgradeError::MissingStatusField); + return Err(ConnectError::Protocol( + ProtocolViolation::MissingStatusField, + )); } } @@ -260,40 +291,11 @@ pub(crate) async fn handle_connection_message_response( "Expect a flushed Framed to have empty write buffer." ); - match con_command.send_back.send(Ok(priv_client::Connection { - state: priv_client::ConnectionState::new_outbound(io, read_buffer.freeze(), tx), - })) { - Ok(()) => Ok(Ok(Some(Circuit { limit }))), - Err(_) => { - debug!( - "Oneshot to `client::transport::Dial` future dropped. 
\ - Dropping established relayed connection to {:?}.", - remote_peer_id, - ); - - Ok(Ok(None)) - } - } -} - -pub(crate) enum OutboundStreamInfo { - Reserve(mpsc::Sender), - CircuitConnection(Command), -} - -pub(crate) struct Command { - dst_peer_id: PeerId, - pub(crate) send_back: oneshot::Sender>, -} + let circuit = Circuit { + stream: io, + read_buffer: read_buffer.freeze(), + limit, + }; -impl Command { - pub(crate) fn new( - dst_peer_id: PeerId, - send_back: oneshot::Sender>, - ) -> Self { - Self { - dst_peer_id, - send_back, - } - } + Ok(circuit) } diff --git a/protocols/relay/src/protocol/outbound_stop.rs b/protocols/relay/src/protocol/outbound_stop.rs index e45029579959..525ebc108218 100644 --- a/protocols/relay/src/protocol/outbound_stop.rs +++ b/protocols/relay/src/protocol/outbound_stop.rs @@ -18,134 +18,120 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::io; use std::time::Duration; use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; -use futures::channel::oneshot::{self}; use futures::prelude::*; use thiserror::Error; use libp2p_identity::PeerId; -use libp2p_swarm::{ConnectionId, Stream, StreamUpgradeError}; +use libp2p_swarm::Stream; -use crate::behaviour::handler::Config; -use crate::protocol::{inbound_hop, MAX_MESSAGE_SIZE}; -use crate::{proto, CircuitId}; +use crate::protocol::MAX_MESSAGE_SIZE; +use crate::{proto, STOP_PROTOCOL_NAME}; #[derive(Debug, Error)] -pub(crate) enum UpgradeError { - #[error("Circuit failed")] - CircuitFailed(#[from] CircuitFailedReason), - #[error("Fatal")] - Fatal(#[from] FatalUpgradeError), -} - -impl From for UpgradeError { - fn from(error: quick_protobuf_codec::Error) -> Self { - Self::Fatal(error.into()) - } -} - -#[derive(Debug, Error)] -pub enum CircuitFailedReason { +pub enum Error { #[error("Remote reported resource limit exceeded.")] ResourceLimitExceeded, #[error("Remote reported permission denied.")] PermissionDenied, + #[error("Remote 
does not support the `{STOP_PROTOCOL_NAME}` protocol")] + Unsupported, + #[error("IO error")] + Io(#[source] io::Error), + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), +} + +impl Error { + pub(crate) fn to_status(&self) -> proto::Status { + match self { + Error::ResourceLimitExceeded => proto::Status::RESOURCE_LIMIT_EXCEEDED, + Error::PermissionDenied => proto::Status::PERMISSION_DENIED, + Error::Unsupported => proto::Status::CONNECTION_FAILED, + Error::Io(_) => proto::Status::CONNECTION_FAILED, + Error::Protocol( + ProtocolViolation::UnexpectedStatus(_) | ProtocolViolation::UnexpectedTypeConnect, + ) => proto::Status::UNEXPECTED_MESSAGE, + Error::Protocol(_) => proto::Status::MALFORMED_MESSAGE, + } + } } +/// Depicts all forms of protocol violations. #[derive(Debug, Error)] -pub enum FatalUpgradeError { +pub enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, #[error("Expected 'status' field to be set.")] MissingStatusField, - #[error("Failed to parse response type field.")] - ParseTypeField, #[error("Unexpected message type 'connect'")] UnexpectedTypeConnect, - #[error("Failed to parse response type field.")] - ParseStatusField, #[error("Unexpected message status '{0:?}'")] UnexpectedStatus(proto::Status), } +impl From for Error { + fn from(e: quick_protobuf_codec::Error) -> Self { + Error::Protocol(ProtocolViolation::Codec(e)) + } +} + /// Attempts to _connect_ to a peer via the given stream. 
pub(crate) async fn connect( io: Stream, - stop_command: PendingConnect, - tx: oneshot::Sender<()>, -) -> Result, FatalUpgradeError> { + src_peer_id: PeerId, + max_duration: Duration, + max_bytes: u64, +) -> Result { let msg = proto::StopMessage { type_pb: proto::StopMessageType::CONNECT, peer: Some(proto::Peer { - id: stop_command.src_peer_id.to_bytes(), + id: src_peer_id.to_bytes(), addrs: vec![], }), limit: Some(proto::Limit { duration: Some( - stop_command - .max_circuit_duration + max_duration .as_secs() .try_into() .expect("`max_circuit_duration` not to exceed `u32::MAX`."), ), - data: Some(stop_command.max_circuit_bytes), + data: Some(max_bytes), }), status: None, }; let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); - if substream.send(msg).await.is_err() { - return Err(FatalUpgradeError::StreamClosed); - } - - let res = substream.next().await; - - if let None | Some(Err(_)) = res { - return Err(FatalUpgradeError::StreamClosed); - } + substream.send(msg).await?; let proto::StopMessage { type_pb, peer: _, limit: _, status, - } = res.unwrap().expect("should be ok"); + } = substream + .next() + .await + .ok_or(Error::Io(io::ErrorKind::UnexpectedEof.into()))??; match type_pb { - proto::StopMessageType::CONNECT => return Err(FatalUpgradeError::UnexpectedTypeConnect), + proto::StopMessageType::CONNECT => { + return Err(Error::Protocol(ProtocolViolation::UnexpectedTypeConnect)) + } proto::StopMessageType::STATUS => {} } match status { Some(proto::Status::OK) => {} - Some(proto::Status::RESOURCE_LIMIT_EXCEEDED) => { - return Ok(Err(CircuitFailed { - circuit_id: stop_command.circuit_id, - src_peer_id: stop_command.src_peer_id, - src_connection_id: stop_command.src_connection_id, - inbound_circuit_req: stop_command.inbound_circuit_req, - status: proto::Status::RESOURCE_LIMIT_EXCEEDED, - error: StreamUpgradeError::Apply(CircuitFailedReason::ResourceLimitExceeded), - })) - } - Some(proto::Status::PERMISSION_DENIED) => { - return 
Ok(Err(CircuitFailed { - circuit_id: stop_command.circuit_id, - src_peer_id: stop_command.src_peer_id, - src_connection_id: stop_command.src_connection_id, - inbound_circuit_req: stop_command.inbound_circuit_req, - status: proto::Status::PERMISSION_DENIED, - error: StreamUpgradeError::Apply(CircuitFailedReason::PermissionDenied), - })) - } - Some(s) => return Err(FatalUpgradeError::UnexpectedStatus(s)), - None => return Err(FatalUpgradeError::MissingStatusField), + Some(proto::Status::RESOURCE_LIMIT_EXCEEDED) => return Err(Error::ResourceLimitExceeded), + Some(proto::Status::PERMISSION_DENIED) => return Err(Error::PermissionDenied), + Some(s) => return Err(Error::Protocol(ProtocolViolation::UnexpectedStatus(s))), + None => return Err(Error::Protocol(ProtocolViolation::MissingStatusField)), } let FramedParts { @@ -159,60 +145,13 @@ pub(crate) async fn connect( "Expect a flushed Framed to have an empty write buffer." ); - Ok(Ok(Circuit { - circuit_id: stop_command.circuit_id, - src_peer_id: stop_command.src_peer_id, - src_connection_id: stop_command.src_connection_id, - inbound_circuit_req: stop_command.inbound_circuit_req, - dst_handler_notifier: tx, + Ok(Circuit { dst_stream: io, dst_pending_data: read_buffer.freeze(), - })) + }) } pub(crate) struct Circuit { - pub(crate) circuit_id: CircuitId, - pub(crate) src_peer_id: PeerId, - pub(crate) src_connection_id: ConnectionId, - pub(crate) inbound_circuit_req: inbound_hop::CircuitReq, - pub(crate) dst_handler_notifier: oneshot::Sender<()>, pub(crate) dst_stream: Stream, pub(crate) dst_pending_data: Bytes, } - -pub(crate) struct CircuitFailed { - pub(crate) circuit_id: CircuitId, - pub(crate) src_peer_id: PeerId, - pub(crate) src_connection_id: ConnectionId, - pub(crate) inbound_circuit_req: inbound_hop::CircuitReq, - pub(crate) status: proto::Status, - pub(crate) error: StreamUpgradeError, -} - -pub(crate) struct PendingConnect { - pub(crate) circuit_id: CircuitId, - pub(crate) inbound_circuit_req: 
inbound_hop::CircuitReq, - pub(crate) src_peer_id: PeerId, - pub(crate) src_connection_id: ConnectionId, - max_circuit_duration: Duration, - max_circuit_bytes: u64, -} - -impl PendingConnect { - pub(crate) fn new( - circuit_id: CircuitId, - inbound_circuit_req: inbound_hop::CircuitReq, - src_peer_id: PeerId, - src_connection_id: ConnectionId, - config: &Config, - ) -> Self { - Self { - circuit_id, - inbound_circuit_req, - src_peer_id, - src_connection_id, - max_circuit_duration: config.max_circuit_duration, - max_circuit_bytes: config.max_circuit_bytes, - } - } -} diff --git a/protocols/relay/tests/lib.rs b/protocols/relay/tests/lib.rs index b7784d17b3a0..2b28d5a50cd8 100644 --- a/protocols/relay/tests/lib.rs +++ b/protocols/relay/tests/lib.rs @@ -33,12 +33,18 @@ use libp2p_identity::PeerId; use libp2p_ping as ping; use libp2p_plaintext as plaintext; use libp2p_relay as relay; -use libp2p_swarm::{NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p_swarm::dial_opts::DialOpts; +use libp2p_swarm::{Config, DialError, NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p_swarm_test::SwarmExt; +use std::error::Error; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[test] fn reservation() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -79,7 +85,9 @@ fn reservation() { #[test] fn new_reservation_to_same_relay_replaces_old() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -111,7 +119,7 @@ fn new_reservation_to_same_relay_replaces_old() { )); // Trigger new reservation. 
- let new_listener = client.listen_on(client_addr).unwrap(); + let new_listener = client.listen_on(client_addr.clone()).unwrap(); // Wait for // - listener of old reservation to close @@ -161,6 +169,12 @@ fn new_reservation_to_same_relay_replaces_old() { break; } } + SwarmEvent::ExternalAddrConfirmed { address } => { + assert_eq!( + address, + client_addr.clone().with(Protocol::P2p(client_peer_id)) + ); + } SwarmEvent::Behaviour(ClientEvent::Ping(_)) => {} e => panic!("{e:?}"), } @@ -170,7 +184,9 @@ fn new_reservation_to_same_relay_replaces_old() { #[test] fn connect() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -254,7 +270,9 @@ async fn connection_established_to( #[test] fn handle_dial_failure() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -271,9 +289,116 @@ fn handle_dial_failure() { assert!(!pool.run_until(wait_for_dial(&mut client, relay_peer_id))); } +#[test] +fn propagate_reservation_error_to_listener() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + let mut pool = LocalPool::new(); + + let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); + let mut relay = build_relay_with_config(relay::Config { + max_reservations: 0, // Will make us fail to make the reservation + ..relay::Config::default() + }); + let relay_peer_id = *relay.local_peer_id(); + + relay.listen_on(relay_addr.clone()).unwrap(); + relay.add_external_address(relay_addr.clone()); + spawn_swarm_on_pool(&pool, relay); + + let client_addr = relay_addr + .with(Protocol::P2p(relay_peer_id)) + 
.with(Protocol::P2pCircuit); + let mut client = build_client(); + + let reservation_listener = client.listen_on(client_addr.clone()).unwrap(); + + // Wait for connection to relay. + assert!(pool.run_until(wait_for_dial(&mut client, relay_peer_id))); + + let error = pool.run_until(client.wait(|e| match e { + SwarmEvent::ListenerClosed { + listener_id, + reason: Err(e), + .. + } if listener_id == reservation_listener => Some(e), + _ => None, + })); + + let error = error + .source() + .unwrap() + .downcast_ref::() + .unwrap(); + + assert!(matches!( + error, + relay::outbound::hop::ReserveError::ResourceLimitExceeded + )); +} + +#[test] +fn propagate_connect_error_to_unknown_peer_to_dialer() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + let mut pool = LocalPool::new(); + + let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); + let mut relay = build_relay(); + let relay_peer_id = *relay.local_peer_id(); + + relay.listen_on(relay_addr.clone()).unwrap(); + relay.add_external_address(relay_addr.clone()); + spawn_swarm_on_pool(&pool, relay); + + let mut src = build_client(); + + let dst_peer_id = PeerId::random(); // We don't have a destination peer in this test, so the CONNECT request will fail. + let dst_addr = relay_addr + .with(Protocol::P2p(relay_peer_id)) + .with(Protocol::P2pCircuit) + .with(Protocol::P2p(dst_peer_id)); + + let opts = DialOpts::from(dst_addr.clone()); + let circuit_connection_id = opts.connection_id(); + + src.dial(opts).unwrap(); + + let (failed_address, error) = pool.run_until(src.wait(|e| match e { + SwarmEvent::OutgoingConnectionError { + connection_id, + error: DialError::Transport(mut errors), + .. 
+ } if connection_id == circuit_connection_id => { + assert_eq!(errors.len(), 1); + Some(errors.remove(0)) + } + _ => None, + })); + + // This is a bit wonky but we need to get the _actual_ source error :) + let error = error + .source() + .unwrap() + .source() + .unwrap() + .downcast_ref::() + .unwrap(); + + assert_eq!(failed_address, dst_addr); + assert!(matches!( + error, + relay::outbound::hop::ConnectError::NoReservation + )); +} + #[test] fn reuse_connection() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -288,7 +413,11 @@ fn reuse_connection() { .clone() .with(Protocol::P2p(relay_peer_id)) .with(Protocol::P2pCircuit); - let mut client = build_client(); + + // To reuse the connection, we need to ensure it is not shut down due to being idle. + let mut client = build_client_with_config( + Config::with_async_std_executor().with_idle_connection_timeout(Duration::from_secs(1)), + ); let client_peer_id = *client.local_peer_id(); client.dial(relay_addr).unwrap(); @@ -305,29 +434,34 @@ fn reuse_connection() { } fn build_relay() -> Swarm { + build_relay_with_config(relay::Config { + reservation_duration: Duration::from_secs(2), + ..Default::default() + }) +} + +fn build_relay_with_config(config: relay::Config) -> Swarm { let local_key = identity::Keypair::generate_ed25519(); let local_peer_id = local_key.public().to_peer_id(); let transport = upgrade_transport(MemoryTransport::default().boxed(), &local_key); - SwarmBuilder::with_async_std_executor( + Swarm::new( transport, Relay { ping: ping::Behaviour::new(ping::Config::new()), - relay: relay::Behaviour::new( - local_peer_id, - relay::Config { - reservation_duration: Duration::from_secs(2), - ..Default::default() - }, - ), + relay: relay::Behaviour::new(local_peer_id, config), }, local_peer_id, + 
Config::with_async_std_executor(), ) - .build() } fn build_client() -> Swarm { + build_client_with_config(Config::with_async_std_executor()) +} + +fn build_client_with_config(config: Config) -> Swarm { let local_key = identity::Keypair::generate_ed25519(); let local_peer_id = local_key.public().to_peer_id(); @@ -337,15 +471,15 @@ fn build_client() -> Swarm { &local_key, ); - SwarmBuilder::with_async_std_executor( + Swarm::new( transport, Client { ping: ping::Behaviour::new(ping::Config::new()), relay: behaviour, }, local_peer_id, + config, ) - .build() } fn upgrade_transport( @@ -393,6 +527,9 @@ async fn wait_for_reservation( loop { match client.select_next_some().await { + SwarmEvent::ExternalAddrConfirmed { address } if !is_renewal => { + assert_eq!(address, client_addr); + } SwarmEvent::Behaviour(ClientEvent::Relay( relay::client::Event::ReservationReqAccepted { relay_peer_id: peer_id, diff --git a/protocols/rendezvous/CHANGELOG.md b/protocols/rendezvous/CHANGELOG.md index 15438502daab..e60699da7346 100644 --- a/protocols/rendezvous/CHANGELOG.md +++ b/protocols/rendezvous/CHANGELOG.md @@ -1,4 +1,13 @@ -## 0.13.0 +## 0.14.0 + + +## 0.13.1 +- Refresh registration upon a change in external addresses. + See [PR 4629]. + +[PR 4629]: https://github.com/libp2p/rust-libp2p/pull/4629 + +## 0.13.0 - Changed the signature of the function `client::Behavior::register()`, it returns `Result<(), RegisterError>` now. 
diff --git a/protocols/rendezvous/Cargo.toml b/protocols/rendezvous/Cargo.toml index c3912b78ddf3..f9272d011e37 100644 --- a/protocols/rendezvous/Cargo.toml +++ b/protocols/rendezvous/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-rendezvous" edition = "2021" rust-version = { workspace = true } description = "Rendezvous protocol for libp2p" -version = "0.13.0" +version = "0.14.0" authors = ["The COMIT guys "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,7 +11,7 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -asynchronous-codec = "0.6" +asynchronous-codec = { workspace = true } async-trait = "0.1" bimap = "0.6.3" futures = { version = "0.3", default-features = false, features = ["std"] } @@ -21,24 +21,24 @@ libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } libp2p-request-response = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" thiserror = "1" +tracing = "0.1.37" void = "1" [dev-dependencies] -env_logger = "0.10.0" libp2p-swarm = { workspace = true, features = ["macros", "tokio"] } libp2p-noise = { workspace = true } libp2p-ping = { workspace = true } libp2p-identify = { workspace = true } -libp2p-yamux = { workspace = true } +libp2p-swarm-test = { path = "../../swarm-test" } libp2p-tcp = { workspace = true, features = ["tokio"] } +libp2p-yamux = { workspace = true } rand = "0.8" -tokio = { version = "1.32", features = [ "rt-multi-thread", "time", "macros", "sync", "process", "fs", "net" ] } -libp2p-swarm-test = { path = "../../swarm-test" } +tokio = { version = "1.36", features = [ "rt-multi-thread", "time", "macros", "sync", "process", "fs", "net" ] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/rendezvous/src/client.rs b/protocols/rendezvous/src/client.rs index 505635efda8a..92d7884758b5 100644 --- a/protocols/rendezvous/src/client.rs +++ b/protocols/rendezvous/src/client.rs @@ -26,10 +26,10 @@ use futures::stream::FuturesUnordered; use futures::stream::StreamExt; use libp2p_core::{Endpoint, Multiaddr, PeerRecord}; use libp2p_identity::{Keypair, PeerId, SigningError}; -use libp2p_request_response::{ProtocolSupport, RequestId}; +use libp2p_request_response::{OutboundRequestId, ProtocolSupport}; use libp2p_swarm::{ - ConnectionDenied, ConnectionId, ExternalAddresses, FromSwarm, NetworkBehaviour, PollParameters, - THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionId, ExternalAddresses, FromSwarm, NetworkBehaviour, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::collections::HashMap; use std::iter; @@ -41,14 +41,16 @@ pub struct Behaviour { keypair: Keypair, - waiting_for_register: HashMap, - waiting_for_discovery: HashMap)>, + waiting_for_register: HashMap, + waiting_for_discovery: HashMap)>, /// Hold addresses of all peers that we have discovered so far. /// /// Storing these internally allows us to assist the [`libp2p_swarm::Swarm`] in dialing by returning addresses from [`NetworkBehaviour::handle_pending_outbound_connection`]. discovered_peers: HashMap<(PeerId, Namespace), Vec>, + registered_namespaces: HashMap<(PeerId, Namespace), Ttl>, + /// Tracks the expiry of registrations that we have discovered and stored in `discovered_peers` otherwise we have a memory leak. 
expiring_registrations: FuturesUnordered>, @@ -68,6 +70,7 @@ impl Behaviour { waiting_for_register: Default::default(), waiting_for_discovery: Default::default(), discovered_peers: Default::default(), + registered_namespaces: Default::default(), expiring_registrations: FuturesUnordered::from_iter(vec![ futures::future::pending().boxed() ]), @@ -103,6 +106,9 @@ impl Behaviour { /// Unregister ourselves from the given namespace with the given rendezvous peer. pub fn unregister(&mut self, namespace: Namespace, rendezvous_node: PeerId) { + self.registered_namespaces + .retain(|(rz_node, ns), _| rz_node.ne(&rendezvous_node) && ns.ne(&namespace)); + self.inner .send_request(&rendezvous_node, Unregister(namespace)); } @@ -217,21 +223,30 @@ impl NetworkBehaviour for Behaviour { .on_connection_handler_event(peer_id, connection_id, event); } - fn on_swarm_event(&mut self, event: FromSwarm) { - self.external_addresses.on_swarm_event(&event); + fn on_swarm_event(&mut self, event: FromSwarm) { + let changed = self.external_addresses.on_swarm_event(&event); self.inner.on_swarm_event(event); + + if changed && self.external_addresses.iter().count() > 0 { + let registered = self.registered_namespaces.clone(); + for ((rz_node, ns), ttl) in registered { + if let Err(e) = self.register(ns, rz_node, Some(ttl)) { + tracing::warn!("refreshing registration failed: {e}") + } + } + } } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - params: &mut impl PollParameters, ) -> Poll>> { use libp2p_request_response as req_res; loop { - match self.inner.poll(cx, params) { + match self.inner.poll(cx) { Poll::Ready(ToSwarm::GenerateEvent(req_res::Event::Message { message: req_res::Message::Response { @@ -266,16 +281,7 @@ impl NetworkBehaviour for Behaviour { )) => { unreachable!("rendezvous clients never receive requests") } - Poll::Ready( - other @ (ToSwarm::ExternalAddrConfirmed(_) - | 
ToSwarm::ExternalAddrExpired(_) - | ToSwarm::NewExternalAddrCandidate(_) - | ToSwarm::NotifyHandler { .. } - | ToSwarm::Dial { .. } - | ToSwarm::CloseConnection { .. } - | ToSwarm::ListenOn { .. } - | ToSwarm::RemoveListener { .. }), - ) => { + Poll::Ready(other) => { let new_to_swarm = other.map_out(|_| unreachable!("we manually map `GenerateEvent` variants")); @@ -322,7 +328,7 @@ impl NetworkBehaviour for Behaviour { } impl Behaviour { - fn event_for_outbound_failure(&mut self, req_id: &RequestId) -> Option { + fn event_for_outbound_failure(&mut self, req_id: &OutboundRequestId) -> Option { if let Some((rendezvous_node, namespace)) = self.waiting_for_register.remove(req_id) { return Some(Event::RegisterFailed { rendezvous_node, @@ -342,12 +348,19 @@ impl Behaviour { None } - fn handle_response(&mut self, request_id: &RequestId, response: Message) -> Option { + fn handle_response( + &mut self, + request_id: &OutboundRequestId, + response: Message, + ) -> Option { match response { RegisterResponse(Ok(ttl)) => { if let Some((rendezvous_node, namespace)) = self.waiting_for_register.remove(request_id) { + self.registered_namespaces + .insert((rendezvous_node, namespace.clone()), ttl); + return Some(Event::Registered { rendezvous_node, ttl, diff --git a/protocols/rendezvous/src/codec.rs b/protocols/rendezvous/src/codec.rs index bfc3cf275fc0..41432a91d8c6 100644 --- a/protocols/rendezvous/src/codec.rs +++ b/protocols/rendezvous/src/codec.rs @@ -208,10 +208,10 @@ pub enum ErrorCode { } impl Encoder for Codec { - type Item = Message; + type Item<'a> = Message; type Error = Error; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { let mut pb: ProtobufCodec = ProtobufCodec::new(MAX_MESSAGE_LEN_BYTES); pb.encode(proto::Message::from(item), dst)?; @@ -227,9 +227,8 @@ impl Decoder for Codec { fn decode(&mut self, src: &mut BytesMut) -> Result, 
Self::Error> { let mut pb: ProtobufCodec = ProtobufCodec::new(MAX_MESSAGE_LEN_BYTES); - let message = match pb.decode(src)? { - Some(p) => p, - None => return Ok(None), + let Some(message) = pb.decode(src)? else { + return Ok(None); }; Ok(Some(message.try_into()?)) diff --git a/protocols/rendezvous/src/server.rs b/protocols/rendezvous/src/server.rs index 44f2f97a6a09..667c71e20e37 100644 --- a/protocols/rendezvous/src/server.rs +++ b/protocols/rendezvous/src/server.rs @@ -29,8 +29,8 @@ use libp2p_identity::PeerId; use libp2p_request_response::ProtocolSupport; use libp2p_swarm::behaviour::FromSwarm; use libp2p_swarm::{ - ConnectionDenied, ConnectionId, NetworkBehaviour, PollParameters, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, + ToSwarm, }; use std::collections::{HashMap, HashSet}; use std::iter; @@ -155,10 +155,10 @@ impl NetworkBehaviour for Behaviour { .on_connection_handler_event(peer_id, connection, event); } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - params: &mut impl PollParameters, ) -> Poll>> { if let Poll::Ready(ExpiredRegistration(registration)) = self.registrations.poll(cx) { return Poll::Ready(ToSwarm::GenerateEvent(Event::RegistrationExpired( @@ -167,7 +167,7 @@ impl NetworkBehaviour for Behaviour { } loop { - if let Poll::Ready(to_swarm) = self.inner.poll(cx, params) { + if let Poll::Ready(to_swarm) = self.inner.poll(cx) { match to_swarm { ToSwarm::GenerateEvent(libp2p_request_response::Event::Message { peer: peer_id, @@ -195,7 +195,11 @@ impl NetworkBehaviour for Behaviour { request_id, error, }) => { - log::warn!("Inbound request {request_id} with peer {peer} failed: {error}"); + tracing::warn!( + %peer, + request=%request_id, + "Inbound request with peer failed: {error}" + ); continue; } @@ -211,15 +215,8 @@ impl NetworkBehaviour for Behaviour { }) 
=> { continue; } - ToSwarm::Dial { .. } - | ToSwarm::ListenOn { .. } - | ToSwarm::RemoveListener { .. } - | ToSwarm::NotifyHandler { .. } - | ToSwarm::NewExternalAddrCandidate(_) - | ToSwarm::ExternalAddrConfirmed(_) - | ToSwarm::ExternalAddrExpired(_) - | ToSwarm::CloseConnection { .. } => { - let new_to_swarm = to_swarm + other => { + let new_to_swarm = other .map_out(|_| unreachable!("we manually map `GenerateEvent` variants")); return Poll::Ready(new_to_swarm); @@ -231,7 +228,7 @@ impl NetworkBehaviour for Behaviour { } } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { self.inner.on_swarm_event(event); } } diff --git a/protocols/rendezvous/tests/rendezvous.rs b/protocols/rendezvous/tests/rendezvous.rs index 992876d19718..c2de88fd6150 100644 --- a/protocols/rendezvous/tests/rendezvous.rs +++ b/protocols/rendezvous/tests/rendezvous.rs @@ -20,6 +20,8 @@ use futures::stream::FuturesUnordered; use futures::StreamExt; +use libp2p_core::multiaddr::Protocol; +use libp2p_core::Multiaddr; use libp2p_identity as identity; use libp2p_rendezvous as rendezvous; use libp2p_rendezvous::client::RegisterError; @@ -27,10 +29,13 @@ use libp2p_swarm::{DialError, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use std::convert::TryInto; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn given_successful_registration_then_successful_discovery() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -83,7 +88,9 @@ async fn given_successful_registration_then_successful_discovery() { #[tokio::test] async fn should_return_error_when_no_external_addresses() { - let _ = env_logger::try_init(); + let _ = 
tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let server = new_server(rendezvous::server::Config::default()).await; let mut client = Swarm::new_ephemeral(rendezvous::client::Behaviour::new); @@ -98,7 +105,9 @@ async fn should_return_error_when_no_external_addresses() { #[tokio::test] async fn given_successful_registration_then_refresh_ttl() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -162,9 +171,64 @@ async fn given_successful_registration_then_refresh_ttl() { } } +#[tokio::test] +async fn given_successful_registration_then_refresh_external_addrs() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + let namespace = rendezvous::Namespace::from_static("some-namespace"); + let ([mut alice], mut robert) = + new_server_with_connected_clients(rendezvous::server::Config::default()).await; + + let roberts_peer_id = *robert.local_peer_id(); + + alice + .behaviour_mut() + .register(namespace.clone(), roberts_peer_id, None) + .unwrap(); + + match libp2p_swarm_test::drive(&mut alice, &mut robert).await { + ( + [rendezvous::client::Event::Registered { .. }], + [rendezvous::server::Event::PeerRegistered { .. }], + ) => {} + events => panic!("Unexpected events: {events:?}"), + } + + let external_addr = Multiaddr::empty().with(Protocol::Memory(0)); + + alice.add_external_address(external_addr.clone()); + + match libp2p_swarm_test::drive(&mut alice, &mut robert).await { + ( + [rendezvous::client::Event::Registered { .. }], + [rendezvous::server::Event::PeerRegistered { registration, .. 
}], + ) => { + let record = registration.record; + assert!(record.addresses().contains(&external_addr)); + } + events => panic!("Unexpected events: {events:?}"), + } + + alice.remove_external_address(&external_addr); + + match libp2p_swarm_test::drive(&mut alice, &mut robert).await { + ( + [rendezvous::client::Event::Registered { .. }], + [rendezvous::server::Event::PeerRegistered { registration, .. }], + ) => { + let record = registration.record; + assert!(!record.addresses().contains(&external_addr)); + } + events => panic!("Unexpected events: {events:?}"), + } +} + #[tokio::test] async fn given_invalid_ttl_then_unsuccessful_registration() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -191,7 +255,9 @@ async fn given_invalid_ttl_then_unsuccessful_registration() { #[tokio::test] async fn discover_allows_for_dial_by_peer_id() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -246,7 +312,9 @@ async fn discover_allows_for_dial_by_peer_id() { #[tokio::test] async fn eve_cannot_register() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let mut robert = new_server(rendezvous::server::Config::default()).await; let mut eve = new_impersonating_client().await; @@ -272,7 +340,9 @@ async fn eve_cannot_register() { // test if charlie can operate as client and server 
simultaneously #[tokio::test] async fn can_combine_client_and_server() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -308,7 +378,9 @@ async fn can_combine_client_and_server() { #[tokio::test] async fn registration_on_clients_expire() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], robert) = new_server_with_connected_clients(rendezvous::server::Config::default().with_min_ttl(1)) @@ -317,7 +389,7 @@ async fn registration_on_clients_expire() { let roberts_peer_id = *robert.local_peer_id(); tokio::spawn(robert.loop_on_next()); - let registration_ttl = 3; + let registration_ttl = 1; alice .behaviour_mut() @@ -336,7 +408,7 @@ async fn registration_on_clients_expire() { event => panic!("Unexpected event: {event:?}"), } - tokio::time::sleep(Duration::from_secs(registration_ttl + 5)).await; + tokio::time::sleep(Duration::from_secs(registration_ttl + 1)).await; let event = bob.select_next_some().await; let error = bob.dial(*alice.local_peer_id()).unwrap_err(); @@ -376,7 +448,7 @@ async fn new_server_with_connected_clients( async fn new_client() -> Swarm { let mut client = Swarm::new_ephemeral(rendezvous::client::Behaviour::new); - client.listen().await; // we need to listen otherwise we don't have addresses to register + client.listen().with_memory_addr_external().await; // we need to listen otherwise we don't have addresses to register client } @@ -384,7 +456,7 @@ async fn new_client() -> Swarm { async fn new_server(config: rendezvous::server::Config) -> Swarm { let mut server = Swarm::new_ephemeral(|_| 
rendezvous::server::Behaviour::new(config)); - server.listen().await; + server.listen().with_memory_addr_external().await; server } @@ -394,7 +466,7 @@ async fn new_combined_node() -> Swarm { client: rendezvous::client::Behaviour::new(identity), server: rendezvous::server::Behaviour::new(rendezvous::server::Config::default()), }); - node.listen().await; + node.listen().with_memory_addr_external().await; node } @@ -405,7 +477,7 @@ async fn new_impersonating_client() -> Swarm { // As such, the best we can do is hand eve a completely different keypair from what she is using to authenticate her connection. let someone_else = identity::Keypair::generate_ed25519(); let mut eve = Swarm::new_ephemeral(move |_| rendezvous::client::Behaviour::new(someone_else)); - eve.listen().await; + eve.listen().with_memory_addr_external().await; eve } diff --git a/protocols/request-response/CHANGELOG.md b/protocols/request-response/CHANGELOG.md index 693145c6f727..924175087861 100644 --- a/protocols/request-response/CHANGELOG.md +++ b/protocols/request-response/CHANGELOG.md @@ -1,3 +1,38 @@ +## 0.26.2 + +- Deprecate `Behaviour::add_address` in favor of `Swarm::add_peer_address`. + See [PR 4371](https://github.com/libp2p/rust-libp2p/pull/4371). + +## 0.26.1 + +- Derive `PartialOrd` and `Ord` for `{Out,In}boundRequestId`. + See [PR 4956](https://github.com/libp2p/rust-libp2p/pull/4956). + +## 0.26.0 + +- Remove `request_response::Config::set_connection_keep_alive` in favor of `SwarmBuilder::idle_connection_timeout`. + See [PR 4679](https://github.com/libp2p/rust-libp2p/pull/4679). +- Allow at most 100 concurrent inbound + outbound streams per instance of `request_response::Behaviour`. + This limit is configurable via `Config::with_max_concurrent_streams`. + See [PR 3914](https://github.com/libp2p/rust-libp2p/pull/3914). +- Report IO failures on inbound and outbound streams. + See [PR 3914](https://github.com/libp2p/rust-libp2p/pull/3914). 
+- Introduce dedicated types for `InboundRequestId` and `OutboundRequestId`. + See [PR 3914](https://github.com/libp2p/rust-libp2p/pull/3914). +- Keep peer addresses in `HashSet` instead of `SmallVec` to prevent adding duplicate addresses. + See [PR 4700](https://github.com/libp2p/rust-libp2p/pull/4700). + +## 0.25.2 + +- Deprecate `request_response::Config::set_connection_keep_alive` in favor of `SwarmBuilder::idle_connection_timeout`. + See [PR 4029](https://github.com/libp2p/rust-libp2p/pull/4029). + + + ## 0.25.1 - Replace unmaintained `serde_cbor` dependency with `cbor4ii`. diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index b87048bb6298..7e27c8246155 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-request-response" edition = "2021" rust-version = { workspace = true } description = "Generic Request/Response Protocols" -version = "0.25.1" +version = "0.26.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,33 +12,36 @@ categories = ["network-programming", "asynchronous"] [dependencies] async-trait = "0.1" -cbor4ii = { version = "0.3.1", features = ["serde1", "use_std"], optional = true } -futures = "0.3.28" +cbor4ii = { version = "0.3.2", features = ["serde1", "use_std"], optional = true } +futures = "0.3.30" instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } rand = "0.8" serde = { version = "1.0", optional = true} -serde_json = { version = "1.0.107", optional = true } -smallvec = "1.11.1" +serde_json = { version = "1.0.113", optional = true } +smallvec = "1.12.0" +tracing = "0.1.37" void = "1.0.2" -log = "0.4.20" +futures-timer = "3.0.2" +futures-bounded = { workspace = true } [features] json = ["dep:serde", "dep:serde_json", "libp2p-swarm/macros"] cbor = ["dep:serde", "dep:cbor4ii", 
"libp2p-swarm/macros"] [dev-dependencies] +anyhow = "1.0.79" async-std = { version = "1.6.2", features = ["attributes"] } -env_logger = "0.10.0" libp2p-noise = { workspace = true } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } rand = "0.8" libp2p-swarm-test = { path = "../../swarm-test" } futures_ringbuf = "0.4.0" -serde = { version = "1.0", features = ["derive"]} +serde = { version = "1.0", features = ["derive"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/request-response/src/cbor.rs b/protocols/request-response/src/cbor.rs index ff56aa52ddfa..f371f6149dcc 100644 --- a/protocols/request-response/src/cbor.rs +++ b/protocols/request-response/src/cbor.rs @@ -25,7 +25,7 @@ /// /// ``` /// # use libp2p_request_response::{cbor, ProtocolSupport, self as request_response}; -/// # use libp2p_swarm::{StreamProtocol, SwarmBuilder}; +/// # use libp2p_swarm::StreamProtocol; /// #[derive(Debug, serde::Serialize, serde::Deserialize)] /// struct GreetRequest { /// name: String, diff --git a/protocols/request-response/src/handler.rs b/protocols/request-response/src/handler.rs index 35a2db98bdc0..2d45e0d7dc30 100644 --- a/protocols/request-response/src/handler.rs +++ b/protocols/request-response/src/handler.rs @@ -23,23 +23,23 @@ pub(crate) mod protocol; pub use protocol::ProtocolSupport; use crate::codec::Codec; -use crate::handler::protocol::{RequestProtocol, ResponseProtocol}; -use crate::{RequestId, EMPTY_QUEUE_SHRINK_THRESHOLD}; +use crate::handler::protocol::Protocol; +use crate::{InboundRequestId, OutboundRequestId, EMPTY_QUEUE_SHRINK_THRESHOLD}; -use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered}; -use instant::Instant; +use futures::channel::mpsc; +use futures::{channel::oneshot, prelude::*}; use 
libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, }; use libp2p_swarm::{ - handler::{ConnectionHandler, ConnectionHandlerEvent, KeepAlive, StreamUpgradeError}, + handler::{ConnectionHandler, ConnectionHandlerEvent, StreamUpgradeError}, SubstreamProtocol, }; use smallvec::SmallVec; use std::{ collections::VecDeque, - fmt, + fmt, io, sync::{ atomic::{AtomicU64, Ordering}, Arc, @@ -57,32 +57,34 @@ where inbound_protocols: SmallVec<[TCodec::Protocol; 2]>, /// The request/response message codec. codec: TCodec, - /// The keep-alive timeout of idle connections. A connection is considered - /// idle if there are no outbound substreams. - keep_alive_timeout: Duration, - /// The timeout for inbound and outbound substreams (i.e. request - /// and response processing). - substream_timeout: Duration, - /// The current connection keep-alive. - keep_alive: KeepAlive, /// Queue of events to emit in `poll()`. pending_events: VecDeque>, /// Outbound upgrades waiting to be emitted as an `OutboundSubstreamRequest`. - outbound: VecDeque>, - /// Inbound upgrades waiting for the incoming request. - inbound: FuturesUnordered< - BoxFuture< - 'static, - Result< - ( - (RequestId, TCodec::Request), - oneshot::Sender, - ), - oneshot::Canceled, - >, - >, - >, + pending_outbound: VecDeque>, + + requested_outbound: VecDeque>, + /// A channel for receiving inbound requests. + inbound_receiver: mpsc::Receiver<( + InboundRequestId, + TCodec::Request, + oneshot::Sender, + )>, + /// The [`mpsc::Sender`] for the above receiver. Cloned for each inbound request. 
+ inbound_sender: mpsc::Sender<( + InboundRequestId, + TCodec::Request, + oneshot::Sender, + )>, + inbound_request_id: Arc, + + worker_streams: futures_bounded::FuturesMap, io::Error>>, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +enum RequestId { + Inbound(InboundRequestId), + Outbound(OutboundRequestId), } impl Handler @@ -92,52 +94,136 @@ where pub(super) fn new( inbound_protocols: SmallVec<[TCodec::Protocol; 2]>, codec: TCodec, - keep_alive_timeout: Duration, substream_timeout: Duration, inbound_request_id: Arc, + max_concurrent_streams: usize, ) -> Self { + let (inbound_sender, inbound_receiver) = mpsc::channel(0); Self { inbound_protocols, codec, - keep_alive: KeepAlive::Yes, - keep_alive_timeout, - substream_timeout, - outbound: VecDeque::new(), - inbound: FuturesUnordered::new(), + pending_outbound: VecDeque::new(), + requested_outbound: Default::default(), + inbound_receiver, + inbound_sender, pending_events: VecDeque::new(), inbound_request_id, + worker_streams: futures_bounded::FuturesMap::new( + substream_timeout, + max_concurrent_streams, + ), } } + /// Returns the next inbound request ID. + fn next_inbound_request_id(&mut self) -> InboundRequestId { + InboundRequestId(self.inbound_request_id.fetch_add(1, Ordering::Relaxed)) + } + fn on_fully_negotiated_inbound( &mut self, FullyNegotiatedInbound { - protocol: sent, - info: request_id, + protocol: (mut stream, protocol), + info: (), }: FullyNegotiatedInbound< ::InboundProtocol, ::InboundOpenInfo, >, ) { - if sent { - self.pending_events - .push_back(Event::ResponseSent(request_id)) - } else { - self.pending_events - .push_back(Event::ResponseOmission(request_id)) + let mut codec = self.codec.clone(); + let request_id = self.next_inbound_request_id(); + let mut sender = self.inbound_sender.clone(); + + let recv = async move { + // A channel for notifying the inbound upgrade when the + // response is sent. 
+ let (rs_send, rs_recv) = oneshot::channel(); + + let read = codec.read_request(&protocol, &mut stream); + let request = read.await?; + sender + .send((request_id, request, rs_send)) + .await + .expect("`ConnectionHandler` owns both ends of the channel"); + drop(sender); + + if let Ok(response) = rs_recv.await { + let write = codec.write_response(&protocol, &mut stream, response); + write.await?; + + stream.close().await?; + Ok(Event::ResponseSent(request_id)) + } else { + stream.close().await?; + Ok(Event::ResponseOmission(request_id)) + } + }; + + if self + .worker_streams + .try_push(RequestId::Inbound(request_id), recv.boxed()) + .is_err() + { + tracing::warn!("Dropping inbound stream because we are at capacity") + } + } + + fn on_fully_negotiated_outbound( + &mut self, + FullyNegotiatedOutbound { + protocol: (mut stream, protocol), + info: (), + }: FullyNegotiatedOutbound< + ::OutboundProtocol, + ::OutboundOpenInfo, + >, + ) { + let message = self + .requested_outbound + .pop_front() + .expect("negotiated a stream without a pending message"); + + let mut codec = self.codec.clone(); + let request_id = message.request_id; + + let send = async move { + let write = codec.write_request(&protocol, &mut stream, message.request); + write.await?; + stream.close().await?; + let read = codec.read_response(&protocol, &mut stream); + let response = read.await?; + + Ok(Event::Response { + request_id, + response, + }) + }; + + if self + .worker_streams + .try_push(RequestId::Outbound(request_id), send.boxed()) + .is_err() + { + tracing::warn!("Dropping outbound stream because we are at capacity") } } fn on_dial_upgrade_error( &mut self, - DialUpgradeError { info, error }: DialUpgradeError< + DialUpgradeError { error, info: () }: DialUpgradeError< ::OutboundOpenInfo, ::OutboundProtocol, >, ) { + let message = self + .requested_outbound + .pop_front() + .expect("negotiated a stream without a pending message"); + match error { StreamUpgradeError::Timeout => { - 
self.pending_events.push_back(Event::OutboundTimeout(info)); + self.pending_events + .push_back(Event::OutboundTimeout(message.request_id)); } StreamUpgradeError::NegotiationFailed => { // The remote merely doesn't support the protocol(s) we requested. @@ -146,24 +232,26 @@ where // An event is reported to permit user code to react to the fact that // the remote peer does not support the requested protocol(s). self.pending_events - .push_back(Event::OutboundUnsupportedProtocols(info)); - } - StreamUpgradeError::Apply(e) => { - log::debug!("outbound stream {info} failed: {e}"); + .push_back(Event::OutboundUnsupportedProtocols(message.request_id)); } + StreamUpgradeError::Apply(e) => void::unreachable(e), StreamUpgradeError::Io(e) => { - log::debug!("outbound stream {info} failed: {e}"); + tracing::debug!( + "outbound stream for request {} failed: {e}, retrying", + message.request_id + ); + self.requested_outbound.push_back(message); } } } fn on_listen_upgrade_error( &mut self, - ListenUpgradeError { error, info }: ListenUpgradeError< + ListenUpgradeError { error, .. }: ListenUpgradeError< ::InboundOpenInfo, ::InboundProtocol, >, ) { - log::debug!("inbound stream {info} failed: {error}"); + void::unreachable(error) } } @@ -174,25 +262,36 @@ where { /// A request has been received. Request { - request_id: RequestId, + request_id: InboundRequestId, request: TCodec::Request, sender: oneshot::Sender, }, /// A response has been received. Response { - request_id: RequestId, + request_id: OutboundRequestId, response: TCodec::Response, }, /// A response to an inbound request has been sent. - ResponseSent(RequestId), + ResponseSent(InboundRequestId), /// A response to an inbound request was omitted as a result /// of dropping the response `sender` of an inbound `Request`. - ResponseOmission(RequestId), + ResponseOmission(InboundRequestId), /// An outbound request timed out while sending the request /// or waiting for the response. 
- OutboundTimeout(RequestId), + OutboundTimeout(OutboundRequestId), /// An outbound request failed to negotiate a mutually supported protocol. - OutboundUnsupportedProtocols(RequestId), + OutboundUnsupportedProtocols(OutboundRequestId), + OutboundStreamFailed { + request_id: OutboundRequestId, + error: io::Error, + }, + /// An inbound request timed out while waiting for the request + /// or sending the response. + InboundTimeout(InboundRequestId), + InboundStreamFailed { + request_id: InboundRequestId, + error: io::Error, + }, } impl fmt::Debug for Event { @@ -229,72 +328,102 @@ impl fmt::Debug for Event { .debug_tuple("Event::OutboundUnsupportedProtocols") .field(request_id) .finish(), + Event::OutboundStreamFailed { request_id, error } => f + .debug_struct("Event::OutboundStreamFailed") + .field("request_id", &request_id) + .field("error", &error) + .finish(), + Event::InboundTimeout(request_id) => f + .debug_tuple("Event::InboundTimeout") + .field(request_id) + .finish(), + Event::InboundStreamFailed { request_id, error } => f + .debug_struct("Event::InboundStreamFailed") + .field("request_id", &request_id) + .field("error", &error) + .finish(), } } } +pub struct OutboundMessage { + pub(crate) request_id: OutboundRequestId, + pub(crate) request: TCodec::Request, + pub(crate) protocols: SmallVec<[TCodec::Protocol; 2]>, +} + +impl fmt::Debug for OutboundMessage +where + TCodec: Codec, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OutboundMessage").finish_non_exhaustive() + } +} + impl ConnectionHandler for Handler where TCodec: Codec + Send + Clone + 'static, { - type FromBehaviour = RequestProtocol; + type FromBehaviour = OutboundMessage; type ToBehaviour = Event; - type Error = void::Void; - type InboundProtocol = ResponseProtocol; - type OutboundProtocol = RequestProtocol; - type OutboundOpenInfo = RequestId; - type InboundOpenInfo = RequestId; + type InboundProtocol = Protocol; + type OutboundProtocol = Protocol; + type 
OutboundOpenInfo = (); + type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { - // A channel for notifying the handler when the inbound - // upgrade received the request. - let (rq_send, rq_recv) = oneshot::channel(); - - // A channel for notifying the inbound upgrade when the - // response is sent. - let (rs_send, rs_recv) = oneshot::channel(); - - let request_id = RequestId(self.inbound_request_id.fetch_add(1, Ordering::Relaxed)); - - // By keeping all I/O inside the `ResponseProtocol` and thus the - // inbound substream upgrade via above channels, we ensure that it - // is all subject to the configured timeout without extra bookkeeping - // for inbound substreams as well as their timeouts and also make the - // implementation of inbound and outbound upgrades symmetric in - // this sense. - let proto = ResponseProtocol { - protocols: self.inbound_protocols.clone(), - codec: self.codec.clone(), - request_sender: rq_send, - response_receiver: rs_recv, - request_id, - }; - - // The handler waits for the request to come in. It then emits - // `Event::Request` together with a - // `ResponseChannel`. - self.inbound - .push(rq_recv.map_ok(move |rq| (rq, rs_send)).boxed()); - - SubstreamProtocol::new(proto, request_id).with_timeout(self.substream_timeout) + SubstreamProtocol::new( + Protocol { + protocols: self.inbound_protocols.clone(), + }, + (), + ) } fn on_behaviour_event(&mut self, request: Self::FromBehaviour) { - self.keep_alive = KeepAlive::Yes; - self.outbound.push_back(request); - } - - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive + self.pending_outbound.push_back(request); } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, RequestId, Self::ToBehaviour, Self::Error>, - > { - // Drain pending events. 
+ ) -> Poll, (), Self::ToBehaviour>> { + match self.worker_streams.poll_unpin(cx) { + Poll::Ready((_, Ok(Ok(event)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); + } + Poll::Ready((RequestId::Inbound(id), Ok(Err(e)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundStreamFailed { + request_id: id, + error: e, + }, + )); + } + Poll::Ready((RequestId::Outbound(id), Ok(Err(e)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundStreamFailed { + request_id: id, + error: e, + }, + )); + } + Poll::Ready((RequestId::Inbound(id), Err(futures_bounded::Timeout { .. }))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundTimeout(id), + )); + } + Poll::Ready((RequestId::Outbound(id), Err(futures_bounded::Timeout { .. }))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundTimeout(id), + )); + } + Poll::Pending => {} + } + + // Drain pending events that were produced by `worker_streams`. if let Some(event) = self.pending_events.pop_front() { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); } else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { @@ -302,46 +431,30 @@ where } // Check for inbound requests. - while let Poll::Ready(Some(result)) = self.inbound.poll_next_unpin(cx) { - match result { - Ok(((id, rq), rs_sender)) => { - // We received an inbound request. - self.keep_alive = KeepAlive::Yes; - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::Request { - request_id: id, - request: rq, - sender: rs_sender, - })); - } - Err(oneshot::Canceled) => { - // The inbound upgrade has errored or timed out reading - // or waiting for the request. The handler is informed - // via `on_connection_event` call with `ConnectionEvent::ListenUpgradeError`. 
- } - } + if let Poll::Ready(Some((id, rq, rs_sender))) = self.inbound_receiver.poll_next_unpin(cx) { + // We received an inbound request. + + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::Request { + request_id: id, + request: rq, + sender: rs_sender, + })); } // Emit outbound requests. - if let Some(request) = self.outbound.pop_front() { - let info = request.request_id; + if let Some(request) = self.pending_outbound.pop_front() { + let protocols = request.protocols.clone(); + self.requested_outbound.push_back(request); + return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(request, info) - .with_timeout(self.substream_timeout), + protocol: SubstreamProtocol::new(Protocol { protocols }, ()), }); } - debug_assert!(self.outbound.is_empty()); - - if self.outbound.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { - self.outbound.shrink_to_fit(); - } + debug_assert!(self.pending_outbound.is_empty()); - if self.inbound.is_empty() && self.keep_alive.is_yes() { - // No new inbound or outbound requests. However, we may just have - // started the latest inbound or outbound upgrade(s), so make sure - // the keep-alive timeout is preceded by the substream timeout. 
- let until = Instant::now() + self.substream_timeout + self.keep_alive_timeout; - self.keep_alive = KeepAlive::Until(until); + if self.pending_outbound.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { + self.pending_outbound.shrink_to_fit(); } Poll::Pending @@ -360,14 +473,8 @@ where ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => { self.on_fully_negotiated_inbound(fully_negotiated_inbound) } - ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { - protocol: response, - info: request_id, - }) => { - self.pending_events.push_back(Event::Response { - request_id, - response, - }); + ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { + self.on_fully_negotiated_outbound(fully_negotiated_outbound) } ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { self.on_dial_upgrade_error(dial_upgrade_error) @@ -375,9 +482,7 @@ where ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { self.on_listen_upgrade_error(listen_upgrade_error) } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::LocalProtocolsChange(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} + _ => {} } } } diff --git a/protocols/request-response/src/handler/protocol.rs b/protocols/request-response/src/handler/protocol.rs index 1368a3c1f98f..833cacdd6cee 100644 --- a/protocols/request-response/src/handler/protocol.rs +++ b/protocols/request-response/src/handler/protocol.rs @@ -23,14 +23,10 @@ //! receives a request and sends a response, whereas the //! outbound upgrade send a request and receives a response. -use crate::codec::Codec; -use crate::RequestId; - -use futures::{channel::oneshot, future::BoxFuture, prelude::*}; +use futures::future::{ready, Ready}; use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_swarm::Stream; use smallvec::SmallVec; -use std::{fmt, io}; /// The level of support for a particular protocol. 
#[derive(Debug, Clone)] @@ -65,22 +61,15 @@ impl ProtocolSupport { /// /// Receives a request and sends a response. #[derive(Debug)] -pub struct ResponseProtocol -where - TCodec: Codec, -{ - pub(crate) codec: TCodec, - pub(crate) protocols: SmallVec<[TCodec::Protocol; 2]>, - pub(crate) request_sender: oneshot::Sender<(RequestId, TCodec::Request)>, - pub(crate) response_receiver: oneshot::Receiver, - pub(crate) request_id: RequestId, +pub struct Protocol
<P>
{ + pub(crate) protocols: SmallVec<[P; 2]>, } -impl UpgradeInfo for ResponseProtocol +impl
<TCodec>
+impl<P> UpgradeInfo for Protocol<P>
where - TCodec: Codec, + P: AsRef + Clone, { - type Info = TCodec::Protocol; + type Info = P; type InfoIter = smallvec::IntoIter<[Self::Info; 2]>; fn protocol_info(&self) -> Self::InfoIter { @@ -88,94 +77,28 @@ where } } -impl InboundUpgrade for ResponseProtocol +impl
<TCodec>
+impl<P> InboundUpgrade<Stream> for Protocol<P>
where - TCodec: Codec + Send + 'static, + P: AsRef + Clone, { - type Output = bool; - type Error = io::Error; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(mut self, mut io: Stream, protocol: Self::Info) -> Self::Future { - async move { - let read = self.codec.read_request(&protocol, &mut io); - let request = read.await?; - match self.request_sender.send((self.request_id, request)) { - Ok(()) => {}, - Err(_) => panic!( - "Expect request receiver to be alive i.e. protocol handler to be alive.", - ), - } + type Output = (Stream, P); + type Error = void::Void; + type Future = Ready>; - if let Ok(response) = self.response_receiver.await { - let write = self.codec.write_response(&protocol, &mut io, response); - write.await?; - - io.close().await?; - // Response was sent. Indicate to handler to emit a `ResponseSent` event. - Ok(true) - } else { - io.close().await?; - // No response was sent. Indicate to handler to emit a `ResponseOmission` event. - Ok(false) - } - }.boxed() + fn upgrade_inbound(self, io: Stream, protocol: Self::Info) -> Self::Future { + ready(Ok((io, protocol))) } } -/// Request substream upgrade protocol. -/// -/// Sends a request and receives a response. -pub struct RequestProtocol +impl
<P> OutboundUpgrade<Stream> for Protocol<P>
{ ) -> Result> { let (socket_addr, _version, peer_id) = self.remote_multiaddr_to_socketaddr(addr.clone(), true)?; - let peer_id = peer_id.ok_or(TransportError::MultiaddrNotSupported(addr))?; + let peer_id = peer_id.ok_or(TransportError::MultiaddrNotSupported(addr.clone()))?; let socket = self .eligible_listener(&socket_addr) @@ -318,6 +318,8 @@ impl Transport for GenTransport
<P>
{ .try_clone_socket() .map_err(Self::Error::from)?; + tracing::debug!("Preparing for hole-punch from {addr}"); + let hole_puncher = hole_puncher::
<P>
(socket, socket_addr, self.handshake_timeout); let (sender, receiver) = oneshot::channel(); @@ -346,7 +348,12 @@ impl Transport for GenTransport
<P>
{ .expect("hole punch connection sender is never dropped before receiver") .await?; if inbound_peer_id != peer_id { - log::warn!("expected inbound connection from {socket_addr} to resolve to {peer_id} but got {inbound_peer_id}"); + tracing::warn!( + peer=%peer_id, + inbound_peer=%inbound_peer_id, + socket_address=%socket_addr, + "expected inbound connection from socket_address to resolve to peer but got inbound peer" + ); } Ok((inbound_peer_id, connection)) } @@ -515,9 +522,8 @@ impl Listener
<P>
{ /// Poll for a next If Event. fn poll_if_addr(&mut self, cx: &mut Context<'_>) -> Poll<::Item> { let endpoint_addr = self.socket_addr(); - let if_watcher = match self.if_watcher.as_mut() { - Some(iw) => iw, - None => return Poll::Pending, + let Some(if_watcher) = self.if_watcher.as_mut() else { + return Poll::Pending; }; loop { match ready!(P::poll_if_event(if_watcher, cx)) { @@ -525,7 +531,10 @@ impl Listener
<P>
{ if let Some(listen_addr) = ip_to_listenaddr(&endpoint_addr, inet.addr(), self.version) { - log::debug!("New listen address: {listen_addr}"); + tracing::debug!( + address=%listen_addr, + "New listen address" + ); self.listening_addresses.insert(inet.addr()); return Poll::Ready(TransportEvent::NewAddress { listener_id: self.listener_id, @@ -537,7 +546,10 @@ impl Listener
<P>
{ if let Some(listen_addr) = ip_to_listenaddr(&endpoint_addr, inet.addr(), self.version) { - log::debug!("Expired listen address: {listen_addr}"); + tracing::debug!( + address=%listen_addr, + "Expired listen address" + ); self.listening_addresses.remove(&inet.addr()); return Poll::Ready(TransportEvent::AddressExpired { listener_id: self.listener_id, @@ -588,7 +600,7 @@ impl Stream for Listener
<P>
{ return Poll::Ready(Some(event)); } Poll::Ready(None) => { - self.close(Err(Error::EndpointDriverCrashed)); + self.close(Ok(())); continue; } Poll::Pending => {} @@ -704,17 +716,14 @@ fn multiaddr_to_socketaddr( fn is_quic_addr(addr: &Multiaddr, support_draft_29: bool) -> bool { use Protocol::*; let mut iter = addr.iter(); - let first = match iter.next() { - Some(p) => p, - None => return false, + let Some(first) = iter.next() else { + return false; }; - let second = match iter.next() { - Some(p) => p, - None => return false, + let Some(second) = iter.next() else { + return false; }; - let third = match iter.next() { - Some(p) => p, - None => return false, + let Some(third) = iter.next() else { + return false; }; let fourth = iter.next(); let fifth = iter.next(); diff --git a/transports/quic/tests/smoke.rs b/transports/quic/tests/smoke.rs index 5581ceb744ca..36fb72a5ee7e 100644 --- a/transports/quic/tests/smoke.rs +++ b/transports/quic/tests/smoke.rs @@ -26,6 +26,7 @@ use std::{ pin::Pin, sync::{Arc, Mutex}, }; +use tracing_subscriber::EnvFilter; #[cfg(feature = "tokio")] #[tokio::test] @@ -39,28 +40,12 @@ async fn async_std_smoke() { smoke::().await } -#[cfg(feature = "async-std")] -#[async_std::test] -async fn dial_failure() { - let _ = env_logger::try_init(); - let mut a = create_default_transport::().1; - let mut b = create_default_transport::().1; - - let addr = start_listening(&mut a, "/ip4/127.0.0.1/udp/0/quic-v1").await; - drop(a); // stop a so b can never reach it - - match dial(&mut b, addr).await { - Ok(_) => panic!("Expected dial to fail"), - Err(error) => { - assert_eq!("Handshake with the remote timed out.", error.to_string()) - } - }; -} - #[cfg(feature = "tokio")] #[tokio::test] async fn endpoint_reuse() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (_, mut a_transport) = create_default_transport::(); let (_, mut b_transport) = 
create_default_transport::(); @@ -85,7 +70,9 @@ async fn endpoint_reuse() { #[cfg(feature = "async-std")] #[async_std::test] async fn ipv4_dial_ipv6() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (a_peer_id, mut a_transport) = create_default_transport::(); let (b_peer_id, mut b_transport) = create_default_transport::(); @@ -103,7 +90,9 @@ async fn ipv4_dial_ipv6() { #[cfg(feature = "async-std")] #[async_std::test] async fn wrapped_with_delay() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); struct DialDelay(Arc>>); @@ -271,7 +260,9 @@ async fn tcp_and_quic() { #[cfg(feature = "async-std")] #[test] fn concurrent_connections_and_streams_async_std() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); quickcheck::QuickCheck::new() .min_tests_passed(1) @@ -282,7 +273,9 @@ fn concurrent_connections_and_streams_async_std() { #[cfg(feature = "tokio")] #[test] fn concurrent_connections_and_streams_tokio() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let rt = tokio::runtime::Runtime::new().unwrap(); let _guard = rt.enter(); @@ -299,7 +292,9 @@ async fn draft_29_support() { use futures::{future::poll_fn, select}; use libp2p_core::transport::TransportError; - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (_, mut a_transport) = create_transport::(|cfg| cfg.support_draft_29 = true); @@ -360,7 +355,9 @@ async fn draft_29_support() { #[cfg(feature = "async-std")] #[async_std::test] async fn backpressure() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + 
.try_init(); let max_stream_data = quic::Config::new(&generate_tls_keypair()).max_stream_data; let (mut stream_a, mut stream_b) = build_streams::().await; @@ -384,7 +381,9 @@ async fn backpressure() { #[cfg(feature = "async-std")] #[async_std::test] async fn read_after_peer_dropped_stream() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (mut stream_a, mut stream_b) = build_streams::().await; let data = vec![0; 10]; @@ -404,7 +403,9 @@ async fn read_after_peer_dropped_stream() { #[async_std::test] #[should_panic] async fn write_after_peer_dropped_stream() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (stream_a, mut stream_b) = build_streams::().await; drop(stream_a); futures_timer::Delay::new(Duration::from_millis(1)).await; @@ -458,7 +459,9 @@ async fn test_local_listener_reuse() { } async fn smoke() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (a_peer_id, mut a_transport) = create_default_transport::
<P>
(); let (b_peer_id, mut b_transport) = create_default_transport::
<P>
(); @@ -580,7 +583,11 @@ fn prop( let (listeners_tx, mut listeners_rx) = mpsc::channel(number_listeners); - log::info!("Creating {number_streams} streams on {number_listeners} connections"); + tracing::info!( + stream_count=%number_streams, + connection_count=%number_listeners, + "Creating streams on connections" + ); // Spawn the listener nodes. for _ in 0..number_listeners { @@ -646,15 +653,13 @@ async fn answer_inbound_streams( mut connection: StreamMuxerBox, ) { loop { - let mut inbound_stream = match future::poll_fn(|cx| { + let Ok(mut inbound_stream) = future::poll_fn(|cx| { let _ = connection.poll_unpin(cx)?; - connection.poll_inbound_unpin(cx) }) .await - { - Ok(s) => s, - Err(_) => return, + else { + return; }; P::spawn(async move { @@ -721,7 +726,10 @@ async fn open_outbound_streams( }); } - log::info!("Created {number_streams} streams"); + tracing::info!( + stream_count=%number_streams, + "Created streams" + ); while future::poll_fn(|cx| connection.poll_unpin(cx)) .await diff --git a/transports/tcp/CHANGELOG.md b/transports/tcp/CHANGELOG.md index f0164b342e52..2bde64056cb7 100644 --- a/transports/tcp/CHANGELOG.md +++ b/transports/tcp/CHANGELOG.md @@ -1,4 +1,12 @@ -## 0.40.0 +## 0.41.0 + + +## 0.40.1 + +- Expose `async_io::TcpStream`. + See [PR 4683](https://github.com/libp2p/rust-libp2p/pull/4683). + +## 0.40.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/transports/tcp/Cargo.toml b/transports/tcp/Cargo.toml index 14eec8c4caad..515d47d2a5a5 100644 --- a/transports/tcp/Cargo.toml +++ b/transports/tcp/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-tcp" edition = "2021" rust-version = { workspace = true } description = "TCP/IP transport protocol for libp2p" -version = "0.40.0" +version = "0.41.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,16 +11,16 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -async-io = { version = "1.13.0", optional = true } -futures = "0.3.28" +async-io = { version = "2.3.1", optional = true } +futures = "0.3.30" futures-timer = "3.0" -if-watch = "3.0.1" -libc = "0.2.149" +if-watch = "3.2.0" +libc = "0.2.153" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" -socket2 = { version = "0.5.4", features = ["all"] } -tokio = { version = "1.32.0", default-features = false, features = ["net"], optional = true } +socket2 = { version = "0.5.5", features = ["all"] } +tokio = { version = "1.36.0", default-features = false, features = ["net"], optional = true } +tracing = "0.1.37" [features] tokio = ["dep:tokio", "if-watch/tokio"] @@ -28,8 +28,9 @@ async-io = ["dep:async-io", "if-watch/smol"] [dev-dependencies] async-std = { version = "1.6.5", features = ["attributes"] } -tokio = { version = "1.32.0", default-features = false, features = ["full"] } -env_logger = "0.10.0" +libp2p-identity = { workspace = true, features = ["rand"] } +tokio = { version = "1.36.0", default-features = false, features = ["full"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/tcp/src/lib.rs b/transports/tcp/src/lib.rs index 5efdf16fff5a..fbb7008aa5bd 100644 --- a/transports/tcp/src/lib.rs +++ b/transports/tcp/src/lib.rs @@ -98,7 +98,7 @@ impl PortReuse { /// Has no effect if port reuse is disabled. fn register(&mut self, ip: IpAddr, port: Port) { if let PortReuse::Enabled { listen_addrs } = self { - log::trace!("Registering for port reuse: {}:{}", ip, port); + tracing::trace!(%ip, %port, "Registering for port reuse"); listen_addrs .write() .expect("`register()` and `unregister()` never panic while holding the lock") @@ -111,7 +111,7 @@ impl PortReuse { /// Has no effect if port reuse is disabled. fn unregister(&mut self, ip: IpAddr, port: Port) { if let PortReuse::Enabled { listen_addrs } = self { - log::trace!("Unregistering for port reuse: {}:{}", ip, port); + tracing::trace!(%ip, %port, "Unregistering for port reuse"); listen_addrs .write() .expect("`register()` and `unregister()` never panic while holding the lock") @@ -441,12 +441,9 @@ where id: ListenerId, addr: Multiaddr, ) -> Result<(), TransportError> { - let socket_addr = if let Ok(sa) = multiaddr_to_socketaddr(addr.clone()) { - sa - } else { - return Err(TransportError::MultiaddrNotSupported(addr)); - }; - log::debug!("listening on {}", socket_addr); + let socket_addr = multiaddr_to_socketaddr(addr.clone()) + .map_err(|_| TransportError::MultiaddrNotSupported(addr))?; + tracing::debug!("listening on {}", socket_addr); let listener = self .do_listen(id, socket_addr) .map_err(TransportError::Other)?; @@ -472,14 +469,14 @@ where } else { return Err(TransportError::MultiaddrNotSupported(addr)); }; - log::debug!("dialing {}", socket_addr); + tracing::debug!(address=%socket_addr, "dialing address"); let socket = self .create_socket(socket_addr) .map_err(TransportError::Other)?; if let Some(addr) = self.port_reuse.local_dial_addr(&socket_addr.ip()) { - log::trace!("Binding dial socket to listen 
socket {}", addr); + tracing::trace!(address=%addr, "Binding dial socket to listen socket address"); socket.bind(&addr.into()).map_err(TransportError::Other)?; } @@ -538,6 +535,7 @@ where } /// Poll all listeners. + #[tracing::instrument(level = "trace", name = "Transport::poll", skip(self, cx))] fn poll( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -664,9 +662,8 @@ where /// Poll for a next If Event. fn poll_if_addr(&mut self, cx: &mut Context<'_>) -> Poll<::Item> { - let if_watcher = match self.if_watcher.as_mut() { - Some(if_watcher) => if_watcher, - None => return Poll::Pending, + let Some(if_watcher) = self.if_watcher.as_mut() else { + return Poll::Pending; }; let my_listen_addr_port = self.listen_addr.port(); @@ -677,7 +674,7 @@ where let ip = inet.addr(); if self.listen_addr.is_ipv4() == ip.is_ipv4() { let ma = ip_to_multiaddr(ip, my_listen_addr_port); - log::debug!("New listen address: {}", ma); + tracing::debug!(address=%ma, "New listen address"); self.port_reuse.register(ip, my_listen_addr_port); return Poll::Ready(TransportEvent::NewAddress { listener_id: self.listener_id, @@ -689,7 +686,7 @@ where let ip = inet.addr(); if self.listen_addr.is_ipv4() == ip.is_ipv4() { let ma = ip_to_multiaddr(ip, my_listen_addr_port); - log::debug!("Expired listen address: {}", ma); + tracing::debug!(address=%ma, "Expired listen address"); self.port_reuse.unregister(ip, my_listen_addr_port); return Poll::Ready(TransportEvent::AddressExpired { listener_id: self.listener_id, @@ -762,7 +759,11 @@ where let local_addr = ip_to_multiaddr(local_addr.ip(), local_addr.port()); let remote_addr = ip_to_multiaddr(remote_addr.ip(), remote_addr.port()); - log::debug!("Incoming connection from {} at {}", remote_addr, local_addr); + tracing::debug!( + remote_address=%remote_addr, + local_address=%local_addr, + "Incoming connection from remote at local" + ); return Poll::Ready(Some(TransportEvent::Incoming { listener_id: self.listener_id, @@ -900,7 +901,9 @@ mod tests { #[test] fn 
communicating_between_dialer_and_listener() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listener(addr: Multiaddr, mut ready_tx: mpsc::Sender) { let mut tcp = Transport::::default().boxed(); @@ -969,7 +972,9 @@ mod tests { #[test] fn wildcard_expansion() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listener(addr: Multiaddr, mut ready_tx: mpsc::Sender) { let mut tcp = Transport::::default().boxed(); @@ -1038,7 +1043,9 @@ mod tests { #[test] fn port_reuse_dialing() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listener( addr: Multiaddr, @@ -1145,7 +1152,9 @@ mod tests { #[test] fn port_reuse_listening() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listen_twice(addr: Multiaddr) { let mut tcp = Transport::::new(Config::new().port_reuse(true)); @@ -1199,7 +1208,9 @@ mod tests { #[test] fn listen_port_0() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listen(addr: Multiaddr) -> Multiaddr { let mut tcp = Transport::::default().boxed(); @@ -1234,7 +1245,9 @@ mod tests { #[test] fn listen_invalid_addr() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); fn test(addr: Multiaddr) { #[cfg(feature = "async-io")] @@ -1304,7 +1317,9 @@ mod tests { #[test] fn test_remove_listener() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + 
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn cycle_listeners() -> bool { let mut tcp = Transport::::default().boxed(); diff --git a/transports/tcp/src/provider/async_io.rs b/transports/tcp/src/provider/async_io.rs index 9f43ed232366..fe0abe42d541 100644 --- a/transports/tcp/src/provider/async_io.rs +++ b/transports/tcp/src/provider/async_io.rs @@ -54,7 +54,7 @@ pub type Transport = crate::Transport; pub enum Tcp {} impl Provider for Tcp { - type Stream = Async; + type Stream = TcpStream; type Listener = Async; type IfWatcher = if_watch::smol::IfWatcher; @@ -116,3 +116,5 @@ impl Provider for Tcp { })) } } + +pub type TcpStream = Async; diff --git a/transports/tls/CHANGELOG.md b/transports/tls/CHANGELOG.md index 4c85ccf578cd..83f72286559d 100644 --- a/transports/tls/CHANGELOG.md +++ b/transports/tls/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.3.0 + +- Migrate to `{In,Out}boundConnectionUpgrade` traits. + See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). + ## 0.2.1 - Switch from webpki to rustls-webpki. diff --git a/transports/tls/Cargo.toml b/transports/tls/Cargo.toml index b2ca28784f76..0ca134d418b9 100644 --- a/transports/tls/Cargo.toml +++ b/transports/tls/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-tls" -version = "0.2.1" +version = "0.3.0" edition = "2021" rust-version = { workspace = true } description = "TLS configuration based on libp2p TLS specs." @@ -9,20 +9,20 @@ license = "MIT" exclude = ["src/test_assets"] [dependencies] -futures = { version = "0.3.28", default-features = false } +futures = { version = "0.3.30", default-features = false } futures-rustls = "0.24.0" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -rcgen = "0.10.0" +rcgen = "0.11.3" ring = "0.16.20" -thiserror = "1.0.49" +thiserror = "1.0.57" webpki = { version = "0.101.4", package = "rustls-webpki", features = ["std"] } x509-parser = "0.15.1" yasna = "0.5.2" # Exposed dependencies. 
Breaking changes to these are breaking changes to us. [dependencies.rustls] -version = "0.21.7" +version = "0.21.9" default-features = false features = ["dangerous_configuration"] # Must enable this to allow for custom verification code. @@ -30,10 +30,10 @@ features = ["dangerous_configuration"] # Must enable this to allow for custom ve hex = "0.4.3" hex-literal = "0.4.1" libp2p-core = { workspace = true } -libp2p-identity = { workspace = true, features = ["ed25519", "rsa", "secp256k1", "ecdsa"] } -libp2p-swarm = { workspace = true } +libp2p-identity = { workspace = true, features = ["ed25519", "rsa", "secp256k1", "ecdsa", "rand"] } +libp2p-swarm = { workspace = true, features = ["tokio"] } libp2p-yamux = { workspace = true } -tokio = { version = "1.32.0", features = ["full"] } +tokio = { version = "1.36.0", features = ["full"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/tls/src/certificate.rs b/transports/tls/src/certificate.rs index ff9d296bb162..801ba3fe3ce7 100644 --- a/transports/tls/src/certificate.rs +++ b/transports/tls/src/certificate.rs @@ -27,7 +27,7 @@ use libp2p_identity::PeerId; use x509_parser::{prelude::*, signature_algorithm::SignatureAlgorithm}; /// The libp2p Public Key Extension is a X.509 extension -/// with the Object Identier 1.3.6.1.4.1.53594.1.1, +/// with the Object Identifier 1.3.6.1.4.1.53594.1.1, /// allocated by IANA to the libp2p project at Protocol Labs. 
const P2P_EXT_OID: [u64; 9] = [1, 3, 6, 1, 4, 1, 53594, 1, 1]; @@ -374,7 +374,7 @@ impl P2pCertificate<'_> { } if signature_algorithm.algorithm == OID_PKCS1_RSASSAPSS { // According to https://datatracker.ietf.org/doc/html/rfc4055#section-3.1: - // Inside of params there shuld be a sequence of: + // Inside of params there should be a sequence of: // - Hash Algorithm // - Mask Algorithm // - Salt Length diff --git a/transports/tls/src/upgrade.rs b/transports/tls/src/upgrade.rs index bf64ce61505f..84510b6bab0b 100644 --- a/transports/tls/src/upgrade.rs +++ b/transports/tls/src/upgrade.rs @@ -24,7 +24,8 @@ use futures::future::BoxFuture; use futures::AsyncWrite; use futures::{AsyncRead, FutureExt}; use futures_rustls::TlsStream; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::UpgradeInfo; use libp2p_identity as identity; use libp2p_identity::PeerId; use rustls::{CommonState, ServerName}; @@ -67,7 +68,7 @@ impl UpgradeInfo for Config { } } -impl InboundUpgrade for Config +impl InboundConnectionUpgrade for Config where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { @@ -90,7 +91,7 @@ where } } -impl OutboundUpgrade for Config +impl OutboundConnectionUpgrade for Config where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { @@ -120,12 +121,8 @@ where fn extract_single_certificate( state: &CommonState, ) -> Result, certificate::ParseError> { - let cert = match state - .peer_certificates() - .expect("config enforces presence of certificates") - { - [single] => single, - _ => panic!("config enforces exactly one certificate"), + let Some([cert]) = state.peer_certificates() else { + panic!("config enforces exactly one certificate"); }; certificate::parse(cert) diff --git a/transports/tls/tests/smoke.rs b/transports/tls/tests/smoke.rs index 0db39edf2809..d488ae7846a0 100644 --- a/transports/tls/tests/smoke.rs +++ b/transports/tls/tests/smoke.rs @@ 
-3,7 +3,7 @@ use libp2p_core::multiaddr::Protocol; use libp2p_core::transport::MemoryTransport; use libp2p_core::upgrade::Version; use libp2p_core::Transport; -use libp2p_swarm::{dummy, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p_swarm::{dummy, Config, Swarm, SwarmEvent}; use std::time::Duration; #[tokio::test] @@ -65,7 +65,10 @@ fn make_swarm() -> Swarm { .multiplex(libp2p_yamux::Config::default()) .boxed(); - SwarmBuilder::without_executor(transport, dummy::Behaviour, identity.public().to_peer_id()) - .idle_connection_timeout(Duration::from_secs(5)) - .build() + Swarm::new( + transport, + dummy::Behaviour, + identity.public().to_peer_id(), + Config::with_tokio_executor().with_idle_connection_timeout(Duration::from_secs(60)), + ) } diff --git a/transports/uds/CHANGELOG.md b/transports/uds/CHANGELOG.md index d40067ad7844..aad61d215473 100644 --- a/transports/uds/CHANGELOG.md +++ b/transports/uds/CHANGELOG.md @@ -1,4 +1,7 @@ -## 0.39.0 +## 0.40.0 + + +## 0.39.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/transports/uds/Cargo.toml b/transports/uds/Cargo.toml index 1bc78ff35a96..7b01e0e38d52 100644 --- a/transports/uds/Cargo.toml +++ b/transports/uds/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-uds" edition = "2021" rust-version = { workspace = true } description = "Unix domain sockets transport for libp2p" -version = "0.39.0" +version = "0.40.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,12 +13,12 @@ categories = ["network-programming", "asynchronous"] [dependencies] async-std = { version = "1.6.2", optional = true } libp2p-core = { workspace = true } -log = "0.4.20" -futures = "0.3.28" -tokio = { version = "1.32", default-features = false, features = ["net"], optional = true } +futures = "0.3.30" +tokio = { version = "1.36", default-features = false, features = ["net"], optional = true } +tracing = "0.1.37" [dev-dependencies] -tempfile = "3.8" +tempfile = "3.10" # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/uds/src/lib.rs b/transports/uds/src/lib.rs index 3cd71552d187..075cbadb80ac 100644 --- a/transports/uds/src/lib.rs +++ b/transports/uds/src/lib.rs @@ -49,7 +49,6 @@ use libp2p_core::{ transport::{TransportError, TransportEvent}, Transport, }; -use log::debug; use std::collections::VecDeque; use std::pin::Pin; use std::task::{Context, Poll}; @@ -104,7 +103,7 @@ macro_rules! codegen { stream::once({ let addr = addr.clone(); async move { - debug!("Now listening on {}", addr); + tracing::debug!(address=%addr, "Now listening on address"); Ok(TransportEvent::NewAddress { listener_id: id, listen_addr: addr, @@ -118,7 +117,7 @@ macro_rules! 
codegen { async move { let event = match listener.accept().await { Ok((stream, _)) => { - debug!("incoming connection on {}", addr); + tracing::debug!(address=%addr, "incoming connection on address"); TransportEvent::Incoming { upgrade: future::ok(stream), local_addr: addr.clone(), @@ -163,7 +162,7 @@ macro_rules! codegen { fn dial(&mut self, addr: Multiaddr) -> Result> { // TODO: Should we dial at all? if let Ok(path) = multiaddr_to_path(&addr) { - debug!("Dialing {}", addr); + tracing::debug!(address=%addr, "Dialing address"); Ok(async move { <$unix_stream>::connect(&path).await }.boxed()) } else { Err(TransportError::MultiaddrNotSupported(addr)) diff --git a/transports/wasm-ext/CHANGELOG.md b/transports/wasm-ext/CHANGELOG.md deleted file mode 100644 index 2de4811133bd..000000000000 --- a/transports/wasm-ext/CHANGELOG.md +++ /dev/null @@ -1,118 +0,0 @@ -## 0.40.0 - -- Raise MSRV to 1.65. - See [PR 3715]. - -[PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 - -## 0.39.0 - -- Update to `libp2p-core` `v0.39.0`. - -## 0.38.0 - -- Update to `libp2p-core` `v0.38.0`. - -- Update `rust-version` to reflect the actual MSRV: 1.60.0. See [PR 3090]. - -[PR 3090]: https://github.com/libp2p/rust-libp2p/pull/3090 - -## 0.37.0 - -- Update to `libp2p-core` `v0.37.0`. - -## 0.36.0 - -- Update to `libp2p-core` `v0.36.0`. - -## 0.35.0 - -- Update to `libp2p-core` `v0.35.0`. - -## 0.34.0 - -- Update to `libp2p-core` `v0.34.0`. -- Add `Transport::poll` and `Transport::remove_listener` and remove `Transport::Listener` - for `ExtTransport`. Drive the `Listen` streams within `ExtTransport`. See [PR 2652]. - -[PR 2652]: https://github.com/libp2p/rust-libp2p/pull/2652 - -## 0.33.0 - -- Update to `libp2p-core` `v0.33.0`. - -## 0.32.0 [2022-02-22] - -- Update to `libp2p-core` `v0.32.0`. - -## 0.31.0 [2022-01-27] - -- Update dependencies. - -- Migrate to Rust edition 2021 (see [PR 2339]). 
- -[PR 2339]: https://github.com/libp2p/rust-libp2p/pull/2339 - -## 0.30.0 [2021-11-01] - -- Make default features of `libp2p-core` optional. - [PR 2181](https://github.com/libp2p/rust-libp2p/pull/2181) - -- Update dependencies. - -## 0.29.0 [2021-07-12] - -- Update dependencies. - -## 0.28.2 [2021-04-27] - -- Support dialing `Multiaddr` with `/p2p` protocol [PR - 2058](https://github.com/libp2p/rust-libp2p/pull/2058). - -## 0.28.1 [2021-04-01] - -- Require at least js-sys v0.3.50 [PR - 2023](https://github.com/libp2p/rust-libp2p/pull/2023). - -## 0.28.0 [2021-03-17] - -- Update `libp2p-core`. - -## 0.27.0 [2021-01-12] - -- Update dependencies. - -## 0.26.0 [2020-12-17] - -- Update `libp2p-core`. - -## 0.25.0 [2020-11-25] - -- Update `libp2p-core`. - -## 0.24.0 [2020-11-09] - -- Fix the WebSocket implementation parsing `x-parity-ws` multiaddresses as `x-parity-wss`. -- Update dependencies. - -## 0.23.0 [2020-10-16] - -- Update `libp2p-core` dependency. - -## 0.22.0 [2020-09-09] - -- Update `libp2p-core` dependency. - -## 0.21.0 [2020-08-18] - -- Update `libp2p-core` dependency. - -## 0.20.1 [2020-07-06] - -- Improve the code quality of the `websockets.js` binding with the browser's `WebSocket` API. - -## 0.20.0 [2020-07-01] - -- Updated dependencies. 
-- Support `/dns` in the websocket implementation - ([PR 1626](https://github.com/libp2p/rust-libp2p/pull/1626)) diff --git a/transports/wasm-ext/Cargo.toml b/transports/wasm-ext/Cargo.toml deleted file mode 100644 index 6d5ed8e229c9..000000000000 --- a/transports/wasm-ext/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "libp2p-wasm-ext" -edition = "2021" -rust-version = { workspace = true } -description = "Allows passing in an external transport in a WASM environment" -version = "0.40.0" -authors = ["Pierre Krieger "] -license = "MIT" -repository = "https://github.com/libp2p/rust-libp2p" -keywords = ["peer-to-peer", "libp2p", "networking"] -categories = ["network-programming", "asynchronous"] - -[dependencies] -futures = "0.3.28" -js-sys = "0.3.64" -libp2p-core = { workspace = true } -send_wrapper = "0.6.0" -wasm-bindgen = "0.2.87" -wasm-bindgen-futures = "0.4.37" - -[features] -websocket = [] - -# Passing arguments to the docsrs builder in order to properly document cfg's. -# More information: https://docs.rs/about/builds#cross-compiling -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] -rustc-args = ["--cfg", "docsrs"] - -[lints] -workspace = true diff --git a/transports/wasm-ext/src/lib.rs b/transports/wasm-ext/src/lib.rs deleted file mode 100644 index 94259cb1ee69..000000000000 --- a/transports/wasm-ext/src/lib.rs +++ /dev/null @@ -1,655 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Implementation of the libp2p `Transport` trait for external transports. -//! -//! This `Transport` is used in the context of WASM to allow delegating the transport mechanism -//! to the code that uses rust-libp2p, as opposed to inside of rust-libp2p itself. -//! -//! > **Note**: This only allows transports that produce a raw stream with the remote. You -//! > couldn't, for example, pass an implementation QUIC. -//! -//! # Usage -//! -//! Call `new()` with a JavaScript object that implements the interface described in the `ffi` -//! module. -//! 
- -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -use futures::{future::Ready, prelude::*, ready, stream::SelectAll}; -use libp2p_core::{ - connection::Endpoint, - transport::{ListenerId, TransportError, TransportEvent}, - Multiaddr, Transport, -}; -use send_wrapper::SendWrapper; -use std::{collections::VecDeque, error, fmt, io, mem, pin::Pin, task::Context, task::Poll}; -use wasm_bindgen::{prelude::*, JsCast}; -use wasm_bindgen_futures::JsFuture; - -/// Contains the definition that one must match on the JavaScript side. -pub mod ffi { - use wasm_bindgen::prelude::*; - - #[wasm_bindgen] - extern "C" { - /// Type of the object that allows opening connections. - pub type Transport; - /// Type of the object that represents an open connection with a remote. - pub type Connection; - /// Type of the object that represents an event generated by listening. - pub type ListenEvent; - /// Type of the object that represents an event containing a new connection with a remote. - pub type ConnectionEvent; - - /// Start attempting to dial the given multiaddress. - /// - /// The returned `Promise` must yield a [`Connection`] on success. - /// - /// If the multiaddress is not supported, you should return an instance of `Error` whose - /// `name` property has been set to the string `"NotSupportedError"`. - #[wasm_bindgen(method, catch)] - pub fn dial( - this: &Transport, - multiaddr: &str, - _role_override: bool, - ) -> Result; - - /// Start listening on the given multiaddress. - /// - /// The returned `Iterator` must yield `Promise`s to [`ListenEvent`] events. - /// - /// If the multiaddress is not supported, you should return an instance of `Error` whose - /// `name` property has been set to the string `"NotSupportedError"`. - #[wasm_bindgen(method, catch)] - pub fn listen_on(this: &Transport, multiaddr: &str) -> Result; - - /// Returns an iterator of JavaScript `Promise`s that resolve to `ArrayBuffer` objects - /// (or resolve to null, see below). 
These `ArrayBuffer` objects contain the data that the - /// remote has sent to us. If the remote closes the connection, the iterator must produce - /// a `Promise` that resolves to `null`. - #[wasm_bindgen(method, getter)] - pub fn read(this: &Connection) -> js_sys::Iterator; - - /// Writes data to the connection. Returns a `Promise` that resolves when the connection is - /// ready for writing again. - /// - /// If the `Promise` produces an error, the writing side of the connection is considered - /// unrecoverable and the connection should be closed as soon as possible. - /// - /// Guaranteed to only be called after the previous write promise has resolved. - #[wasm_bindgen(method, catch)] - pub fn write(this: &Connection, data: &[u8]) -> Result; - - /// Shuts down the writing side of the connection. After this has been called, the `write` - /// method will no longer be called. - #[wasm_bindgen(method, catch)] - pub fn shutdown(this: &Connection) -> Result<(), JsValue>; - - /// Closes the connection. No other method will be called on this connection anymore. - #[wasm_bindgen(method)] - pub fn close(this: &Connection); - - /// List of addresses we have started listening on. Must be an array of strings of - /// multiaddrs. - #[wasm_bindgen(method, getter)] - pub fn new_addrs(this: &ListenEvent) -> Option>; - - /// List of addresses that have expired. Must be an array of strings of multiaddrs. - #[wasm_bindgen(method, getter)] - pub fn expired_addrs(this: &ListenEvent) -> Option>; - - /// List of [`ConnectionEvent`] object that has been received. - #[wasm_bindgen(method, getter)] - pub fn new_connections(this: &ListenEvent) -> Option>; - - /// Promise to the next event that the listener will generate. - #[wasm_bindgen(method, getter)] - pub fn next_event(this: &ListenEvent) -> JsValue; - - /// The [`Connection`] object for communication with the remote. 
- #[wasm_bindgen(method, getter)] - pub fn connection(this: &ConnectionEvent) -> Connection; - - /// The address we observe for the remote connection. - #[wasm_bindgen(method, getter)] - pub fn observed_addr(this: &ConnectionEvent) -> String; - - /// The address we are listening on, that received the remote connection. - #[wasm_bindgen(method, getter)] - pub fn local_addr(this: &ConnectionEvent) -> String; - } - - #[cfg(feature = "websocket")] - #[wasm_bindgen(module = "/src/websockets.js")] - extern "C" { - /// Returns a `Transport` implemented using websockets. - pub fn websocket_transport() -> Transport; - } -} - -/// Implementation of `Transport` whose implementation is handled by some FFI. -pub struct ExtTransport { - inner: SendWrapper, - listeners: SelectAll, -} - -impl ExtTransport { - /// Creates a new `ExtTransport` that uses the given external `Transport`. - pub fn new(transport: ffi::Transport) -> Self { - ExtTransport { - inner: SendWrapper::new(transport), - listeners: SelectAll::new(), - } - } - - fn do_dial( - &mut self, - addr: Multiaddr, - role_override: Endpoint, - ) -> Result<::Dial, TransportError<::Error>> { - let promise = self - .inner - .dial( - &addr.to_string(), - matches!(role_override, Endpoint::Listener), - ) - .map_err(|err| { - if is_not_supported_error(&err) { - TransportError::MultiaddrNotSupported(addr) - } else { - TransportError::Other(JsErr::from(err)) - } - })?; - - Ok(Dial { - inner: SendWrapper::new(promise.into()), - }) - } -} - -impl fmt::Debug for ExtTransport { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("ExtTransport").finish() - } -} - -impl Transport for ExtTransport { - type Output = Connection; - type Error = JsErr; - type ListenerUpgrade = Ready>; - type Dial = Dial; - - fn listen_on( - &mut self, - listener_id: ListenerId, - addr: Multiaddr, - ) -> Result<(), TransportError> { - let iter = self.inner.listen_on(&addr.to_string()).map_err(|err| { - if is_not_supported_error(&err) { - 
TransportError::MultiaddrNotSupported(addr) - } else { - TransportError::Other(JsErr::from(err)) - } - })?; - let listen = Listen { - listener_id, - iterator: SendWrapper::new(iter), - next_event: None, - pending_events: VecDeque::new(), - is_closed: false, - }; - self.listeners.push(listen); - Ok(()) - } - - fn remove_listener(&mut self, id: ListenerId) -> bool { - match self.listeners.iter_mut().find(|l| l.listener_id == id) { - Some(listener) => { - listener.close(Ok(())); - true - } - None => false, - } - } - - fn dial(&mut self, addr: Multiaddr) -> Result> { - self.do_dial(addr, Endpoint::Dialer) - } - - fn dial_as_listener( - &mut self, - addr: Multiaddr, - ) -> Result> { - self.do_dial(addr, Endpoint::Listener) - } - - fn address_translation(&self, _server: &Multiaddr, _observed: &Multiaddr) -> Option { - None - } - - fn poll( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - match ready!(self.listeners.poll_next_unpin(cx)) { - Some(event) => Poll::Ready(event), - None => Poll::Pending, - } - } -} - -/// Future that dial a remote through an external transport. -#[must_use = "futures do nothing unless polled"] -pub struct Dial { - /// A promise that will resolve to a `ffi::Connection` on success. - inner: SendWrapper, -} - -impl fmt::Debug for Dial { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Dial").finish() - } -} - -impl Future for Dial { - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match Future::poll(Pin::new(&mut *self.inner), cx) { - Poll::Ready(Ok(connec)) => Poll::Ready(Ok(Connection::new(connec.into()))), - Poll::Pending => Poll::Pending, - Poll::Ready(Err(err)) => Poll::Ready(Err(JsErr::from(err))), - } - } -} - -/// Stream that listens for incoming connections through an external transport. -#[must_use = "futures do nothing unless polled"] -pub struct Listen { - listener_id: ListenerId, - /// Iterator of `ListenEvent`s. 
- iterator: SendWrapper, - /// Promise that will yield the next `ListenEvent`. - next_event: Option>, - /// List of events that we are waiting to propagate. - pending_events: VecDeque<::Item>, - /// If the iterator is done close the listener. - is_closed: bool, -} - -impl Listen { - /// Report the listener as closed and terminate its stream. - fn close(&mut self, reason: Result<(), JsErr>) { - self.pending_events - .push_back(TransportEvent::ListenerClosed { - listener_id: self.listener_id, - reason, - }); - self.is_closed = true; - } -} - -impl fmt::Debug for Listen { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Listen").field(&self.listener_id).finish() - } -} - -impl Stream for Listen { - type Item = TransportEvent<::ListenerUpgrade, JsErr>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(Some(ev)); - } - - if self.is_closed { - // Terminate the stream if the listener closed and all remaining events have been reported. - return Poll::Ready(None); - } - - // Try to fill `self.next_event` if necessary and possible. If we fail, then - // `Ready(None)` is returned below. 
- if self.next_event.is_none() { - if let Ok(ev) = self.iterator.next() { - if !ev.done() { - let promise: js_sys::Promise = ev.value().into(); - self.next_event = Some(SendWrapper::new(promise.into())); - } - } - } - - let event = if let Some(next_event) = self.next_event.as_mut() { - let e = match Future::poll(Pin::new(&mut **next_event), cx) { - Poll::Ready(Ok(ev)) => ffi::ListenEvent::from(ev), - Poll::Pending => return Poll::Pending, - Poll::Ready(Err(err)) => { - self.close(Err(err.into())); - continue; - } - }; - self.next_event = None; - e - } else { - self.close(Ok(())); - continue; - }; - - let listener_id = self.listener_id; - - if let Some(addrs) = event.new_addrs() { - for addr in addrs.iter() { - match js_value_to_addr(addr) { - Ok(addr) => self.pending_events.push_back(TransportEvent::NewAddress { - listener_id, - listen_addr: addr, - }), - Err(err) => self - .pending_events - .push_back(TransportEvent::ListenerError { - listener_id, - error: err, - }), - }; - } - } - - if let Some(upgrades) = event.new_connections() { - for upgrade in upgrades.iter().cloned() { - let upgrade: ffi::ConnectionEvent = upgrade.into(); - match upgrade.local_addr().parse().and_then(|local| { - let observed = upgrade.observed_addr().parse()?; - Ok((local, observed)) - }) { - Ok((local_addr, send_back_addr)) => { - self.pending_events.push_back(TransportEvent::Incoming { - listener_id, - local_addr, - send_back_addr, - upgrade: futures::future::ok(Connection::new(upgrade.connection())), - }) - } - Err(err) => self - .pending_events - .push_back(TransportEvent::ListenerError { - listener_id, - error: err.into(), - }), - } - } - } - - if let Some(addrs) = event.expired_addrs() { - for addr in addrs.iter() { - match js_value_to_addr(addr) { - Ok(addr) => self - .pending_events - .push_back(TransportEvent::AddressExpired { - listener_id, - listen_addr: addr, - }), - Err(err) => self - .pending_events - .push_back(TransportEvent::ListenerError { - listener_id, - error: err, - 
}), - } - } - } - } - } -} - -/// Active stream of data with a remote. -/// -/// It is guaranteed that each call to `io::Write::write` on this object maps to exactly one call -/// to `write` on the FFI. In other words, no internal buffering happens for writes, and data can't -/// be split. -pub struct Connection { - /// The FFI object. - inner: SendWrapper, - - /// The iterator that was returned by `read()`. - read_iterator: SendWrapper, - - /// Reading part of the connection. - read_state: ConnectionReadState, - - /// When we write data using the FFI, a promise is returned containing the moment when the - /// underlying transport is ready to accept data again. This promise is stored here. - /// If this is `Some`, we must wait until the contained promise is resolved to write again. - previous_write_promise: Option>, -} - -impl Connection { - /// Initializes a `Connection` object from the FFI connection. - fn new(inner: ffi::Connection) -> Self { - let read_iterator = inner.read(); - - Connection { - inner: SendWrapper::new(inner), - read_iterator: SendWrapper::new(read_iterator), - read_state: ConnectionReadState::PendingData(Vec::new()), - previous_write_promise: None, - } - } -} - -/// Reading side of the connection. -enum ConnectionReadState { - /// Some data have been read and are waiting to be transferred. Can be empty. - PendingData(Vec), - /// Waiting for a `Promise` containing the next data. - Waiting(SendWrapper), - /// An error occurred or an earlier read yielded EOF. 
- Finished, -} - -impl fmt::Debug for Connection { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Connection").finish() - } -} - -impl AsyncRead for Connection { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - loop { - match mem::replace(&mut self.read_state, ConnectionReadState::Finished) { - ConnectionReadState::Finished => { - break Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) - } - - ConnectionReadState::PendingData(ref data) if data.is_empty() => { - let iter_next = self.read_iterator.next().map_err(JsErr::from)?; - if iter_next.done() { - self.read_state = ConnectionReadState::Finished; - } else { - let promise: js_sys::Promise = iter_next.value().into(); - let promise = SendWrapper::new(promise.into()); - self.read_state = ConnectionReadState::Waiting(promise); - } - continue; - } - - ConnectionReadState::PendingData(mut data) => { - debug_assert!(!data.is_empty()); - if buf.len() <= data.len() { - buf.copy_from_slice(&data[..buf.len()]); - self.read_state = - ConnectionReadState::PendingData(data.split_off(buf.len())); - break Poll::Ready(Ok(buf.len())); - } else { - let len = data.len(); - buf[..len].copy_from_slice(&data); - self.read_state = ConnectionReadState::PendingData(Vec::new()); - break Poll::Ready(Ok(len)); - } - } - - ConnectionReadState::Waiting(mut promise) => { - let data = match Future::poll(Pin::new(&mut *promise), cx) { - Poll::Ready(Ok(ref data)) if data.is_null() => break Poll::Ready(Ok(0)), - Poll::Ready(Ok(data)) => data, - Poll::Ready(Err(err)) => { - break Poll::Ready(Err(io::Error::from(JsErr::from(err)))) - } - Poll::Pending => { - self.read_state = ConnectionReadState::Waiting(promise); - break Poll::Pending; - } - }; - - // Try to directly copy the data into `buf` if it is large enough, otherwise - // transition to `PendingData` and loop again. 
- let data = js_sys::Uint8Array::new(&data); - let data_len = data.length() as usize; - if data_len <= buf.len() { - data.copy_to(&mut buf[..data_len]); - self.read_state = ConnectionReadState::PendingData(Vec::new()); - break Poll::Ready(Ok(data_len)); - } else { - let mut tmp_buf = vec![0; data_len]; - data.copy_to(&mut tmp_buf[..]); - self.read_state = ConnectionReadState::PendingData(tmp_buf); - continue; - } - } - } - } - } -} - -impl AsyncWrite for Connection { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - // Note: as explained in the doc-comments of `Connection`, each call to this function must - // map to exactly one call to `self.inner.write()`. - - if let Some(mut promise) = self.previous_write_promise.take() { - match Future::poll(Pin::new(&mut *promise), cx) { - Poll::Ready(Ok(_)) => (), - Poll::Ready(Err(err)) => { - return Poll::Ready(Err(io::Error::from(JsErr::from(err)))) - } - Poll::Pending => { - self.previous_write_promise = Some(promise); - return Poll::Pending; - } - } - } - - debug_assert!(self.previous_write_promise.is_none()); - self.previous_write_promise = Some(SendWrapper::new( - self.inner.write(buf).map_err(JsErr::from)?.into(), - )); - Poll::Ready(Ok(buf.len())) - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // There's no flushing mechanism. In the FFI we consider that writing implicitly flushes. - Poll::Ready(Ok(())) - } - - fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // Shutting down is considered instantaneous. - match self.inner.shutdown() { - Ok(()) => Poll::Ready(Ok(())), - Err(err) => Poll::Ready(Err(io::Error::from(JsErr::from(err)))), - } - } -} - -impl Drop for Connection { - fn drop(&mut self) { - self.inner.close(); - } -} - -/// Returns true if `err` is an error about an address not being supported. 
-fn is_not_supported_error(err: &JsValue) -> bool { - if let Some(err) = err.dyn_ref::() { - err.name() == "NotSupportedError" - } else { - false - } -} - -/// Turns a `JsValue` containing a `String` into a `Multiaddr`, if possible. -fn js_value_to_addr(addr: &JsValue) -> Result { - if let Some(addr) = addr.as_string() { - Ok(addr.parse()?) - } else { - Err(JsValue::from_str("Element in new_addrs is not a string").into()) - } -} - -/// Error that can be generated by the `ExtTransport`. -pub struct JsErr(SendWrapper); - -impl From for JsErr { - fn from(val: JsValue) -> JsErr { - JsErr(SendWrapper::new(val)) - } -} - -impl From for JsErr { - fn from(err: libp2p_core::multiaddr::Error) -> JsErr { - JsValue::from_str(&err.to_string()).into() - } -} - -impl From for io::Error { - fn from(err: JsErr) -> io::Error { - io::Error::new(io::ErrorKind::Other, err.to_string()) - } -} - -impl fmt::Debug for JsErr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{self}") - } -} - -impl fmt::Display for JsErr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(s) = self.0.as_string() { - write!(f, "{s}") - } else if let Some(err) = self.0.dyn_ref::() { - write!(f, "{}", String::from(err.message())) - } else if let Some(obj) = self.0.dyn_ref::() { - write!(f, "{}", String::from(obj.to_string())) - } else { - write!(f, "{:?}", &*self.0) - } - } -} - -impl error::Error for JsErr {} diff --git a/transports/wasm-ext/src/websockets.js b/transports/wasm-ext/src/websockets.js deleted file mode 100644 index 1ef2faf6ded1..000000000000 --- a/transports/wasm-ext/src/websockets.js +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -export const websocket_transport = () => { - return { - dial: dial, - listen_on: (addr) => { - let err = new Error("Listening on WebSockets is not possible from within a browser"); - err.name = "NotSupportedError"; - throw err; - }, - }; -} - -/// Turns a string multiaddress into a WebSockets string URL. 
-const multiaddr_to_ws = (addr) => { - let parsed = addr.match(/^\/(ip4|ip6|dns4|dns6|dns)\/(.*?)\/tcp\/(.*?)\/(ws|wss|x-parity-ws\/(.*)|x-parity-wss\/(.*))(|\/p2p\/[a-zA-Z0-9]+)$/); - if (parsed != null) { - let proto = 'wss'; - if (parsed[4] == 'ws' || parsed[4].startsWith('x-parity-ws/')) { - proto = 'ws'; - } - let url = decodeURIComponent(parsed[5] || parsed[6] || ''); - if (parsed[1] == 'ip6') { - return proto + "://[" + parsed[2] + "]:" + parsed[3] + url; - } else { - return proto + "://" + parsed[2] + ":" + parsed[3] + url; - } - } - - let err = new Error("Address not supported: " + addr); - err.name = "NotSupportedError"; - throw err; -} - -// Attempt to dial a multiaddress. -const dial = (addr) => { - let ws = new WebSocket(multiaddr_to_ws(addr)); - ws.binaryType = "arraybuffer"; - let reader = read_queue(); - - return new Promise((open_resolve, open_reject) => { - ws.onerror = (ev) => { - // If `open_resolve` has been called earlier, calling `open_reject` seems to be - // silently ignored. It is easier to unconditionally call `open_reject` rather than - // check in which state the connection is, which would be error-prone. - open_reject(ev); - // Injecting an EOF is how we report to the reading side that the connection has been - // closed. Injecting multiple EOFs is harmless. - reader.inject_eof(); - }; - ws.onclose = (ev) => { - // Same remarks as above. - open_reject(ev); - reader.inject_eof(); - }; - - // We inject all incoming messages into the queue unconditionally. The caller isn't - // supposed to access this queue unless the connection is open. - ws.onmessage = (ev) => reader.inject_array_buffer(ev.data); - - ws.onopen = () => open_resolve({ - read: (function*() { while(ws.readyState == 1) { yield reader.next(); } })(), - write: (data) => { - if (ws.readyState == 1) { - // The passed in `data` is an `ArrayBufferView` [0]. 
If the - // underlying typed array is a `SharedArrayBuffer` (when - // using WASM threads, so multiple web workers sharing - // memory) the WebSocket's `send` method errors [1][2][3]. - // This limitation will probably be lifted in the future, - // but for now we have to make a copy here .. - // - // [0]: https://developer.mozilla.org/en-US/docs/Web/API/ArrayBufferView - // [1]: https://chromium.googlesource.com/chromium/src/+/1438f63f369fed3766fa5031e7a252c986c69be6%5E%21/ - // [2]: https://bugreports.qt.io/browse/QTBUG-78078 - // [3]: https://chromium.googlesource.com/chromium/src/+/HEAD/third_party/blink/renderer/bindings/IDLExtendedAttributes.md#AllowShared_p - ws.send(data.slice(0)); - return promise_when_send_finished(ws); - } else { - return Promise.reject("WebSocket is closed"); - } - }, - shutdown: () => ws.close(), - close: () => {} - }); - }); -} - -// Takes a WebSocket object and returns a Promise that resolves when bufferedAmount is low enough -// to allow more data to be sent. -const promise_when_send_finished = (ws) => { - return new Promise((resolve, reject) => { - function check() { - if (ws.readyState != 1) { - reject("WebSocket is closed"); - return; - } - - // We put an arbitrary threshold of 8 kiB of buffered data. - if (ws.bufferedAmount < 8 * 1024) { - resolve(); - } else { - setTimeout(check, 100); - } - } - - check(); - }) -} - -// Creates a queue reading system. -const read_queue = () => { - // State of the queue. - let state = { - // Array of promises resolving to `ArrayBuffer`s, that haven't been transmitted back with - // `next` yet. - queue: new Array(), - // If `resolve` isn't null, it is a "resolve" function of a promise that has already been - // returned by `next`. It should be called with some data. - resolve: null, - }; - - return { - // Inserts a new Blob in the queue. 
- inject_array_buffer: (buffer) => { - if (state.resolve != null) { - state.resolve(buffer); - state.resolve = null; - } else { - state.queue.push(Promise.resolve(buffer)); - } - }, - - // Inserts an EOF message in the queue. - inject_eof: () => { - if (state.resolve != null) { - state.resolve(null); - state.resolve = null; - } else { - state.queue.push(Promise.resolve(null)); - } - }, - - // Returns a Promise that yields the next entry as an ArrayBuffer. - next: () => { - if (state.queue.length != 0) { - return state.queue.shift(0); - } else { - if (state.resolve !== null) - throw "Internal error: already have a pending promise"; - return new Promise((resolve, reject) => { - state.resolve = resolve; - }); - } - } - }; -}; diff --git a/transports/webrtc-websys/CHANGELOG.md b/transports/webrtc-websys/CHANGELOG.md index 7c40c08f1f61..634120c53c3d 100644 --- a/transports/webrtc-websys/CHANGELOG.md +++ b/transports/webrtc-websys/CHANGELOG.md @@ -1,3 +1,15 @@ +## 0.3.0-alpha + +- Bump version in order to publish a new version dependent on latest `libp2p-core`. + See [PR 4959](https://github.com/libp2p/rust-libp2p/pull/4959). +- Remove `libp2p_noise` from the public API. + See [PR 4969](https://github.com/libp2p/rust-libp2p/pull/4969). + +## 0.2.0-alpha + +- Rename `Error::JsError` to `Error::Js`. + See [PR 4653](https://github.com/libp2p/rust-libp2p/pull/4653) + ## 0.1.0-alpha - Initial alpha release. 
diff --git a/transports/webrtc-websys/Cargo.toml b/transports/webrtc-websys/Cargo.toml index cb90573b1fe4..781afc944752 100644 --- a/transports/webrtc-websys/Cargo.toml +++ b/transports/webrtc-websys/Cargo.toml @@ -8,32 +8,24 @@ license = "MIT" name = "libp2p-webrtc-websys" repository = "https://github.com/libp2p/rust-libp2p" rust-version = { workspace = true } -version = "0.1.0-alpha" +version = "0.3.0-alpha" publish = true [dependencies] bytes = "1" futures = "0.3" -futures-timer = "3" -getrandom = { version = "0.2.9", features = ["js"] } +getrandom = { version = "0.2.12", features = ["js"] } hex = "0.4.3" js-sys = { version = "0.3" } libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -libp2p-noise = { workspace = true } libp2p-webrtc-utils = { workspace = true } -log = "0.4.19" send_wrapper = { version = "0.6.0", features = ["futures"] } -serde = { version = "1.0", features = ["derive"] } thiserror = "1" -wasm-bindgen = { version = "0.2.87" } -wasm-bindgen-futures = { version = "0.4.37" } -web-sys = { version = "0.3.64", features = ["Document", "Location", "MessageEvent", "Navigator", "RtcCertificate", "RtcConfiguration", "RtcDataChannel", "RtcDataChannelEvent", "RtcDataChannelInit", "RtcDataChannelState", "RtcDataChannelType", "RtcPeerConnection", "RtcSdpType", "RtcSessionDescription", "RtcSessionDescriptionInit", "Window"] } - -[dev-dependencies] -hex-literal = "0.4" -libp2p-ping = { workspace = true } -libp2p-swarm = { workspace = true, features = ["wasm-bindgen"] } +tracing = "0.1.37" +wasm-bindgen = { version = "0.2.90" } +wasm-bindgen-futures = { version = "0.4.41" } +web-sys = { version = "0.3.67", features = ["Document", "Location", "MessageEvent", "Navigator", "RtcCertificate", "RtcConfiguration", "RtcDataChannel", "RtcDataChannelEvent", "RtcDataChannelInit", "RtcDataChannelState", "RtcDataChannelType", "RtcPeerConnection", "RtcSdpType", "RtcSessionDescription", "RtcSessionDescriptionInit", "Window"] } [lints] workspace = true 
diff --git a/transports/webrtc-websys/src/connection.rs b/transports/webrtc-websys/src/connection.rs index dfdebbc98c04..b858237da635 100644 --- a/transports/webrtc-websys/src/connection.rs +++ b/transports/webrtc-websys/src/connection.rs @@ -47,16 +47,16 @@ impl Connection { let (mut tx_ondatachannel, rx_ondatachannel) = mpsc::channel(4); // we may get more than one data channel opened on a single peer connection let ondatachannel_closure = Closure::new(move |ev: RtcDataChannelEvent| { - log::trace!("New data channel"); + tracing::trace!("New data channel"); if let Err(e) = tx_ondatachannel.try_send(ev.channel()) { if e.is_full() { - log::warn!("Remote is opening too many data channels, we can't keep up!"); + tracing::warn!("Remote is opening too many data channels, we can't keep up!"); return; } if e.is_disconnected() { - log::warn!("Receiver is gone, are we shutting down?"); + tracing::warn!("Receiver is gone, are we shutting down?"); } } }); @@ -90,7 +90,7 @@ impl Connection { /// if they are used. fn close_connection(&mut self) { if !self.closed { - log::trace!("connection::close_connection"); + tracing::trace!("connection::close_connection"); self.inner.inner.close(); self.closed = true; } @@ -121,7 +121,7 @@ impl StreamMuxer for Connection { } None => { // This only happens if the [`RtcPeerConnection::ondatachannel`] closure gets freed which means we are most likely shutting down the connection. 
- log::debug!("`Sender` for inbound data channels has been dropped"); + tracing::debug!("`Sender` for inbound data channels has been dropped"); Poll::Ready(Err(Error::Connection("connection closed".to_owned()))) } } @@ -131,7 +131,7 @@ impl StreamMuxer for Connection { mut self: Pin<&mut Self>, _: &mut Context<'_>, ) -> Poll> { - log::trace!("Creating outbound data channel"); + tracing::trace!("Creating outbound data channel"); let data_channel = self.inner.new_regular_data_channel(); let stream = self.new_stream_from_data_channel(data_channel); @@ -144,7 +144,7 @@ impl StreamMuxer for Connection { mut self: Pin<&mut Self>, _cx: &mut Context<'_>, ) -> Poll> { - log::trace!("connection::poll_close"); + tracing::trace!("connection::poll_close"); self.close_connection(); Poll::Ready(Ok(())) @@ -158,7 +158,7 @@ impl StreamMuxer for Connection { match ready!(self.drop_listeners.poll_next_unpin(cx)) { Some(Ok(())) => {} Some(Err(e)) => { - log::debug!("a DropListener failed: {e}") + tracing::debug!("a DropListener failed: {e}") } None => { self.no_drop_listeners_waker = Some(cx.waker().clone()); @@ -252,11 +252,11 @@ impl RtcPeerConnection { let sdp = &self .inner .local_description() - .ok_or_else(|| Error::JsError("No local description".to_string()))? + .ok_or_else(|| Error::Js("No local description".to_string()))? 
.sdp(); - let fingerprint = parse_fingerprint(sdp) - .ok_or_else(|| Error::JsError("No fingerprint in SDP".to_string()))?; + let fingerprint = + parse_fingerprint(sdp).ok_or_else(|| Error::Js("No fingerprint in SDP".to_string()))?; Ok(fingerprint) } @@ -297,11 +297,10 @@ mod sdp_tests { #[test] fn test_fingerprint() { - let sdp: &str = "v=0\r\no=- 0 0 IN IP6 ::1\r\ns=-\r\nc=IN IP6 ::1\r\nt=0 0\r\na=ice-lite\r\nm=application 61885 UDP/DTLS/SCTP webrtc-datachannel\r\na=mid:0\r\na=setup:passive\r\na=ice-ufrag:libp2p+webrtc+v1/YwapWySn6fE6L9i47PhlB6X4gzNXcgFs\r\na=ice-pwd:libp2p+webrtc+v1/YwapWySn6fE6L9i47PhlB6X4gzNXcgFs\r\na=fingerprint:sha-256 A8:17:77:1E:02:7E:D1:2B:53:92:70:A6:8E:F9:02:CC:21:72:3A:92:5D:F4:97:5F:27:C4:5E:75:D4:F4:31:89\r\na=sctp-port:5000\r\na=max-message-size:16384\r\na=candidate:1467250027 1 UDP 1467250027 ::1 61885 typ host\r\n"; - let fingerprint = match parse_fingerprint(sdp) { - Some(fingerprint) => fingerprint, - None => panic!("No fingerprint found"), - }; + let sdp = "v=0\r\no=- 0 0 IN IP6 ::1\r\ns=-\r\nc=IN IP6 ::1\r\nt=0 0\r\na=ice-lite\r\nm=application 61885 UDP/DTLS/SCTP webrtc-datachannel\r\na=mid:0\r\na=setup:passive\r\na=ice-ufrag:libp2p+webrtc+v1/YwapWySn6fE6L9i47PhlB6X4gzNXcgFs\r\na=ice-pwd:libp2p+webrtc+v1/YwapWySn6fE6L9i47PhlB6X4gzNXcgFs\r\na=fingerprint:sha-256 A8:17:77:1E:02:7E:D1:2B:53:92:70:A6:8E:F9:02:CC:21:72:3A:92:5D:F4:97:5F:27:C4:5E:75:D4:F4:31:89\r\na=sctp-port:5000\r\na=max-message-size:16384\r\na=candidate:1467250027 1 UDP 1467250027 ::1 61885 typ host\r\n"; + + let fingerprint = parse_fingerprint(sdp).unwrap(); + assert_eq!(fingerprint.algorithm(), "sha-256"); assert_eq!(fingerprint.to_sdp_format(), "A8:17:77:1E:02:7E:D1:2B:53:92:70:A6:8E:F9:02:CC:21:72:3A:92:5D:F4:97:5F:27:C4:5E:75:D4:F4:31:89"); } diff --git a/transports/webrtc-websys/src/error.rs b/transports/webrtc-websys/src/error.rs index e226dea80690..a2df1a182ea4 100644 --- a/transports/webrtc-websys/src/error.rs +++ b/transports/webrtc-websys/src/error.rs 
@@ -8,7 +8,7 @@ pub enum Error { InvalidMultiaddr(&'static str), #[error("JavaScript error: {0}")] - JsError(String), + Js(String), #[error("JavaScript typecasting failed")] JsCastFailed, @@ -20,9 +20,14 @@ pub enum Error { Connection(String), #[error("Authentication error")] - Authentication(#[from] libp2p_noise::Error), + Authentication(#[from] AuthenticationError), } +/// New-type wrapper to hide `libp2p_noise` from the public API. +#[derive(thiserror::Error, Debug)] +#[error(transparent)] +pub struct AuthenticationError(pub(crate) libp2p_webrtc_utils::noise::Error); + impl Error { pub(crate) fn from_js_value(value: JsValue) -> Self { let s = if value.is_instance_of::() { @@ -34,11 +39,11 @@ impl Error { "Unknown error".to_string() }; - Error::JsError(s) + Error::Js(s) } } -impl std::convert::From for Error { +impl From for Error { fn from(value: JsValue) -> Self { Error::from_js_value(value) } @@ -46,12 +51,12 @@ impl std::convert::From for Error { impl From for Error { fn from(value: String) -> Self { - Error::JsError(value) + Error::Js(value) } } impl From for Error { fn from(value: std::io::Error) -> Self { - Error::JsError(value.to_string()) + Error::Js(value.to_string()) } } diff --git a/transports/webrtc-websys/src/sdp.rs b/transports/webrtc-websys/src/sdp.rs index 6f50262b988a..439182ea4dbe 100644 --- a/transports/webrtc-websys/src/sdp.rs +++ b/transports/webrtc-websys/src/sdp.rs @@ -46,7 +46,7 @@ pub(crate) fn offer(offer: String, client_ufrag: &str) -> RtcSessionDescriptionI // remove any double \r\n let munged_sdp_offer = munged_sdp_offer.replace("\r\n\r\n", "\r\n"); - log::trace!("Created SDP offer: {munged_sdp_offer}"); + tracing::trace!(offer=%munged_sdp_offer, "Created SDP offer"); let mut offer_obj = RtcSessionDescriptionInit::new(RtcSdpType::Offer); offer_obj.sdp(&munged_sdp_offer); diff --git a/transports/webrtc-websys/src/stream/poll_data_channel.rs b/transports/webrtc-websys/src/stream/poll_data_channel.rs index 9c9b19cdb324..0ee4f7920c9a 
100644 --- a/transports/webrtc-websys/src/stream/poll_data_channel.rs +++ b/transports/webrtc-websys/src/stream/poll_data_channel.rs @@ -53,7 +53,7 @@ impl PollDataChannel { let open_waker = open_waker.clone(); move |_: RtcDataChannelEvent| { - log::trace!("DataChannel opened"); + tracing::trace!("DataChannel opened"); open_waker.wake(); } }); @@ -65,7 +65,7 @@ impl PollDataChannel { let write_waker = write_waker.clone(); move |_: Event| { - log::trace!("DataChannel available for writing (again)"); + tracing::trace!("DataChannel available for writing (again)"); write_waker.wake(); } }); @@ -76,7 +76,7 @@ impl PollDataChannel { let close_waker = close_waker.clone(); move |_: Event| { - log::trace!("DataChannel closed"); + tracing::trace!("DataChannel closed"); close_waker.wake(); } }); @@ -98,7 +98,7 @@ impl PollDataChannel { if read_buffer.len() + data.length() as usize > MAX_MSG_LEN { overloaded.store(true, Ordering::SeqCst); - log::warn!("Remote is overloading us with messages, resetting stream",); + tracing::warn!("Remote is overloading us with messages, resetting stream",); return; } diff --git a/transports/webrtc-websys/src/upgrade.rs b/transports/webrtc-websys/src/upgrade.rs index 092baed50c43..d42f2e3ae18f 100644 --- a/transports/webrtc-websys/src/upgrade.rs +++ b/transports/webrtc-websys/src/upgrade.rs @@ -1,5 +1,6 @@ use super::Error; use crate::connection::RtcPeerConnection; +use crate::error::AuthenticationError; use crate::sdp; use crate::Connection; use libp2p_identity::{Keypair, PeerId}; @@ -45,12 +46,14 @@ async fn outbound_inner( let local_fingerprint = rtc_peer_connection.local_fingerprint()?; - log::trace!("local_fingerprint: {:?}", local_fingerprint); - log::trace!("remote_fingerprint: {:?}", remote_fingerprint); + tracing::trace!(?local_fingerprint); + tracing::trace!(?remote_fingerprint); - let peer_id = noise::outbound(id_keys, channel, remote_fingerprint, local_fingerprint).await?; + let peer_id = noise::outbound(id_keys, channel, 
remote_fingerprint, local_fingerprint) + .await + .map_err(AuthenticationError)?; - log::debug!("Remote peer identified as {peer_id}"); + tracing::debug!(peer=%peer_id, "Remote peer identified"); Ok((peer_id, Connection::new(rtc_peer_connection))) } diff --git a/transports/webrtc/CHANGELOG.md b/transports/webrtc/CHANGELOG.md index 710c2e315d98..930526d58d5f 100644 --- a/transports/webrtc/CHANGELOG.md +++ b/transports/webrtc/CHANGELOG.md @@ -1,3 +1,13 @@ +## 0.7.1-alpha + +- Bump `libp2p-webrtc-utils` dependency to `0.2.0`. + See [PR 5118](https://github.com/libp2p/rust-libp2p/pull/5118). + +## 0.7.0-alpha + +- Bump version in order to publish a new version dependent on latest `libp2p-core`. + See [PR 4959](https://github.com/libp2p/rust-libp2p/pull/4959). + ## 0.6.1-alpha - Move common dependencies to `libp2p-webrtc-utils` crate. diff --git a/transports/webrtc/Cargo.toml b/transports/webrtc/Cargo.toml index 1c5d23b46528..1bd1bd57a618 100644 --- a/transports/webrtc/Cargo.toml +++ b/transports/webrtc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-webrtc" -version = "0.6.1-alpha" +version = "0.7.1-alpha" authors = ["Parity Technologies "] description = "WebRTC transport for libp2p" repository = "https://github.com/libp2p/rust-libp2p" @@ -16,21 +16,21 @@ bytes = "1" futures = "0.3" futures-timer = "3" hex = "0.4" -if-watch = "3.0" +if-watch = "3.2" libp2p-core = { workspace = true } libp2p-noise = { workspace = true } libp2p-identity = { workspace = true } libp2p-webrtc-utils = { workspace = true } -log = "0.4" multihash = { workspace = true } rand = "0.8" -rcgen = "0.11.1" +rcgen = "0.11.3" serde = { version = "1.0", features = ["derive"] } stun = "0.5" thiserror = "1" tinytemplate = "1.2" -tokio = { version = "1.32", features = ["net"], optional = true } +tokio = { version = "1.36", features = ["net"], optional = true } tokio-util = { version = "0.7", features = ["compat"], optional = true } +tracing = "0.1.37" webrtc = { version = "0.9.0", optional = true } 
[features] @@ -38,9 +38,11 @@ tokio = ["dep:tokio", "dep:tokio-util", "dep:webrtc", "if-watch/tokio"] pem = ["webrtc?/pem"] [dev-dependencies] -env_logger = "0.10" -tokio = { version = "1.32", features = ["full"] } +libp2p-identity = { workspace = true, features = ["rand"] } +tokio = { version = "1.36", features = ["full"] } quickcheck = "1.0.3" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + [[test]] name = "smoke" diff --git a/transports/webrtc/src/tokio/connection.rs b/transports/webrtc/src/tokio/connection.rs index 29983d720b5c..3bcc4c3193ef 100644 --- a/transports/webrtc/src/tokio/connection.rs +++ b/transports/webrtc/src/tokio/connection.rs @@ -101,7 +101,7 @@ impl Connection { tx: Arc>>>, ) { rtc_conn.on_data_channel(Box::new(move |data_channel: Arc| { - log::debug!("Incoming data channel {}", data_channel.id()); + tracing::debug!(channel=%data_channel.id(), "Incoming data channel"); let tx = tx.clone(); @@ -109,7 +109,7 @@ impl Connection { data_channel.on_open({ let data_channel = data_channel.clone(); Box::new(move || { - log::debug!("Data channel {} open", data_channel.id()); + tracing::debug!(channel=%data_channel.id(), "Data channel open"); Box::pin(async move { let data_channel = data_channel.clone(); @@ -118,7 +118,7 @@ impl Connection { Ok(detached) => { let mut tx = tx.lock().await; if let Err(e) = tx.try_send(detached.clone()) { - log::error!("Can't send data channel {}: {}", id, e); + tracing::error!(channel=%id, "Can't send data channel: {}", e); // We're not accepting data channels fast enough => // close this channel. // @@ -126,16 +126,16 @@ impl Connection { // during the negotiation process, but it's not // possible with the current API. 
if let Err(e) = detached.close().await { - log::error!( - "Failed to close data channel {}: {}", - id, + tracing::error!( + channel=%id, + "Failed to close data channel: {}", e ); } } } Err(e) => { - log::error!("Can't detach data channel {}: {}", id, e); + tracing::error!(channel=%id, "Can't detach data channel: {}", e); } }; }) @@ -156,7 +156,7 @@ impl StreamMuxer for Connection { ) -> Poll> { match ready!(self.incoming_data_channels_rx.poll_next_unpin(cx)) { Some(detached) => { - log::trace!("Incoming stream {}", detached.stream_identifier()); + tracing::trace!(stream=%detached.stream_identifier(), "Incoming stream"); let (stream, drop_listener) = Stream::new(detached); self.drop_listeners.push(drop_listener); @@ -185,7 +185,7 @@ impl StreamMuxer for Connection { match ready!(self.drop_listeners.poll_next_unpin(cx)) { Some(Ok(())) => {} Some(Err(e)) => { - log::debug!("a DropListener failed: {e}") + tracing::debug!("a DropListener failed: {e}") } None => { self.no_drop_listeners_waker = Some(cx.waker().clone()); @@ -208,7 +208,7 @@ impl StreamMuxer for Connection { // No need to hold the lock during the DTLS handshake. 
drop(peer_conn); - log::trace!("Opening data channel {}", data_channel.id()); + tracing::trace!(channel=%data_channel.id(), "Opening data channel"); let (tx, rx) = oneshot::channel::>(); @@ -226,7 +226,7 @@ impl StreamMuxer for Connection { Ok(detached) => { self.outbound_fut = None; - log::trace!("Outbound stream {}", detached.stream_identifier()); + tracing::trace!(stream=%detached.stream_identifier(), "Outbound stream"); let (stream, drop_listener) = Stream::new(detached); self.drop_listeners.push(drop_listener); @@ -244,7 +244,7 @@ impl StreamMuxer for Connection { } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - log::debug!("Closing connection"); + tracing::debug!("Closing connection"); let peer_conn = self.peer_conn.clone(); let fut = self.close_fut.get_or_insert(Box::pin(async move { @@ -275,7 +275,7 @@ pub(crate) async fn register_data_channel_open_handler( data_channel.on_open({ let data_channel = data_channel.clone(); Box::new(move || { - log::debug!("Data channel {} open", data_channel.id()); + tracing::debug!(channel=%data_channel.id(), "Data channel open"); Box::pin(async move { let data_channel = data_channel.clone(); @@ -283,14 +283,14 @@ pub(crate) async fn register_data_channel_open_handler( match data_channel.detach().await { Ok(detached) => { if let Err(e) = data_channel_tx.send(detached.clone()) { - log::error!("Can't send data channel {}: {:?}", id, e); + tracing::error!(channel=%id, "Can't send data channel: {:?}", e); if let Err(e) = detached.close().await { - log::error!("Failed to close data channel {}: {}", id, e); + tracing::error!(channel=%id, "Failed to close data channel: {}", e); } } } Err(e) => { - log::error!("Can't detach data channel {}: {}", id, e); + tracing::error!(channel=%id, "Can't detach data channel: {}", e); } }; }) diff --git a/transports/webrtc/src/tokio/sdp.rs b/transports/webrtc/src/tokio/sdp.rs index e49345a01b2e..8549a864dcc1 100644 --- a/transports/webrtc/src/tokio/sdp.rs +++ 
b/transports/webrtc/src/tokio/sdp.rs @@ -49,7 +49,7 @@ pub(crate) fn offer(addr: SocketAddr, client_ufrag: &str) -> RTCSessionDescripti client_ufrag, ); - log::trace!("Created SDP offer: {offer}"); + tracing::trace!(offer=%offer, "Created SDP offer"); RTCSessionDescription::offer(offer).unwrap() } diff --git a/transports/webrtc/src/tokio/transport.rs b/transports/webrtc/src/tokio/transport.rs index 4b3f15d5978c..02cfa6f7296f 100644 --- a/transports/webrtc/src/tokio/transport.rs +++ b/transports/webrtc/src/tokio/transport.rs @@ -238,7 +238,7 @@ impl ListenStream { /// terminate the stream. fn close(&mut self, reason: Result<(), Error>) { match self.report_closed { - Some(_) => log::debug!("Listener was already closed."), + Some(_) => tracing::debug!("Listener was already closed"), None => { // Report the listener event as closed. let _ = self @@ -257,9 +257,8 @@ impl ListenStream { } fn poll_if_watcher(&mut self, cx: &mut Context<'_>) -> Poll<::Item> { - let if_watcher = match self.if_watcher.as_mut() { - Some(w) => w, - None => return Poll::Pending, + let Some(if_watcher) = self.if_watcher.as_mut() else { + return Poll::Pending; }; while let Poll::Ready(event) = if_watcher.poll_if_event(cx) { @@ -412,12 +411,11 @@ fn parse_webrtc_listen_addr(addr: &Multiaddr) -> Option { _ => return None, }; - let port = iter.next()?; - let webrtc = iter.next()?; - - let port = match (port, webrtc) { - (Protocol::Udp(port), Protocol::WebRTCDirect) => port, - _ => return None, + let Protocol::Udp(port) = iter.next()? else { + return None; + }; + let Protocol::WebRTCDirect = iter.next()? 
else { + return None; }; if iter.next().is_some() { diff --git a/transports/webrtc/src/tokio/udp_mux.rs b/transports/webrtc/src/tokio/udp_mux.rs index f978121d01cd..7a8d960826d8 100644 --- a/transports/webrtc/src/tokio/udp_mux.rs +++ b/transports/webrtc/src/tokio/udp_mux.rs @@ -175,7 +175,7 @@ impl UDPMuxNewAddr { None } Err(e) => { - log::debug!("{} (addr={})", e, addr); + tracing::debug!(address=%addr, "{}", e); None } } @@ -337,12 +337,12 @@ impl UDPMuxNewAddr { let conn = match conn { // If we couldn't find the connection based on source address, see if - // this is a STUN mesage and if so if we can find the connection based on ufrag. + // this is a STUN message and if so if we can find the connection based on ufrag. None if is_stun_message(read.filled()) => { match self.conn_from_stun_message(read.filled(), &addr) { Some(Ok(s)) => Some(s), Some(Err(e)) => { - log::debug!("addr={}: Error when querying existing connections: {}", &addr, e); + tracing::debug!(address=%&addr, "Error when querying existing connections: {}", e); continue; } None => None, @@ -357,20 +357,20 @@ impl UDPMuxNewAddr { if !self.new_addrs.contains(&addr) { match ufrag_from_stun_message(read.filled(), false) { Ok(ufrag) => { - log::trace!( - "Notifying about new address addr={} from ufrag={}", - &addr, - ufrag - ); + tracing::trace!( + address=%&addr, + %ufrag, + "Notifying about new address from ufrag", + ); self.new_addrs.insert(addr); return Poll::Ready(UDPMuxEvent::NewAddr( NewAddr { addr, ufrag }, )); } Err(e) => { - log::debug!( - "Unknown address addr={} (non STUN packet: {})", - &addr, + tracing::debug!( + address=%&addr, + "Unknown address (non STUN packet: {})", e ); } @@ -384,10 +384,10 @@ impl UDPMuxNewAddr { async move { if let Err(err) = conn.write_packet(&packet, addr).await { - log::error!( - "Failed to write packet: {} (addr={})", + tracing::error!( + address=%addr, + "Failed to write packet: {}", err, - addr ); } } @@ -401,10 +401,10 @@ impl UDPMuxNewAddr { Poll::Pending => 
{} Poll::Ready(Err(err)) if err.kind() == ErrorKind::TimedOut => {} Poll::Ready(Err(err)) if err.kind() == ErrorKind::ConnectionReset => { - log::debug!("ConnectionReset by remote client {err:?}") + tracing::debug!("ConnectionReset by remote client {err:?}") } Poll::Ready(Err(err)) => { - log::error!("Could not read udp packet: {}", err); + tracing::error!("Could not read udp packet: {}", err); return Poll::Ready(UDPMuxEvent::Error(err)); } } @@ -470,7 +470,7 @@ impl UDPMux for UdpMuxHandle { async fn remove_conn_by_ufrag(&self, ufrag: &str) { if let Err(e) = self.remove_sender.send(ufrag.to_owned()).await { - log::debug!("Failed to send message through channel: {:?}", e); + tracing::debug!("Failed to send message through channel: {:?}", e); } } } @@ -511,12 +511,12 @@ impl UDPMuxWriter for UdpMuxWriterHandle { { Ok(()) => {} Err(e) => { - log::debug!("Failed to send message through channel: {:?}", e); + tracing::debug!("Failed to send message through channel: {:?}", e); return; } } - log::debug!("Registered {} for {}", addr, conn.key()); + tracing::debug!(address=%addr, connection=%conn.key(), "Registered address for connection"); } async fn send_to(&self, buf: &[u8], target: &SocketAddr) -> Result { diff --git a/transports/webrtc/src/tokio/upgrade.rs b/transports/webrtc/src/tokio/upgrade.rs index 414fc2721d05..4145a5e75106 100644 --- a/transports/webrtc/src/tokio/upgrade.rs +++ b/transports/webrtc/src/tokio/upgrade.rs @@ -49,19 +49,16 @@ pub(crate) async fn outbound( server_fingerprint: Fingerprint, id_keys: identity::Keypair, ) -> Result<(PeerId, Connection), Error> { - log::debug!("new outbound connection to {addr})"); + tracing::debug!(address=%addr, "new outbound connection to address"); let (peer_connection, ufrag) = new_outbound_connection(addr, config, udp_mux).await?; let offer = peer_connection.create_offer(None).await?; - log::debug!("created SDP offer for outbound connection: {:?}", offer.sdp); + tracing::debug!(offer=%offer.sdp, "created SDP offer for 
outbound connection"); peer_connection.set_local_description(offer).await?; let answer = sdp::answer(addr, server_fingerprint, &ufrag); - log::debug!( - "calculated SDP answer for outbound connection: {:?}", - answer - ); + tracing::debug!(?answer, "calculated SDP answer for outbound connection"); peer_connection.set_remote_description(answer).await?; // This will start the gathering of ICE candidates. let data_channel = create_substream_for_noise_handshake(&peer_connection).await?; @@ -85,16 +82,16 @@ pub(crate) async fn inbound( remote_ufrag: String, id_keys: identity::Keypair, ) -> Result<(PeerId, Connection), Error> { - log::debug!("new inbound connection from {addr} (ufrag: {remote_ufrag})"); + tracing::debug!(address=%addr, ufrag=%remote_ufrag, "new inbound connection from address"); let peer_connection = new_inbound_connection(addr, config, udp_mux, &remote_ufrag).await?; let offer = sdp::offer(addr, &remote_ufrag); - log::debug!("calculated SDP offer for inbound connection: {:?}", offer); + tracing::debug!(?offer, "calculated SDP offer for inbound connection"); peer_connection.set_remote_description(offer).await?; let answer = peer_connection.create_answer(None).await?; - log::debug!("created SDP answer for inbound connection: {:?}", answer); + tracing::debug!(?answer, "created SDP answer for inbound connection"); peer_connection.set_local_description(answer).await?; // This will start the gathering of ICE candidates. 
let data_channel = create_substream_for_noise_handshake(&peer_connection).await?; diff --git a/transports/webrtc/tests/smoke.rs b/transports/webrtc/tests/smoke.rs index 8e56b99723dd..76e168edfd6f 100644 --- a/transports/webrtc/tests/smoke.rs +++ b/transports/webrtc/tests/smoke.rs @@ -33,10 +33,13 @@ use std::num::NonZeroU8; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn smoke() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (a_peer_id, mut a_transport) = create_transport(); let (b_peer_id, mut b_transport) = create_transport(); @@ -53,7 +56,9 @@ async fn smoke() { // Note: This test should likely be ported to the muxer compliance test suite. #[test] fn concurrent_connections_and_streams_tokio() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let rt = tokio::runtime::Runtime::new().unwrap(); let _guard = rt.enter(); @@ -102,7 +107,11 @@ fn prop(number_listeners: NonZeroU8, number_streams: NonZeroU8) -> quickcheck::T let (listeners_tx, mut listeners_rx) = mpsc::channel(number_listeners); - log::info!("Creating {number_streams} streams on {number_listeners} connections"); + tracing::info!( + stream_count=%number_streams, + connection_count=%number_listeners, + "Creating streams on connections" + ); // Spawn the listener nodes. 
for _ in 0..number_listeners { @@ -169,15 +178,13 @@ fn prop(number_listeners: NonZeroU8, number_streams: NonZeroU8) -> quickcheck::T async fn answer_inbound_streams(mut connection: StreamMuxerBox) { loop { - let mut inbound_stream = match future::poll_fn(|cx| { + let Ok(mut inbound_stream) = future::poll_fn(|cx| { let _ = connection.poll_unpin(cx)?; - connection.poll_inbound_unpin(cx) }) .await - { - Ok(s) => s, - Err(_) => return, + else { + return; }; tokio::spawn(async move { @@ -244,7 +251,7 @@ async fn open_outbound_streams( }); } - log::info!("Created {number_streams} streams"); + tracing::info!(stream_count=%number_streams, "Created streams"); while future::poll_fn(|cx| connection.poll_unpin(cx)) .await @@ -344,7 +351,7 @@ impl Future for ListenUpgrade<'_> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { - match dbg!(self.listener.poll_next_unpin(cx)) { + match self.listener.poll_next_unpin(cx) { Poll::Ready(Some(TransportEvent::Incoming { upgrade, send_back_addr, diff --git a/transports/websocket-websys/CHANGELOG.md b/transports/websocket-websys/CHANGELOG.md new file mode 100644 index 000000000000..3cfb1b2fbf97 --- /dev/null +++ b/transports/websocket-websys/CHANGELOG.md @@ -0,0 +1,16 @@ +## 0.3.1 + +- Add support for different WASM environments by introducing a `WebContext` that + detects and abstracts the `Window` vs the `WorkerGlobalScope` API. + See [PR 4889](https://github.com/libp2p/rust-libp2p/pull/4889). + +## 0.3.0 + + +## 0.2.0 + +- Add Websys Websocket transport. + +## 0.1.0 + +- Crate claimed. 
diff --git a/transports/websocket-websys/Cargo.toml b/transports/websocket-websys/Cargo.toml new file mode 100644 index 000000000000..38ef70a9d0b1 --- /dev/null +++ b/transports/websocket-websys/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "libp2p-websocket-websys" +edition = "2021" +rust-version = "1.60.0" +description = "WebSocket for libp2p under WASM environment" +version = "0.3.1" +authors = ["Vince Vasta "] +license = "MIT" +repository = "https://github.com/libp2p/rust-libp2p" +keywords = ["peer-to-peer", "libp2p", "networking"] +categories = ["network-programming", "asynchronous"] + +[dependencies] +bytes = "1.4.0" +futures = "0.3.30" +js-sys = "0.3.67" +libp2p-core = { workspace = true } +tracing = "0.1.37" +parking_lot = "0.12.1" +send_wrapper = "0.6.0" +thiserror = "1.0.57" +wasm-bindgen = "0.2.90" +web-sys = { version = "0.3.67", features = ["BinaryType", "CloseEvent", "MessageEvent", "WebSocket", "Window", "WorkerGlobalScope"] } + +# Passing arguments to the docsrs builder in order to properly document cfg's. 
+# More information: https://docs.rs/about/builds#cross-compiling +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] +rustc-args = ["--cfg", "docsrs"] + +[dev-dependencies] +libp2p-yamux = { workspace = true } +libp2p-noise = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } diff --git a/transports/websocket-websys/src/lib.rs b/transports/websocket-websys/src/lib.rs new file mode 100644 index 000000000000..5c1a6ebf1c46 --- /dev/null +++ b/transports/websocket-websys/src/lib.rs @@ -0,0 +1,450 @@ +// Copyright (C) 2023 Vince Vasta +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +//! Libp2p websocket transports built on [web-sys](https://rustwasm.github.io/wasm-bindgen/web-sys/index.html). 
+ +mod web_context; + +use bytes::BytesMut; +use futures::task::AtomicWaker; +use futures::{future::Ready, io, prelude::*}; +use js_sys::Array; +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + transport::{ListenerId, TransportError, TransportEvent}, +}; +use send_wrapper::SendWrapper; +use std::cmp::min; +use std::rc::Rc; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Mutex; +use std::{pin::Pin, task::Context, task::Poll}; +use wasm_bindgen::{prelude::*, JsCast}; +use web_sys::{CloseEvent, Event, MessageEvent, WebSocket}; + +use crate::web_context::WebContext; + +/// A Websocket transport that can be used in a wasm environment. +/// +/// ## Example +/// +/// To create an authenticated transport instance with Noise protocol and Yamux: +/// +/// ``` +/// # use libp2p_core::{upgrade::Version, Transport}; +/// # use libp2p_identity::Keypair; +/// # use libp2p_yamux as yamux; +/// # use libp2p_noise as noise; +/// let local_key = Keypair::generate_ed25519(); +/// let transport = libp2p_websocket_websys::Transport::default() +/// .upgrade(Version::V1) +/// .authenticate(noise::Config::new(&local_key).unwrap()) +/// .multiplex(yamux::Config::default()) +/// .boxed(); +/// ``` +/// +#[derive(Default)] +pub struct Transport { + _private: (), +} + +/// Arbitrary, maximum amount we are willing to buffer before we throttle our user. 
+const MAX_BUFFER: usize = 1024 * 1024; + +impl libp2p_core::Transport for Transport { + type Output = Connection; + type Error = Error; + type ListenerUpgrade = Ready>; + type Dial = Pin> + Send>>; + + fn listen_on( + &mut self, + _: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + Err(TransportError::MultiaddrNotSupported(addr)) + } + + fn remove_listener(&mut self, _id: ListenerId) -> bool { + false + } + + fn dial(&mut self, addr: Multiaddr) -> Result> { + let url = extract_websocket_url(&addr) + .ok_or_else(|| TransportError::MultiaddrNotSupported(addr))?; + + Ok(async move { + let socket = match WebSocket::new(&url) { + Ok(ws) => ws, + Err(_) => return Err(Error::invalid_websocket_url(&url)), + }; + + Ok(Connection::new(socket)) + } + .boxed()) + } + + fn dial_as_listener( + &mut self, + addr: Multiaddr, + ) -> Result> { + Err(TransportError::MultiaddrNotSupported(addr)) + } + + fn poll( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> std::task::Poll> { + Poll::Pending + } + + fn address_translation(&self, _listen: &Multiaddr, _observed: &Multiaddr) -> Option { + None + } +} + +// Try to convert Multiaddr to a Websocket url. 
+fn extract_websocket_url(addr: &Multiaddr) -> Option { + let mut protocols = addr.iter(); + let host_port = match (protocols.next(), protocols.next()) { + (Some(Protocol::Ip4(ip)), Some(Protocol::Tcp(port))) => { + format!("{ip}:{port}") + } + (Some(Protocol::Ip6(ip)), Some(Protocol::Tcp(port))) => { + format!("[{ip}]:{port}") + } + (Some(Protocol::Dns(h)), Some(Protocol::Tcp(port))) + | (Some(Protocol::Dns4(h)), Some(Protocol::Tcp(port))) + | (Some(Protocol::Dns6(h)), Some(Protocol::Tcp(port))) + | (Some(Protocol::Dnsaddr(h)), Some(Protocol::Tcp(port))) => { + format!("{}:{}", &h, port) + } + _ => return None, + }; + + let (scheme, wspath) = match protocols.next() { + Some(Protocol::Ws(path)) => ("ws", path.into_owned()), + Some(Protocol::Wss(path)) => ("wss", path.into_owned()), + _ => return None, + }; + + Some(format!("{scheme}://{host_port}{wspath}")) +} + +#[derive(thiserror::Error, Debug)] +#[error("{msg}")] +pub struct Error { + msg: String, +} + +impl Error { + fn invalid_websocket_url(url: &str) -> Self { + Self { + msg: format!("Invalid websocket url: {url}"), + } + } +} + +/// A Websocket connection created by the [`Transport`]. +pub struct Connection { + inner: SendWrapper, +} + +struct Inner { + socket: WebSocket, + + new_data_waker: Rc, + read_buffer: Rc>, + + /// Waker for when we are waiting for the WebSocket to be opened. + open_waker: Rc, + + /// Waker for when we are waiting to write (again) to the WebSocket because we previously exceeded the [`MAX_BUFFER`] threshold. + write_waker: Rc, + + /// Waker for when we are waiting for the WebSocket to be closed. + close_waker: Rc, + + /// Whether the connection errored. + errored: Rc, + + // Store the closures for proper garbage collection. + // These are wrapped in an [`Rc`] so we can implement [`Clone`]. 
+ _on_open_closure: Rc>, + _on_buffered_amount_low_closure: Rc>, + _on_close_closure: Rc>, + _on_error_closure: Rc>, + _on_message_closure: Rc>, + buffered_amount_low_interval: i32, +} + +impl Inner { + fn ready_state(&self) -> ReadyState { + match self.socket.ready_state() { + 0 => ReadyState::Connecting, + 1 => ReadyState::Open, + 2 => ReadyState::Closing, + 3 => ReadyState::Closed, + unknown => unreachable!("invalid `ReadyState` value: {unknown}"), + } + } + + fn poll_open(&mut self, cx: &Context<'_>) -> Poll> { + match self.ready_state() { + ReadyState::Connecting => { + self.open_waker.register(cx.waker()); + Poll::Pending + } + ReadyState::Open => Poll::Ready(Ok(())), + ReadyState::Closed | ReadyState::Closing => { + Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + } + } + } + + fn error_barrier(&self) -> io::Result<()> { + if self.errored.load(Ordering::SeqCst) { + return Err(io::ErrorKind::BrokenPipe.into()); + } + + Ok(()) + } +} + +/// The state of the WebSocket. +/// +/// See . 
+#[derive(PartialEq)] +enum ReadyState { + Connecting, + Open, + Closing, + Closed, +} + +impl Connection { + fn new(socket: WebSocket) -> Self { + socket.set_binary_type(web_sys::BinaryType::Arraybuffer); + + let open_waker = Rc::new(AtomicWaker::new()); + let onopen_closure = Closure::::new({ + let open_waker = open_waker.clone(); + move |_| { + open_waker.wake(); + } + }); + socket.set_onopen(Some(onopen_closure.as_ref().unchecked_ref())); + + let close_waker = Rc::new(AtomicWaker::new()); + let onclose_closure = Closure::::new({ + let close_waker = close_waker.clone(); + move |_| { + close_waker.wake(); + } + }); + socket.set_onclose(Some(onclose_closure.as_ref().unchecked_ref())); + + let errored = Rc::new(AtomicBool::new(false)); + let onerror_closure = Closure::::new({ + let errored = errored.clone(); + move |_| { + errored.store(true, Ordering::SeqCst); + } + }); + socket.set_onerror(Some(onerror_closure.as_ref().unchecked_ref())); + + let read_buffer = Rc::new(Mutex::new(BytesMut::new())); + let new_data_waker = Rc::new(AtomicWaker::new()); + let onmessage_closure = Closure::::new({ + let read_buffer = read_buffer.clone(); + let new_data_waker = new_data_waker.clone(); + let errored = errored.clone(); + move |e: MessageEvent| { + let data = js_sys::Uint8Array::new(&e.data()); + + let mut read_buffer = read_buffer.lock().unwrap(); + + if read_buffer.len() + data.length() as usize > MAX_BUFFER { + tracing::warn!("Remote is overloading us with messages, closing connection"); + errored.store(true, Ordering::SeqCst); + + return; + } + + read_buffer.extend_from_slice(&data.to_vec()); + new_data_waker.wake(); + } + }); + socket.set_onmessage(Some(onmessage_closure.as_ref().unchecked_ref())); + + let write_waker = Rc::new(AtomicWaker::new()); + let on_buffered_amount_low_closure = Closure::::new({ + let write_waker = write_waker.clone(); + let socket = socket.clone(); + move |_| { + if socket.buffered_amount() == 0 { + write_waker.wake(); + } + } + }); + let 
buffered_amount_low_interval = WebContext::new() + .expect("to have a window or worker context") + .set_interval_with_callback_and_timeout_and_arguments( + on_buffered_amount_low_closure.as_ref().unchecked_ref(), + 100, // Chosen arbitrarily and likely worth tuning. Due to low impact of the /ws transport, no further effort was invested at the time. + &Array::new(), + ) + .expect("to be able to set an interval"); + + Self { + inner: SendWrapper::new(Inner { + socket, + new_data_waker, + read_buffer, + open_waker, + write_waker, + close_waker, + errored, + _on_open_closure: Rc::new(onopen_closure), + _on_buffered_amount_low_closure: Rc::new(on_buffered_amount_low_closure), + _on_close_closure: Rc::new(onclose_closure), + _on_error_closure: Rc::new(onerror_closure), + _on_message_closure: Rc::new(onmessage_closure), + buffered_amount_low_interval, + }), + } + } + + fn buffered_amount(&self) -> usize { + self.inner.socket.buffered_amount() as usize + } +} + +impl AsyncRead for Connection { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + let this = self.get_mut(); + this.inner.error_barrier()?; + futures::ready!(this.inner.poll_open(cx))?; + + let mut read_buffer = this.inner.read_buffer.lock().unwrap(); + + if read_buffer.is_empty() { + this.inner.new_data_waker.register(cx.waker()); + return Poll::Pending; + } + + // Ensure that we: + // - at most return what the caller can read (`buf.len()`) + // - at most what we have (`read_buffer.len()`) + let split_index = min(buf.len(), read_buffer.len()); + + let bytes_to_return = read_buffer.split_to(split_index); + let len = bytes_to_return.len(); + buf[..len].copy_from_slice(&bytes_to_return); + + Poll::Ready(Ok(len)) + } +} + +impl AsyncWrite for Connection { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + let this = self.get_mut(); + + this.inner.error_barrier()?; + futures::ready!(this.inner.poll_open(cx))?; + + 
debug_assert!(this.buffered_amount() <= MAX_BUFFER); + let remaining_space = MAX_BUFFER - this.buffered_amount(); + + if remaining_space == 0 { + this.inner.write_waker.register(cx.waker()); + return Poll::Pending; + } + + let bytes_to_send = min(buf.len(), remaining_space); + + if this + .inner + .socket + .send_with_u8_array(&buf[..bytes_to_send]) + .is_err() + { + return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())); + } + + Poll::Ready(Ok(bytes_to_send)) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.buffered_amount() == 0 { + return Poll::Ready(Ok(())); + } + + self.inner.error_barrier()?; + + self.inner.write_waker.register(cx.waker()); + Poll::Pending + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + const REGULAR_CLOSE: u16 = 1000; // See https://www.rfc-editor.org/rfc/rfc6455.html#section-7.4.1. + + if self.inner.ready_state() == ReadyState::Closed { + return Poll::Ready(Ok(())); + } + + self.inner.error_barrier()?; + + if self.inner.ready_state() != ReadyState::Closing { + let _ = self + .inner + .socket + .close_with_code_and_reason(REGULAR_CLOSE, "user initiated"); + } + + self.inner.close_waker.register(cx.waker()); + Poll::Pending + } +} + +impl Drop for Connection { + fn drop(&mut self) { + const GO_AWAY_STATUS_CODE: u16 = 1001; // See https://www.rfc-editor.org/rfc/rfc6455.html#section-7.4.1. 
+ + if let ReadyState::Connecting | ReadyState::Open = self.inner.ready_state() { + let _ = self + .inner + .socket + .close_with_code_and_reason(GO_AWAY_STATUS_CODE, "connection dropped"); + } + + WebContext::new() + .expect("to have a window or worker context") + .clear_interval_with_handle(self.inner.buffered_amount_low_interval); + } +} diff --git a/transports/websocket-websys/src/web_context.rs b/transports/websocket-websys/src/web_context.rs new file mode 100644 index 000000000000..c514435d2bb5 --- /dev/null +++ b/transports/websocket-websys/src/web_context.rs @@ -0,0 +1,57 @@ +use wasm_bindgen::{prelude::*, JsCast}; +use web_sys::window; + +/// Web context that abstract the window vs web worker API +#[derive(Debug)] +pub(crate) enum WebContext { + Window(web_sys::Window), + Worker(web_sys::WorkerGlobalScope), +} + +impl WebContext { + pub(crate) fn new() -> Option { + match window() { + Some(window) => Some(Self::Window(window)), + None => { + #[wasm_bindgen] + extern "C" { + type Global; + + #[wasm_bindgen(method, getter, js_name = WorkerGlobalScope)] + fn worker(this: &Global) -> JsValue; + } + let global: Global = js_sys::global().unchecked_into(); + if !global.worker().is_undefined() { + Some(Self::Worker(global.unchecked_into())) + } else { + None + } + } + } + } + + /// The `setInterval()` method. + pub(crate) fn set_interval_with_callback_and_timeout_and_arguments( + &self, + handler: &::js_sys::Function, + timeout: i32, + arguments: &::js_sys::Array, + ) -> Result { + match self { + WebContext::Window(w) => { + w.set_interval_with_callback_and_timeout_and_arguments(handler, timeout, arguments) + } + WebContext::Worker(w) => { + w.set_interval_with_callback_and_timeout_and_arguments(handler, timeout, arguments) + } + } + } + + /// The `clearInterval()` method. 
+ pub(crate) fn clear_interval_with_handle(&self, handle: i32) { + match self { + WebContext::Window(w) => w.clear_interval_with_handle(handle), + WebContext::Worker(w) => w.clear_interval_with_handle(handle), + } + } +} diff --git a/transports/websocket/CHANGELOG.md b/transports/websocket/CHANGELOG.md index a93b14849465..192b1fa094ef 100644 --- a/transports/websocket/CHANGELOG.md +++ b/transports/websocket/CHANGELOG.md @@ -1,3 +1,6 @@ +## 0.43.0 + + ## 0.42.1 - Bump `futures-rustls` to `0.24.0`. diff --git a/transports/websocket/Cargo.toml b/transports/websocket/Cargo.toml index 62a5129cbfa0..385e292103c4 100644 --- a/transports/websocket/Cargo.toml +++ b/transports/websocket/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-websocket" edition = "2021" rust-version = { workspace = true } description = "WebSocket transport for libp2p" -version = "0.42.1" +version = "0.43.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,22 +13,23 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures-rustls = "0.24.0" either = "1.9.0" -futures = "0.3.28" +futures = "0.3.30" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" parking_lot = "0.12.0" -quicksink = "0.1" +pin-project-lite = "0.2.13" rw-stream-sink = { workspace = true } soketto = "0.7.0" -url = "2.4" +tracing = "0.1.37" +url = "2.5" webpki-roots = "0.25" [dev-dependencies] libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-dns = { workspace = true, features = ["async-std"] } +libp2p-identity = { workspace = true, features = ["rand"] } async-std = { version = "1.6.5", features = ["attributes"] } -rcgen = "0.10.0" +rcgen = "0.11.3" # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/websocket/src/framed.rs b/transports/websocket/src/framed.rs index fc3f6514f12b..3593e1eaff2d 100644 --- a/transports/websocket/src/framed.rs +++ b/transports/websocket/src/framed.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::{error::Error, tls}; +use crate::{error::Error, quicksink, tls}; use either::Either; use futures::{future::BoxFuture, prelude::*, ready, stream::BoxStream}; use futures_rustls::{client, rustls, server}; @@ -28,7 +28,6 @@ use libp2p_core::{ transport::{ListenerId, TransportError, TransportEvent}, Transport, }; -use log::{debug, trace}; use parking_lot::Mutex; use soketto::{ connection::{self, CloseReason}, @@ -127,13 +126,13 @@ where if self.tls_config.server.is_some() { p } else { - debug!("/wss address but TLS server support is not configured"); + tracing::debug!("/wss address but TLS server support is not configured"); return Err(TransportError::MultiaddrNotSupported(addr)); } } Some(p @ Protocol::Ws(_)) => p, _ => { - debug!("{} is not a websocket multiaddr", addr); + tracing::debug!(address=%addr, "Address is not a websocket multiaddr"); return Err(TransportError::MultiaddrNotSupported(addr)); } }; @@ -187,7 +186,7 @@ where .get(&listener_id) .expect("Protocol was inserted in Transport::listen_on."); listen_addr.push(proto.clone()); - debug!("Listening on {}", listen_addr); + tracing::debug!(address=%listen_addr, "Listening on address"); TransportEvent::NewAddress { listener_id, listen_addr, @@ -288,7 +287,7 @@ where { Ok(Either::Left(redirect)) => { if remaining_redirects == 0 { - debug!("Too many redirects (> {})", max_redirects); + tracing::debug!(%max_redirects, "Too many redirects"); return Err(Error::TooManyRedirects); } remaining_redirects -= 1; @@ -310,7 +309,7 @@ where tls_config: tls::Config, role_override: Endpoint, ) -> Result>, Error> { - trace!("Dialing 
websocket address: {:?}", addr); + tracing::trace!(address=?addr, "Dialing websocket address"); let dial = match role_override { Endpoint::Dialer => transport.lock().dial(addr.tcp_addr), @@ -322,19 +321,19 @@ where })?; let stream = dial.map_err(Error::Transport).await?; - trace!("TCP connection to {} established.", addr.host_port); + tracing::trace!(port=%addr.host_port, "TCP connection established"); let stream = if addr.use_tls { // begin TLS session let dns_name = addr .dns_name .expect("for use_tls we have checked that dns_name is some"); - trace!("Starting TLS handshake with {:?}", dns_name); + tracing::trace!(?dns_name, "Starting TLS handshake"); let stream = tls_config .client .connect(dns_name.clone(), stream) .map_err(|e| { - debug!("TLS handshake with {:?} failed: {}", dns_name, e); + tracing::debug!(?dns_name, "TLS handshake failed: {}", e); Error::Tls(tls::Error::from(e)) }) .await?; @@ -346,7 +345,7 @@ where future::Either::Right(stream) }; - trace!("Sending websocket handshake to {}", addr.host_port); + tracing::trace!(port=%addr.host_port, "Sending websocket handshake"); let mut client = handshake::Client::new(stream, &addr.host_port, addr.path.as_ref()); @@ -359,9 +358,10 @@ where status_code, location, } => { - debug!( - "received redirect ({}); location: {}", - status_code, location + tracing::debug!( + %status_code, + %location, + "received redirect" ); Ok(Either::Left(location)) } @@ -370,7 +370,7 @@ where Err(Error::Handshake(msg.into())) } handshake::ServerResponse::Accepted { .. 
} => { - trace!("websocket handshake with {} successful", addr.host_port); + tracing::trace!(port=%addr.host_port, "websocket handshake successful"); Ok(Either::Right(Connection::new(client.into_builder()))) } } @@ -388,7 +388,7 @@ where async move { let stream = upgrade.map_err(Error::Transport).await?; - trace!("incoming connection from {}", remote_addr); + tracing::trace!(address=%remote_addr, "incoming connection from address"); let stream = if use_tls { // begin TLS session @@ -396,12 +396,12 @@ where .server .expect("for use_tls we checked server is not none"); - trace!("awaiting TLS handshake with {}", remote_addr); + tracing::trace!(address=%remote_addr, "awaiting TLS handshake with address"); let stream = server .accept(stream) .map_err(move |e| { - debug!("TLS handshake with {} failed: {}", remote_addr, e); + tracing::debug!(address=%remote_addr, "TLS handshake with address failed: {}", e); Error::Tls(tls::Error::from(e)) }) .await?; @@ -414,9 +414,9 @@ where future::Either::Right(stream) }; - trace!( - "receiving websocket handshake request from {}", - remote_addr2 + tracing::trace!( + address=%remote_addr2, + "receiving websocket handshake request from address" ); let mut server = handshake::Server::new(stream); @@ -429,9 +429,9 @@ where request.key() }; - trace!( - "accepting websocket handshake request from {}", - remote_addr2 + tracing::trace!( + address=%remote_addr2, + "accepting websocket handshake request from address" ); let response = handshake::server::Response::Accept { @@ -511,7 +511,7 @@ fn parse_ws_dial_addr(addr: Multiaddr) -> Result> { Some(Protocol::Ws(path)) => break (false, path.into_owned()), Some(Protocol::Wss(path)) => { if dns_name.is_none() { - debug!("Missing DNS name in WSS address: {}", addr); + tracing::debug!(address=%addr, "Missing DNS name in WSS address"); return Err(Error::InvalidMultiaddr(addr)); } break (true, path.into_owned()); } @@ -556,13 +556,13 @@ fn location_to_multiaddr(location: &str) -> Result> { } else if 
s.eq_ignore_ascii_case("http") | s.eq_ignore_ascii_case("ws") { a.push(Protocol::Ws(url.path().into())) } else { - debug!("unsupported scheme: {}", s); + tracing::debug!(scheme=%s, "unsupported scheme"); return Err(Error::InvalidRedirectLocation); } Ok(a) } Err(e) => { - debug!("failed to parse url as multi-address: {:?}", e); + tracing::debug!("failed to parse url as multi-address: {:?}", e); Err(Error::InvalidRedirectLocation) } } diff --git a/transports/websocket/src/lib.rs b/transports/websocket/src/lib.rs index d7dd7628888b..e0b3d09ca256 100644 --- a/transports/websocket/src/lib.rs +++ b/transports/websocket/src/lib.rs @@ -24,6 +24,7 @@ pub mod error; pub mod framed; +mod quicksink; pub mod tls; use error::Error; diff --git a/transports/websocket/src/quicksink.rs b/transports/websocket/src/quicksink.rs new file mode 100644 index 000000000000..d9edb4dfe0de --- /dev/null +++ b/transports/websocket/src/quicksink.rs @@ -0,0 +1,350 @@ +// Copyright (c) 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 +// or the MIT +// license , at your +// option. All files in the project carrying such notice may not be copied, +// modified, or distributed except according to those terms. +// +// Forked into rust-libp2p and further distributed under the MIT license. + +// Create a [`Sink`] implementation from an initial value and a closure +// returning a [`Future`]. +// +// This is very similar to how `futures::stream::unfold` creates a `Stream` +// implementation from a seed value and a future-returning closure. +// +// # Examples +// +// ```no_run +// use async_std::io; +// use futures::prelude::*; +// use crate::quicksink::Action; +// +// crate::quicksink::make_sink(io::stdout(), |mut stdout, action| async move { +// match action { +// Action::Send(x) => stdout.write_all(x).await?, +// Action::Flush => stdout.flush().await?, +// Action::Close => stdout.close().await? 
+// } +// Ok::<_, io::Error>(stdout) +// }); +// ``` +// +// # Panics +// +// - If any of the [`Sink`] methods produce an error, the sink transitions +// to a failure state and none of its methods must be called afterwards or +// else a panic will occur. +// - If [`Sink::poll_close`] has been called, no other sink method must be +// called afterwards or else a panic will be caused. + +use futures::{ready, sink::Sink}; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +/// Returns a `Sink` impl based on the initial value and the given closure. +/// +/// The closure will be applied to the initial value and an [`Action`] that +/// informs it about the action it should perform. The returned [`Future`] +/// will resolve to another value and the process starts over using this +/// output. +pub(crate) fn make_sink(init: S, f: F) -> SinkImpl +where + F: FnMut(S, Action) -> T, + T: Future>, +{ + SinkImpl { + lambda: f, + future: None, + param: Some(init), + state: State::Empty, + _mark: std::marker::PhantomData, + } +} + +/// The command given to the closure so that it can perform appropriate action. +/// +/// Presumably the closure encapsulates a resource to perform I/O. The commands +/// correspond to methods of the [`Sink`] trait and provide the closure with +/// sufficient information to know what kind of action to perform with it. +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) enum Action { + /// Send the given value. + /// Corresponds to [`Sink::start_send`]. + Send(A), + /// Flush the resource. + /// Corresponds to [`Sink::poll_flush`]. + Flush, + /// Close the resource. + /// Corresponds to [`Sink::poll_close`]. + Close, +} + +/// The various states the `Sink` may be in. +#[derive(Debug, PartialEq, Eq)] +enum State { + /// The `Sink` is idle. + Empty, + /// The `Sink` is sending a value. + Sending, + /// The `Sink` is flushing its resource. + Flushing, + /// The `Sink` is closing its resource. 
+ Closing, + /// The `Sink` is closed (terminal state). + Closed, + /// The `Sink` experienced an error (terminal state). + Failed, +} + +pin_project! { + /// `SinkImpl` implements the `Sink` trait. + #[derive(Debug)] + pub(crate) struct SinkImpl { + lambda: F, + #[pin] future: Option, + param: Option, + state: State, + _mark: std::marker::PhantomData<(A, E)> + } +} + +impl Sink for SinkImpl +where + F: FnMut(S, Action) -> T, + T: Future>, +{ + type Error = E; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + match this.state { + State::Sending | State::Flushing => { + match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) { + Ok(p) => { + this.future.set(None); + *this.param = Some(p); + *this.state = State::Empty; + Poll::Ready(Ok(())) + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + Poll::Ready(Err(e)) + } + } + } + State::Closing => match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) { + Ok(_) => { + this.future.set(None); + *this.state = State::Closed; + panic!("SinkImpl::poll_ready called on a closing sink.") + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + Poll::Ready(Err(e)) + } + }, + State::Empty => { + assert!(this.param.is_some()); + Poll::Ready(Ok(())) + } + State::Closed => panic!("SinkImpl::poll_ready called on a closed sink."), + State::Failed => panic!("SinkImpl::poll_ready called after error."), + } + } + + fn start_send(self: Pin<&mut Self>, item: A) -> Result<(), Self::Error> { + assert_eq!(State::Empty, self.state); + let mut this = self.project(); + let param = this.param.take().unwrap(); + let future = (this.lambda)(param, Action::Send(item)); + this.future.set(Some(future)); + *this.state = State::Sending; + Ok(()) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + loop { + let mut this = self.as_mut().project(); + match this.state { + State::Empty => { + if let Some(p) = 
this.param.take() { + let future = (this.lambda)(p, Action::Flush); + this.future.set(Some(future)); + *this.state = State::Flushing + } else { + return Poll::Ready(Ok(())); + } + } + State::Sending => match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) + { + Ok(p) => { + this.future.set(None); + *this.param = Some(p); + *this.state = State::Empty + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + return Poll::Ready(Err(e)); + } + }, + State::Flushing => { + match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) { + Ok(p) => { + this.future.set(None); + *this.param = Some(p); + *this.state = State::Empty; + return Poll::Ready(Ok(())); + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + return Poll::Ready(Err(e)); + } + } + } + State::Closing => match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) + { + Ok(_) => { + this.future.set(None); + *this.state = State::Closed; + return Poll::Ready(Ok(())); + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + return Poll::Ready(Err(e)); + } + }, + State::Closed => return Poll::Ready(Ok(())), + State::Failed => panic!("SinkImpl::poll_flush called after error."), + } + } + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + loop { + let mut this = self.as_mut().project(); + match this.state { + State::Empty => { + if let Some(p) = this.param.take() { + let future = (this.lambda)(p, Action::Close); + this.future.set(Some(future)); + *this.state = State::Closing; + } else { + return Poll::Ready(Ok(())); + } + } + State::Sending => match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) + { + Ok(p) => { + this.future.set(None); + *this.param = Some(p); + *this.state = State::Empty + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + return Poll::Ready(Err(e)); + } + }, + State::Flushing => { + match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) { + 
Ok(p) => { + this.future.set(None); + *this.param = Some(p); + *this.state = State::Empty + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + return Poll::Ready(Err(e)); + } + } + } + State::Closing => match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) + { + Ok(_) => { + this.future.set(None); + *this.state = State::Closed; + return Poll::Ready(Ok(())); + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + return Poll::Ready(Err(e)); + } + }, + State::Closed => return Poll::Ready(Ok(())), + State::Failed => panic!("SinkImpl::poll_closed called after error."), + } + } + } +} + +#[cfg(test)] +mod tests { + use crate::quicksink::{make_sink, Action}; + use async_std::{io, task}; + use futures::{channel::mpsc, prelude::*, stream}; + + #[test] + fn smoke_test() { + task::block_on(async { + let sink = make_sink(io::stdout(), |mut stdout, action| async move { + match action { + Action::Send(x) => stdout.write_all(x).await?, + Action::Flush => stdout.flush().await?, + Action::Close => stdout.close().await?, + } + Ok::<_, io::Error>(stdout) + }); + + let values = vec![Ok(&b"hello\n"[..]), Ok(&b"world\n"[..])]; + assert!(stream::iter(values).forward(sink).await.is_ok()) + }) + } + + #[test] + fn replay() { + task::block_on(async { + let (tx, rx) = mpsc::channel(5); + + let sink = make_sink(tx, |mut tx, action| async move { + tx.send(action.clone()).await?; + if action == Action::Close { + tx.close().await? 
+ } + Ok::<_, mpsc::SendError>(tx) + }); + + futures::pin_mut!(sink); + + let expected = [ + Action::Send("hello\n"), + Action::Flush, + Action::Send("world\n"), + Action::Flush, + Action::Close, + ]; + + for &item in &["hello\n", "world\n"] { + sink.send(item).await.unwrap() + } + + sink.close().await.unwrap(); + + let actual = rx.collect::>().await; + + assert_eq!(&expected[..], &actual[..]) + }); + } +} diff --git a/transports/webtransport-websys/CHANGELOG.md b/transports/webtransport-websys/CHANGELOG.md index 13ca1b5ed0a9..b368a943395c 100644 --- a/transports/webtransport-websys/CHANGELOG.md +++ b/transports/webtransport-websys/CHANGELOG.md @@ -1,3 +1,6 @@ +## 0.2.0 + + ## 0.1.0 * Initial implementation of WebTranport transport using web-sys bindings. See [PR 4015]. diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml index 54ecc5b9b405..7e797a1671e2 100644 --- a/transports/webtransport-websys/Cargo.toml +++ b/transports/webtransport-websys/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-webtransport-websys" edition = "2021" rust-version = { workspace = true } description = "WebTransport for libp2p under WASM environment" -version = "0.1.0" +version = "0.2.0" authors = [ "Yiannis Marangos ", "oblique ", @@ -14,19 +14,19 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -futures = "0.3.28" -js-sys = "0.3.64" +futures = "0.3.30" +js-sys = "0.3.67" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } libp2p-noise = { workspace = true } -log = "0.4.20" multiaddr = { workspace = true } multihash = { workspace = true } send_wrapper = { version = "0.6.0", features = ["futures"] } -thiserror = "1.0.49" -wasm-bindgen = "0.2.87" -wasm-bindgen-futures = "0.4.37" -web-sys = { version = "0.3.64", features = [ +thiserror = "1.0.57" +tracing = "0.1.37" +wasm-bindgen = "0.2.90" +wasm-bindgen-futures = "0.4.41" +web-sys = { version = "0.3.67", 
features = [ "ReadableStreamDefaultReader", "WebTransport", "WebTransportBidirectionalStream", diff --git a/transports/webtransport-websys/src/connection.rs b/transports/webtransport-websys/src/connection.rs index 9ea1dbefd1c8..982f9e5a32c1 100644 --- a/transports/webtransport-websys/src/connection.rs +++ b/transports/webtransport-websys/src/connection.rs @@ -1,6 +1,7 @@ use futures::FutureExt; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use libp2p_core::{OutboundUpgrade, UpgradeInfo}; +use libp2p_core::upgrade::OutboundConnectionUpgrade; +use libp2p_core::UpgradeInfo; use libp2p_identity::{Keypair, PeerId}; use multihash::Multihash; use send_wrapper::SendWrapper; diff --git a/transports/webtransport-websys/src/error.rs b/transports/webtransport-websys/src/error.rs index ad85cab75370..704cfffb1fd1 100644 --- a/transports/webtransport-websys/src/error.rs +++ b/transports/webtransport-websys/src/error.rs @@ -11,6 +11,7 @@ pub enum Error { Noise(#[from] libp2p_noise::Error), #[error("JavaScript error: {0}")] + #[allow(clippy::enum_variant_names)] JsError(String), #[error("JavaScript typecasting failed")] diff --git a/transports/webtransport-websys/src/transport.rs b/transports/webtransport-websys/src/transport.rs index dcb3639a1942..cb556ffef99e 100644 --- a/transports/webtransport-websys/src/transport.rs +++ b/transports/webtransport-websys/src/transport.rs @@ -65,7 +65,7 @@ impl libp2p_core::Transport for Transport { fn dial(&mut self, addr: Multiaddr) -> Result> { let endpoint = Endpoint::from_multiaddr(&addr).map_err(|e| match e { e @ Error::InvalidMultiaddr(_) => { - log::warn!("{}", e); + tracing::warn!("{}", e); TransportError::MultiaddrNotSupported(addr) } e => TransportError::Other(e), diff --git a/wasm-tests/webtransport-tests/Cargo.toml b/wasm-tests/webtransport-tests/Cargo.toml index 8d3f756ecb78..f860260a996b 100644 --- a/wasm-tests/webtransport-tests/Cargo.toml +++ b/wasm-tests/webtransport-tests/Cargo.toml @@ -5,19 +5,22 @@ edition = 
"2021" license = "MIT" publish = false +[package.metadata.release] +release = false + [dependencies] -futures = "0.3.28" -getrandom = { version = "0.2.9", features = ["js"] } +futures = "0.3.30" +getrandom = { version = "0.2.12", features = ["js"] } libp2p-core = { workspace = true } -libp2p-identity = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } libp2p-noise = { workspace = true } libp2p-webtransport-websys = { workspace = true } multiaddr = { workspace = true } multihash = { workspace = true } -wasm-bindgen = "0.2.87" -wasm-bindgen-futures = "0.4.37" -wasm-bindgen-test = "0.3.37" -web-sys = { version = "0.3.64", features = ["Response", "Window"] } +wasm-bindgen = "0.2.90" +wasm-bindgen-futures = "0.4.41" +wasm-bindgen-test = "0.3.41" +web-sys = { version = "0.3.67", features = ["Response", "Window"] } [lints] workspace = true diff --git a/wasm-tests/webtransport-tests/echo-server/go.mod b/wasm-tests/webtransport-tests/echo-server/go.mod index 669124e1f690..9dde12fdcfe3 100644 --- a/wasm-tests/webtransport-tests/echo-server/go.mod +++ b/wasm-tests/webtransport-tests/echo-server/go.mod @@ -52,12 +52,12 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.7.0 // indirect + golang.org/x/crypto v0.17.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/mod v0.10.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/sys v0.7.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.7.0 // indirect google.golang.org/protobuf v1.30.0 // indirect lukechampine.com/blake3 v1.1.7 // indirect diff --git a/wasm-tests/webtransport-tests/echo-server/go.sum b/wasm-tests/webtransport-tests/echo-server/go.sum index 2e4b66d2ec09..95c1618a0cbb 100644 --- 
a/wasm-tests/webtransport-tests/echo-server/go.sum +++ b/wasm-tests/webtransport-tests/echo-server/go.sum @@ -224,8 +224,8 @@ golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= @@ -251,8 +251,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -277,14 +277,14 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

where - TCodec: Codec, + P: AsRef + Clone, { - pub(crate) codec: TCodec, - pub(crate) protocols: SmallVec<[TCodec::Protocol; 2]>, - pub(crate) request_id: RequestId, - pub(crate) request: TCodec::Request, -} + type Output = (Stream, P); + type Error = void::Void; + type Future = Ready>; -impl fmt::Debug for RequestProtocol -where - TCodec: Codec, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RequestProtocol") - .field("request_id", &self.request_id) - .finish() - } -} - -impl UpgradeInfo for RequestProtocol -where - TCodec: Codec, -{ - type Info = TCodec::Protocol; - type InfoIter = smallvec::IntoIter<[Self::Info; 2]>; - - fn protocol_info(&self) -> Self::InfoIter { - self.protocols.clone().into_iter() - } -} - -impl OutboundUpgrade for RequestProtocol -where - TCodec: Codec + Send + 'static, -{ - type Output = TCodec::Response; - type Error = io::Error; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(mut self, mut io: Stream, protocol: Self::Info) -> Self::Future { - async move { - let write = self.codec.write_request(&protocol, &mut io, self.request); - write.await?; - io.close().await?; - let read = self.codec.read_response(&protocol, &mut io); - let response = read.await?; - Ok(response) - } - .boxed() + fn upgrade_outbound(self, io: Stream, protocol: Self::Info) -> Self::Future { + ready(Ok((io, protocol))) } } diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs index 7b1a80884430..4362b3255ad3 100644 --- a/protocols/request-response/src/lib.rs +++ b/protocols/request-response/src/lib.rs @@ -76,21 +76,21 @@ pub mod json; pub use codec::Codec; pub use handler::ProtocolSupport; -use crate::handler::protocol::RequestProtocol; +use crate::handler::OutboundMessage; use futures::channel::oneshot; use handler::Handler; use libp2p_core::{ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ - behaviour::{AddressChange, ConnectionClosed, 
ConnectionEstablished, DialFailure, FromSwarm}, + behaviour::{AddressChange, ConnectionClosed, DialFailure, FromSwarm}, dial_opts::DialOpts, - ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, PollParameters, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour, NotifyHandler, + PeerAddresses, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use smallvec::SmallVec; use std::{ collections::{HashMap, HashSet, VecDeque}, - fmt, + fmt, io, sync::{atomic::AtomicU64, Arc}, task::{Context, Poll}, time::Duration, @@ -102,7 +102,7 @@ pub enum Message { /// A request message. Request { /// The ID of this request. - request_id: RequestId, + request_id: InboundRequestId, /// The request message. request: TRequest, /// The channel waiting for the response. @@ -117,7 +117,7 @@ pub enum Message { /// The ID of the request that produced this response. /// /// See [`Behaviour::send_request`]. - request_id: RequestId, + request_id: OutboundRequestId, /// The response message. response: TResponse, }, @@ -138,7 +138,7 @@ pub enum Event { /// The peer to whom the request was sent. peer: PeerId, /// The (local) ID of the failed request. - request_id: RequestId, + request_id: OutboundRequestId, /// The error that occurred. error: OutboundFailure, }, @@ -147,7 +147,7 @@ pub enum Event { /// The peer from whom the request was received. peer: PeerId, /// The ID of the failed inbound request. - request_id: RequestId, + request_id: InboundRequestId, /// The error that occurred. error: InboundFailure, }, @@ -159,13 +159,13 @@ pub enum Event { /// The peer to whom the response was sent. peer: PeerId, /// The ID of the inbound request whose response was sent. - request_id: RequestId, + request_id: InboundRequestId, }, } /// Possible failures occurring in the context of sending /// an outbound request and receiving the response. 
-#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub enum OutboundFailure { /// The request could not be sent because a dialing attempt failed. DialFailure, @@ -181,6 +181,8 @@ pub enum OutboundFailure { ConnectionClosed, /// The remote supports none of the requested protocols. UnsupportedProtocols, + /// An IO failure happened on an outbound stream. + Io(io::Error), } impl fmt::Display for OutboundFailure { @@ -194,6 +196,7 @@ impl fmt::Display for OutboundFailure { OutboundFailure::UnsupportedProtocols => { write!(f, "The remote supports none of the requested protocols") } + OutboundFailure::Io(e) => write!(f, "IO error on outbound stream: {e}"), } } } @@ -202,7 +205,7 @@ impl std::error::Error for OutboundFailure {} /// Possible failures occurring in the context of receiving an /// inbound request and sending a response. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub enum InboundFailure { /// The inbound request timed out, either while reading the /// incoming request or before a response is sent, e.g. if @@ -218,6 +221,8 @@ pub enum InboundFailure { /// due to the [`ResponseChannel`] being dropped instead of /// being passed to [`Behaviour::send_response`]. ResponseOmission, + /// An IO failure happened on an inbound stream. + Io(io::Error), } impl fmt::Display for InboundFailure { @@ -237,6 +242,7 @@ impl fmt::Display for InboundFailure { f, "The response channel was dropped without sending a response to the remote" ), + InboundFailure::Io(e) => write!(f, "IO error on inbound stream: {e}"), } } } @@ -264,17 +270,27 @@ impl ResponseChannel { } } -/// The ID of an inbound or outbound request. +/// The ID of an inbound request. /// -/// Note: [`RequestId`]'s uniqueness is only guaranteed between two -/// inbound and likewise between two outbound requests. 
There is no -/// uniqueness guarantee in a set of both inbound and outbound -/// [`RequestId`]s nor in a set of inbound or outbound requests -/// originating from different [`Behaviour`]'s. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct RequestId(u64); - -impl fmt::Display for RequestId { +/// Note: [`InboundRequestId`]'s uniqueness is only guaranteed between +/// inbound requests of the same originating [`Behaviour`]. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct InboundRequestId(u64); + +impl fmt::Display for InboundRequestId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +/// The ID of an outbound request. +/// +/// Note: [`OutboundRequestId`]'s uniqueness is only guaranteed between +/// outbound requests of the same originating [`Behaviour`]. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct OutboundRequestId(u64); + +impl fmt::Display for OutboundRequestId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } @@ -284,30 +300,37 @@ impl fmt::Display for RequestId { #[derive(Debug, Clone)] pub struct Config { request_timeout: Duration, - connection_keep_alive: Duration, + max_concurrent_streams: usize, } impl Default for Config { fn default() -> Self { Self { - connection_keep_alive: Duration::from_secs(10), request_timeout: Duration::from_secs(10), + max_concurrent_streams: 100, } } } impl Config { - /// Sets the keep-alive timeout of idle connections. - pub fn set_connection_keep_alive(&mut self, v: Duration) -> &mut Self { - self.connection_keep_alive = v; + /// Sets the timeout for inbound and outbound requests. + #[deprecated(note = "Use `Config::with_request_timeout` for one-liner constructions.")] + pub fn set_request_timeout(&mut self, v: Duration) -> &mut Self { + self.request_timeout = v; self } /// Sets the timeout for inbound and outbound requests. 
- pub fn set_request_timeout(&mut self, v: Duration) -> &mut Self { + pub fn with_request_timeout(mut self, v: Duration) -> Self { self.request_timeout = v; self } + + /// Sets the upper bound for the number of concurrent inbound + outbound streams. + pub fn with_max_concurrent_streams(mut self, num_streams: usize) -> Self { + self.max_concurrent_streams = num_streams; + self + } } /// A request/response protocol for some message codec. @@ -320,24 +343,24 @@ where /// The supported outbound protocols. outbound_protocols: SmallVec<[TCodec::Protocol; 2]>, /// The next (local) request ID. - next_request_id: RequestId, + next_outbound_request_id: OutboundRequestId, /// The next (inbound) request ID. - next_inbound_id: Arc, + next_inbound_request_id: Arc, /// The protocol configuration. config: Config, /// The protocol codec for reading and writing requests and responses. codec: TCodec, /// Pending events to return from `poll`. pending_events: - VecDeque, RequestProtocol>>, + VecDeque, OutboundMessage>>, /// The currently connected peers, their pending outbound and inbound responses and their known, /// reachable addresses, if any. connected: HashMap>, /// Externally managed addresses via `add_address` and `remove_address`. - addresses: HashMap>, + addresses: PeerAddresses, /// Requests that have not yet been sent and are waiting for a connection /// to be established. 
- pending_outbound_requests: HashMap; 10]>>, + pending_outbound_requests: HashMap; 10]>>, } impl Behaviour @@ -376,14 +399,14 @@ where Behaviour { inbound_protocols, outbound_protocols, - next_request_id: RequestId(1), - next_inbound_id: Arc::new(AtomicU64::new(1)), + next_outbound_request_id: OutboundRequestId(1), + next_inbound_request_id: Arc::new(AtomicU64::new(1)), config: cfg, codec, pending_events: VecDeque::new(), connected: HashMap::new(), pending_outbound_requests: HashMap::new(), - addresses: HashMap::new(), + addresses: PeerAddresses::default(), } } @@ -399,13 +422,12 @@ where /// > address discovery, or known addresses of peers must be /// > managed via [`Behaviour::add_address`] and /// > [`Behaviour::remove_address`]. - pub fn send_request(&mut self, peer: &PeerId, request: TCodec::Request) -> RequestId { - let request_id = self.next_request_id(); - let request = RequestProtocol { + pub fn send_request(&mut self, peer: &PeerId, request: TCodec::Request) -> OutboundRequestId { + let request_id = self.next_outbound_request_id(); + let request = OutboundMessage { request_id, - codec: self.codec.clone(), - protocols: self.outbound_protocols.clone(), request, + protocols: self.outbound_protocols.clone(), }; if let Some(request) = self.try_send_request(peer, request) { @@ -445,20 +467,18 @@ where /// by [`NetworkBehaviour::handle_pending_outbound_connection`]. /// /// Addresses added in this way are only removed by `remove_address`. - pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) { - self.addresses.entry(*peer).or_default().push(address); + /// + /// Returns true if the address was added, false otherwise (i.e. if the + /// address is already in the list). + #[deprecated(note = "Use `Swarm::add_peer_address` instead.")] + pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) -> bool { + self.addresses.add(*peer, address) } - /// Removes an address of a peer previously added via `add_address`. 
+ /// Removes an address of a peer previously added via [`Behaviour::add_address`]. + #[deprecated(note = "Will be removed with the next breaking release and won't be replaced.")] pub fn remove_address(&mut self, peer: &PeerId, address: &Multiaddr) { - let mut last = false; - if let Some(addresses) = self.addresses.get_mut(peer) { - addresses.retain(|a| a != address); - last = addresses.is_empty(); - } - if last { - self.addresses.remove(peer); - } + self.addresses.remove(peer, address); } /// Checks whether a peer is currently connected. @@ -473,14 +493,14 @@ where /// Checks whether an outbound request to the peer with the provided /// [`PeerId`] initiated by [`Behaviour::send_request`] is still /// pending, i.e. waiting for a response. - pub fn is_pending_outbound(&self, peer: &PeerId, request_id: &RequestId) -> bool { + pub fn is_pending_outbound(&self, peer: &PeerId, request_id: &OutboundRequestId) -> bool { // Check if request is already sent on established connection. let est_conn = self .connected .get(peer) .map(|cs| { cs.iter() - .any(|c| c.pending_inbound_responses.contains(request_id)) + .any(|c| c.pending_outbound_responses.contains(request_id)) }) .unwrap_or(false); // Check if request is still pending to be sent. @@ -496,20 +516,20 @@ where /// Checks whether an inbound request from the peer with the provided /// [`PeerId`] is still pending, i.e. waiting for a response by the local /// node through [`Behaviour::send_response`]. - pub fn is_pending_inbound(&self, peer: &PeerId, request_id: &RequestId) -> bool { + pub fn is_pending_inbound(&self, peer: &PeerId, request_id: &InboundRequestId) -> bool { self.connected .get(peer) .map(|cs| { cs.iter() - .any(|c| c.pending_outbound_responses.contains(request_id)) + .any(|c| c.pending_inbound_responses.contains(request_id)) }) .unwrap_or(false) } - /// Returns the next request ID. 
- fn next_request_id(&mut self) -> RequestId { - let request_id = self.next_request_id; - self.next_request_id.0 += 1; + /// Returns the next outbound request ID. + fn next_outbound_request_id(&mut self) -> OutboundRequestId { + let request_id = self.next_outbound_request_id; + self.next_outbound_request_id.0 += 1; request_id } @@ -519,15 +539,15 @@ where fn try_send_request( &mut self, peer: &PeerId, - request: RequestProtocol, - ) -> Option> { + request: OutboundMessage, + ) -> Option> { if let Some(connections) = self.connected.get_mut(peer) { if connections.is_empty() { return Some(request); } let ix = (request.request_id.0 as usize) % connections.len(); let conn = &mut connections[ix]; - conn.pending_inbound_responses.insert(request.request_id); + conn.pending_outbound_responses.insert(request.request_id); self.pending_events.push_back(ToSwarm::NotifyHandler { peer_id: *peer, handler: NotifyHandler::One(conn.id), @@ -542,13 +562,13 @@ where /// Remove pending outbound response for the given peer and connection. /// /// Returns `true` if the provided connection to the given peer is still - /// alive and the [`RequestId`] was previously present and is now removed. + /// alive and the [`OutboundRequestId`] was previously present and is now removed. /// Returns `false` otherwise. fn remove_pending_outbound_response( &mut self, peer: &PeerId, connection: ConnectionId, - request: RequestId, + request: OutboundRequestId, ) -> bool { self.get_connection_mut(peer, connection) .map(|c| c.pending_outbound_responses.remove(&request)) @@ -558,16 +578,16 @@ where /// Remove pending inbound response for the given peer and connection. /// /// Returns `true` if the provided connection to the given peer is still - /// alive and the [`RequestId`] was previously present and is now removed. + /// alive and the [`InboundRequestId`] was previously present and is now removed. /// Returns `false` otherwise. 
fn remove_pending_inbound_response( &mut self, peer: &PeerId, connection: ConnectionId, - request: &RequestId, + request: InboundRequestId, ) -> bool { self.get_connection_mut(peer, connection) - .map(|c| c.pending_inbound_responses.remove(request)) + .map(|c| c.pending_inbound_responses.remove(&request)) .unwrap_or(false) } @@ -605,36 +625,7 @@ where .iter_mut() .find(|c| c.id == connection_id) .expect("Address change can only happen on an established connection."); - connection.address = new_address; - } - - fn on_connection_established( - &mut self, - ConnectionEstablished { - peer_id, - connection_id, - endpoint, - other_established, - .. - }: ConnectionEstablished, - ) { - let address = match endpoint { - ConnectedPoint::Dialer { address, .. } => Some(address.clone()), - ConnectedPoint::Listener { .. } => None, - }; - self.connected - .entry(peer_id) - .or_default() - .push(Connection::new(connection_id, address)); - - if other_established == 0 { - if let Some(pending) = self.pending_outbound_requests.remove(&peer_id) { - for request in pending { - let request = self.try_send_request(&peer_id, request); - assert!(request.is_none()); - } - } - } + connection.remote_address = new_address; } fn on_connection_closed( @@ -644,7 +635,7 @@ where connection_id, remaining_established, .. 
- }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { let connections = self .connected @@ -662,7 +653,7 @@ where self.connected.remove(&peer_id); } - for request_id in connection.pending_outbound_responses { + for request_id in connection.pending_inbound_responses { self.pending_events .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { peer: peer_id, @@ -671,7 +662,7 @@ where })); } - for request_id in connection.pending_inbound_responses { + for request_id in connection.pending_outbound_responses { self.pending_events .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { peer: peer_id, @@ -701,6 +692,28 @@ where } } } + + /// Preloads a new [`Handler`] with requests that are waiting to be sent to the newly connected peer. + fn preload_new_handler( + &mut self, + handler: &mut Handler, + peer: PeerId, + connection_id: ConnectionId, + remote_address: Option, + ) { + let mut connection = Connection::new(connection_id, remote_address); + + if let Some(pending_requests) = self.pending_outbound_requests.remove(&peer) { + for request in pending_requests { + connection + .pending_outbound_responses + .insert(request.request_id); + handler.on_behaviour_event(request); + } + } + + self.connected.entry(peer).or_default().push(connection); + } } impl NetworkBehaviour for Behaviour @@ -712,18 +725,22 @@ where fn handle_established_inbound_connection( &mut self, - _: ConnectionId, - _: PeerId, + connection_id: ConnectionId, + peer: PeerId, _: &Multiaddr, _: &Multiaddr, ) -> Result, ConnectionDenied> { - Ok(Handler::new( + let mut handler = Handler::new( self.inbound_protocols.clone(), self.codec.clone(), - self.config.connection_keep_alive, self.config.request_timeout, - self.next_inbound_id.clone(), - )) + self.next_inbound_request_id.clone(), + self.config.max_concurrent_streams, + ); + + self.preload_new_handler(&mut handler, peer, connection_id, None); + + Ok(handler) } fn handle_pending_outbound_connection( @@ -740,50 +757,50 @@ where let mut 
addresses = Vec::new(); if let Some(connections) = self.connected.get(&peer) { - addresses.extend(connections.iter().filter_map(|c| c.address.clone())) - } - if let Some(more) = self.addresses.get(&peer) { - addresses.extend(more.into_iter().cloned()); + addresses.extend(connections.iter().filter_map(|c| c.remote_address.clone())) } + let cached_addrs = self.addresses.get(&peer); + addresses.extend(cached_addrs); + Ok(addresses) } fn handle_established_outbound_connection( &mut self, - _: ConnectionId, - _: PeerId, - _: &Multiaddr, + connection_id: ConnectionId, + peer: PeerId, + remote_address: &Multiaddr, _: Endpoint, ) -> Result, ConnectionDenied> { - Ok(Handler::new( + let mut handler = Handler::new( self.inbound_protocols.clone(), self.codec.clone(), - self.config.connection_keep_alive, self.config.request_timeout, - self.next_inbound_id.clone(), - )) + self.next_inbound_request_id.clone(), + self.config.max_concurrent_streams, + ); + + self.preload_new_handler( + &mut handler, + peer, + connection_id, + Some(remote_address.clone()), + ); + + Ok(handler) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { + self.addresses.on_swarm_event(&event); match event { - FromSwarm::ConnectionEstablished(connection_established) => { - self.on_connection_established(connection_established) - } + FromSwarm::ConnectionEstablished(_) => {} FromSwarm::ConnectionClosed(connection_closed) => { self.on_connection_closed(connection_closed) } FromSwarm::AddressChange(address_change) => self.on_address_change(address_change), FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure), - FromSwarm::ListenFailure(_) => {} - FromSwarm::NewListener(_) => {} - FromSwarm::NewListenAddr(_) => {} - FromSwarm::ExpiredListenAddr(_) => {} - FromSwarm::ListenerError(_) => {} - FromSwarm::ListenerClosed(_) => {} - FromSwarm::NewExternalAddrCandidate(_) => {} - FromSwarm::ExternalAddrExpired(_) => {} - 
FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } @@ -798,7 +815,7 @@ where request_id, response, } => { - let removed = self.remove_pending_inbound_response(&peer, connection, &request_id); + let removed = self.remove_pending_outbound_response(&peer, connection, request_id); debug_assert!( removed, "Expect request_id to be pending before receiving response.", @@ -815,35 +832,26 @@ where request_id, request, sender, - } => { - let channel = ResponseChannel { sender }; - let message = Message::Request { - request_id, - request, - channel, - }; - self.pending_events - .push_back(ToSwarm::GenerateEvent(Event::Message { peer, message })); + } => match self.get_connection_mut(&peer, connection) { + Some(connection) => { + let inserted = connection.pending_inbound_responses.insert(request_id); + debug_assert!(inserted, "Expect id of new request to be unknown."); - match self.get_connection_mut(&peer, connection) { - Some(connection) => { - let inserted = connection.pending_outbound_responses.insert(request_id); - debug_assert!(inserted, "Expect id of new request to be unknown."); - } - // Connection closed after `Event::Request` has been emitted. - None => { - self.pending_events.push_back(ToSwarm::GenerateEvent( - Event::InboundFailure { - peer, - request_id, - error: InboundFailure::ConnectionClosed, - }, - )); - } + let channel = ResponseChannel { sender }; + let message = Message::Request { + request_id, + request, + channel, + }; + self.pending_events + .push_back(ToSwarm::GenerateEvent(Event::Message { peer, message })); } - } + None => { + tracing::debug!("Connection ({connection}) closed after `Event::Request` ({request_id}) has been emitted."); + } + }, handler::Event::ResponseSent(request_id) => { - let removed = self.remove_pending_outbound_response(&peer, connection, request_id); + let removed = self.remove_pending_inbound_response(&peer, connection, request_id); debug_assert!( removed, "Expect request_id to be pending before response is sent." 
@@ -856,7 +864,7 @@ where })); } handler::Event::ResponseOmission(request_id) => { - let removed = self.remove_pending_outbound_response(&peer, connection, request_id); + let removed = self.remove_pending_inbound_response(&peer, connection, request_id); debug_assert!( removed, "Expect request_id to be pending before response is omitted.", @@ -870,7 +878,7 @@ where })); } handler::Event::OutboundTimeout(request_id) => { - let removed = self.remove_pending_inbound_response(&peer, connection, &request_id); + let removed = self.remove_pending_outbound_response(&peer, connection, request_id); debug_assert!( removed, "Expect request_id to be pending before request times out." @@ -884,7 +892,7 @@ where })); } handler::Event::OutboundUnsupportedProtocols(request_id) => { - let removed = self.remove_pending_inbound_response(&peer, connection, &request_id); + let removed = self.remove_pending_outbound_response(&peer, connection, request_id); debug_assert!( removed, "Expect request_id to be pending before failing to connect.", @@ -897,14 +905,54 @@ where error: OutboundFailure::UnsupportedProtocols, })); } + handler::Event::OutboundStreamFailed { request_id, error } => { + let removed = self.remove_pending_outbound_response(&peer, connection, request_id); + debug_assert!(removed, "Expect request_id to be pending upon failure"); + + self.pending_events + .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { + peer, + request_id, + error: OutboundFailure::Io(error), + })) + } + handler::Event::InboundTimeout(request_id) => { + let removed = self.remove_pending_inbound_response(&peer, connection, request_id); + + if removed { + self.pending_events + .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { + peer, + request_id, + error: InboundFailure::Timeout, + })); + } else { + // This happens when timeout is emitted before `read_request` finishes. 
+ tracing::debug!( + "Inbound request timeout for an unknown request_id ({request_id})" + ); + } + } + handler::Event::InboundStreamFailed { request_id, error } => { + let removed = self.remove_pending_inbound_response(&peer, connection, request_id); + + if removed { + self.pending_events + .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { + peer, + request_id, + error: InboundFailure::Io(error), + })); + } else { + // This happens when `read_request` fails. + tracing::debug!("Inbound failure is reported for an unknown request_id ({request_id}): {error}"); + } + } } } - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(ev) = self.pending_events.pop_front() { return Poll::Ready(ev); } else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { @@ -924,21 +972,21 @@ const EMPTY_QUEUE_SHRINK_THRESHOLD: usize = 100; /// Internal information tracked for an established connection. struct Connection { id: ConnectionId, - address: Option, + remote_address: Option, /// Pending outbound responses where corresponding inbound requests have /// been received on this connection and emitted via `poll` but have not yet /// been answered. - pending_outbound_responses: HashSet, + pending_outbound_responses: HashSet, /// Pending inbound responses for previously sent requests on this /// connection. 
- pending_inbound_responses: HashSet, + pending_inbound_responses: HashSet, } impl Connection { - fn new(id: ConnectionId, address: Option) -> Self { + fn new(id: ConnectionId, remote_address: Option) -> Self { Self { id, - address, + remote_address, pending_outbound_responses: Default::default(), pending_inbound_responses: Default::default(), } diff --git a/protocols/request-response/tests/error_reporting.rs b/protocols/request-response/tests/error_reporting.rs new file mode 100644 index 000000000000..2dc82b2e0c52 --- /dev/null +++ b/protocols/request-response/tests/error_reporting.rs @@ -0,0 +1,568 @@ +use anyhow::{bail, Result}; +use async_std::task::sleep; +use async_trait::async_trait; +use futures::prelude::*; +use libp2p_identity::PeerId; +use libp2p_request_response as request_response; +use libp2p_request_response::ProtocolSupport; +use libp2p_swarm::{StreamProtocol, Swarm}; +use libp2p_swarm_test::SwarmExt; +use request_response::{ + Codec, InboundFailure, InboundRequestId, OutboundFailure, OutboundRequestId, ResponseChannel, +}; +use std::pin::pin; +use std::time::Duration; +use std::{io, iter}; +use tracing_subscriber::EnvFilter; + +#[async_std::test] +async fn report_outbound_failure_on_read_response() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let server_task = async move { + let (peer, req_id, action, resp_channel) = wait_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::FailOnReadResponse); + swarm1 + .behaviour_mut() + .send_response(resp_channel, Action::FailOnReadResponse) + .unwrap(); + + let (peer, req_id_done) = wait_response_sent(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + + // Keep the connection alive, 
otherwise swarm2 may receive `ConnectionClosed` instead + wait_no_events(&mut swarm1).await; + }; + + // Expects OutboundFailure::Io failure with `FailOnReadResponse` error + let client_task = async move { + let req_id = swarm2 + .behaviour_mut() + .send_request(&peer1_id, Action::FailOnReadResponse); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + let error = match error { + OutboundFailure::Io(e) => e, + e => panic!("Unexpected error: {e:?}"), + }; + + assert_eq!(error.kind(), io::ErrorKind::Other); + assert_eq!( + error.into_inner().unwrap().to_string(), + "FailOnReadResponse" + ); + }; + + let server_task = pin!(server_task); + let client_task = pin!(client_task); + futures::future::select(server_task, client_task).await; +} + +#[async_std::test] +async fn report_outbound_failure_on_write_request() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (_peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // Expects no events because `Event::Request` is produced after `read_request`. + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` instead. + let server_task = wait_no_events(&mut swarm1); + + // Expects OutboundFailure::Io failure with `FailOnWriteRequest` error. 
+ let client_task = async move { + let req_id = swarm2 + .behaviour_mut() + .send_request(&peer1_id, Action::FailOnWriteRequest); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + let error = match error { + OutboundFailure::Io(e) => e, + e => panic!("Unexpected error: {e:?}"), + }; + + assert_eq!(error.kind(), io::ErrorKind::Other); + assert_eq!( + error.into_inner().unwrap().to_string(), + "FailOnWriteRequest" + ); + }; + + let server_task = pin!(server_task); + let client_task = pin!(client_task); + futures::future::select(server_task, client_task).await; +} + +#[async_std::test] +async fn report_outbound_timeout_on_read_response() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + // `swarm1` needs to have a bigger timeout to avoid racing + let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(200)); + let (peer2_id, mut swarm2) = new_swarm_with_timeout(Duration::from_millis(100)); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let server_task = async move { + let (peer, req_id, action, resp_channel) = wait_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::TimeoutOnReadResponse); + swarm1 + .behaviour_mut() + .send_response(resp_channel, Action::TimeoutOnReadResponse) + .unwrap(); + + let (peer, req_id_done) = wait_response_sent(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` instead + wait_no_events(&mut swarm1).await; + }; + + // Expects OutboundFailure::Timeout + let client_task = async move { + let req_id = swarm2 + .behaviour_mut() + .send_request(&peer1_id, Action::TimeoutOnReadResponse); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut 
swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + assert!(matches!(error, OutboundFailure::Timeout)); + }; + + let server_task = pin!(server_task); + let client_task = pin!(client_task); + futures::future::select(server_task, client_task).await; +} + +#[async_std::test] +async fn report_inbound_failure_on_read_request() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (_peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // Expects no events because `Event::Request` is produced after `read_request`. + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` instead. + let server_task = wait_no_events(&mut swarm1); + + // Expects io::ErrorKind::UnexpectedEof + let client_task = async move { + let req_id = swarm2 + .behaviour_mut() + .send_request(&peer1_id, Action::FailOnReadRequest); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + match error { + OutboundFailure::Io(e) if e.kind() == io::ErrorKind::UnexpectedEof => {} + e => panic!("Unexpected error: {e:?}"), + }; + }; + + let server_task = pin!(server_task); + let client_task = pin!(client_task); + futures::future::select(server_task, client_task).await; +} + +#[async_std::test] +async fn report_inbound_failure_on_write_response() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // Expects OutboundFailure::Io failure with `FailOnWriteResponse` error + let server_task = async move { + let (peer, req_id, action, resp_channel) = 
wait_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::FailOnWriteResponse); + swarm1 + .behaviour_mut() + .send_response(resp_channel, Action::FailOnWriteResponse) + .unwrap(); + + let (peer, req_id_done, error) = wait_inbound_failure(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + + let error = match error { + InboundFailure::Io(e) => e, + e => panic!("Unexpected error: {e:?}"), + }; + + assert_eq!(error.kind(), io::ErrorKind::Other); + assert_eq!( + error.into_inner().unwrap().to_string(), + "FailOnWriteResponse" + ); + }; + + // Expects OutboundFailure::ConnectionClosed or io::ErrorKind::UnexpectedEof + let client_task = async move { + let req_id = swarm2 + .behaviour_mut() + .send_request(&peer1_id, Action::FailOnWriteResponse); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + match error { + OutboundFailure::ConnectionClosed => { + // ConnectionClosed is allowed here because we mainly test the behavior + // of `server_task`. 
+ } + OutboundFailure::Io(e) if e.kind() == io::ErrorKind::UnexpectedEof => {} + e => panic!("Unexpected error: {e:?}"), + }; + + // Keep alive the task, so only `server_task` can finish + wait_no_events(&mut swarm2).await; + }; + + let server_task = pin!(server_task); + let client_task = pin!(client_task); + futures::future::select(server_task, client_task).await; +} + +#[async_std::test] +async fn report_inbound_timeout_on_write_response() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + // `swarm2` needs to have a bigger timeout to avoid racing + let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(100)); + let (peer2_id, mut swarm2) = new_swarm_with_timeout(Duration::from_millis(200)); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // Expects InboundFailure::Timeout + let server_task = async move { + let (peer, req_id, action, resp_channel) = wait_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::TimeoutOnWriteResponse); + swarm1 + .behaviour_mut() + .send_response(resp_channel, Action::TimeoutOnWriteResponse) + .unwrap(); + + let (peer, req_id_done, error) = wait_inbound_failure(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + assert!(matches!(error, InboundFailure::Timeout)); + }; + + // Expects OutboundFailure::ConnectionClosed or io::ErrorKind::UnexpectedEof + let client_task = async move { + let req_id = swarm2 + .behaviour_mut() + .send_request(&peer1_id, Action::TimeoutOnWriteResponse); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + match error { + OutboundFailure::ConnectionClosed => { + // ConnectionClosed is allowed here because we mainly test the behavior + // of `server_task`. 
+ } + OutboundFailure::Io(e) if e.kind() == io::ErrorKind::UnexpectedEof => {} + e => panic!("Unexpected error: {e:?}"), + } + + // Keep alive the task, so only `server_task` can finish + wait_no_events(&mut swarm2).await; + }; + + let server_task = pin!(server_task); + let client_task = pin!(client_task); + futures::future::select(server_task, client_task).await; +} + +#[derive(Clone, Default)] +struct TestCodec; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum Action { + FailOnReadRequest, + FailOnReadResponse, + TimeoutOnReadResponse, + FailOnWriteRequest, + FailOnWriteResponse, + TimeoutOnWriteResponse, +} + +impl From for u8 { + fn from(value: Action) -> Self { + match value { + Action::FailOnReadRequest => 0, + Action::FailOnReadResponse => 1, + Action::TimeoutOnReadResponse => 2, + Action::FailOnWriteRequest => 3, + Action::FailOnWriteResponse => 4, + Action::TimeoutOnWriteResponse => 5, + } + } +} + +impl TryFrom for Action { + type Error = io::Error; + + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(Action::FailOnReadRequest), + 1 => Ok(Action::FailOnReadResponse), + 2 => Ok(Action::TimeoutOnReadResponse), + 3 => Ok(Action::FailOnWriteRequest), + 4 => Ok(Action::FailOnWriteResponse), + 5 => Ok(Action::TimeoutOnWriteResponse), + _ => Err(io::Error::new(io::ErrorKind::Other, "invalid action")), + } + } +} + +#[async_trait] +impl Codec for TestCodec { + type Protocol = StreamProtocol; + type Request = Action; + type Response = Action; + + async fn read_request( + &mut self, + _protocol: &Self::Protocol, + io: &mut T, + ) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let mut buf = Vec::new(); + io.read_to_end(&mut buf).await?; + + if buf.is_empty() { + return Err(io::ErrorKind::UnexpectedEof.into()); + } + + assert_eq!(buf.len(), 1); + + match buf[0].try_into()? 
{ + Action::FailOnReadRequest => { + Err(io::Error::new(io::ErrorKind::Other, "FailOnReadRequest")) + } + action => Ok(action), + } + } + + async fn read_response( + &mut self, + _protocol: &Self::Protocol, + io: &mut T, + ) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let mut buf = Vec::new(); + io.read_to_end(&mut buf).await?; + + if buf.is_empty() { + return Err(io::ErrorKind::UnexpectedEof.into()); + } + + assert_eq!(buf.len(), 1); + + match buf[0].try_into()? { + Action::FailOnReadResponse => { + Err(io::Error::new(io::ErrorKind::Other, "FailOnReadResponse")) + } + Action::TimeoutOnReadResponse => loop { + sleep(Duration::MAX).await; + }, + action => Ok(action), + } + } + + async fn write_request( + &mut self, + _protocol: &Self::Protocol, + io: &mut T, + req: Self::Request, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + match req { + Action::FailOnWriteRequest => { + Err(io::Error::new(io::ErrorKind::Other, "FailOnWriteRequest")) + } + action => { + let bytes = [action.into()]; + io.write_all(&bytes).await?; + Ok(()) + } + } + } + + async fn write_response( + &mut self, + _protocol: &Self::Protocol, + io: &mut T, + res: Self::Response, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + match res { + Action::FailOnWriteResponse => { + Err(io::Error::new(io::ErrorKind::Other, "FailOnWriteResponse")) + } + Action::TimeoutOnWriteResponse => loop { + sleep(Duration::MAX).await; + }, + action => { + let bytes = [action.into()]; + io.write_all(&bytes).await?; + Ok(()) + } + } + } +} + +fn new_swarm_with_timeout( + timeout: Duration, +) -> (PeerId, Swarm>) { + let protocols = iter::once((StreamProtocol::new("/test/1"), ProtocolSupport::Full)); + let cfg = request_response::Config::default().with_request_timeout(timeout); + + let swarm = + Swarm::new_ephemeral(|_| request_response::Behaviour::::new(protocols, cfg)); + let peed_id = *swarm.local_peer_id(); + + (peed_id, swarm) +} + +fn new_swarm() -> (PeerId, Swarm>) 
{ + new_swarm_with_timeout(Duration::from_millis(100)) +} + +async fn wait_no_events(swarm: &mut Swarm>) { + loop { + if let Ok(ev) = swarm.select_next_some().await.try_into_behaviour_event() { + panic!("Unexpected event: {ev:?}") + } + } +} + +async fn wait_request( + swarm: &mut Swarm>, +) -> Result<(PeerId, InboundRequestId, Action, ResponseChannel)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(request_response::Event::Message { + peer, + message: + request_response::Message::Request { + request_id, + request, + channel, + }, + }) => { + return Ok((peer, request_id, request, channel)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} + +async fn wait_response_sent( + swarm: &mut Swarm>, +) -> Result<(PeerId, InboundRequestId)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(request_response::Event::ResponseSent { + peer, request_id, .. + }) => { + return Ok((peer, request_id)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} + +async fn wait_inbound_failure( + swarm: &mut Swarm>, +) -> Result<(PeerId, InboundRequestId, InboundFailure)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(request_response::Event::InboundFailure { + peer, + request_id, + error, + }) => { + return Ok((peer, request_id, error)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} + +async fn wait_outbound_failure( + swarm: &mut Swarm>, +) -> Result<(PeerId, OutboundRequestId, OutboundFailure)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(request_response::Event::OutboundFailure { + peer, + request_id, + error, + }) => { + return Ok((peer, request_id, error)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) 
=> {} + } + } +} diff --git a/protocols/request-response/tests/peer_address.rs b/protocols/request-response/tests/peer_address.rs new file mode 100644 index 000000000000..2a120931dcdd --- /dev/null +++ b/protocols/request-response/tests/peer_address.rs @@ -0,0 +1,60 @@ +use libp2p_core::ConnectedPoint; +use libp2p_request_response as request_response; +use libp2p_request_response::ProtocolSupport; +use libp2p_swarm::{StreamProtocol, Swarm, SwarmEvent}; +use libp2p_swarm_test::SwarmExt; +use serde::{Deserialize, Serialize}; +use std::iter; +use tracing_subscriber::EnvFilter; + +#[async_std::test] +async fn dial_succeeds_after_adding_peers_address() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let protocols = iter::once((StreamProtocol::new("/ping/1"), ProtocolSupport::Full)); + let config = request_response::Config::default(); + + let mut swarm = Swarm::new_ephemeral(|_| { + request_response::cbor::Behaviour::::new(protocols.clone(), config.clone()) + }); + + let mut swarm2 = Swarm::new_ephemeral(|_| { + request_response::cbor::Behaviour::::new(protocols.clone(), config.clone()) + }); + + let peer_id2 = *swarm2.local_peer_id(); + + let (listen_addr, _) = swarm2.listen().with_memory_addr_external().await; + + swarm.add_peer_address(peer_id2, listen_addr.clone()); + + swarm.dial(peer_id2).unwrap(); + + async_std::task::spawn(swarm2.loop_on_next()); + + let (connected_peer_id, connected_address) = swarm + .wait(|event| match event { + SwarmEvent::ConnectionEstablished { + peer_id, endpoint, .. + } => { + let address = match endpoint { + ConnectedPoint::Dialer { address, .. 
} => Some(address), + _ => None, + }; + Some((peer_id, address)) + } + _ => None, + }) + .await; + let expected_address = listen_addr.with_p2p(peer_id2).unwrap(); + + assert_eq!(connected_peer_id, peer_id2); + assert_eq!(expected_address, connected_address.unwrap()); +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +struct Ping(Vec); +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +struct Pong(Vec); diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs index e0424488f48c..b9e7878a78b7 100644 --- a/protocols/request-response/tests/ping.rs +++ b/protocols/request-response/tests/ping.rs @@ -28,12 +28,15 @@ use libp2p_swarm::{StreamProtocol, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use rand::{self, Rng}; use serde::{Deserialize, Serialize}; -use std::iter; +use std::{io, iter}; +use tracing_subscriber::EnvFilter; #[async_std::test] #[cfg(feature = "cbor")] async fn is_response_outbound() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let ping = Ping("ping".to_string().into_bytes()); let offline_peer = PeerId::random(); @@ -97,7 +100,7 @@ async fn ping_protocol() { }); let peer2_id = *swarm2.local_peer_id(); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; let expected_ping = ping.clone(); @@ -190,7 +193,7 @@ async fn emits_inbound_connection_closed_failure() { }); let peer2_id = *swarm2.local_peer_id(); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; swarm2.behaviour_mut().send_request(&peer1_id, ping.clone()); @@ -255,7 +258,7 @@ async fn emits_inbound_connection_closed_if_channel_is_dropped() { }); let peer2_id = *swarm2.local_peer_id(); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; 
swarm2.behaviour_mut().send_request(&peer1_id, ping.clone()); @@ -288,7 +291,10 @@ async fn emits_inbound_connection_closed_if_channel_is_dropped() { e => panic!("unexpected event from peer 2: {e:?}"), }; - assert_eq!(error, request_response::OutboundFailure::ConnectionClosed); + assert!(matches!( + error, + request_response::OutboundFailure::Io(e) if e.kind() == io::ErrorKind::UnexpectedEof, + )); } // Simple Ping-Pong Protocol diff --git a/protocols/stream/CHANGELOG.md b/protocols/stream/CHANGELOG.md new file mode 100644 index 000000000000..2e177e2f1bc3 --- /dev/null +++ b/protocols/stream/CHANGELOG.md @@ -0,0 +1,3 @@ +## 0.1.0-alpha + +Initial release. diff --git a/protocols/stream/Cargo.toml b/protocols/stream/Cargo.toml new file mode 100644 index 000000000000..be340939720d --- /dev/null +++ b/protocols/stream/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "libp2p-stream" +version = "0.1.0-alpha" +edition = "2021" +rust-version.workspace = true +description = "Generic stream protocols for libp2p" +license = "MIT" +repository = "https://github.com/libp2p/rust-libp2p" +keywords = ["peer-to-peer", "libp2p", "networking"] +categories = ["network-programming", "asynchronous"] + +[dependencies] +futures = "0.3.29" +libp2p-core = { workspace = true } +libp2p-identity = { workspace = true, features = ["peerid"] } +libp2p-swarm = { workspace = true } +tracing = "0.1.37" +void = "1" +rand = "0.8" + +[dev-dependencies] +libp2p-swarm-test = { workspace = true } +tokio = { version = "1", features = ["full"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/protocols/stream/README.md b/protocols/stream/README.md new file mode 100644 index 000000000000..c8a56e119caa --- /dev/null +++ b/protocols/stream/README.md @@ -0,0 +1,69 @@ +# Generic (stream) protocols + +This module provides a generic [`NetworkBehaviour`](libp2p_swarm::NetworkBehaviour) for stream-oriented protocols. 
+Streams are the fundamental primitive of libp2p and all other protocols are implemented using streams. +In contrast to other [`NetworkBehaviour`](libp2p_swarm::NetworkBehaviour)s, this module takes a different design approach. +All interaction happens through a [`Control`] that can be obtained via [`Behaviour::new_control`]. +[`Control`]s can be cloned and thus shared across your application. + +## Inbound + +To accept streams for a particular [`StreamProtocol`](libp2p_swarm::StreamProtocol) using this module, use [`Control::accept`]: + +### Example + +```rust,no_run +# fn main() { +# use libp2p_swarm::{Swarm, StreamProtocol}; +# use libp2p_stream as stream; +# use futures::StreamExt as _; +let mut swarm: Swarm = todo!(); + +let mut control = swarm.behaviour().new_control(); +let mut incoming = control.accept(StreamProtocol::new("/my-protocol")).unwrap(); + +let handler_future = async move { + while let Some((peer, stream)) = incoming.next().await { + // Execute your protocol using `stream`. + } +}; +# } +``` + +### Resource management + +[`Control::accept`] returns you an instance of [`IncomingStreams`]. +This struct implements [`Stream`](futures::Stream) and like other streams, is lazy. +You must continuously poll it to make progress. +In the example above, this taken care of by using the [`StreamExt::next`](futures::StreamExt::next) helper. + +Internally, we will drop streams if your application falls behind in processing these incoming streams, i.e. if whatever loop calls `.next()` is not fast enough. + +### Drop + +As soon as you drop [`IncomingStreams`], the protocol will be de-registered. +Any further attempt by remote peers to open a stream using the provided protocol will result in a negotiation error. + +## Outbound + +To open a new outbound stream for a particular protocol, use [`Control::open_stream`]. 
+ +### Example + +```rust,no_run +# fn main() { +# use libp2p_swarm::{Swarm, StreamProtocol}; +# use libp2p_stream as stream; +# use libp2p_identity::PeerId; +let mut swarm: Swarm = todo!(); +let peer_id: PeerId = todo!(); + +let mut control = swarm.behaviour().new_control(); + +let protocol_future = async move { + let stream = control.open_stream(peer_id, StreamProtocol::new("/my-protocol")).await.unwrap(); + + // Execute your protocol here using `stream`. +}; +# } +``` \ No newline at end of file diff --git a/protocols/stream/src/behaviour.rs b/protocols/stream/src/behaviour.rs new file mode 100644 index 000000000000..e02aca884b77 --- /dev/null +++ b/protocols/stream/src/behaviour.rs @@ -0,0 +1,143 @@ +use core::fmt; +use std::{ + sync::{Arc, Mutex}, + task::{Context, Poll}, +}; + +use futures::{channel::mpsc, StreamExt}; +use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_identity::PeerId; +use libp2p_swarm::{ + self as swarm, dial_opts::DialOpts, ConnectionDenied, ConnectionId, FromSwarm, + NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, +}; +use swarm::{ + behaviour::ConnectionEstablished, dial_opts::PeerCondition, ConnectionClosed, DialError, + DialFailure, +}; + +use crate::{handler::Handler, shared::Shared, Control}; + +/// A generic behaviour for stream-oriented protocols. +pub struct Behaviour { + shared: Arc>, + dial_receiver: mpsc::Receiver, +} + +impl Default for Behaviour { + fn default() -> Self { + Self::new() + } +} + +impl Behaviour { + pub fn new() -> Self { + let (dial_sender, dial_receiver) = mpsc::channel(0); + + Self { + shared: Arc::new(Mutex::new(Shared::new(dial_sender))), + dial_receiver, + } + } + + /// Obtain a new [`Control`]. + pub fn new_control(&self) -> Control { + Control::new(self.shared.clone()) + } +} + +/// The protocol is already registered. 
+#[derive(Debug)] +pub struct AlreadyRegistered; + +impl fmt::Display for AlreadyRegistered { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "The protocol is already registered") + } +} + +impl std::error::Error for AlreadyRegistered {} + +impl NetworkBehaviour for Behaviour { + type ConnectionHandler = Handler; + type ToSwarm = (); + + fn handle_established_inbound_connection( + &mut self, + connection_id: ConnectionId, + peer: PeerId, + _: &Multiaddr, + _: &Multiaddr, + ) -> Result, ConnectionDenied> { + Ok(Handler::new( + peer, + self.shared.clone(), + Shared::lock(&self.shared).receiver(peer, connection_id), + )) + } + + fn handle_established_outbound_connection( + &mut self, + connection_id: ConnectionId, + peer: PeerId, + _: &Multiaddr, + _: Endpoint, + ) -> Result, ConnectionDenied> { + Ok(Handler::new( + peer, + self.shared.clone(), + Shared::lock(&self.shared).receiver(peer, connection_id), + )) + } + + fn on_swarm_event(&mut self, event: FromSwarm) { + match event { + FromSwarm::ConnectionEstablished(ConnectionEstablished { + peer_id, + connection_id, + .. + }) => Shared::lock(&self.shared).on_connection_established(connection_id, peer_id), + FromSwarm::ConnectionClosed(ConnectionClosed { connection_id, .. }) => { + Shared::lock(&self.shared).on_connection_closed(connection_id) + } + FromSwarm::DialFailure(DialFailure { + peer_id: Some(peer_id), + error: + error @ (DialError::Transport(_) + | DialError::Denied { .. } + | DialError::NoAddresses + | DialError::WrongPeerId { .. }), + .. + }) => { + let reason = error.to_string(); // We can only forward the string repr but it is better than nothing. 
+ + Shared::lock(&self.shared).on_dial_failure(peer_id, reason) + } + _ => {} + } + } + + fn on_connection_handler_event( + &mut self, + _peer_id: PeerId, + _connection_id: ConnectionId, + event: THandlerOutEvent, + ) { + void::unreachable(event); + } + + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll>> { + if let Poll::Ready(Some(peer)) = self.dial_receiver.poll_next_unpin(cx) { + return Poll::Ready(ToSwarm::Dial { + opts: DialOpts::peer_id(peer) + .condition(PeerCondition::DisconnectedAndNotDialing) + .build(), + }); + } + + Poll::Pending + } +} diff --git a/protocols/stream/src/control.rs b/protocols/stream/src/control.rs new file mode 100644 index 000000000000..6aabaaff30ee --- /dev/null +++ b/protocols/stream/src/control.rs @@ -0,0 +1,124 @@ +use core::fmt; +use std::{ + io, + pin::Pin, + sync::{Arc, Mutex}, + task::{Context, Poll}, +}; + +use crate::AlreadyRegistered; +use crate::{handler::NewStream, shared::Shared}; + +use futures::{ + channel::{mpsc, oneshot}, + SinkExt as _, StreamExt as _, +}; +use libp2p_identity::PeerId; +use libp2p_swarm::{Stream, StreamProtocol}; + +/// A (remote) control for opening new streams and registration of inbound protocols. +/// +/// A [`Control`] can be cloned and thus allows for concurrent access. +#[derive(Clone)] +pub struct Control { + shared: Arc>, +} + +impl Control { + pub(crate) fn new(shared: Arc>) -> Self { + Self { shared } + } + + /// Attempt to open a new stream for the given protocol and peer. + /// + /// In case we are currently not connected to the peer, we will attempt to make a new connection. + /// + /// ## Backpressure + /// + /// [`Control`]s support backpressure similarly to bounded channels: + /// Each [`Control`] has a guaranteed slot for internal messages. + /// A single control will always open one stream at a time which is enforced by requiring `&mut self`. + /// + /// This backpressure mechanism breaks if you clone [`Control`]s excessively. 
+ pub async fn open_stream( + &mut self, + peer: PeerId, + protocol: StreamProtocol, + ) -> Result { + tracing::debug!(%peer, "Requesting new stream"); + + let mut new_stream_sender = Shared::lock(&self.shared).sender(peer); + + let (sender, receiver) = oneshot::channel(); + + new_stream_sender + .send(NewStream { protocol, sender }) + .await + .map_err(|e| io::Error::new(io::ErrorKind::ConnectionReset, e))?; + + let stream = receiver + .await + .map_err(|e| io::Error::new(io::ErrorKind::ConnectionReset, e))??; + + Ok(stream) + } + + /// Accept inbound streams for the provided protocol. + /// + /// To stop accepting streams, simply drop the returned [`IncomingStreams`] handle. + pub fn accept( + &mut self, + protocol: StreamProtocol, + ) -> Result { + Shared::lock(&self.shared).accept(protocol) + } +} + +/// Errors while opening a new stream. +#[derive(Debug)] +#[non_exhaustive] +pub enum OpenStreamError { + /// The remote does not support the requested protocol. + UnsupportedProtocol(StreamProtocol), + /// IO Error that occurred during the protocol handshake. + Io(std::io::Error), +} + +impl From for OpenStreamError { + fn from(v: std::io::Error) -> Self { + Self::Io(v) + } +} + +impl fmt::Display for OpenStreamError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + OpenStreamError::UnsupportedProtocol(p) => { + write!(f, "failed to open stream: remote peer does not support {p}") + } + OpenStreamError::Io(e) => { + write!(f, "failed to open stream: io error: {e}") + } + } + } +} + +/// A handle to inbound streams for a particular protocol. 
+#[must_use = "Streams do nothing unless polled."] +pub struct IncomingStreams { + receiver: mpsc::Receiver<(PeerId, Stream)>, +} + +impl IncomingStreams { + pub(crate) fn new(receiver: mpsc::Receiver<(PeerId, Stream)>) -> Self { + Self { receiver } + } +} + +impl futures::Stream for IncomingStreams { + type Item = (PeerId, Stream); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.receiver.poll_next_unpin(cx) + } +} diff --git a/protocols/stream/src/handler.rs b/protocols/stream/src/handler.rs new file mode 100644 index 000000000000..f63b93c17612 --- /dev/null +++ b/protocols/stream/src/handler.rs @@ -0,0 +1,165 @@ +use std::{ + io, + sync::{Arc, Mutex}, + task::{Context, Poll}, +}; + +use futures::{ + channel::{mpsc, oneshot}, + StreamExt as _, +}; +use libp2p_identity::PeerId; +use libp2p_swarm::{ + self as swarm, + handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound}, + ConnectionHandler, Stream, StreamProtocol, +}; + +use crate::{shared::Shared, upgrade::Upgrade, OpenStreamError}; + +pub struct Handler { + remote: PeerId, + shared: Arc>, + + receiver: mpsc::Receiver, + pending_upgrade: Option<( + StreamProtocol, + oneshot::Sender>, + )>, +} + +impl Handler { + pub(crate) fn new( + remote: PeerId, + shared: Arc>, + receiver: mpsc::Receiver, + ) -> Self { + Self { + shared, + receiver, + pending_upgrade: None, + remote, + } + } +} + +impl ConnectionHandler for Handler { + type FromBehaviour = void::Void; + type ToBehaviour = void::Void; + type InboundProtocol = Upgrade; + type OutboundProtocol = Upgrade; + type InboundOpenInfo = (); + type OutboundOpenInfo = (); + + fn listen_protocol( + &self, + ) -> swarm::SubstreamProtocol { + swarm::SubstreamProtocol::new( + Upgrade { + supported_protocols: Shared::lock(&self.shared).supported_inbound_protocols(), + }, + (), + ) + } + + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll< + swarm::ConnectionHandlerEvent< + Self::OutboundProtocol, 
+ Self::OutboundOpenInfo, + Self::ToBehaviour, + >, + > { + if self.pending_upgrade.is_some() { + return Poll::Pending; + } + + match self.receiver.poll_next_unpin(cx) { + Poll::Ready(Some(new_stream)) => { + self.pending_upgrade = Some((new_stream.protocol.clone(), new_stream.sender)); + return Poll::Ready(swarm::ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: swarm::SubstreamProtocol::new( + Upgrade { + supported_protocols: vec![new_stream.protocol], + }, + (), + ), + }); + } + Poll::Ready(None) => {} // Sender is gone, no more work to do. + Poll::Pending => {} + } + + Poll::Pending + } + + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { + void::unreachable(event) + } + + fn on_connection_event( + &mut self, + event: ConnectionEvent< + Self::InboundProtocol, + Self::OutboundProtocol, + Self::InboundOpenInfo, + Self::OutboundOpenInfo, + >, + ) { + match event { + ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { + protocol: (stream, protocol), + info: (), + }) => { + Shared::lock(&self.shared).on_inbound_stream(self.remote, stream, protocol); + } + ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { + protocol: (stream, actual_protocol), + info: (), + }) => { + let Some((expected_protocol, sender)) = self.pending_upgrade.take() else { + debug_assert!( + false, + "Negotiated an outbound stream without a back channel" + ); + return; + }; + debug_assert_eq!(expected_protocol, actual_protocol); + + let _ = sender.send(Ok(stream)); + } + ConnectionEvent::DialUpgradeError(DialUpgradeError { error, info: () }) => { + let Some((p, sender)) = self.pending_upgrade.take() else { + debug_assert!( + false, + "Received a `DialUpgradeError` without a back channel" + ); + return; + }; + + let error = match error { + swarm::StreamUpgradeError::Timeout => { + OpenStreamError::Io(io::Error::from(io::ErrorKind::TimedOut)) + } + swarm::StreamUpgradeError::Apply(v) => void::unreachable(v), + 
swarm::StreamUpgradeError::NegotiationFailed => { + OpenStreamError::UnsupportedProtocol(p) + } + swarm::StreamUpgradeError::Io(io) => OpenStreamError::Io(io), + }; + + let _ = sender.send(Err(error)); + } + _ => {} + } + } +} + +/// Message from a [`Control`](crate::Control) to a [`ConnectionHandler`] to negotiate a new outbound stream. +#[derive(Debug)] +pub(crate) struct NewStream { + pub(crate) protocol: StreamProtocol, + pub(crate) sender: oneshot::Sender>, +} diff --git a/protocols/stream/src/lib.rs b/protocols/stream/src/lib.rs new file mode 100644 index 000000000000..d498a1b71e56 --- /dev/null +++ b/protocols/stream/src/lib.rs @@ -0,0 +1,10 @@ +#![doc = include_str!("../README.md")] + +mod behaviour; +mod control; +mod handler; +mod shared; +mod upgrade; + +pub use behaviour::{AlreadyRegistered, Behaviour}; +pub use control::{Control, IncomingStreams, OpenStreamError}; diff --git a/protocols/stream/src/shared.rs b/protocols/stream/src/shared.rs new file mode 100644 index 000000000000..48aa6613d833 --- /dev/null +++ b/protocols/stream/src/shared.rs @@ -0,0 +1,167 @@ +use std::{ + collections::{hash_map::Entry, HashMap}, + io, + sync::{Arc, Mutex, MutexGuard}, +}; + +use futures::channel::mpsc; +use libp2p_identity::PeerId; +use libp2p_swarm::{ConnectionId, Stream, StreamProtocol}; +use rand::seq::IteratorRandom as _; + +use crate::{handler::NewStream, AlreadyRegistered, IncomingStreams}; + +pub(crate) struct Shared { + /// Tracks the supported inbound protocols created via [`Control::accept`](crate::Control::accept). + /// + /// For each [`StreamProtocol`], we hold the [`mpsc::Sender`] corresponding to the [`mpsc::Receiver`] in [`IncomingStreams`]. + supported_inbound_protocols: HashMap>, + + connections: HashMap, + senders: HashMap>, + + /// Tracks channel pairs for a peer whilst we are dialing them. + pending_channels: HashMap, mpsc::Receiver)>, + + /// Sender for peers we want to dial. 
+ /// + /// We manage this through a channel to avoid locks as part of [`NetworkBehaviour::poll`](libp2p_swarm::NetworkBehaviour::poll). + dial_sender: mpsc::Sender, +} + +impl Shared { + pub(crate) fn lock(shared: &Arc>) -> MutexGuard<'_, Shared> { + shared.lock().unwrap_or_else(|e| e.into_inner()) + } +} + +impl Shared { + pub(crate) fn new(dial_sender: mpsc::Sender) -> Self { + Self { + dial_sender, + connections: Default::default(), + senders: Default::default(), + pending_channels: Default::default(), + supported_inbound_protocols: Default::default(), + } + } + + pub(crate) fn accept( + &mut self, + protocol: StreamProtocol, + ) -> Result { + if self.supported_inbound_protocols.contains_key(&protocol) { + return Err(AlreadyRegistered); + } + + let (sender, receiver) = mpsc::channel(0); + self.supported_inbound_protocols + .insert(protocol.clone(), sender); + + Ok(IncomingStreams::new(receiver)) + } + + /// Lists the protocols for which we have an active [`IncomingStreams`] instance. 
+ pub(crate) fn supported_inbound_protocols(&mut self) -> Vec { + self.supported_inbound_protocols + .retain(|_, sender| !sender.is_closed()); + + self.supported_inbound_protocols.keys().cloned().collect() + } + + pub(crate) fn on_inbound_stream( + &mut self, + remote: PeerId, + stream: Stream, + protocol: StreamProtocol, + ) { + match self.supported_inbound_protocols.entry(protocol.clone()) { + Entry::Occupied(mut entry) => match entry.get_mut().try_send((remote, stream)) { + Ok(()) => {} + Err(e) if e.is_full() => { + tracing::debug!(%protocol, "Channel is full, dropping inbound stream"); + } + Err(e) if e.is_disconnected() => { + tracing::debug!(%protocol, "Channel is gone, dropping inbound stream"); + entry.remove(); + } + _ => unreachable!(), + }, + Entry::Vacant(_) => { + tracing::debug!(%protocol, "channel is gone, dropping inbound stream"); + } + } + } + + pub(crate) fn on_connection_established(&mut self, conn: ConnectionId, peer: PeerId) { + self.connections.insert(conn, peer); + } + + pub(crate) fn on_connection_closed(&mut self, conn: ConnectionId) { + self.connections.remove(&conn); + } + + pub(crate) fn on_dial_failure(&mut self, peer: PeerId, reason: String) { + let Some((_, mut receiver)) = self.pending_channels.remove(&peer) else { + return; + }; + + while let Ok(Some(new_stream)) = receiver.try_next() { + let _ = new_stream + .sender + .send(Err(crate::OpenStreamError::Io(io::Error::new( + io::ErrorKind::NotConnected, + reason.clone(), + )))); + } + } + + pub(crate) fn sender(&mut self, peer: PeerId) -> mpsc::Sender { + let maybe_sender = self + .connections + .iter() + .filter_map(|(c, p)| (p == &peer).then_some(c)) + .choose(&mut rand::thread_rng()) + .and_then(|c| self.senders.get(c)); + + match maybe_sender { + Some(sender) => { + tracing::debug!("Returning sender to existing connection"); + + sender.clone() + } + None => { + tracing::debug!(%peer, "Not connected to peer, initiating dial"); + + let (sender, _) = self + .pending_channels + 
.entry(peer) + .or_insert_with(|| mpsc::channel(0)); + + let _ = self.dial_sender.try_send(peer); + + sender.clone() + } + } + } + + pub(crate) fn receiver( + &mut self, + peer: PeerId, + connection: ConnectionId, + ) -> mpsc::Receiver { + if let Some((sender, receiver)) = self.pending_channels.remove(&peer) { + tracing::debug!(%peer, %connection, "Returning existing pending receiver"); + + self.senders.insert(connection, sender); + return receiver; + } + + tracing::debug!(%peer, %connection, "Creating new channel pair"); + + let (sender, receiver) = mpsc::channel(0); + self.senders.insert(connection, sender); + + receiver + } +} diff --git a/protocols/stream/src/upgrade.rs b/protocols/stream/src/upgrade.rs new file mode 100644 index 000000000000..ac9fb3ed992c --- /dev/null +++ b/protocols/stream/src/upgrade.rs @@ -0,0 +1,42 @@ +use std::future::{ready, Ready}; + +use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_swarm::{Stream, StreamProtocol}; + +pub struct Upgrade { + pub(crate) supported_protocols: Vec, +} + +impl UpgradeInfo for Upgrade { + type Info = StreamProtocol; + + type InfoIter = std::vec::IntoIter; + + fn protocol_info(&self) -> Self::InfoIter { + self.supported_protocols.clone().into_iter() + } +} + +impl InboundUpgrade for Upgrade { + type Output = (Stream, StreamProtocol); + + type Error = void::Void; + + type Future = Ready>; + + fn upgrade_inbound(self, socket: Stream, info: Self::Info) -> Self::Future { + ready(Ok((socket, info))) + } +} + +impl OutboundUpgrade for Upgrade { + type Output = (Stream, StreamProtocol); + + type Error = void::Void; + + type Future = Ready>; + + fn upgrade_outbound(self, socket: Stream, info: Self::Info) -> Self::Future { + ready(Ok((socket, info))) + } +} diff --git a/protocols/stream/tests/lib.rs b/protocols/stream/tests/lib.rs new file mode 100644 index 000000000000..cd6caaced5e5 --- /dev/null +++ b/protocols/stream/tests/lib.rs @@ -0,0 +1,80 @@ +use std::io; + +use 
futures::{AsyncReadExt as _, AsyncWriteExt as _, StreamExt as _}; +use libp2p_identity::PeerId; +use libp2p_stream as stream; +use libp2p_swarm::{StreamProtocol, Swarm}; +use libp2p_swarm_test::SwarmExt as _; +use stream::OpenStreamError; +use tracing::level_filters::LevelFilter; +use tracing_subscriber::EnvFilter; + +const PROTOCOL: StreamProtocol = StreamProtocol::new("/test"); + +#[tokio::test] +async fn dropping_incoming_streams_deregisters() { + let _ = tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::builder() + .with_default_directive(LevelFilter::DEBUG.into()) + .from_env() + .unwrap(), + ) + .with_test_writer() + .try_init(); + + let mut swarm1 = Swarm::new_ephemeral(|_| stream::Behaviour::new()); + let mut swarm2 = Swarm::new_ephemeral(|_| stream::Behaviour::new()); + + let mut control = swarm1.behaviour().new_control(); + let mut incoming = swarm2.behaviour().new_control().accept(PROTOCOL).unwrap(); + + swarm2.listen().with_memory_addr_external().await; + swarm1.connect(&mut swarm2).await; + + let swarm2_peer_id = *swarm2.local_peer_id(); + + let handle = tokio::spawn(async move { + while let Some((_, mut stream)) = incoming.next().await { + stream.write_all(&[42]).await.unwrap(); + stream.close().await.unwrap(); + } + }); + tokio::spawn(swarm1.loop_on_next()); + tokio::spawn(swarm2.loop_on_next()); + + let mut stream = control.open_stream(swarm2_peer_id, PROTOCOL).await.unwrap(); + + let mut buf = [0u8; 1]; + stream.read_exact(&mut buf).await.unwrap(); + assert_eq!([42], buf); + + handle.abort(); + let _ = handle.await; + + let error = control + .open_stream(swarm2_peer_id, PROTOCOL) + .await + .unwrap_err(); + assert!(matches!(error, OpenStreamError::UnsupportedProtocol(_))); +} + +#[tokio::test] +async fn dial_errors_are_propagated() { + let swarm1 = Swarm::new_ephemeral(|_| stream::Behaviour::new()); + + let mut control = swarm1.behaviour().new_control(); + tokio::spawn(swarm1.loop_on_next()); + + let error = control + 
.open_stream(PeerId::random(), PROTOCOL) + .await + .unwrap_err(); + + let OpenStreamError::Io(e) = error else { + panic!("Unexpected error: {error}") + }; + + assert_eq!(e.kind(), io::ErrorKind::NotConnected); + assert_eq!("Dial error: no addresses for peer.", e.to_string()); +} diff --git a/protocols/upnp/CHANGELOG.md b/protocols/upnp/CHANGELOG.md index 8ebea5e728ec..ba031264a0fd 100644 --- a/protocols/upnp/CHANGELOG.md +++ b/protocols/upnp/CHANGELOG.md @@ -1,4 +1,11 @@ -## 0.1.1 - unreleased +## 0.2.1 +- Fix a panic caused when dropping `upnp::Behaviour` such as when used together with `Toggle`. + See [PR 5096](https://github.com/libp2p/rust-libp2p/pull/5096). + +## 0.2.0 + + +## 0.1.1 - Fix high CPU usage due to repeated generation of failure events. See [PR 4569](https://github.com/libp2p/rust-libp2p/pull/4569). diff --git a/protocols/upnp/Cargo.toml b/protocols/upnp/Cargo.toml index 8a3c5e0ee902..a9c9a3c86212 100644 --- a/protocols/upnp/Cargo.toml +++ b/protocols/upnp/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-upnp" edition = "2021" rust-version = "1.60.0" description = "UPnP support for libp2p transports" -version = "0.1.1" +version = "0.2.1" license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] @@ -11,14 +11,14 @@ categories = ["network-programming", "asynchronous"] publish = true [dependencies] -futures = "0.3.28" +futures = "0.3.30" futures-timer = "3.0.2" -igd-next = "0.14.2" +igd-next = "0.14.3" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } -log = "0.4.19" +tokio = { version = "1.36", default-features = false, features = ["rt"], optional = true } +tracing = "0.1.37" void = "1.0.2" -tokio = { version = "1.29", default-features = false, features = ["rt"], optional = true } [features] tokio = ["igd-next/aio_tokio", "dep:tokio"] diff --git a/protocols/upnp/src/behaviour.rs b/protocols/upnp/src/behaviour.rs index 45b82edc562a..a94ef9526dd7 100644 --- 
a/protocols/upnp/src/behaviour.rs +++ b/protocols/upnp/src/behaviour.rs @@ -39,7 +39,7 @@ use igd_next::PortMappingProtocol; use libp2p_core::{multiaddr, transport::ListenerId, Endpoint, Multiaddr}; use libp2p_swarm::{ derive_prelude::PeerId, dummy, ConnectionDenied, ConnectionId, ExpiredListenAddr, FromSwarm, - NetworkBehaviour, NewListenAddr, PollParameters, ToSwarm, + NetworkBehaviour, NewListenAddr, ToSwarm, }; /// The duration in seconds of a port mapping on the gateway. @@ -175,9 +175,9 @@ impl MappingList { mapping: mapping.clone(), duration, }) { - log::debug!( - "could not request port mapping for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port mapping for multiaddress on the gateway: {}", err ); } @@ -190,9 +190,9 @@ impl MappingList { mapping: mapping.clone(), duration, }) { - log::debug!( - "could not request port mapping for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port mapping for multiaddress on the gateway: {}", err ); } @@ -252,7 +252,7 @@ impl NetworkBehaviour for Behaviour { Ok(dummy::ConnectionHandler) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::NewListenAddr(NewListenAddr { listener_id, @@ -261,7 +261,7 @@ impl NetworkBehaviour for Behaviour { let (addr, protocol) = match multiaddr_to_socketaddr_protocol(multiaddr.clone()) { Ok(addr_port) => addr_port, Err(()) => { - log::debug!("multiaddress not supported for UPnP {multiaddr}"); + tracing::debug!("multiaddress not supported for UPnP {multiaddr}"); return; } }; @@ -271,7 +271,11 @@ impl NetworkBehaviour for Behaviour { .iter() .find(|(mapping, _state)| mapping.internal_addr.port() == addr.port()) { - log::debug!("port from multiaddress {multiaddr} is already being mapped to another multiaddr: {}", mapping.multiaddr); + tracing::debug!( + 
multiaddress=%multiaddr, + mapped_multiaddress=%mapping.multiaddr, + "port from multiaddress is already being mapped" + ); return; } @@ -302,9 +306,9 @@ impl NetworkBehaviour for Behaviour { mapping: mapping.clone(), duration, }) { - log::debug!( - "could not request port mapping for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port mapping for multiaddress on the gateway: {}", err ); } @@ -312,14 +316,17 @@ impl NetworkBehaviour for Behaviour { self.mappings.insert(mapping, MappingState::Pending); } GatewayState::GatewayNotFound => { - log::debug!( - "network gateway not found, UPnP port mapping of {multiaddr} discarded" + tracing::debug!( + multiaddres=%multiaddr, + "network gateway not found, UPnP port mapping of multiaddres discarded" ); } GatewayState::NonRoutableGateway(addr) => { - log::debug!( - "the network gateway is not exposed to the public network, \ - it's ip is {addr}. UPnP port mapping of {multiaddr} discarded" + tracing::debug!( + multiaddress=%multiaddr, + network_gateway_ip=%addr, + "the network gateway is not exposed to the public network. 
/ + UPnP port mapping of multiaddress discarded" ); } }; @@ -334,9 +341,9 @@ impl NetworkBehaviour for Behaviour { .sender .try_send(GatewayRequest::RemoveMapping(mapping.clone())) { - log::debug!( - "could not request port removal for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port removal for multiaddress on the gateway: {}", err ); } @@ -344,17 +351,7 @@ impl NetworkBehaviour for Behaviour { } } } - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrConfirmed(_) - | FromSwarm::ExternalAddrExpired(_) => {} + _ => {} } } @@ -367,10 +364,10 @@ impl NetworkBehaviour for Behaviour { void::unreachable(event) } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - _params: &mut impl PollParameters, ) -> Poll>> { // If there are pending addresses to be emitted we emit them. 
if let Some(event) = self.pending_events.pop_front() { @@ -388,9 +385,9 @@ impl NetworkBehaviour for Behaviour { if !is_addr_global(gateway.external_addr) { self.state = GatewayState::NonRoutableGateway(gateway.external_addr); - log::debug!( - "the gateway is not routable, its address is {}", - gateway.external_addr + tracing::debug!( + gateway_address=%gateway.external_addr, + "the gateway is not routable" ); return Poll::Ready(ToSwarm::GenerateEvent( Event::NonRoutableGateway, @@ -399,7 +396,7 @@ impl NetworkBehaviour for Behaviour { self.state = GatewayState::Available(gateway); } Err(err) => { - log::debug!("could not find gateway: {err}"); + tracing::debug!("could not find gateway: {err}"); self.state = GatewayState::GatewayNotFound; return Poll::Ready(ToSwarm::GenerateEvent(Event::GatewayNotFound)); } @@ -427,20 +424,20 @@ impl NetworkBehaviour for Behaviour { self.pending_events.push_back(Event::NewExternalAddr( external_multiaddr.clone(), )); - log::debug!( - "succcessfully mapped UPnP {} for {} protocol", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "successfully mapped UPnP for protocol" ); return Poll::Ready(ToSwarm::ExternalAddrConfirmed( external_multiaddr, )); } MappingState::Active(_) => { - log::debug!( - "succcessfully renewed UPnP mapping {} for {} protocol", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "successfully renewed UPnP mapping for protocol" ); } _ => unreachable!(), @@ -453,10 +450,10 @@ impl NetworkBehaviour for Behaviour { .expect("mapping should exist") { MappingState::Active(_) => { - log::debug!( - "failed to remap UPnP mapped {} for {} protocol: {err}", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "failed to remap UPnP mapped for protocol: {err}" ); let external_multiaddr = 
mapping.external_addr(gateway.external_addr); @@ -468,10 +465,10 @@ impl NetworkBehaviour for Behaviour { )); } MappingState::Pending => { - log::debug!( - "failed to map upnp mapped {} for {} protocol: {err}", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "failed to map UPnP mapped for protocol: {err}" ); } _ => { @@ -480,28 +477,28 @@ impl NetworkBehaviour for Behaviour { } } GatewayEvent::Removed(mapping) => { - log::debug!( - "succcessfully removed UPnP mapping {} for {} protocol", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "successfully removed UPnP mapping for protocol" ); self.mappings .remove(&mapping) .expect("mapping should exist"); } GatewayEvent::RemovalFailure(mapping, err) => { - log::debug!( - "could not remove UPnP mapping {} for {} protocol: {err}", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "could not remove UPnP mapping for protocol: {err}" ); if let Err(err) = gateway .sender .try_send(GatewayRequest::RemoveMapping(mapping.clone())) { - log::debug!( - "could not request port removal for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port removal for multiaddress on the gateway: {}", err ); } diff --git a/protocols/upnp/src/tokio.rs b/protocols/upnp/src/tokio.rs index c6a40182b33c..9c8b2cafef94 100644 --- a/protocols/upnp/src/tokio.rs +++ b/protocols/upnp/src/tokio.rs @@ -100,9 +100,7 @@ pub(crate) fn search_gateway() -> oneshot::Receiver gateway, Err(err) => { - search_result_sender - .send(Err(err.into())) - .expect("receiver shouldn't have been dropped"); + let _ = search_result_sender.send(Err(err.into())); return; } }; @@ -110,20 +108,22 @@ pub(crate) fn search_gateway() -> oneshot::Receiver addr, Err(err) => { - 
search_result_sender - .send(Err(err.into())) - .expect("receiver shouldn't have been dropped"); + let _ = search_result_sender.send(Err(err.into())); return; } }; - search_result_sender + // Check if receiver dropped. + if search_result_sender .send(Ok(Gateway { sender: events_sender, receiver: events_queue, external_addr, })) - .expect("receiver shouldn't have been dropped"); + .is_err() + { + return; + } loop { // The task sender has dropped so we can return. diff --git a/scripts/add-changelog-header.sh b/scripts/add-changelog-header.sh new file mode 100755 index 000000000000..4717940c8d77 --- /dev/null +++ b/scripts/add-changelog-header.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +header=$(head -n 1 "$CRATE_ROOT/CHANGELOG.md") +prefix="## $NEW_VERSION" + +if [[ $header == $prefix* ]]; then + exit +fi + +sed -i "1i ## ${NEW_VERSION}\n\n" "$CRATE_ROOT/CHANGELOG.md" diff --git a/scripts/ensure-version-bump-and-changelog.sh b/scripts/ensure-version-bump-and-changelog.sh new file mode 100755 index 000000000000..a7a0992005ab --- /dev/null +++ b/scripts/ensure-version-bump-and-changelog.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +set -ex; + +MANIFEST_PATH=$(cargo metadata --format-version=1 --no-deps | jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .manifest_path') +DIR_TO_CRATE=$(dirname "$MANIFEST_PATH") + +MERGE_BASE=$(git merge-base "$HEAD_SHA" "$PR_BASE") # Find the merge base. This ensures we only diff what was actually added in the PR. + +SRC_DIFF_TO_BASE=$(git diff "$HEAD_SHA".."$MERGE_BASE" --name-status -- "$DIR_TO_CRATE/src" "$DIR_TO_CRATE/Cargo.toml") +CHANGELOG_DIFF=$(git diff "$HEAD_SHA".."$MERGE_BASE" --name-only -- "$DIR_TO_CRATE/CHANGELOG.md") + +# If the source files of this crate weren't touched in this PR, exit early. +if [ -z "$SRC_DIFF_TO_BASE" ]; then + exit 0; +fi + +# Code was touched, ensure changelog is updated too. 
+if [ -z "$CHANGELOG_DIFF" ]; then + echo "Files in $DIR_TO_CRATE have changed, please write a changelog entry in $DIR_TO_CRATE/CHANGELOG.md" + exit 1 +fi + +# Code was touched, ensure the version used in the manifest hasn't been released yet. +if git tag | grep -q "^$CRATE-v${CRATE_VERSION}$"; then + echo "v$CRATE_VERSION of '$CRATE' has already been released, please bump the version." + exit 1 +fi diff --git a/swarm-derive/CHANGELOG.md b/swarm-derive/CHANGELOG.md index 3a33771b0991..55f5e571664e 100644 --- a/swarm-derive/CHANGELOG.md +++ b/swarm-derive/CHANGELOG.md @@ -1,4 +1,27 @@ -## 0.33.0 +## 0.34.3 + +- Generate code for `libp2p-swarm`'s `FromSwarm::NewExternalAddrOfPeer` enum variant. + See [PR 4371](https://github.com/libp2p/rust-libp2p/pull/4371). + +## 0.34.2 + +- Restore support for generic constraints on behaviours combined with `out_event` generated by `NetworkBehaviour` where no where clause is used. + See [PR 5003](https://github.com/libp2p/rust-libp2p/pull/5003). + +## 0.34.1 + +- Always forward all variants of `FromSwarm`. + See [PR 4825](https://github.com/libp2p/rust-libp2p/pull/4825). + +## 0.34.0 + +- Adapt to interface changes in `libp2p-swarm`. + See [PR 4706](https://github.com/libp2p/rust-libp2p/pull/4076). +- Remove supported for deprecated `#[behaviour(out_event = "...")]`. + To same functionality is available using `#[behaviour(to_swarm = "...")]` + See [PR 4737](https://github.com/libp2p/rust-libp2p/pull/4737). + +## 0.33.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/swarm-derive/Cargo.toml b/swarm-derive/Cargo.toml index 77589a1956b9..32f35a701081 100644 --- a/swarm-derive/Cargo.toml +++ b/swarm-derive/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-swarm-derive" edition = "2021" rust-version = { workspace = true } description = "Procedural macros of libp2p-swarm" -version = "0.33.0" +version = "0.34.3" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -16,8 +16,7 @@ proc-macro = true [dependencies] heck = "0.4" quote = "1.0" -syn = { version = "2.0.38", default-features = false, features = ["clone-impls", "derive", "parsing", "printing", "proc-macro"] } -proc-macro-warning = "0.4.2" +syn = { version = "2.0.49", default-features = false, features = ["clone-impls", "derive", "parsing", "printing", "proc-macro"] } proc-macro2 = "1.0" # Passing arguments to the docsrs builder in order to properly document cfg's. diff --git a/swarm-derive/src/lib.rs b/swarm-derive/src/lib.rs index e54cd058dafe..2e7daf7acc45 100644 --- a/swarm-derive/src/lib.rs +++ b/swarm-derive/src/lib.rs @@ -28,7 +28,6 @@ use heck::ToUpperCamelCase; use proc_macro::TokenStream; use quote::quote; use syn::punctuated::Punctuated; -use syn::spanned::Spanned; use syn::{parse_macro_input, Data, DataStruct, DeriveInput, Meta, Token}; /// Generates a delegating `NetworkBehaviour` implementation for the struct this is used for. See @@ -61,7 +60,6 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> syn::Result syn::Result syn::Result syn::Result quote! { - self.#i.on_swarm_event(#from_swarm::ConnectionEstablished(#connection_established { - peer_id, - connection_id, - endpoint, - failed_addresses, - other_established, - })); + self.#i.on_swarm_event(event); }, None => quote! 
{ - self.#field_n.on_swarm_event(#from_swarm::ConnectionEstablished(#connection_established { - peer_id, - connection_id, - endpoint, - failed_addresses, - other_established, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::AddressChange variant`. - let on_address_change_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::AddressChange(#address_change { - peer_id, - connection_id, - old, - new, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::AddressChange(#address_change { - peer_id, - connection_id, - old, - new, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::ConnectionClosed` variant. - let on_connection_closed_stmts = { - data_struct - .fields - .iter() - .enumerate() - // The outmost handler belongs to the last behaviour. - .rev() - .enumerate() - .map(|(enum_n, (field_n, field))| { - let handler = if field_n == 0 { - // Given that the iterator is reversed, this is the innermost handler only. - quote! { let handler = handlers } - } else { - quote! { - let (handlers, handler) = handlers.into_inner() - } - }; - let inject = match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::ConnectionClosed(#connection_closed { - peer_id, - connection_id, - endpoint, - handler, - remaining_established, - })); - }, - None => quote! { - self.#enum_n.on_swarm_event(#from_swarm::ConnectionClosed(#connection_closed { - peer_id, - connection_id, - endpoint, - handler, - remaining_established, - })); - }, - }; - - quote! { - #handler; - #inject; - } - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::DialFailure` variant. 
- let on_dial_failure_stmts = data_struct - .fields - .iter() - .enumerate() - .map(|(enum_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::DialFailure(#dial_failure { - peer_id, - connection_id, - error, - })); - }, - None => quote! { - self.#enum_n.on_swarm_event(#from_swarm::DialFailure(#dial_failure { - peer_id, - connection_id, - error, - })); - }, - }); - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::ListenFailure` variant. - let on_listen_failure_stmts = data_struct - .fields - .iter() - .enumerate() - .map(|(enum_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::ListenFailure(#listen_failure { - local_addr, - send_back_addr, - connection_id, - error - })); - }, - None => quote! { - self.#enum_n.on_swarm_event(#from_swarm::ListenFailure(#listen_failure { - local_addr, - send_back_addr, - connection_id, - error - })); - }, - }); - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::NewListener` variant. - let on_new_listener_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::NewListener(#new_listener { - listener_id, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::NewListener(#new_listener { - listener_id, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::NewListenAddr` variant. - let on_new_listen_addr_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::NewListenAddr(#new_listen_addr { - listener_id, - addr, - })); - }, - None => quote! 
{ - self.#field_n.on_swarm_event(#from_swarm::NewListenAddr(#new_listen_addr { - listener_id, - addr, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::ExpiredListenAddr` variant. - let on_expired_listen_addr_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::ExpiredListenAddr(#expired_listen_addr { - listener_id, - addr, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::ExpiredListenAddr(#expired_listen_addr { - listener_id, - addr, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::NewExternalAddr` variant. - let on_new_external_addr_candidate_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::NewExternalAddrCandidate(#new_external_addr_candidate { - addr, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::NewExternalAddrCandidate(#new_external_addr_candidate { - addr, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::ExternalAddrExpired` variant. - let on_external_addr_expired_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::ExternalAddrExpired(#external_addr_expired { - addr, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::ExternalAddrExpired(#external_addr_expired { - addr, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::ExternalAddrConfirmed` variant. 
- let on_external_addr_confirmed_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::ExternalAddrConfirmed(#external_addr_confirmed { - addr, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::ExternalAddrConfirmed(#external_addr_confirmed { - addr, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::ListenerError` variant. - let on_listener_error_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::ListenerError(#listener_error { - listener_id, - err, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::ListenerError(#listener_error { - listener_id, - err, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::ListenerClosed` variant. - let on_listener_closed_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::ListenerClosed(#listener_closed { - listener_id, - reason, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::ListenerClosed(#listener_closed { - listener_id, - reason, - })); + self.#field_n.on_swarm_event(event); }, }) }; @@ -688,79 +361,47 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> syn::Result { - return std::task::Poll::Ready(#network_behaviour_action::GenerateEvent(#into_out_event)) - } - } - }; + let map_in_event = quote! 
{ |event| #wrapped_event }; - quote!{ - match #trait_to_impl::poll(&mut self.#field, cx, poll_params) { - #generate_event_match_arm - std::task::Poll::Ready(#network_behaviour_action::Dial { opts }) => { - return std::task::Poll::Ready(#network_behaviour_action::Dial { opts }); - } - std::task::Poll::Ready(#network_behaviour_action::ListenOn { opts }) => { - return std::task::Poll::Ready(#network_behaviour_action::ListenOn { opts }); - } - std::task::Poll::Ready(#network_behaviour_action::RemoveListener { id }) => { - return std::task::Poll::Ready(#network_behaviour_action::RemoveListener { id }); - } - std::task::Poll::Ready(#network_behaviour_action::NotifyHandler { peer_id, handler, event }) => { - return std::task::Poll::Ready(#network_behaviour_action::NotifyHandler { - peer_id, - handler, - event: #wrapped_event, - }); - } - std::task::Poll::Ready(#network_behaviour_action::NewExternalAddrCandidate(addr)) => { - return std::task::Poll::Ready(#network_behaviour_action::NewExternalAddrCandidate(addr)); - } - std::task::Poll::Ready(#network_behaviour_action::ExternalAddrConfirmed(addr)) => { - return std::task::Poll::Ready(#network_behaviour_action::ExternalAddrConfirmed(addr)); - } - std::task::Poll::Ready(#network_behaviour_action::ExternalAddrExpired(addr)) => { - return std::task::Poll::Ready(#network_behaviour_action::ExternalAddrExpired(addr)); - } - std::task::Poll::Ready(#network_behaviour_action::CloseConnection { peer_id, connection }) => { - return std::task::Poll::Ready(#network_behaviour_action::CloseConnection { peer_id, connection }); + quote! { + match #trait_to_impl::poll(&mut self.#field, cx) { + std::task::Poll::Ready(e) => return std::task::Poll::Ready(e.map_out(#map_out_event).map_in(#map_in_event)), + std::task::Poll::Pending => {}, } - std::task::Poll::Pending => {}, } - } - }); + }); let out_event_reference = if out_event_definition.is_some() { quote! 
{ #out_event_name #ty_generics } @@ -770,8 +411,6 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> syn::Result syn::Result std::task::Poll<#network_behaviour_action>> { - use #prelude_path::futures::*; + fn poll(&mut self, cx: &mut std::task::Context) -> std::task::Poll<#network_behaviour_action>> { #(#poll_stmts)* std::task::Poll::Pending } - fn on_swarm_event(&mut self, event: #from_swarm) { - match event { - #from_swarm::ConnectionEstablished( - #connection_established { peer_id, connection_id, endpoint, failed_addresses, other_established }) - => { #(#on_connection_established_stmts)* } - #from_swarm::AddressChange( - #address_change { peer_id, connection_id, old, new }) - => { #(#on_address_change_stmts)* } - #from_swarm::ConnectionClosed( - #connection_closed { peer_id, connection_id, endpoint, handler: handlers, remaining_established }) - => { #(#on_connection_closed_stmts)* } - #from_swarm::DialFailure( - #dial_failure { peer_id, connection_id, error }) - => { #(#on_dial_failure_stmts)* } - #from_swarm::ListenFailure( - #listen_failure { local_addr, send_back_addr, connection_id, error }) - => { #(#on_listen_failure_stmts)* } - #from_swarm::NewListener( - #new_listener { listener_id }) - => { #(#on_new_listener_stmts)* } - #from_swarm::NewListenAddr( - #new_listen_addr { listener_id, addr }) - => { #(#on_new_listen_addr_stmts)* } - #from_swarm::ExpiredListenAddr( - #expired_listen_addr { listener_id, addr }) - => { #(#on_expired_listen_addr_stmts)* } - #from_swarm::NewExternalAddrCandidate( - #new_external_addr_candidate { addr }) - => { #(#on_new_external_addr_candidate_stmts)* } - #from_swarm::ExternalAddrExpired( - #external_addr_expired { addr }) - => { #(#on_external_addr_expired_stmts)* } - #from_swarm::ExternalAddrConfirmed( - #external_addr_confirmed { addr }) - => { #(#on_external_addr_confirmed_stmts)* } - #from_swarm::ListenerError( - #listener_error { listener_id, err }) - => { #(#on_listener_error_stmts)* } - 
#from_swarm::ListenerClosed( - #listener_closed { listener_id, reason }) - => { #(#on_listener_closed_stmts)* } - _ => {} - } + fn on_swarm_event(&mut self, event: #from_swarm) { + #(#on_swarm_event_stmts)* } } }; @@ -895,7 +492,6 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> syn::Result, - deprecation_tokenstream: proc_macro2::TokenStream, } /// Parses the `value` of a key=value pair in the `#[behaviour]` attribute into the requested type. @@ -903,7 +499,6 @@ fn parse_attributes(ast: &DeriveInput) -> syn::Result { let mut attributes = BehaviourAttributes { prelude_path: syn::parse_quote! { ::libp2p::swarm::derive_prelude }, user_specified_out_event: None, - deprecation_tokenstream: proc_macro2::TokenStream::new(), }; for attr in ast @@ -923,16 +518,6 @@ fn parse_attributes(ast: &DeriveInput) -> syn::Result { } if meta.path().is_ident("to_swarm") || meta.path().is_ident("out_event") { - if meta.path().is_ident("out_event") { - let warning = proc_macro_warning::FormattedWarning::new_deprecated( - "out_event_renamed_to_to_swarm", - "The `out_event` attribute has been renamed to `to_swarm`.", - meta.span(), - ); - - attributes.deprecation_tokenstream = quote::quote! { #warning }; - } - let value = meta.require_name_value()?.value.require_str_lit()?; attributes.user_specified_out_event = Some(syn::parse_str(&value)?); diff --git a/swarm-test/CHANGELOG.md b/swarm-test/CHANGELOG.md index e46a94e981ac..95223e602723 100644 --- a/swarm-test/CHANGELOG.md +++ b/swarm-test/CHANGELOG.md @@ -1,4 +1,7 @@ -## 0.2.0 +## 0.3.0 + + +## 0.2.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/swarm-test/Cargo.toml b/swarm-test/Cargo.toml index 8c953b2b52f8..ca66e1157463 100644 --- a/swarm-test/Cargo.toml +++ b/swarm-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-swarm-test" -version = "0.2.0" +version = "0.3.0" edition = "2021" rust-version = { workspace = true } license = "MIT" @@ -12,16 +12,16 @@ categories = ["network-programming", "asynchronous"] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -async-trait = "0.1.73" +async-trait = "0.1.77" libp2p-core = { workspace = true } -libp2p-identity = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } libp2p-plaintext = { workspace = true } -libp2p-swarm = { workspace = true } +libp2p-swarm = { workspace = true, features = ["async-std"] } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } -futures = "0.3.28" -log = "0.4.20" +futures = "0.3.30" rand = "0.8.5" +tracing = "0.1.37" futures-timer = "3.0.2" [lints] diff --git a/swarm-test/src/lib.rs b/swarm-test/src/lib.rs index 5cc85728b3a1..48f5bcbf4ef2 100644 --- a/swarm-test/src/lib.rs +++ b/swarm-test/src/lib.rs @@ -19,19 +19,18 @@ // DEALINGS IN THE SOFTWARE. 
use async_trait::async_trait; -use futures::future::Either; -use futures::StreamExt; +use futures::future::{BoxFuture, Either}; +use futures::{FutureExt, StreamExt}; use libp2p_core::{ multiaddr::Protocol, transport::MemoryTransport, upgrade::Version, Multiaddr, Transport, }; use libp2p_identity::{Keypair, PeerId}; use libp2p_plaintext as plaintext; use libp2p_swarm::dial_opts::PeerCondition; -use libp2p_swarm::{ - dial_opts::DialOpts, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent, THandlerErr, -}; +use libp2p_swarm::{self as swarm, dial_opts::DialOpts, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_yamux as yamux; use std::fmt::Debug; +use std::future::IntoFuture; use std::time::Duration; /// An extension trait for [`Swarm`] that makes it easier to set up a network of [`Swarm`]s for tests. @@ -49,6 +48,10 @@ pub trait SwarmExt { Self: Sized; /// Establishes a connection to the given [`Swarm`], polling both of them until the connection is established. + /// + /// This will take addresses from the `other` [`Swarm`] via [`Swarm::external_addresses`]. + /// By default, this iterator will not yield any addresses. + /// To add listen addresses as external addresses, use [`ListenFuture::with_memory_addr_external`] or [`ListenFuture::with_tcp_addr_external`]. async fn connect(&mut self, other: &mut Swarm) where T: NetworkBehaviour + Send, @@ -65,22 +68,18 @@ pub trait SwarmExt { /// Wait for specified condition to return `Some`. async fn wait(&mut self, predicate: P) -> E where - P: Fn( - SwarmEvent<::ToSwarm, THandlerErr>, - ) -> Option, + P: Fn(SwarmEvent<::ToSwarm>) -> Option, P: Send; /// Listens for incoming connections, polling the [`Swarm`] until the transport is ready to accept connections. /// /// The first address is for the memory transport, the second one for the TCP transport. - async fn listen(&mut self) -> (Multiaddr, Multiaddr); + fn listen(&mut self) -> ListenFuture<&mut Self>; /// Returns the next [`SwarmEvent`] or times out after 10 seconds. 
/// /// If the 10s timeout does not fit your usecase, please fall back to `StreamExt::next`. - async fn next_swarm_event( - &mut self, - ) -> SwarmEvent<::ToSwarm, THandlerErr>; + async fn next_swarm_event(&mut self) -> SwarmEvent<::ToSwarm>; /// Returns the next behaviour event or times out after 10 seconds. /// @@ -137,8 +136,8 @@ where TBehaviour2::ToSwarm: Debug, TBehaviour1: NetworkBehaviour + Send, TBehaviour1::ToSwarm: Debug, - SwarmEvent>: TryIntoOutput, - SwarmEvent>: TryIntoOutput, + SwarmEvent: TryIntoOutput, + SwarmEvent: TryIntoOutput, Out1: Debug, Out2: Debug, { @@ -180,15 +179,15 @@ pub trait TryIntoOutput: Sized { fn try_into_output(self) -> Result; } -impl TryIntoOutput for SwarmEvent { +impl TryIntoOutput for SwarmEvent { fn try_into_output(self) -> Result { self.try_into_behaviour_event() } } -impl TryIntoOutput> - for SwarmEvent +impl TryIntoOutput> + for SwarmEvent { - fn try_into_output(self) -> Result, Self> { + fn try_into_output(self) -> Result, Self> { Ok(self) } } @@ -216,9 +215,13 @@ where .timeout(Duration::from_secs(20)) .boxed(); - SwarmBuilder::without_executor(transport, behaviour_fn(identity), peer_id) - .idle_connection_timeout(Duration::from_secs(5)) // Some tests need connections to be kept alive beyond what the individual behaviour configures. 
- .build() + Swarm::new( + transport, + behaviour_fn(identity), + peer_id, + swarm::Config::with_async_std_executor() + .with_idle_connection_timeout(Duration::from_secs(5)), // Some tests need connections to be kept alive beyond what the individual behaviour configures., + ) } async fn connect(&mut self, other: &mut Swarm) @@ -247,10 +250,16 @@ where listener_done = true; } Either::Left((other, _)) => { - log::debug!("Ignoring event from dialer {:?}", other); + tracing::debug!( + dialer=?other, + "Ignoring event from dialer" + ); } Either::Right((other, _)) => { - log::debug!("Ignoring event from listener {:?}", other); + tracing::debug!( + listener=?other, + "Ignoring event from listener" + ); } } @@ -268,7 +277,10 @@ where endpoint, peer_id, .. } => (endpoint.get_remote_address() == &addr).then_some(peer_id), other => { - log::debug!("Ignoring event from dialer {:?}", other); + tracing::debug!( + dialer=?other, + "Ignoring event from dialer" + ); None } }) @@ -277,7 +289,7 @@ where async fn wait(&mut self, predicate: P) -> E where - P: Fn(SwarmEvent<::ToSwarm, THandlerErr>) -> Option, + P: Fn(SwarmEvent<::ToSwarm>) -> Option, P: Send, { loop { @@ -288,58 +300,15 @@ where } } - async fn listen(&mut self) -> (Multiaddr, Multiaddr) { - let memory_addr_listener_id = self.listen_on(Protocol::Memory(0).into()).unwrap(); - - // block until we are actually listening - let memory_multiaddr = self - .wait(|e| match e { - SwarmEvent::NewListenAddr { - address, - listener_id, - } => (listener_id == memory_addr_listener_id).then_some(address), - other => { - log::debug!( - "Ignoring {:?} while waiting for listening to succeed", - other - ); - None - } - }) - .await; - - // Memory addresses are externally reachable because they all share the same memory-space. 
- self.add_external_address(memory_multiaddr.clone()); - - let tcp_addr_listener_id = self - .listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()) - .unwrap(); - - let tcp_multiaddr = self - .wait(|e| match e { - SwarmEvent::NewListenAddr { - address, - listener_id, - } => (listener_id == tcp_addr_listener_id).then_some(address), - other => { - log::debug!( - "Ignoring {:?} while waiting for listening to succeed", - other - ); - None - } - }) - .await; - - // We purposely don't add the TCP addr as an external one because we want to only use the memory transport for making connections in here. - // The TCP transport is only supported for protocols that manage their own connections. - - (memory_multiaddr, tcp_multiaddr) + fn listen(&mut self) -> ListenFuture<&mut Self> { + ListenFuture { + add_memory_external: false, + add_tcp_external: false, + swarm: self, + } } - async fn next_swarm_event( - &mut self, - ) -> SwarmEvent<::ToSwarm, THandlerErr> { + async fn next_swarm_event(&mut self) -> SwarmEvent<::ToSwarm> { match futures::future::select( futures_timer::Delay::new(Duration::from_secs(10)), self.select_next_some(), @@ -348,7 +317,7 @@ where { Either::Left(((), _)) => panic!("Swarm did not emit an event within 10s"), Either::Right((event, _)) => { - log::trace!("Swarm produced: {:?}", event); + tracing::trace!(?event); event } @@ -365,7 +334,91 @@ where async fn loop_on_next(mut self) { while let Some(event) = self.next().await { - log::trace!("Swarm produced: {:?}", event); + tracing::trace!(?event); + } + } +} + +pub struct ListenFuture { + add_memory_external: bool, + add_tcp_external: bool, + swarm: S, +} + +impl ListenFuture { + /// Adds the memory address we are starting to listen on as an external address using [`Swarm::add_external_address`]. + /// + /// This is typically "safe" for tests because within a process, memory addresses are "globally" reachable. 
+ /// However, some tests depend on which addresses are external and need this to be configurable so it is not a good default. + pub fn with_memory_addr_external(mut self) -> Self { + self.add_memory_external = true; + + self + } + + /// Adds the TCP address we are starting to listen on as an external address using [`Swarm::add_external_address`]. + /// + /// This is typically "safe" for tests because on the same machine, 127.0.0.1 is reachable for other [`Swarm`]s. + /// However, some tests depend on which addresses are external and need this to be configurable so it is not a good default. + pub fn with_tcp_addr_external(mut self) -> Self { + self.add_tcp_external = true; + + self + } +} + +impl<'s, B> IntoFuture for ListenFuture<&'s mut Swarm> +where + B: NetworkBehaviour + Send, + ::ToSwarm: Debug, +{ + type Output = (Multiaddr, Multiaddr); + type IntoFuture = BoxFuture<'s, Self::Output>; + + fn into_future(self) -> Self::IntoFuture { + async move { + let swarm = self.swarm; + + let memory_addr_listener_id = swarm.listen_on(Protocol::Memory(0).into()).unwrap(); + + // block until we are actually listening + let memory_multiaddr = swarm + .wait(|e| match e { + SwarmEvent::NewListenAddr { + address, + listener_id, + } => (listener_id == memory_addr_listener_id).then_some(address), + other => { + panic!("Unexpected event while waiting for `NewListenAddr`: {other:?}") + } + }) + .await; + + let tcp_addr_listener_id = swarm + .listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()) + .unwrap(); + + let tcp_multiaddr = swarm + .wait(|e| match e { + SwarmEvent::NewListenAddr { + address, + listener_id, + } => (listener_id == tcp_addr_listener_id).then_some(address), + other => { + panic!("Unexpected event while waiting for `NewListenAddr`: {other:?}") + } + }) + .await; + + if self.add_memory_external { + swarm.add_external_address(memory_multiaddr.clone()); + } + if self.add_tcp_external { + swarm.add_external_address(tcp_multiaddr.clone()); + } + + (memory_multiaddr, 
tcp_multiaddr) } + .boxed() } } diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index 230134862df6..3d8e3981c96e 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -1,3 +1,70 @@ +## 0.44.2 + +- Allow `NetworkBehaviour`s to share addresses of peers. + This is enabled via the new `ToSwarm::NewExternalAddrOfPeer` event. + The address is broadcast to all behaviours via `FromSwarm::NewExternalAddrOfPeer`. + Protocols that want to collect these addresses can use the new `PeerAddresses` utility. + See [PR 4371](https://github.com/libp2p/rust-libp2p/pull/4371). + +## 0.44.1 + +- Implement `Clone` & `Copy` for `FromSwarm. + This makes it easier to forward these events when wrapping other behaviours. + See [PR 4825](https://github.com/libp2p/rust-libp2p/pull/4825). + +## 0.44.0 + +- Add `#[non_exhaustive]` to `FromSwarm`, `ToSwarm`, `SwarmEvent`, `ConnectionHandlerEvent`, `ConnectionEvent`. + See [PR 4581](https://github.com/libp2p/rust-libp2p/pull/4581). +- Remove `handler` field from `ConnectionClosed`. + If you need to transfer state from a `ConnectionHandler` to its `NetworkBehaviour` when a connection closes, use `ConnectionHandler::poll_close`. + See [PR 4076](https://github.com/libp2p/rust-libp2p/pull/4076). +- Remove deprecated `PollParameters` from `NetworkBehaviour::poll` function. + See [PR 4490](https://github.com/libp2p/rust-libp2p/pull/4490). +- Remove deprecated `ConnectionHandlerEvent::Close` and `ConnectionHandler::Error`. + `ConnectionHandler`s should not close connections directly as the connection might still be in use by other handlers. + See [PR 4755](https://github.com/libp2p/rust-libp2p/pull/4755). +- Add `PeerCondition::DisconnectedAndNotDialing` variant, combining pre-existing conditions. + This is the new default. + A new dialing attempt is iniated _only if_ the peer is both considered disconnected and there is currently no ongoing dialing attempt. + See [PR 4225](https://github.com/libp2p/rust-libp2p/pull/4225). 
+- Remove deprecated `keep_alive_timeout` in `OneShotHandlerConfig`. + See [PR 4677](https://github.com/libp2p/rust-libp2p/pull/4677). +- Don't close entire connection upon `DialUpgradeError`s within `OneShotHandler`. + Instead, the error is reported as `Err(e)` via `ConnectionHandler::ToBehaviour`. + See [PR 4715](https://github.com/libp2p/rust-libp2p/pull/4715). +- Log `PeerId` of `Swarm` even when constructed with new `SwarmBuilder`. + See [PR 4671](https://github.com/libp2p/rust-libp2p/pull/4671). +- Add `SwarmEvent::{NewExternalAddrCandidate,ExternalAddrConfirmed,ExternalAddrExpired}` variants. + See [PR 4721](https://github.com/libp2p/rust-libp2p/pull/4721). +- Remove deprecated symbols. + See [PR 4737](https://github.com/libp2p/rust-libp2p/pull/4737). + +## 0.43.7 + +- Deprecate `ConnectionHandlerEvent::Close`. + See [issue 3591](https://github.com/libp2p/rust-libp2p/issues/3591) for details. + See [PR 4714](https://github.com/libp2p/rust-libp2p/pull/4714). + +## 0.43.6 + +- Deprecate `libp2p::swarm::SwarmBuilder`. + Most users should use `libp2p::SwarmBuilder`. + In some special cases, users may need to use `Swarm::new` and `Config` instead of the new `libp2p::SwarmBuilder`. + See [PR 4120]. +- Make the `Debug` implementation of `StreamProtocol` more concise. + See [PR 4631](https://github.com/libp2p/rust-libp2p/pull/4631). +- Fix overflow in `KeepAlive` computation that could occur panic at `Delay::new` if `SwarmBuilder::idle_connection_timeout` is configured too large. + See [PR 4644](https://github.com/libp2p/rust-libp2p/pull/4644). +- Deprecate `KeepAlive::Until`. + Individual protocols should not keep connections alive for longer than necessary. + Users should use `swarm::Config::idle_connection_timeout` instead. + See [PR 4656](https://github.com/libp2p/rust-libp2p/pull/4656). +- Deprecate `keep_alive_timeout` in `OneShotHandlerConfig`. + See [PR 4680](https://github.com/libp2p/rust-libp2p/pull/4680). 
+ +[PR 4120]: https://github.com/libp2p/rust-libp2p/pull/4120 + ## 0.43.5 - Fix overflow in `KeepAlive` computation that could occur if `SwarmBuilder::idle_connection_timeout` is configured with `u64::MAX`. diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index 038041c536f7..a700c1c1c5ab 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-swarm" edition = "2021" rust-version = { workspace = true } description = "The libp2p swarm" -version = "0.43.5" +version = "0.44.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,24 +13,25 @@ categories = ["network-programming", "asynchronous"] [dependencies] either = "1.9.0" fnv = "1.0" -futures = "0.3.28" +futures = "0.3.30" futures-timer = "3.0.2" +getrandom = { version = "0.2.12", features = ["js"], optional = true } # Explicit dependency to be used in `wasm-bindgen` feature instant = "0.1.12" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } libp2p-swarm-derive = { workspace = true, optional = true } -log = "0.4" +lru = "0.12.1" +multistream-select = { workspace = true } +once_cell = "1.19.0" rand = "0.8" -smallvec = "1.11.1" +smallvec = "1.12.0" +tracing = "0.1.37" void = "1" -wasm-bindgen-futures = { version = "0.4.37", optional = true } -getrandom = { version = "0.2.9", features = ["js"], optional = true } # Explicit dependency to be used in `wasm-bindgen` feature -once_cell = "1.18.0" -multistream-select = { workspace = true } +wasm-bindgen-futures = { version = "0.4.41", optional = true } [target.'cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))'.dependencies] async-std = { version = "1.6.2", optional = true } -tokio = { version = "1.32", features = ["rt"], optional = true } +tokio = { version = "1.36", features = ["rt"], optional = true } [features] macros = ["dep:libp2p-swarm-derive"] @@ -41,8 +42,7 @@ wasm-bindgen = ["dep:wasm-bindgen-futures", "dep:getrandom"] 
[dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } either = "1.9.0" -env_logger = "0.10" -futures = "0.3.28" +futures = "0.3.30" libp2p-identify = { path = "../protocols/identify" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. libp2p-identity = { workspace = true, features = ["ed25519"] } libp2p-kad = { path = "../protocols/kad" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. @@ -53,9 +53,10 @@ libp2p-swarm-test = { path = "../swarm-test" } # Using `pat libp2p-yamux = { path = "../muxers/yamux" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. quickcheck = { workspace = true } void = "1" -once_cell = "1.18.0" -trybuild = "1.0.85" -tokio = { version = "1.29.1", features = ["time", "rt", "macros"] } +once_cell = "1.19.0" +trybuild = "1.0.89" +tokio = { version = "1.36.0", features = ["time", "rt", "macros", "rt-multi-thread"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[test]] name = "swarm_derive" diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs index 3662bf5c48d2..5070871a4c1e 100644 --- a/swarm/src/behaviour.rs +++ b/swarm/src/behaviour.rs @@ -21,10 +21,12 @@ mod either; mod external_addresses; mod listen_addresses; +mod peer_addresses; pub mod toggle; pub use external_addresses::ExternalAddresses; pub use listen_addresses::ListenAddresses; +pub use peer_addresses::PeerAddresses; use crate::connection::ConnectionId; use crate::dial_opts::DialOpts; @@ -191,7 +193,7 @@ pub trait NetworkBehaviour: 'static { ) -> Result, ConnectionDenied>; /// Informs the behaviour about an event from the [`Swarm`](crate::Swarm). - fn on_swarm_event(&mut self, event: FromSwarm); + fn on_swarm_event(&mut self, event: FromSwarm); /// Informs the behaviour about an event generated by the [`ConnectionHandler`] /// dedicated to the peer identified by `peer_id`. for the behaviour. 
@@ -209,34 +211,15 @@ pub trait NetworkBehaviour: 'static { /// /// This API mimics the API of the `Stream` trait. The method may register the current task in /// order to wake it up at a later point in time. - fn poll( - &mut self, - cx: &mut Context<'_>, - params: &mut impl PollParameters, - ) -> Poll>>; -} - -/// Parameters passed to `poll()`, that the `NetworkBehaviour` has access to. -pub trait PollParameters { - /// Iterator returned by [`supported_protocols`](PollParameters::supported_protocols). - type SupportedProtocolsIter: ExactSizeIterator>; - - /// Returns the list of protocol the behaviour supports when a remote negotiates a protocol on - /// an inbound substream. - /// - /// The iterator's elements are the ASCII names as reported on the wire. - /// - /// Note that the list is computed once at initialization and never refreshed. - #[deprecated( - note = "Use `libp2p_swarm::SupportedProtocols` in your `ConnectionHandler` instead." - )] - fn supported_protocols(&self) -> Self::SupportedProtocolsIter; + fn poll(&mut self, cx: &mut Context<'_>) + -> Poll>>; } /// A command issued from a [`NetworkBehaviour`] for the [`Swarm`]. /// /// [`Swarm`]: super::Swarm #[derive(Debug)] +#[non_exhaustive] pub enum ToSwarm { /// Instructs the `Swarm` to return an event when it is being polled. GenerateEvent(TOutEvent), @@ -281,15 +264,20 @@ pub enum ToSwarm { event: TInEvent, }, - /// Reports a new candidate for an external address to the [`Swarm`](crate::Swarm). + /// Reports a **new** candidate for an external address to the [`Swarm`](crate::Swarm). + /// + /// The emphasis on a **new** candidate is important. + /// Protocols MUST take care to only emit a candidate once per "source". + /// For example, the observed address of a TCP connection does not change throughout its lifetime. + /// Thus, only one candidate should be emitted per connection. /// + /// This makes the report frequency of an address a meaningful data-point for consumers of this event. 
/// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::NewExternalAddrCandidate`]. /// /// This address could come from a variety of sources: /// - A protocol such as identify obtained it from a remote. /// - The user provided it based on configuration. /// - We made an educated guess based on one of our listen addresses. - /// - We established a new relay connection. NewExternalAddrCandidate(Multiaddr), /// Indicates to the [`Swarm`](crate::Swarm) that the provided address is confirmed to be externally reachable. @@ -304,22 +292,22 @@ pub enum ToSwarm { /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrExpired`]. ExternalAddrExpired(Multiaddr), - /// Instructs the `Swarm` to initiate a graceful close of one or all connections - /// with the given peer. + /// Instructs the `Swarm` to initiate a graceful close of one or all connections with the given peer. + /// + /// Closing a connection via [`ToSwarm::CloseConnection`] will poll [`ConnectionHandler::poll_close`] to completion. + /// In most cases, stopping to "use" a connection is enough to have it closed. + /// The keep-alive algorithm will close a connection automatically once all [`ConnectionHandler`]s are idle. /// - /// Note: Closing a connection via - /// [`ToSwarm::CloseConnection`] does not inform the - /// corresponding [`ConnectionHandler`]. - /// Closing a connection via a [`ConnectionHandler`] can be done - /// either in a collaborative manner across [`ConnectionHandler`]s - /// with [`ConnectionHandler::connection_keep_alive`] or directly with - /// [`ConnectionHandlerEvent::Close`](crate::ConnectionHandlerEvent::Close). + /// Use this command if you want to close a connection _despite_ it still being in use by one or more handlers. CloseConnection { /// The peer to disconnect. peer_id: PeerId, /// Whether to close a specific or all connections to the given peer. 
connection: CloseConnection, }, + + /// Reports external address of a remote peer to the [`Swarm`](crate::Swarm) and through that to other [`NetworkBehaviour`]s. + NewExternalAddrOfPeer { peer_id: PeerId, address: Multiaddr }, } impl ToSwarm { @@ -352,6 +340,13 @@ impl ToSwarm { ToSwarm::NewExternalAddrCandidate(addr) => ToSwarm::NewExternalAddrCandidate(addr), ToSwarm::ExternalAddrConfirmed(addr) => ToSwarm::ExternalAddrConfirmed(addr), ToSwarm::ExternalAddrExpired(addr) => ToSwarm::ExternalAddrExpired(addr), + ToSwarm::NewExternalAddrOfPeer { + address: addr, + peer_id, + } => ToSwarm::NewExternalAddrOfPeer { + address: addr, + peer_id, + }, } } } @@ -383,6 +378,13 @@ impl ToSwarm { peer_id, connection, }, + ToSwarm::NewExternalAddrOfPeer { + address: addr, + peer_id, + } => ToSwarm::NewExternalAddrOfPeer { + address: addr, + peer_id, + }, } } } @@ -408,8 +410,9 @@ pub enum CloseConnection { /// Enumeration with the list of the possible events /// to pass to [`on_swarm_event`](NetworkBehaviour::on_swarm_event). -#[derive(Debug)] -pub enum FromSwarm<'a, Handler> { +#[derive(Debug, Clone, Copy)] +#[non_exhaustive] +pub enum FromSwarm<'a> { /// Informs the behaviour about a newly established connection to a peer. ConnectionEstablished(ConnectionEstablished<'a>), /// Informs the behaviour about a closed connection to a peer. @@ -417,7 +420,7 @@ pub enum FromSwarm<'a, Handler> { /// This event is always paired with an earlier /// [`FromSwarm::ConnectionEstablished`] with the same peer ID, connection ID /// and endpoint. - ConnectionClosed(ConnectionClosed<'a, Handler>), + ConnectionClosed(ConnectionClosed<'a>), /// Informs the behaviour that the [`ConnectedPoint`] of an existing /// connection has changed. AddressChange(AddressChange<'a>), @@ -448,6 +451,8 @@ pub enum FromSwarm<'a, Handler> { ExternalAddrConfirmed(ExternalAddrConfirmed<'a>), /// Informs the behaviour that an external address of the local node expired, i.e. is no-longer confirmed. 
ExternalAddrExpired(ExternalAddrExpired<'a>), + /// Informs the behaviour that we have discovered a new external address for a remote peer. + NewExternalAddrOfPeer(NewExternalAddrOfPeer<'a>), } /// [`FromSwarm`] variant that informs the behaviour about a newly established connection to a peer. @@ -465,12 +470,11 @@ pub struct ConnectionEstablished<'a> { /// This event is always paired with an earlier /// [`FromSwarm::ConnectionEstablished`] with the same peer ID, connection ID /// and endpoint. -#[derive(Debug)] -pub struct ConnectionClosed<'a, Handler> { +#[derive(Debug, Clone, Copy)] +pub struct ConnectionClosed<'a> { pub peer_id: PeerId, pub connection_id: ConnectionId, pub endpoint: &'a ConnectedPoint, - pub handler: Handler, pub remaining_established: usize, } @@ -561,105 +565,9 @@ pub struct ExternalAddrExpired<'a> { pub addr: &'a Multiaddr, } -impl<'a, Handler> FromSwarm<'a, Handler> { - fn map_handler( - self, - map_handler: impl FnOnce(Handler) -> NewHandler, - ) -> FromSwarm<'a, NewHandler> { - self.maybe_map_handler(|h| Some(map_handler(h))) - .expect("To return Some as all closures return Some.") - } - - fn maybe_map_handler( - self, - map_handler: impl FnOnce(Handler) -> Option, - ) -> Option> { - match self { - FromSwarm::ConnectionClosed(ConnectionClosed { - peer_id, - connection_id, - endpoint, - handler, - remaining_established, - }) => Some(FromSwarm::ConnectionClosed(ConnectionClosed { - peer_id, - connection_id, - endpoint, - handler: map_handler(handler)?, - remaining_established, - })), - FromSwarm::ConnectionEstablished(ConnectionEstablished { - peer_id, - connection_id, - endpoint, - failed_addresses, - other_established, - }) => Some(FromSwarm::ConnectionEstablished(ConnectionEstablished { - peer_id, - connection_id, - endpoint, - failed_addresses, - other_established, - })), - FromSwarm::AddressChange(AddressChange { - peer_id, - connection_id, - old, - new, - }) => Some(FromSwarm::AddressChange(AddressChange { - peer_id, - connection_id, 
- old, - new, - })), - FromSwarm::DialFailure(DialFailure { - peer_id, - error, - connection_id, - }) => Some(FromSwarm::DialFailure(DialFailure { - peer_id, - error, - connection_id, - })), - FromSwarm::ListenFailure(ListenFailure { - local_addr, - send_back_addr, - connection_id, - error, - }) => Some(FromSwarm::ListenFailure(ListenFailure { - local_addr, - send_back_addr, - connection_id, - error, - })), - FromSwarm::NewListener(NewListener { listener_id }) => { - Some(FromSwarm::NewListener(NewListener { listener_id })) - } - FromSwarm::NewListenAddr(NewListenAddr { listener_id, addr }) => { - Some(FromSwarm::NewListenAddr(NewListenAddr { - listener_id, - addr, - })) - } - FromSwarm::ExpiredListenAddr(ExpiredListenAddr { listener_id, addr }) => { - Some(FromSwarm::ExpiredListenAddr(ExpiredListenAddr { - listener_id, - addr, - })) - } - FromSwarm::ListenerError(ListenerError { listener_id, err }) => { - Some(FromSwarm::ListenerError(ListenerError { listener_id, err })) - } - FromSwarm::ListenerClosed(ListenerClosed { - listener_id, - reason, - }) => Some(FromSwarm::ListenerClosed(ListenerClosed { - listener_id, - reason, - })), - FromSwarm::NewExternalAddrCandidate(e) => Some(FromSwarm::NewExternalAddrCandidate(e)), - FromSwarm::ExternalAddrExpired(e) => Some(FromSwarm::ExternalAddrExpired(e)), - FromSwarm::ExternalAddrConfirmed(e) => Some(FromSwarm::ExternalAddrConfirmed(e)), - } - } +/// [`FromSwarm`] variant that informs the behaviour that a new external address for a remote peer was detected. +#[derive(Clone, Copy, Debug)] +pub struct NewExternalAddrOfPeer<'a> { + pub peer_id: PeerId, + pub addr: &'a Multiaddr, } diff --git a/swarm/src/behaviour/either.rs b/swarm/src/behaviour/either.rs index c6e0870d11c7..25da83fa11f6 100644 --- a/swarm/src/behaviour/either.rs +++ b/swarm/src/behaviour/either.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::behaviour::{self, NetworkBehaviour, PollParameters, ToSwarm}; +use crate::behaviour::{self, NetworkBehaviour, ToSwarm}; use crate::connection::ConnectionId; use crate::{ConnectionDenied, THandler, THandlerInEvent, THandlerOutEvent}; use either::Either; @@ -122,16 +122,10 @@ where Ok(handler) } - fn on_swarm_event(&mut self, event: behaviour::FromSwarm) { + fn on_swarm_event(&mut self, event: behaviour::FromSwarm) { match self { - Either::Left(b) => b.on_swarm_event(event.map_handler(|h| match h { - Either::Left(h) => h, - Either::Right(_) => unreachable!(), - })), - Either::Right(b) => b.on_swarm_event(event.map_handler(|h| match h { - Either::Right(h) => h, - Either::Left(_) => unreachable!(), - })), + Either::Left(b) => b.on_swarm_event(event), + Either::Right(b) => b.on_swarm_event(event), } } @@ -155,13 +149,12 @@ where fn poll( &mut self, cx: &mut Context<'_>, - params: &mut impl PollParameters, ) -> Poll>> { let event = match self { - Either::Left(behaviour) => futures::ready!(behaviour.poll(cx, params)) + Either::Left(behaviour) => futures::ready!(behaviour.poll(cx)) .map_out(Either::Left) .map_in(Either::Left), - Either::Right(behaviour) => futures::ready!(behaviour.poll(cx, params)) + Either::Right(behaviour) => futures::ready!(behaviour.poll(cx)) .map_out(Either::Right) .map_in(Either::Right), }; diff --git a/swarm/src/behaviour/external_addresses.rs b/swarm/src/behaviour/external_addresses.rs index 307f0f938dde..579f46fe4866 100644 --- a/swarm/src/behaviour/external_addresses.rs +++ b/swarm/src/behaviour/external_addresses.rs @@ -25,7 +25,7 @@ impl ExternalAddresses { /// Feed a [`FromSwarm`] event to this struct. /// /// Returns whether the event changed our set of external addresses. 
- pub fn on_swarm_event(&mut self, event: &FromSwarm) -> bool { + pub fn on_swarm_event(&mut self, event: &FromSwarm) -> bool { match event { FromSwarm::ExternalAddrConfirmed(ExternalAddrConfirmed { addr }) => { if let Some(pos) = self @@ -37,7 +37,7 @@ impl ExternalAddresses { self.addresses.remove(pos); self.push_front(addr); - log::debug!("Refreshed external address {addr}"); + tracing::debug!(address=%addr, "Refreshed external address"); return false; // No changes to our external addresses. } @@ -47,7 +47,11 @@ impl ExternalAddresses { if self.addresses.len() > MAX_LOCAL_EXTERNAL_ADDRS { let expired = self.addresses.pop().expect("list to be not empty"); - log::debug!("Removing previously confirmed external address {expired} because we reached the limit of {MAX_LOCAL_EXTERNAL_ADDRS} addresses"); + tracing::debug!( + external_address=%expired, + address_limit=%MAX_LOCAL_EXTERNAL_ADDRS, + "Removing previously confirmed external address because we reached the address limit" + ); } return true; @@ -81,7 +85,6 @@ impl ExternalAddresses { #[cfg(test)] mod tests { use super::*; - use crate::dummy; use libp2p_core::multiaddr::Protocol; use once_cell::sync::Lazy; use rand::Rng; @@ -129,13 +132,9 @@ mod tests { while addresses.as_slice().len() < MAX_LOCAL_EXTERNAL_ADDRS { let random_address = Multiaddr::empty().with(Protocol::Memory(rand::thread_rng().gen_range(0..1000))); - addresses.on_swarm_event( - &FromSwarm::<'_, dummy::ConnectionHandler>::ExternalAddrConfirmed( - ExternalAddrConfirmed { - addr: &random_address, - }, - ), - ); + addresses.on_swarm_event(&FromSwarm::ExternalAddrConfirmed(ExternalAddrConfirmed { + addr: &random_address, + })); } addresses.on_swarm_event(&new_external_addr2()); @@ -158,19 +157,19 @@ mod tests { ); } - fn new_external_addr1() -> FromSwarm<'static, dummy::ConnectionHandler> { + fn new_external_addr1() -> FromSwarm<'static> { FromSwarm::ExternalAddrConfirmed(ExternalAddrConfirmed { addr: &MEMORY_ADDR_1000, }) } - fn new_external_addr2() 
-> FromSwarm<'static, dummy::ConnectionHandler> { + fn new_external_addr2() -> FromSwarm<'static> { FromSwarm::ExternalAddrConfirmed(ExternalAddrConfirmed { addr: &MEMORY_ADDR_2000, }) } - fn expired_external_addr1() -> FromSwarm<'static, dummy::ConnectionHandler> { + fn expired_external_addr1() -> FromSwarm<'static> { FromSwarm::ExternalAddrExpired(ExternalAddrExpired { addr: &MEMORY_ADDR_1000, }) diff --git a/swarm/src/behaviour/listen_addresses.rs b/swarm/src/behaviour/listen_addresses.rs index 8882db64a504..6076f5e7923a 100644 --- a/swarm/src/behaviour/listen_addresses.rs +++ b/swarm/src/behaviour/listen_addresses.rs @@ -17,7 +17,7 @@ impl ListenAddresses { /// Feed a [`FromSwarm`] event to this struct. /// /// Returns whether the event changed our set of listen addresses. - pub fn on_swarm_event(&mut self, event: &FromSwarm) -> bool { + pub fn on_swarm_event(&mut self, event: &FromSwarm) -> bool { match event { FromSwarm::NewListenAddr(NewListenAddr { addr, .. }) => { self.addresses.insert((*addr).clone()) @@ -33,7 +33,6 @@ impl ListenAddresses { #[cfg(test)] mod tests { use super::*; - use crate::dummy; use libp2p_core::{multiaddr::Protocol, transport::ListenerId}; use once_cell::sync::Lazy; @@ -60,14 +59,14 @@ mod tests { assert!(!changed) } - fn new_listen_addr() -> FromSwarm<'static, dummy::ConnectionHandler> { + fn new_listen_addr() -> FromSwarm<'static> { FromSwarm::NewListenAddr(NewListenAddr { listener_id: ListenerId::next(), addr: &MEMORY_ADDR, }) } - fn expired_listen_addr() -> FromSwarm<'static, dummy::ConnectionHandler> { + fn expired_listen_addr() -> FromSwarm<'static> { FromSwarm::ExpiredListenAddr(ExpiredListenAddr { listener_id: ListenerId::next(), addr: &MEMORY_ADDR, diff --git a/swarm/src/behaviour/peer_addresses.rs b/swarm/src/behaviour/peer_addresses.rs new file mode 100644 index 000000000000..a011867dcdf1 --- /dev/null +++ b/swarm/src/behaviour/peer_addresses.rs @@ -0,0 +1,338 @@ +use crate::behaviour::FromSwarm; +use crate::{DialError, 
DialFailure, NewExternalAddrOfPeer}; + +use libp2p_core::Multiaddr; +use libp2p_identity::PeerId; + +use lru::LruCache; + +use std::num::NonZeroUsize; + +/// Struct for tracking peers' external addresses of the [`Swarm`](crate::Swarm). +#[derive(Debug)] +pub struct PeerAddresses(LruCache>); + +impl PeerAddresses { + /// Creates a [`PeerAddresses`] cache with capacity for the given number of peers. + /// + /// For each peer, we will at most store 10 addresses. + pub fn new(number_of_peers: NonZeroUsize) -> Self { + Self(LruCache::new(number_of_peers)) + } + + /// Feed a [`FromSwarm`] event to this struct. + /// + /// Returns whether the event changed peer's known external addresses. + pub fn on_swarm_event(&mut self, event: &FromSwarm) -> bool { + match event { + FromSwarm::NewExternalAddrOfPeer(NewExternalAddrOfPeer { peer_id, addr }) => { + self.add(*peer_id, (*addr).clone()) + } + FromSwarm::DialFailure(DialFailure { + peer_id: Some(peer_id), + error: DialError::Transport(errors), + .. + }) => { + for (addr, _error) in errors { + self.remove(peer_id, addr); + } + true + } + _ => false, + } + } + + /// Adds address to cache. + /// Appends address to the existing set if peer addresses already exist. + /// Creates a new cache entry for peer_id if no addresses are present. + /// Returns true if the newly added address was not previously in the cache. + /// + pub fn add(&mut self, peer: PeerId, address: Multiaddr) -> bool { + match prepare_addr(&peer, &address) { + Ok(address) => { + if let Some(cached) = self.0.get_mut(&peer) { + cached.put(address, ()).is_none() + } else { + let mut set = LruCache::new(NonZeroUsize::new(10).expect("10 > 0")); + set.put(address, ()); + self.0.put(peer, set); + + true + } + } + Err(_) => false, + } + } + + /// Returns peer's external addresses. 
+ pub fn get(&mut self, peer: &PeerId) -> impl Iterator + '_ { + self.0 + .get(peer) + .into_iter() + .flat_map(|c| c.iter().map(|(m, ())| m)) + .cloned() + } + + /// Removes address from peer addresses cache. + /// Returns true if the address was removed. + pub fn remove(&mut self, peer: &PeerId, address: &Multiaddr) -> bool { + match self.0.get_mut(peer) { + Some(addrs) => match prepare_addr(peer, address) { + Ok(address) => addrs.pop(&address).is_some(), + Err(_) => false, + }, + None => false, + } + } +} + +fn prepare_addr(peer: &PeerId, addr: &Multiaddr) -> Result { + addr.clone().with_p2p(*peer) +} + +impl Default for PeerAddresses { + fn default() -> Self { + Self(LruCache::new(NonZeroUsize::new(100).unwrap())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::io; + + use crate::{ConnectionId, DialError}; + use libp2p_core::{ + multiaddr::Protocol, + transport::{memory::MemoryTransportError, TransportError}, + }; + + use once_cell::sync::Lazy; + + #[test] + fn new_peer_addr_returns_correct_changed_value() { + let mut cache = PeerAddresses::default(); + let peer_id = PeerId::random(); + + let event = new_external_addr_of_peer1(peer_id); + + let changed = cache.on_swarm_event(&event); + assert!(changed); + + let changed = cache.on_swarm_event(&event); + assert!(!changed); + } + + #[test] + fn new_peer_addr_saves_peer_addrs() { + let mut cache = PeerAddresses::default(); + let peer_id = PeerId::random(); + let event = new_external_addr_of_peer1(peer_id); + + let changed = cache.on_swarm_event(&event); + assert!(changed); + + let addr1 = MEMORY_ADDR_1000.clone().with_p2p(peer_id).unwrap(); + let expected = cache.get(&peer_id).collect::>(); + assert_eq!(expected, vec![addr1]); + + let event = new_external_addr_of_peer2(peer_id); + let changed = cache.on_swarm_event(&event); + + let addr1 = MEMORY_ADDR_1000.clone().with_p2p(peer_id).unwrap(); + let addr2 = MEMORY_ADDR_2000.clone().with_p2p(peer_id).unwrap(); + + let expected_addrs = 
cache.get(&peer_id).collect::>(); + assert!(expected_addrs.contains(&addr1)); + assert!(expected_addrs.contains(&addr2)); + + let expected = cache.get(&peer_id).collect::>().len(); + assert_eq!(expected, 2); + + assert!(changed); + } + + #[test] + fn existing_addr_is_not_added_to_cache() { + let mut cache = PeerAddresses::default(); + let peer_id = PeerId::random(); + + let event = new_external_addr_of_peer1(peer_id); + + let addr1 = MEMORY_ADDR_1000.clone().with_p2p(peer_id).unwrap(); + let changed = cache.on_swarm_event(&event); + let expected = cache.get(&peer_id).collect::>(); + assert!(changed); + assert_eq!(expected, vec![addr1]); + + let addr1 = MEMORY_ADDR_1000.clone().with_p2p(peer_id).unwrap(); + let changed = cache.on_swarm_event(&event); + let expected = cache.get(&peer_id).collect::>(); + assert!(!changed); + assert_eq!(expected, [addr1]); + } + + #[test] + fn addresses_of_peer_are_removed_when_received_dial_failure() { + let mut cache = PeerAddresses::default(); + let peer_id = PeerId::random(); + + let addr: Multiaddr = "/ip4/127.0.0.1/tcp/8080".parse().unwrap(); + let addr2: Multiaddr = "/ip4/127.0.0.1/tcp/8081".parse().unwrap(); + let addr3: Multiaddr = "/ip4/127.0.0.1/tcp/8082".parse().unwrap(); + + cache.add(peer_id, addr.clone()); + cache.add(peer_id, addr2.clone()); + cache.add(peer_id, addr3.clone()); + + let error = DialError::Transport(prepare_errors(vec![addr, addr3])); + + let event = FromSwarm::DialFailure(DialFailure { + peer_id: Some(peer_id), + error: &error, + connection_id: ConnectionId::new_unchecked(8), + }); + + let changed = cache.on_swarm_event(&event); + + assert!(changed); + + let cached = cache.get(&peer_id).collect::>(); + let expected = prepare_expected_addrs(peer_id, [addr2].into_iter()); + + assert_eq!(cached, expected); + } + + #[test] + fn remove_removes_address_if_present() { + let mut cache = PeerAddresses::default(); + let peer_id = PeerId::random(); + let addr: Multiaddr = "/ip4/127.0.0.1/tcp/8080".parse().unwrap(); 
+ + cache.add(peer_id, addr.clone()); + + assert!(cache.remove(&peer_id, &addr)); + } + + #[test] + fn remove_returns_false_if_address_not_present() { + let mut cache = PeerAddresses::default(); + let peer_id = PeerId::random(); + let addr: Multiaddr = "/ip4/127.0.0.1/tcp/8080".parse().unwrap(); + + assert!(!cache.remove(&peer_id, &addr)); + } + + #[test] + fn remove_returns_false_if_peer_not_present() { + let mut cache = PeerAddresses::default(); + let peer_id = PeerId::random(); + let addr: Multiaddr = "/ip4/127.0.0.1/tcp/8080".parse().unwrap(); + + assert!(!cache.remove(&peer_id, &addr)); + } + + #[test] + fn remove_removes_address_provided_in_param() { + let mut cache = PeerAddresses::default(); + let peer_id = PeerId::random(); + let addr: Multiaddr = "/ip4/127.0.0.1/tcp/8080".parse().unwrap(); + let addr2: Multiaddr = "/ip4/127.0.0.1/tcp/8081".parse().unwrap(); + let addr3: Multiaddr = "/ip4/127.0.0.1/tcp/8082".parse().unwrap(); + + cache.add(peer_id, addr.clone()); + cache.add(peer_id, addr2.clone()); + cache.add(peer_id, addr3.clone()); + + assert!(cache.remove(&peer_id, &addr2)); + + let mut cached = cache.get(&peer_id).collect::>(); + cached.sort(); + + let expected = prepare_expected_addrs(peer_id, [addr, addr3].into_iter()); + + assert_eq!(cached, expected); + } + + #[test] + fn add_adds_new_address_to_cache() { + let mut cache = PeerAddresses::default(); + let peer_id = PeerId::random(); + let addr: Multiaddr = "/ip4/127.0.0.1/tcp/8080".parse().unwrap(); + + assert!(cache.add(peer_id, addr.clone())); + + let mut cached = cache.get(&peer_id).collect::>(); + cached.sort(); + let expected = prepare_expected_addrs(peer_id, [addr].into_iter()); + + assert_eq!(cached, expected); + } + + #[test] + fn add_adds_address_to_cache_to_existing_key() { + let mut cache = PeerAddresses::default(); + let peer_id = PeerId::random(); + let addr: Multiaddr = "/ip4/127.0.0.1/tcp/8080".parse().unwrap(); + let addr2: Multiaddr = "/ip4/127.0.0.1/tcp/8081".parse().unwrap(); + 
let addr3: Multiaddr = "/ip4/127.0.0.1/tcp/8082".parse().unwrap(); + + assert!(cache.add(peer_id, addr.clone())); + + cache.add(peer_id, addr2.clone()); + cache.add(peer_id, addr3.clone()); + + let expected = prepare_expected_addrs(peer_id, [addr, addr2, addr3].into_iter()); + + let mut cached = cache.get(&peer_id).collect::>(); + cached.sort(); + + assert_eq!(cached, expected); + } + + fn prepare_expected_addrs( + peer_id: PeerId, + addrs: impl Iterator, + ) -> Vec { + let mut addrs = addrs + .filter_map(|a| a.with_p2p(peer_id).ok()) + .collect::>(); + addrs.sort(); + addrs + } + + fn new_external_addr_of_peer1(peer_id: PeerId) -> FromSwarm<'static> { + FromSwarm::NewExternalAddrOfPeer(NewExternalAddrOfPeer { + peer_id, + addr: &MEMORY_ADDR_1000, + }) + } + + fn new_external_addr_of_peer2(peer_id: PeerId) -> FromSwarm<'static> { + FromSwarm::NewExternalAddrOfPeer(NewExternalAddrOfPeer { + peer_id, + addr: &MEMORY_ADDR_2000, + }) + } + + fn prepare_errors(addrs: Vec) -> Vec<(Multiaddr, TransportError)> { + let errors: Vec<(Multiaddr, TransportError)> = addrs + .iter() + .map(|addr| { + ( + addr.clone(), + TransportError::Other(io::Error::new( + io::ErrorKind::Other, + MemoryTransportError::Unreachable, + )), + ) + }) + .collect(); + errors + } + + static MEMORY_ADDR_1000: Lazy = + Lazy::new(|| Multiaddr::empty().with(Protocol::Memory(1000))); + static MEMORY_ADDR_2000: Lazy = + Lazy::new(|| Multiaddr::empty().with(Protocol::Memory(2000))); +} diff --git a/swarm/src/behaviour/toggle.rs b/swarm/src/behaviour/toggle.rs index 92bd89635020..e81c53437011 100644 --- a/swarm/src/behaviour/toggle.rs +++ b/swarm/src/behaviour/toggle.rs @@ -22,13 +22,11 @@ use crate::behaviour::FromSwarm; use crate::connection::ConnectionId; use crate::handler::{ AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, KeepAlive, ListenUpgradeError, - SubstreamProtocol, + FullyNegotiatedInbound, 
FullyNegotiatedOutbound, ListenUpgradeError, SubstreamProtocol, }; use crate::upgrade::SendWrapper; use crate::{ - ConnectionDenied, NetworkBehaviour, PollParameters, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, + ConnectionDenied, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use either::Either; use futures::future; @@ -159,11 +157,9 @@ where }) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { if let Some(behaviour) = &mut self.inner { - if let Some(event) = event.maybe_map_handler(|h| h.inner) { - behaviour.on_swarm_event(event); - } + behaviour.on_swarm_event(event); } } @@ -181,10 +177,9 @@ where fn poll( &mut self, cx: &mut Context<'_>, - params: &mut impl PollParameters, ) -> Poll>> { if let Some(inner) = self.inner.as_mut() { - inner.poll(cx, params) + inner.poll(cx) } else { Poll::Pending } @@ -269,7 +264,6 @@ where { type FromBehaviour = TInner::FromBehaviour; type ToBehaviour = TInner::ToBehaviour; - type Error = TInner::Error; type InboundProtocol = Either, SendWrapper>; type OutboundProtocol = TInner::OutboundProtocol; type OutboundOpenInfo = TInner::OutboundOpenInfo; @@ -293,23 +287,18 @@ where .on_behaviour_event(event) } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { self.inner .as_ref() .map(|h| h.connection_keep_alive()) - .unwrap_or(KeepAlive::No) + .unwrap_or(false) } fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { if let Some(inner) = self.inner.as_mut() { inner.poll(cx) @@ -374,4 +363,12 @@ where } } } + + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + let Some(inner) = self.inner.as_mut() else { + return Poll::Ready(None); + }; + + inner.poll_close(cx) + } } diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index 
97251039abb2..15c49bb7bd55 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -34,15 +34,15 @@ use crate::handler::{ FullyNegotiatedOutbound, ListenUpgradeError, ProtocolSupport, ProtocolsAdded, ProtocolsChange, UpgradeInfoSend, }; +use crate::stream::ActiveStreamCounter; use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend}; use crate::{ - ConnectionHandlerEvent, KeepAlive, Stream, StreamProtocol, StreamUpgradeError, - SubstreamProtocol, + ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; use futures::future::BoxFuture; use futures::stream::FuturesUnordered; -use futures::FutureExt; use futures::StreamExt; +use futures::{stream, FutureExt}; use futures_timer::Delay; use instant::Instant; use libp2p_core::connection::ConnectedPoint; @@ -52,7 +52,6 @@ use libp2p_core::upgrade; use libp2p_core::upgrade::{NegotiationError, ProtocolError}; use libp2p_core::Endpoint; use libp2p_identity::PeerId; -use std::cmp::max; use std::collections::HashSet; use std::fmt::{Display, Formatter}; use std::future::Future; @@ -157,6 +156,7 @@ where local_supported_protocols: HashSet, remote_supported_protocols: HashSet, idle_timeout: Duration, + stream_counter: ActiveStreamCounter, } impl fmt::Debug for Connection @@ -192,7 +192,6 @@ where ProtocolsChange::Added(ProtocolsAdded::from_set(&initial_protocols)), )); } - Connection { muxing: muxer, handler, @@ -205,6 +204,7 @@ where local_supported_protocols: initial_protocols, remote_supported_protocols: Default::default(), idle_timeout, + stream_counter: ActiveStreamCounter::default(), } } @@ -213,18 +213,32 @@ where self.handler.on_behaviour_event(event); } - /// Begins an orderly shutdown of the connection, returning the connection - /// handler and a `Future` that resolves when connection shutdown is complete. 
- pub(crate) fn close(self) -> (THandler, impl Future>) { - (self.handler, self.muxing.close()) + /// Begins an orderly shutdown of the connection, returning a stream of final events and a `Future` that resolves when connection shutdown is complete. + pub(crate) fn close( + self, + ) -> ( + impl futures::Stream, + impl Future>, + ) { + let Connection { + mut handler, + muxing, + .. + } = self; + + ( + stream::poll_fn(move |cx| handler.poll_close(cx)), + muxing.close(), + ) } /// Polls the handler and the substream, forwarding events from the former to the latter and /// vice versa. + #[tracing::instrument(level = "debug", name = "Connection::poll", skip(self, cx))] pub(crate) fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll, ConnectionError>> { + ) -> Poll, ConnectionError>> { let Self { requested_substreams, muxing, @@ -237,6 +251,8 @@ where local_supported_protocols: supported_protocols, remote_supported_protocols, idle_timeout, + stream_counter, + .. } = self.get_mut(); loop { @@ -267,9 +283,6 @@ where Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)) => { return Poll::Ready(Ok(Event::Handler(event))); } - Poll::Ready(ConnectionHandlerEvent::Close(err)) => { - return Poll::Ready(Err(ConnectionError::Handler(err))); - } Poll::Ready(ConnectionHandlerEvent::ReportRemoteProtocols( ProtocolSupport::Added(protocols), )) => { @@ -331,68 +344,32 @@ where continue; } Poll::Ready(Some((_, Err(StreamUpgradeError::Io(e))))) => { - log::debug!("failed to upgrade inbound stream: {e}"); + tracing::debug!("failed to upgrade inbound stream: {e}"); continue; } Poll::Ready(Some((_, Err(StreamUpgradeError::NegotiationFailed)))) => { - log::debug!("no protocol could be agreed upon for inbound stream"); + tracing::debug!("no protocol could be agreed upon for inbound stream"); continue; } Poll::Ready(Some((_, Err(StreamUpgradeError::Timeout)))) => { - log::debug!("inbound stream upgrade timed out"); + tracing::debug!("inbound stream upgrade timed out"); 
continue; } } - // Ask the handler whether it wants the connection (and the handler itself) - // to be kept alive, which determines the planned shutdown, if any. - let keep_alive = handler.connection_keep_alive(); - match (&mut *shutdown, keep_alive) { - (Shutdown::Later(timer, deadline), KeepAlive::Until(t)) => { - if *deadline != t { - *deadline = t; - if let Some(new_duration) = deadline.checked_duration_since(Instant::now()) - { - let effective_keep_alive = max(new_duration, *idle_timeout); - - timer.reset(effective_keep_alive) - } - } - } - (_, KeepAlive::Until(earliest_shutdown)) => { - let now = Instant::now(); - - if let Some(requested) = earliest_shutdown.checked_duration_since(now) { - let effective_keep_alive = max(requested, *idle_timeout); - - let safe_keep_alive = checked_add_fraction(now, effective_keep_alive); - - // Important: We store the _original_ `Instant` given by the `ConnectionHandler` in the `Later` instance to ensure we can compare it in the above branch. - // This is quite subtle but will hopefully become simpler soon once `KeepAlive::Until` is fully deprecated. See / - *shutdown = Shutdown::Later(Delay::new(safe_keep_alive), earliest_shutdown) - } - } - (_, KeepAlive::No) if idle_timeout == &Duration::ZERO => { - *shutdown = Shutdown::Asap; - } - (Shutdown::Later(_, _), KeepAlive::No) => { - // Do nothing, i.e. let the shutdown timer continue to tick. - } - (_, KeepAlive::No) => { - let now = Instant::now(); - let safe_keep_alive = checked_add_fraction(now, *idle_timeout); - - *shutdown = Shutdown::Later(Delay::new(safe_keep_alive), now + safe_keep_alive); - } - (_, KeepAlive::Yes) => *shutdown = Shutdown::None, - }; - // Check if the connection (and handler) should be shut down. - // As long as we're still negotiating substreams, shutdown is always postponed. + // As long as we're still negotiating substreams or have any active streams shutdown is always postponed. 
if negotiating_in.is_empty() && negotiating_out.is_empty() && requested_substreams.is_empty() + && stream_counter.has_no_active_streams() { + if let Some(new_timeout) = + compute_new_shutdown(handler.connection_keep_alive(), shutdown, *idle_timeout) + { + *shutdown = new_timeout; + } + match shutdown { Shutdown::None => {} Shutdown::Asap => return Poll::Ready(Err(ConnectionError::KeepAliveTimeout)), @@ -403,6 +380,8 @@ where Poll::Pending => {} }, } + } else { + *shutdown = Shutdown::None; } match muxing.poll_unpin(cx)? { @@ -427,6 +406,7 @@ where timeout, upgrade, *substream_upgrade_protocol_override, + stream_counter.clone(), )); continue; // Go back to the top, handler can potentially make progress again. @@ -440,7 +420,11 @@ where Poll::Ready(substream) => { let protocol = handler.listen_protocol(); - negotiating_in.push(StreamUpgrade::new_inbound(substream, protocol)); + negotiating_in.push(StreamUpgrade::new_inbound( + substream, + protocol, + stream_counter.clone(), + )); continue; // Go back to the top, handler can potentially make progress again. } @@ -465,9 +449,7 @@ where } #[cfg(test)] - fn poll_noop_waker( - &mut self, - ) -> Poll, ConnectionError>> { + fn poll_noop_waker(&mut self) -> Poll, ConnectionError>> { Pin::new(self).poll(&mut Context::from_waker(futures::task::noop_waker_ref())) } } @@ -481,13 +463,34 @@ fn gather_supported_protocols(handler: &impl ConnectionHandler) -> HashSet Option { + match (current_shutdown, handler_keep_alive) { + (_, false) if idle_timeout == Duration::ZERO => Some(Shutdown::Asap), + (Shutdown::Later(_, _), false) => None, // Do nothing, i.e. let the shutdown timer continue to tick. 
+ (_, false) => { + let now = Instant::now(); + let safe_keep_alive = checked_add_fraction(now, idle_timeout); + + Some(Shutdown::Later( + Delay::new(safe_keep_alive), + now + safe_keep_alive, + )) + } + (_, true) => Some(Shutdown::None), + } +} + /// Repeatedly halves and adds the [`Duration`] to the [`Instant`] until [`Instant::checked_add`] succeeds. /// /// [`Instant`] depends on the underlying platform and has a limit of which points in time it can represent. /// The [`Duration`] computed by the this function may not be the longest possible that we can add to `now` but it will work. fn checked_add_fraction(start: Instant, mut duration: Duration) -> Duration { while start.checked_add(duration).is_none() { - log::debug!("{start:?} + {duration:?} cannot be presented, halving duration"); + tracing::debug!(start=?start, duration=?duration, "start + duration cannot be presented, halving duration"); duration /= 2; } @@ -527,13 +530,14 @@ impl StreamUpgrade { timeout: Delay, upgrade: Upgrade, version_override: Option, + counter: ActiveStreamCounter, ) -> Self where Upgrade: OutboundUpgradeSend, { let effective_version = match version_override { Some(version_override) if version_override != upgrade::Version::default() => { - log::debug!( + tracing::debug!( "Substream upgrade protocol override: {:?} -> {:?}", upgrade::Version::default(), version_override @@ -558,7 +562,7 @@ impl StreamUpgrade { .map_err(to_stream_upgrade_error)?; let output = upgrade - .upgrade_outbound(Stream::new(stream), info) + .upgrade_outbound(Stream::new(stream, counter), info) .await .map_err(StreamUpgradeError::Apply)?; @@ -572,6 +576,7 @@ impl StreamUpgrade { fn new_inbound( substream: SubstreamBox, protocol: SubstreamProtocol, + counter: ActiveStreamCounter, ) -> Self where Upgrade: InboundUpgradeSend, @@ -590,7 +595,7 @@ impl StreamUpgrade { .map_err(to_stream_upgrade_error)?; let output = upgrade - .upgrade_inbound(Stream::new(stream), info) + .upgrade_inbound(Stream::new(stream, counter), 
info) .await .map_err(StreamUpgradeError::Apply)?; @@ -744,10 +749,15 @@ mod tests { use quickcheck::*; use std::sync::{Arc, Weak}; use std::time::Instant; + use tracing_subscriber::EnvFilter; use void::Void; #[test] fn max_negotiating_inbound_streams() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + fn prop(max_negotiating_inbound_streams: u8) { let max_negotiating_inbound_streams: usize = max_negotiating_inbound_streams.into(); @@ -756,7 +766,7 @@ mod tests { StreamMuxerBox::new(DummyStreamMuxer { counter: alive_substream_counter.clone(), }), - MockConnectionHandler::new(Duration::ZERO), + MockConnectionHandler::new(Duration::from_secs(10)), None, max_negotiating_inbound_streams, Duration::ZERO, @@ -911,71 +921,11 @@ mod tests { )); } - #[tokio::test] - async fn idle_timeout_with_keep_alive_until_greater_than_idle_timeout() { - let idle_timeout = Duration::from_millis(100); - - let mut connection = Connection::new( - StreamMuxerBox::new(PendingStreamMuxer), - KeepAliveUntilConnectionHandler { - until: Instant::now() + idle_timeout * 2, - }, - None, - 0, - idle_timeout, - ); - - assert!(connection.poll_noop_waker().is_pending()); - - tokio::time::sleep(idle_timeout).await; - - assert!( - connection.poll_noop_waker().is_pending(), - "`KeepAlive::Until` is greater than idle-timeout, continue sleeping" - ); - - tokio::time::sleep(idle_timeout).await; - - assert!(matches!( - connection.poll_noop_waker(), - Poll::Ready(Err(ConnectionError::KeepAliveTimeout)) - )); - } - - #[tokio::test] - async fn idle_timeout_with_keep_alive_until_less_than_idle_timeout() { - let idle_timeout = Duration::from_millis(100); - - let mut connection = Connection::new( - StreamMuxerBox::new(PendingStreamMuxer), - KeepAliveUntilConnectionHandler { - until: Instant::now() + idle_timeout / 2, - }, - None, - 0, - idle_timeout, - ); - - assert!(connection.poll_noop_waker().is_pending()); - - tokio::time::sleep(idle_timeout / 2).await; - - 
assert!( - connection.poll_noop_waker().is_pending(), - "`KeepAlive::Until` is less than idle-timeout, honor idle-timeout" - ); - - tokio::time::sleep(idle_timeout / 2).await; - - assert!(matches!( - connection.poll_noop_waker(), - Poll::Ready(Err(ConnectionError::KeepAliveTimeout)) - )); - } - #[test] fn checked_add_fraction_can_add_u64_max() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); let start = Instant::now(); let duration = checked_add_fraction(start, Duration::from_secs(u64::MAX)); @@ -983,55 +933,59 @@ mod tests { assert!(start.checked_add(duration).is_some()) } - struct KeepAliveUntilConnectionHandler { - until: Instant, - } - - impl ConnectionHandler for KeepAliveUntilConnectionHandler { - type FromBehaviour = Void; - type ToBehaviour = Void; - type Error = Void; - type InboundProtocol = DeniedUpgrade; - type OutboundProtocol = DeniedUpgrade; - type InboundOpenInfo = (); - type OutboundOpenInfo = Void; + #[test] + fn compute_new_shutdown_does_not_panic() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + #[derive(Debug)] + struct ArbitraryShutdown(Shutdown); + + impl Clone for ArbitraryShutdown { + fn clone(&self) -> Self { + let shutdown = match self.0 { + Shutdown::None => Shutdown::None, + Shutdown::Asap => Shutdown::Asap, + Shutdown::Later(_, instant) => Shutdown::Later( + // compute_new_shutdown does not touch the delay. Delay does not + // implement Clone. Thus use a placeholder delay. 
+ Delay::new(Duration::from_secs(1)), + instant, + ), + }; - fn listen_protocol( - &self, - ) -> SubstreamProtocol { - SubstreamProtocol::new(DeniedUpgrade, ()) + ArbitraryShutdown(shutdown) + } } - fn connection_keep_alive(&self) -> KeepAlive { - KeepAlive::Until(self.until) - } + impl Arbitrary for ArbitraryShutdown { + fn arbitrary(g: &mut Gen) -> Self { + let shutdown = match g.gen_range(1u8..4) { + 1 => Shutdown::None, + 2 => Shutdown::Asap, + 3 => Shutdown::Later( + Delay::new(Duration::from_secs(u32::arbitrary(g) as u64)), + Instant::now() + .checked_add(Duration::arbitrary(g)) + .unwrap_or(Instant::now()), + ), + _ => unreachable!(), + }; - fn poll( - &mut self, - _: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, - > { - Poll::Pending + Self(shutdown) + } } - fn on_behaviour_event(&mut self, _: Self::FromBehaviour) {} - - fn on_connection_event( - &mut self, - _: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + fn prop( + handler_keep_alive: bool, + current_shutdown: ArbitraryShutdown, + idle_timeout: Duration, ) { + compute_new_shutdown(handler_keep_alive, &current_shutdown.0, idle_timeout); } + + QuickCheck::new().quickcheck(prop as fn(_, _, _)); } struct DummyStreamMuxer { @@ -1153,7 +1107,7 @@ mod tests { #[derive(Default)] struct ConfigurableProtocolConnectionHandler { - events: Vec>, + events: Vec>, active_protocols: HashSet, local_added: Vec>, local_removed: Vec>, @@ -1188,7 +1142,6 @@ impl ConnectionHandler for MockConnectionHandler { type FromBehaviour = Void; type ToBehaviour = Void; - type Error = Void; type InboundProtocol = DeniedUpgrade; type OutboundProtocol = DeniedUpgrade; type InboundOpenInfo = (); @@ -1232,8 +1185,8 @@ void::unreachable(event) } - fn connection_keep_alive(&self) -> KeepAlive { - KeepAlive::Yes + fn connection_keep_alive(&self) 
-> bool { + true } fn poll( @@ -1244,7 +1197,6 @@ mod tests { Self::OutboundProtocol, Self::OutboundOpenInfo, Self::ToBehaviour, - Self::Error, >, > { if self.outbound_requested { @@ -1262,7 +1214,6 @@ mod tests { impl ConnectionHandler for ConfigurableProtocolConnectionHandler { type FromBehaviour = Void; type ToBehaviour = Void; - type Error = Void; type InboundProtocol = ManyProtocolsUpgrade; type OutboundProtocol = DeniedUpgrade; type InboundOpenInfo = (); @@ -1309,8 +1260,8 @@ mod tests { void::unreachable(event) } - fn connection_keep_alive(&self) -> KeepAlive { - KeepAlive::Yes + fn connection_keep_alive(&self) -> bool { + true } fn poll( @@ -1321,7 +1272,6 @@ mod tests { Self::OutboundProtocol, Self::OutboundOpenInfo, Self::ToBehaviour, - Self::Error, >, > { if let Some(event) = self.events.pop() { diff --git a/swarm/src/connection/error.rs b/swarm/src/connection/error.rs index 5d5dda578681..33aa81c19a9a 100644 --- a/swarm/src/connection/error.rs +++ b/swarm/src/connection/error.rs @@ -25,47 +25,36 @@ use std::{fmt, io}; /// Errors that can occur in the context of an established `Connection`. #[derive(Debug)] -pub enum ConnectionError { +pub enum ConnectionError { /// An I/O error occurred on the connection. // TODO: Eventually this should also be a custom error? IO(io::Error), /// The connection keep-alive timeout expired. KeepAliveTimeout, - - /// The connection handler produced an error. 
- Handler(THandlerErr), } -impl fmt::Display for ConnectionError -where - THandlerErr: fmt::Display, -{ +impl fmt::Display for ConnectionError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { ConnectionError::IO(err) => write!(f, "Connection error: I/O error: {err}"), ConnectionError::KeepAliveTimeout => { write!(f, "Connection closed due to expired keep-alive timeout.") } - ConnectionError::Handler(err) => write!(f, "Connection error: Handler error: {err}"), } } } -impl std::error::Error for ConnectionError -where - THandlerErr: std::error::Error + 'static, -{ +impl std::error::Error for ConnectionError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { ConnectionError::IO(err) => Some(err), ConnectionError::KeepAliveTimeout => None, - ConnectionError::Handler(err) => Some(err), } } } -impl From for ConnectionError { +impl From for ConnectionError { fn from(error: io::Error) -> Self { ConnectionError::IO(error) } diff --git a/swarm/src/connection/pool.rs b/swarm/src/connection/pool.rs index b6100989a04e..9bcd1b446d34 100644 --- a/swarm/src/connection/pool.rs +++ b/swarm/src/connection/pool.rs @@ -49,6 +49,7 @@ use std::{ task::Context, task::Poll, }; +use tracing::Instrument; use void::Void; mod concurrent_dial; @@ -131,7 +132,7 @@ where /// Receivers for events reported from established connections. established_connection_events: - SelectAll>>, + SelectAll>>, /// Receivers for [`NewConnection`] objects that are dropped. new_connection_dropped_listeners: FuturesUnordered>, @@ -225,7 +226,7 @@ impl fmt::Debug for Pool { /// Event that can happen on the `Pool`. #[derive(Debug)] -pub(crate) enum PoolEvent { +pub(crate) enum PoolEvent { /// A new connection has been established. ConnectionEstablished { id: ConnectionId, @@ -257,10 +258,9 @@ pub(crate) enum PoolEvent { connected: Connected, /// The error that occurred, if any. If `None`, the connection /// was closed by the local peer. 
- error: Option>, + error: Option, /// The remaining established connections to the same peer. remaining_established_connection_ids: Vec, - handler: THandler, }, /// An outbound connection attempt failed. @@ -290,7 +290,7 @@ pub(crate) enum PoolEvent { id: ConnectionId, peer_id: PeerId, /// The produced event. - event: THandler::ToBehaviour, + event: ToBehaviour, }, /// The connection to a node has changed its address. @@ -426,20 +426,22 @@ where dial_concurrency_factor_override: Option, connection_id: ConnectionId, ) { - let dial = ConcurrentDial::new( - dials, - dial_concurrency_factor_override.unwrap_or(self.dial_concurrency_factor), - ); + let concurrency_factor = + dial_concurrency_factor_override.unwrap_or(self.dial_concurrency_factor); + let span = tracing::debug_span!(parent: tracing::Span::none(), "new_outgoing_connection", %concurrency_factor, num_dials=%dials.len(), id = %connection_id); + span.follows_from(tracing::Span::current()); let (abort_notifier, abort_receiver) = oneshot::channel(); - self.executor - .spawn(task::new_for_pending_outgoing_connection( + self.executor.spawn( + task::new_for_pending_outgoing_connection( connection_id, - dial, + ConcurrentDial::new(dials, concurrency_factor), abort_receiver, self.pending_connection_events_tx.clone(), - )); + ) + .instrument(span), + ); let endpoint = PendingPoint::Dialer { role_override }; @@ -469,13 +471,18 @@ where let (abort_notifier, abort_receiver) = oneshot::channel(); - self.executor - .spawn(task::new_for_pending_incoming_connection( + let span = tracing::debug_span!(parent: tracing::Span::none(), "new_incoming_connection", remote_addr = %info.send_back_addr, id = %connection_id); + span.follows_from(tracing::Span::current()); + + self.executor.spawn( + task::new_for_pending_incoming_connection( connection_id, future, abort_receiver, self.pending_connection_events_tx.clone(), - )); + ) + .instrument(span), + ); self.counters.inc_pending_incoming(); self.pending.insert( @@ -498,7 +505,6 @@ 
where handler: THandler, ) { let connection = connection.extract(); - let conns = self.established.entry(obtained_peer_id).or_default(); self.counters.inc_established(endpoint); @@ -525,17 +531,24 @@ where self.idle_connection_timeout, ); - self.executor.spawn(task::new_for_established_connection( - id, - obtained_peer_id, - connection, - command_receiver, - event_sender, - )) + let span = tracing::debug_span!(parent: tracing::Span::none(), "new_established_connection", remote_addr = %endpoint.get_remote_address(), %id, peer = %obtained_peer_id); + span.follows_from(tracing::Span::current()); + + self.executor.spawn( + task::new_for_established_connection( + id, + obtained_peer_id, + connection, + command_receiver, + event_sender, + ) + .instrument(span), + ) } /// Polls the connection pool for events. - pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> + #[tracing::instrument(level = "debug", name = "Pool::poll", skip(self, cx))] + pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> where THandler: ConnectionHandler + 'static, ::OutboundOpenInfo: Send, @@ -576,12 +589,7 @@ where old_endpoint, }); } - Poll::Ready(Some(task::EstablishedConnectionEvent::Closed { - id, - peer_id, - error, - handler, - })) => { + Poll::Ready(Some(task::EstablishedConnectionEvent::Closed { id, peer_id, error })) => { let connections = self .established .get_mut(&peer_id) @@ -599,7 +607,6 @@ where connected: Connected { endpoint, peer_id }, error, remaining_established_connection_ids, - handler, }); } } @@ -692,10 +699,10 @@ where if let Err(error) = check_peer_id() { self.executor.spawn(poll_fn(move |cx| { if let Err(e) = ready!(muxer.poll_close_unpin(cx)) { - log::debug!( - "Failed to close connection {:?} to peer {}: {:?}", - id, - obtained_peer_id, + tracing::debug!( + peer=%obtained_peer_id, + connection=%id, + "Failed to close connection to peer: {:?}", e ); } diff --git a/swarm/src/connection/pool/task.rs b/swarm/src/connection/pool/task.rs index 
175da668bda4..08674fd2ee57 100644 --- a/swarm/src/connection/pool/task.rs +++ b/swarm/src/connection/pool/task.rs @@ -66,7 +66,7 @@ pub(crate) enum PendingConnectionEvent { } #[derive(Debug)] -pub(crate) enum EstablishedConnectionEvent { +pub(crate) enum EstablishedConnectionEvent { /// A node we are connected to has changed its address. AddressChange { id: ConnectionId, @@ -77,7 +77,7 @@ pub(crate) enum EstablishedConnectionEvent { Notify { id: ConnectionId, peer_id: PeerId, - event: THandler::ToBehaviour, + event: ToBehaviour, }, /// A connection closed, possibly due to an error. /// @@ -86,8 +86,7 @@ pub(crate) enum EstablishedConnectionEvent { Closed { id: ConnectionId, peer_id: PeerId, - error: Option>, - handler: THandler, + error: Option, }, } @@ -172,7 +171,7 @@ pub(crate) async fn new_for_established_connection( peer_id: PeerId, mut connection: crate::connection::Connection, mut command_receiver: mpsc::Receiver>, - mut events: mpsc::Sender>, + mut events: mpsc::Sender>, ) where THandler: ConnectionHandler, { @@ -187,15 +186,25 @@ pub(crate) async fn new_for_established_connection( Command::NotifyHandler(event) => connection.on_behaviour_event(event), Command::Close => { command_receiver.close(); - let (handler, closing_muxer) = connection.close(); + let (remaining_events, closing_muxer) = connection.close(); + + let _ = events + .send_all(&mut remaining_events.map(|event| { + Ok(EstablishedConnectionEvent::Notify { + id: connection_id, + event, + peer_id, + }) + })) + .await; let error = closing_muxer.await.err().map(ConnectionError::IO); + let _ = events .send(EstablishedConnectionEvent::Closed { id: connection_id, peer_id, error, - handler, }) .await; return; @@ -227,14 +236,24 @@ pub(crate) async fn new_for_established_connection( } Err(error) => { command_receiver.close(); - let (handler, _closing_muxer) = connection.close(); + let (remaining_events, _closing_muxer) = connection.close(); + + let _ = events + .send_all(&mut remaining_events.map(|event| 
{ + Ok(EstablishedConnectionEvent::Notify { + id: connection_id, + event, + peer_id, + }) + })) + .await; + // Terminate the task with the error, dropping the connection. let _ = events .send(EstablishedConnectionEvent::Closed { id: connection_id, peer_id, error: Some(error), - handler, }) .await; return; diff --git a/swarm/src/dial_opts.rs b/swarm/src/dial_opts.rs index 9be7280b3dfb..4442d9138474 100644 --- a/swarm/src/dial_opts.rs +++ b/swarm/src/dial_opts.rs @@ -311,14 +311,18 @@ impl WithoutPeerIdWithAddress { #[derive(Debug, Copy, Clone, Default)] pub enum PeerCondition { /// A new dialing attempt is initiated _only if_ the peer is currently - /// considered disconnected, i.e. there is no established connection - /// and no ongoing dialing attempt. - #[default] + /// considered disconnected, i.e. there is no established connection. Disconnected, /// A new dialing attempt is initiated _only if_ there is currently /// no ongoing dialing attempt, i.e. the peer is either considered /// disconnected or connected but without an ongoing dialing attempt. NotDialing, + /// A combination of [`Disconnected`](PeerCondition::Disconnected) and + /// [`NotDialing`](PeerCondition::NotDialing). A new dialing attempt is + /// initiated _only if_ the peer is both considered disconnected and there + /// is currently no ongoing dialing attempt. + #[default] + DisconnectedAndNotDialing, /// A new dialing attempt is always initiated, only subject to the /// configured connection limits. 
Always, diff --git a/swarm/src/dummy.rs b/swarm/src/dummy.rs index 6810abec591c..86df676443b6 100644 --- a/swarm/src/dummy.rs +++ b/swarm/src/dummy.rs @@ -1,11 +1,11 @@ -use crate::behaviour::{FromSwarm, NetworkBehaviour, PollParameters, ToSwarm}; +use crate::behaviour::{FromSwarm, NetworkBehaviour, ToSwarm}; use crate::connection::ConnectionId; use crate::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, }; use crate::{ - ConnectionDenied, ConnectionHandlerEvent, KeepAlive, StreamUpgradeError, SubstreamProtocol, - THandler, THandlerInEvent, THandlerOutEvent, + ConnectionDenied, ConnectionHandlerEvent, StreamUpgradeError, SubstreamProtocol, THandler, + THandlerInEvent, THandlerOutEvent, }; use libp2p_core::upgrade::DeniedUpgrade; use libp2p_core::Endpoint; @@ -50,31 +50,11 @@ impl NetworkBehaviour for Behaviour { void::unreachable(event) } - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} } /// An implementation of [`ConnectionHandler`] that neither handles any protocols nor does it keep the connection alive. 
@@ -84,7 +64,6 @@ pub struct ConnectionHandler; impl crate::handler::ConnectionHandler for ConnectionHandler { type FromBehaviour = Void; type ToBehaviour = Void; - type Error = Void; type InboundProtocol = DeniedUpgrade; type OutboundProtocol = DeniedUpgrade; type InboundOpenInfo = (); @@ -98,20 +77,11 @@ impl crate::handler::ConnectionHandler for ConnectionHandler { void::unreachable(event) } - fn connection_keep_alive(&self) -> KeepAlive { - KeepAlive::No - } - fn poll( &mut self, _: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { Poll::Pending } diff --git a/swarm/src/handler.rs b/swarm/src/handler.rs index 9374903f9b74..31d2c91e3915 100644 --- a/swarm/src/handler.rs +++ b/swarm/src/handler.rs @@ -55,7 +55,6 @@ pub use select::ConnectionHandlerSelect; use crate::StreamProtocol; use ::either::Either; -use instant::Instant; use libp2p_core::Multiaddr; use once_cell::sync::Lazy; use smallvec::SmallVec; @@ -63,7 +62,7 @@ use std::collections::hash_map::RandomState; use std::collections::hash_set::{Difference, Intersection}; use std::collections::HashSet; use std::iter::Peekable; -use std::{cmp::Ordering, error, fmt, io, task::Context, task::Poll, time::Duration}; +use std::{error, fmt, io, task::Context, task::Poll, time::Duration}; /// A handler for a set of protocols used on a connection with a remote. /// @@ -103,8 +102,6 @@ pub trait ConnectionHandler: Send + 'static { type FromBehaviour: fmt::Debug + Send + 'static; /// A type representing message(s) a [`ConnectionHandler`] can send to a [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) via [`ConnectionHandlerEvent::NotifyBehaviour`]. type ToBehaviour: fmt::Debug + Send + 'static; - /// The type of errors returned by [`ConnectionHandler::poll`]. - type Error: error::Error + fmt::Debug + Send + 'static; /// The inbound upgrade for the protocol(s) used by the handler. 
type InboundProtocol: InboundUpgradeSend; /// The outbound upgrade for the protocol(s) used by the handler. @@ -123,41 +120,53 @@ pub trait ConnectionHandler: Send + 'static { /// > This allows a remote to put the list of supported protocols in a cache. fn listen_protocol(&self) -> SubstreamProtocol; - /// Returns until when the connection should be kept alive. + /// Returns whether the connection should be kept alive. /// - /// This method is called by the `Swarm` after each invocation of - /// [`ConnectionHandler::poll`] to determine if the connection and the associated - /// [`ConnectionHandler`]s should be kept alive as far as this handler is concerned - /// and if so, for how long. + /// ## Keep alive algorithm /// - /// Returning [`KeepAlive::No`] indicates that the connection should be - /// closed and this handler destroyed immediately. + /// A connection is always kept alive: /// - /// Returning [`KeepAlive::Until`] indicates that the connection may be closed - /// and this handler destroyed after the specified `Instant`. + /// - Whilst a [`ConnectionHandler`] returns [`Poll::Ready`]. + /// - We are negotiating inbound or outbound streams. + /// - There are active [`Stream`](crate::Stream)s on the connection. /// - /// Returning [`KeepAlive::Yes`] indicates that the connection should - /// be kept alive until the next call to this method. + /// The combination of the above means that _most_ protocols will not need to override this method. + /// This method is only invoked when all of the above are `false`, i.e. when the connection is entirely idle. /// - /// > **Note**: The connection is always closed and the handler destroyed - /// > when [`ConnectionHandler::poll`] returns an error. Furthermore, the - /// > connection may be closed for reasons outside of the control - /// > of the handler. 
- fn connection_keep_alive(&self) -> KeepAlive; + /// ## Exceptions + /// + /// - Protocols like [circuit-relay v2](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md) need to keep a connection alive beyond these circumstances and can thus override this method. + /// - Protocols like [ping](https://github.com/libp2p/specs/blob/master/ping/ping.md) **don't** want to keep a connection alive despite active streams. + /// In that case, protocol authors can use [`Stream::ignore_for_keep_alive`](crate::Stream::ignore_for_keep_alive) to opt out a particular stream from the keep-alive algorithm. + fn connection_keep_alive(&self) -> bool { + false + } /// Should behave like `Stream::poll()`. fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, >; + /// Gracefully close the [`ConnectionHandler`]. + /// + /// The contract for this function is equivalent to a [`Stream`](futures::Stream). + /// When a connection is being shut down, we will first poll this function to completion. + /// Following that, the physical connection will be shut down. + /// + /// This is also called when the shutdown was initiated due to an error on the connection. + /// We therefore cannot guarantee that performing IO within here will succeed. + /// + /// To signal completion, [`Poll::Ready(None)`] should be returned. + /// + /// Implementations MUST have a [`fuse`](futures::StreamExt::fuse)-like behaviour. + /// That is, [`Poll::Ready(None)`] MUST be returned on repeated calls to [`ConnectionHandler::poll_close`]. + fn poll_close(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(None) + } + /// Adds a closure that turns the input event into something else. 
fn map_in_event(self, map: TMap) -> MapInEvent where @@ -178,10 +187,6 @@ pub trait ConnectionHandler: Send + 'static { /// Creates a new [`ConnectionHandler`] that selects either this handler or /// `other` by delegating methods calls appropriately. - /// - /// > **Note**: The largest `KeepAlive` returned by the two handlers takes precedence, - /// > i.e. is returned from [`ConnectionHandler::connection_keep_alive`] by the returned - /// > handler. fn select(self, other: TProto2) -> ConnectionHandlerSelect where Self: Sized, @@ -205,6 +210,7 @@ pub trait ConnectionHandler: Send + 'static { /// Enumeration with the list of the possible stream events /// to pass to [`on_connection_event`](ConnectionHandler::on_connection_event). +#[non_exhaustive] pub enum ConnectionEvent<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IOI, OOI> { /// Informs the handler about the output of a successful upgrade on a new inbound substream. FullyNegotiatedInbound(FullyNegotiatedInbound), @@ -530,22 +536,13 @@ impl SubstreamProtocol { /// Event produced by a handler. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum ConnectionHandlerEvent { +#[non_exhaustive] +pub enum ConnectionHandlerEvent { /// Request a new outbound substream to be opened with the remote. OutboundSubstreamRequest { /// The protocol(s) to apply on the substream. protocol: SubstreamProtocol, }, - - /// Close the connection for the given reason. - /// - /// Note this will affect all [`ConnectionHandler`]s handling this - /// connection, in other words it will close the connection for all - /// [`ConnectionHandler`]s. To signal that one has no more need for the - /// connection, while allowing other [`ConnectionHandler`]s to continue using - /// the connection, return [`KeepAlive::No`] in - /// [`ConnectionHandler::connection_keep_alive`]. - Close(TErr), /// We learned something about the protocols supported by the remote. 
ReportRemoteProtocols(ProtocolSupport), @@ -562,15 +559,15 @@ pub enum ProtocolSupport { } /// Event produced by a handler. -impl - ConnectionHandlerEvent +impl + ConnectionHandlerEvent { /// If this is an `OutboundSubstreamRequest`, maps the `info` member from a /// `TOutboundOpenInfo` to something else. pub fn map_outbound_open_info( self, map: F, - ) -> ConnectionHandlerEvent + ) -> ConnectionHandlerEvent where F: FnOnce(TOutboundOpenInfo) -> I, { @@ -583,7 +580,6 @@ impl ConnectionHandlerEvent::NotifyBehaviour(val) => { ConnectionHandlerEvent::NotifyBehaviour(val) } - ConnectionHandlerEvent::Close(val) => ConnectionHandlerEvent::Close(val), ConnectionHandlerEvent::ReportRemoteProtocols(support) => { ConnectionHandlerEvent::ReportRemoteProtocols(support) } @@ -592,10 +588,7 @@ impl /// If this is an `OutboundSubstreamRequest`, maps the protocol (`TConnectionUpgrade`) /// to something else. - pub fn map_protocol( - self, - map: F, - ) -> ConnectionHandlerEvent + pub fn map_protocol(self, map: F) -> ConnectionHandlerEvent where F: FnOnce(TConnectionUpgrade) -> I, { @@ -608,7 +601,6 @@ impl ConnectionHandlerEvent::NotifyBehaviour(val) => { ConnectionHandlerEvent::NotifyBehaviour(val) } - ConnectionHandlerEvent::Close(val) => ConnectionHandlerEvent::Close(val), ConnectionHandlerEvent::ReportRemoteProtocols(support) => { ConnectionHandlerEvent::ReportRemoteProtocols(support) } @@ -619,7 +611,7 @@ impl pub fn map_custom( self, map: F, - ) -> ConnectionHandlerEvent + ) -> ConnectionHandlerEvent where F: FnOnce(TCustom) -> I, { @@ -630,29 +622,6 @@ impl ConnectionHandlerEvent::NotifyBehaviour(val) => { ConnectionHandlerEvent::NotifyBehaviour(map(val)) } - ConnectionHandlerEvent::Close(val) => ConnectionHandlerEvent::Close(val), - ConnectionHandlerEvent::ReportRemoteProtocols(support) => { - ConnectionHandlerEvent::ReportRemoteProtocols(support) - } - } - } - - /// If this is a `Close` event, maps the content to something else. 
- pub fn map_close( - self, - map: F, - ) -> ConnectionHandlerEvent - where - F: FnOnce(TErr) -> I, - { - match self { - ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } => { - ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } - } - ConnectionHandlerEvent::NotifyBehaviour(val) => { - ConnectionHandlerEvent::NotifyBehaviour(val) - } - ConnectionHandlerEvent::Close(val) => ConnectionHandlerEvent::Close(map(val)), ConnectionHandlerEvent::ReportRemoteProtocols(support) => { ConnectionHandlerEvent::ReportRemoteProtocols(support) } @@ -660,9 +629,6 @@ impl } } -#[deprecated(note = "Renamed to `StreamUpgradeError`")] -pub type ConnectionHandlerUpgrErr = StreamUpgradeError; - /// Error that can happen on an outbound substream opening attempt. #[derive(Debug)] pub enum StreamUpgradeError { @@ -724,43 +690,6 @@ where } } -/// How long the connection should be kept alive. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum KeepAlive { - /// If nothing new happens, the connection should be closed at the given `Instant`. - Until(Instant), - /// Keep the connection alive. - Yes, - /// Close the connection as soon as possible. - No, -} - -impl KeepAlive { - /// Returns true for `Yes`, false otherwise. - pub fn is_yes(&self) -> bool { - matches!(*self, KeepAlive::Yes) - } -} - -impl PartialOrd for KeepAlive { - fn partial_cmp(&self, other: &KeepAlive) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for KeepAlive { - fn cmp(&self, other: &KeepAlive) -> Ordering { - use self::KeepAlive::*; - - match (self, other) { - (No, No) | (Yes, Yes) => Ordering::Equal, - (No, _) | (_, Yes) => Ordering::Less, - (_, No) | (Yes, _) => Ordering::Greater, - (Until(t1), Until(t2)) => t1.cmp(t2), - } - } -} - /// A statically declared, empty [`HashSet`] allows us to work around borrow-checker rules for /// [`ProtocolsAdded::from_set`]. The lifetimes don't work unless we have a [`HashSet`] with a `'static' lifetime. 
static EMPTY_HASHSET: Lazy> = Lazy::new(HashSet::new); diff --git a/swarm/src/handler/either.rs b/swarm/src/handler/either.rs index 6a60427228d1..a5aab9b5fee8 100644 --- a/swarm/src/handler/either.rs +++ b/swarm/src/handler/either.rs @@ -20,7 +20,7 @@ use crate::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound, - InboundUpgradeSend, KeepAlive, ListenUpgradeError, SubstreamProtocol, + InboundUpgradeSend, ListenUpgradeError, SubstreamProtocol, }; use crate::upgrade::SendWrapper; use either::Either; @@ -80,7 +80,6 @@ where { type FromBehaviour = Either; type ToBehaviour = Either; - type Error = Either; type InboundProtocol = Either, SendWrapper>; type OutboundProtocol = Either, SendWrapper>; @@ -108,7 +107,7 @@ where } } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { match self { Either::Left(handler) => handler.connection_keep_alive(), Either::Right(handler) => handler.connection_keep_alive(), @@ -119,22 +118,15 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { let event = match self { Either::Left(handler) => futures::ready!(handler.poll(cx)) .map_custom(Either::Left) - .map_close(Either::Left) .map_protocol(|p| Either::Left(SendWrapper(p))) .map_outbound_open_info(Either::Left), Either::Right(handler) => futures::ready!(handler.poll(cx)) .map_custom(Either::Right) - .map_close(Either::Right) .map_protocol(|p| Either::Right(SendWrapper(p))) .map_outbound_open_info(Either::Right), }; @@ -142,6 +134,15 @@ where Poll::Ready(event) } + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + let event = match self { + Either::Left(handler) => futures::ready!(handler.poll_close(cx)).map(Either::Left), + Either::Right(handler) => futures::ready!(handler.poll_close(cx)).map(Either::Right), + }; + + Poll::Ready(event) + } + fn 
on_connection_event( &mut self, event: ConnectionEvent< diff --git a/swarm/src/handler/map_in.rs b/swarm/src/handler/map_in.rs index 82cb12a183d1..9316ef4d2ce7 100644 --- a/swarm/src/handler/map_in.rs +++ b/swarm/src/handler/map_in.rs @@ -19,7 +19,7 @@ // DEALINGS IN THE SOFTWARE. use crate::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, KeepAlive, SubstreamProtocol, + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, SubstreamProtocol, }; use std::{fmt::Debug, marker::PhantomData, task::Context, task::Poll}; @@ -52,7 +52,6 @@ where { type FromBehaviour = TNewIn; type ToBehaviour = TConnectionHandler::ToBehaviour; - type Error = TConnectionHandler::Error; type InboundProtocol = TConnectionHandler::InboundProtocol; type OutboundProtocol = TConnectionHandler::OutboundProtocol; type InboundOpenInfo = TConnectionHandler::InboundOpenInfo; @@ -68,7 +67,7 @@ where } } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { self.inner.connection_keep_alive() } @@ -76,16 +75,15 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { self.inner.poll(cx) } + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_close(cx) + } + fn on_connection_event( &mut self, event: ConnectionEvent< diff --git a/swarm/src/handler/map_out.rs b/swarm/src/handler/map_out.rs index 8528b563ece4..f877bfa6f64c 100644 --- a/swarm/src/handler/map_out.rs +++ b/swarm/src/handler/map_out.rs @@ -19,8 +19,9 @@ // DEALINGS IN THE SOFTWARE. 
use crate::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, KeepAlive, SubstreamProtocol, + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, SubstreamProtocol, }; +use futures::ready; use std::fmt::Debug; use std::task::{Context, Poll}; @@ -47,7 +48,6 @@ where { type FromBehaviour = TConnectionHandler::FromBehaviour; type ToBehaviour = TNewOut; - type Error = TConnectionHandler::Error; type InboundProtocol = TConnectionHandler::InboundProtocol; type OutboundProtocol = TConnectionHandler::OutboundProtocol; type InboundOpenInfo = TConnectionHandler::InboundOpenInfo; @@ -61,7 +61,7 @@ where self.inner.on_behaviour_event(event) } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { self.inner.connection_keep_alive() } @@ -69,18 +69,12 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { self.inner.poll(cx).map(|ev| match ev { ConnectionHandlerEvent::NotifyBehaviour(ev) => { ConnectionHandlerEvent::NotifyBehaviour((self.map)(ev)) } - ConnectionHandlerEvent::Close(err) => ConnectionHandlerEvent::Close(err), ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } => { ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } } @@ -90,6 +84,14 @@ where }) } + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + let Some(e) = ready!(self.inner.poll_close(cx)) else { + return Poll::Ready(None); + }; + + Poll::Ready(Some((self.map)(e))) + } + fn on_connection_event( &mut self, event: ConnectionEvent< diff --git a/swarm/src/handler/multi.rs b/swarm/src/handler/multi.rs index ced94f1213c8..0b4549ed733e 100644 --- a/swarm/src/handler/multi.rs +++ b/swarm/src/handler/multi.rs @@ -23,12 +23,11 @@ use crate::handler::{ AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, 
FullyNegotiatedOutbound, KeepAlive, ListenUpgradeError, - SubstreamProtocol, + FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, SubstreamProtocol, }; use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, UpgradeInfoSend}; use crate::Stream; -use futures::{future::BoxFuture, prelude::*}; +use futures::{future::BoxFuture, prelude::*, ready}; use rand::Rng; use std::{ cmp, @@ -112,7 +111,6 @@ where { type FromBehaviour = (K, ::FromBehaviour); type ToBehaviour = (K, ::ToBehaviour); - type Error = ::Error; type InboundProtocol = Upgrade::InboundProtocol>; type OutboundProtocol = ::OutboundProtocol; type InboundOpenInfo = Info::InboundOpenInfo>; @@ -162,7 +160,7 @@ where }, )); } else { - log::error!("FullyNegotiatedOutbound: no handler for key") + tracing::error!("FullyNegotiatedOutbound: no handler for key") } } ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { @@ -179,7 +177,7 @@ where )); } } else { - log::error!("FullyNegotiatedInbound: no handler for key") + tracing::error!("FullyNegotiatedInbound: no handler for key") } } ConnectionEvent::AddressChange(AddressChange { new_address }) => { @@ -199,7 +197,7 @@ where error, })); } else { - log::error!("DialUpgradeError: no handler for protocol") + tracing::error!("DialUpgradeError: no handler for protocol") } } ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { @@ -226,28 +224,23 @@ where if let Some(h) = self.handlers.get_mut(&key) { h.on_behaviour_event(event) } else { - log::error!("on_behaviour_event: no handler for key") + tracing::error!("on_behaviour_event: no handler for key") } } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { self.handlers .values() .map(|h| h.connection_keep_alive()) .max() - .unwrap_or(KeepAlive::No) + .unwrap_or(false) } fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, 
+ ConnectionHandlerEvent, > { // Calling `gen_range(0, 0)` (see below) would panic, so we have return early to avoid // that situation. @@ -278,6 +271,17 @@ where Poll::Pending } + + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + for (k, h) in self.handlers.iter_mut() { + let Some(e) = ready!(h.poll_close(cx)) else { + continue; + }; + return Poll::Ready(Some((k.clone(), e))); + } + + Poll::Ready(None) + } } /// Split [`MultiHandler`] into parts. diff --git a/swarm/src/handler/one_shot.rs b/swarm/src/handler/one_shot.rs index 439d3f47ee3e..b1fc41e90987 100644 --- a/swarm/src/handler/one_shot.rs +++ b/swarm/src/handler/one_shot.rs @@ -20,11 +20,10 @@ use crate::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, KeepAlive, StreamUpgradeError, - SubstreamProtocol, + FullyNegotiatedInbound, FullyNegotiatedOutbound, SubstreamProtocol, }; use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend}; -use instant::Instant; +use crate::StreamUpgradeError; use smallvec::SmallVec; use std::{error, fmt::Debug, task::Context, task::Poll, time::Duration}; @@ -36,16 +35,12 @@ where { /// The upgrade for inbound substreams. listen_protocol: SubstreamProtocol, - /// If `Some`, something bad happened and we should shut down the handler with an error. - pending_error: Option::Error>>, /// Queue of events to produce in `poll()`. - events_out: SmallVec<[TEvent; 4]>, + events_out: SmallVec<[Result>; 4]>, /// Queue of outbound substreams to open. dial_queue: SmallVec<[TOutbound; 4]>, /// Current number of concurrent outbound substreams being opened. dial_negotiated: u32, - /// Value to return from `connection_keep_alive`. 
- keep_alive: KeepAlive, /// The configuration container for the handler config: OneShotHandlerConfig, } @@ -61,11 +56,9 @@ where ) -> Self { OneShotHandler { listen_protocol, - pending_error: None, events_out: SmallVec::new(), dial_queue: SmallVec::new(), dial_negotiated: 0, - keep_alive: KeepAlive::Yes, config, } } @@ -93,7 +86,6 @@ where /// Opens an outbound substream with `upgrade`. pub fn send_request(&mut self, upgrade: TOutbound) { - self.keep_alive = KeepAlive::Yes; self.dial_queue.push(upgrade); } } @@ -122,8 +114,7 @@ where TEvent: Debug + Send + 'static, { type FromBehaviour = TOutbound; - type ToBehaviour = TEvent; - type Error = StreamUpgradeError<::Error>; + type ToBehaviour = Result>; type InboundProtocol = TInbound; type OutboundProtocol = TOutbound; type OutboundOpenInfo = (); @@ -137,25 +128,12 @@ where self.send_request(event); } - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive - } - fn poll( &mut self, _: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { - if let Some(err) = self.pending_error.take() { - return Poll::Ready(ConnectionHandlerEvent::Close(err)); - } - if !self.events_out.is_empty() { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( self.events_out.remove(0), @@ -175,10 +153,6 @@ where } } else { self.dial_queue.shrink_to_fit(); - - if self.dial_negotiated == 0 && self.keep_alive.is_yes() { - self.keep_alive = KeepAlive::Until(Instant::now() + self.config.keep_alive_timeout); - } } Poll::Pending @@ -198,26 +172,17 @@ where protocol: out, .. }) => { - // If we're shutting down the connection for inactivity, reset the timeout. 
- if !self.keep_alive.is_yes() { - self.keep_alive = - KeepAlive::Until(Instant::now() + self.config.keep_alive_timeout); - } - - self.events_out.push(out.into()); + self.events_out.push(Ok(out.into())); } ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { protocol: out, .. }) => { self.dial_negotiated -= 1; - self.events_out.push(out.into()); + self.events_out.push(Ok(out.into())); } ConnectionEvent::DialUpgradeError(DialUpgradeError { error, .. }) => { - if self.pending_error.is_none() { - log::debug!("DialUpgradeError: {error}"); - self.keep_alive = KeepAlive::No; - } + self.events_out.push(Err(error)); } ConnectionEvent::AddressChange(_) | ConnectionEvent::ListenUpgradeError(_) @@ -230,8 +195,6 @@ where /// Configuration parameters for the `OneShotHandler` #[derive(Debug)] pub struct OneShotHandlerConfig { - /// Keep-alive timeout for idle connections. - pub keep_alive_timeout: Duration, /// Timeout for outbound substream upgrades. pub outbound_substream_timeout: Duration, /// Maximum number of concurrent outbound substreams being opened. 
@@ -241,7 +204,6 @@ pub struct OneShotHandlerConfig { impl Default for OneShotHandlerConfig { fn default() -> Self { OneShotHandlerConfig { - keep_alive_timeout: Duration::from_secs(10), outbound_substream_timeout: Duration::from_secs(10), max_dial_negotiated: 8, } @@ -270,9 +232,6 @@ mod tests { } })); - assert!(matches!( - handler.connection_keep_alive(), - KeepAlive::Until(_) - )); + assert!(matches!(handler.connection_keep_alive(), false)); } } diff --git a/swarm/src/handler/pending.rs b/swarm/src/handler/pending.rs index ee6829356bdf..23b9adcfd900 100644 --- a/swarm/src/handler/pending.rs +++ b/swarm/src/handler/pending.rs @@ -21,7 +21,7 @@ use crate::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound, - FullyNegotiatedOutbound, KeepAlive, SubstreamProtocol, + FullyNegotiatedOutbound, SubstreamProtocol, }; use libp2p_core::upgrade::PendingUpgrade; use std::task::{Context, Poll}; @@ -42,7 +42,6 @@ impl PendingConnectionHandler { impl ConnectionHandler for PendingConnectionHandler { type FromBehaviour = Void; type ToBehaviour = Void; - type Error = Void; type InboundProtocol = PendingUpgrade; type OutboundProtocol = PendingUpgrade; type OutboundOpenInfo = Void; @@ -56,20 +55,11 @@ impl ConnectionHandler for PendingConnectionHandler { void::unreachable(v) } - fn connection_keep_alive(&self) -> KeepAlive { - KeepAlive::No - } - fn poll( &mut self, _: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { Poll::Pending } diff --git a/swarm/src/handler/select.rs b/swarm/src/handler/select.rs index 65db4ab525bc..e049252d4480 100644 --- a/swarm/src/handler/select.rs +++ b/swarm/src/handler/select.rs @@ -20,12 +20,12 @@ use crate::handler::{ AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, InboundUpgradeSend, 
KeepAlive, - ListenUpgradeError, OutboundUpgradeSend, StreamUpgradeError, SubstreamProtocol, + FullyNegotiatedInbound, FullyNegotiatedOutbound, InboundUpgradeSend, ListenUpgradeError, + OutboundUpgradeSend, StreamUpgradeError, SubstreamProtocol, }; use crate::upgrade::SendWrapper; use either::Either; -use futures::future; +use futures::{future, ready}; use libp2p_core::upgrade::SelectUpgrade; use std::{cmp, task::Context, task::Poll}; @@ -181,7 +181,6 @@ where { type FromBehaviour = Either; type ToBehaviour = Either; - type Error = Either; type InboundProtocol = SelectUpgrade< SendWrapper<::InboundProtocol>, SendWrapper<::InboundProtocol>, @@ -208,7 +207,7 @@ where } } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { cmp::max( self.proto1.connection_keep_alive(), self.proto2.connection_keep_alive(), @@ -219,20 +218,12 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { match self.proto1.poll(cx) { Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)) => { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Either::Left(event))); } - Poll::Ready(ConnectionHandlerEvent::Close(event)) => { - return Poll::Ready(ConnectionHandlerEvent::Close(Either::Left(event))); - } Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol }) => { return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol: protocol @@ -252,9 +243,6 @@ where event, ))); } - Poll::Ready(ConnectionHandlerEvent::Close(event)) => { - return Poll::Ready(ConnectionHandlerEvent::Close(Either::Right(event))); - } Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol }) => { return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol: protocol @@ -271,6 +259,18 @@ where Poll::Pending } + fn poll_close(&mut self, cx: &mut Context<'_>) -> 
Poll> { + if let Some(e) = ready!(self.proto1.poll_close(cx)) { + return Poll::Ready(Some(Either::Left(e))); + } + + if let Some(e) = ready!(self.proto2.poll_close(cx)) { + return Poll::Ready(Some(Either::Right(e))); + } + + Poll::Ready(None) + } + fn on_connection_event( &mut self, event: ConnectionEvent< diff --git a/swarm/src/keep_alive.rs b/swarm/src/keep_alive.rs deleted file mode 100644 index 05cbcdf7b8cf..000000000000 --- a/swarm/src/keep_alive.rs +++ /dev/null @@ -1,145 +0,0 @@ -use crate::behaviour::{FromSwarm, NetworkBehaviour, PollParameters, ToSwarm}; -use crate::connection::ConnectionId; -use crate::handler::{ - ConnectionEvent, ConnectionHandlerEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound, - KeepAlive, SubstreamProtocol, -}; -use crate::{ConnectionDenied, THandler, THandlerInEvent, THandlerOutEvent}; -use libp2p_core::upgrade::DeniedUpgrade; -use libp2p_core::{Endpoint, Multiaddr}; -use libp2p_identity::PeerId; -use std::task::{Context, Poll}; -use void::Void; - -/// Implementation of [`NetworkBehaviour`] that doesn't do anything other than keep all connections alive. -/// -/// This is primarily useful for test code. In can however occasionally be useful for production code too. -/// The caveat is that open connections consume system resources and should thus be shutdown when -/// they are not in use. Connections can also fail at any time so really, your application should be -/// designed to establish them when necessary, making the use of this behaviour likely redundant. 
-#[derive(Default)] -pub struct Behaviour; - -impl NetworkBehaviour for Behaviour { - type ConnectionHandler = ConnectionHandler; - type ToSwarm = Void; - - fn handle_established_inbound_connection( - &mut self, - _: ConnectionId, - _: PeerId, - _: &Multiaddr, - _: &Multiaddr, - ) -> Result, ConnectionDenied> { - Ok(ConnectionHandler) - } - - fn handle_established_outbound_connection( - &mut self, - _: ConnectionId, - _: PeerId, - _: &Multiaddr, - _: Endpoint, - ) -> Result, ConnectionDenied> { - Ok(ConnectionHandler) - } - - fn on_connection_handler_event( - &mut self, - _: PeerId, - _: ConnectionId, - event: THandlerOutEvent, - ) { - void::unreachable(event) - } - - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { - Poll::Pending - } - - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} - } - } -} - -/// Implementation of [`ConnectionHandler`] that doesn't handle anything but keeps the connection alive. 
-#[derive(Clone, Debug)] -pub struct ConnectionHandler; - -impl crate::handler::ConnectionHandler for ConnectionHandler { - type FromBehaviour = Void; - type ToBehaviour = Void; - type Error = Void; - type InboundProtocol = DeniedUpgrade; - type OutboundProtocol = DeniedUpgrade; - type InboundOpenInfo = (); - type OutboundOpenInfo = Void; - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(DeniedUpgrade, ()) - } - - fn on_behaviour_event(&mut self, v: Self::FromBehaviour) { - void::unreachable(v) - } - - fn connection_keep_alive(&self) -> KeepAlive { - KeepAlive::Yes - } - - fn poll( - &mut self, - _: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, - > { - Poll::Pending - } - - fn on_connection_event( - &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, - ) { - match event { - ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { - protocol, .. - }) => void::unreachable(protocol), - ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { - protocol, .. - }) => void::unreachable(protocol), - ConnectionEvent::DialUpgradeError(_) - | ConnectionEvent::ListenUpgradeError(_) - | ConnectionEvent::AddressChange(_) - | ConnectionEvent::LocalProtocolsChange(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} - } - } -} diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index b81dcff802c2..2f02e43348d6 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -67,10 +67,6 @@ pub mod behaviour; pub mod dial_opts; pub mod dummy; pub mod handler; -#[deprecated( - note = "Configure an appropriate idle connection timeout via `SwarmBuilder::idle_connection_timeout` instead. To keep connections alive 'forever', use `Duration::from_secs(u64::MAX)`." 
-)] -pub mod keep_alive; mod listen_opts; /// Bundles all symbols required for the [`libp2p_swarm_derive::NetworkBehaviour`] macro. @@ -88,6 +84,7 @@ pub mod derive_prelude { pub use crate::behaviour::ListenerClosed; pub use crate::behaviour::ListenerError; pub use crate::behaviour::NewExternalAddrCandidate; + pub use crate::behaviour::NewExternalAddrOfPeer; pub use crate::behaviour::NewListenAddr; pub use crate::behaviour::NewListener; pub use crate::connection::ConnectionId; @@ -96,7 +93,6 @@ pub mod derive_prelude { pub use crate::ConnectionHandlerSelect; pub use crate::DialError; pub use crate::NetworkBehaviour; - pub use crate::PollParameters; pub use crate::THandler; pub use crate::THandlerInEvent; pub use crate::THandlerOutEvent; @@ -113,14 +109,14 @@ pub mod derive_prelude { pub use behaviour::{ AddressChange, CloseConnection, ConnectionClosed, DialFailure, ExpiredListenAddr, ExternalAddrExpired, ExternalAddresses, FromSwarm, ListenAddresses, ListenFailure, - ListenerClosed, ListenerError, NetworkBehaviour, NewExternalAddrCandidate, NewListenAddr, - NotifyHandler, PollParameters, ToSwarm, + ListenerClosed, ListenerError, NetworkBehaviour, NewExternalAddrCandidate, + NewExternalAddrOfPeer, NewListenAddr, NotifyHandler, PeerAddresses, ToSwarm, }; pub use connection::pool::ConnectionCounters; pub use connection::{ConnectionError, ConnectionId, SupportedProtocols}; pub use executor::Executor; pub use handler::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerSelect, KeepAlive, OneShotHandler, + ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerSelect, OneShotHandler, OneShotHandlerConfig, StreamUpgradeError, SubstreamProtocol, }; #[cfg(feature = "macros")] @@ -140,14 +136,13 @@ use dial_opts::{DialOpts, PeerCondition}; use futures::{prelude::*, stream::FusedStream}; use libp2p_core::{ connection::ConnectedPoint, - multiaddr, muxing::StreamMuxerBox, transport::{self, ListenerId, TransportError, TransportEvent}, Endpoint, Multiaddr, 
Transport, }; use libp2p_identity::PeerId; use smallvec::SmallVec; -use std::collections::{HashMap, HashSet}; +use std::collections::{HashMap, HashSet, VecDeque}; use std::num::{NonZeroU32, NonZeroU8, NonZeroUsize}; use std::time::Duration; use std::{ @@ -156,12 +151,7 @@ use std::{ pin::Pin, task::{Context, Poll}, }; - -/// Substream for which a protocol has been chosen. -/// -/// Implements the [`AsyncRead`] and [`AsyncWrite`] traits. -#[deprecated(note = "The 'substream' terminology is deprecated. Use 'Stream' instead")] -pub type NegotiatedSubstream = Stream; +use tracing::Instrument; /// Event generated by the [`NetworkBehaviour`] that the swarm will report back. type TBehaviourOutEvent = ::ToSwarm; @@ -177,12 +167,10 @@ pub type THandlerInEvent = as ConnectionHandle /// Custom event that can be produced by the [`ConnectionHandler`] of the [`NetworkBehaviour`]. pub type THandlerOutEvent = as ConnectionHandler>::ToBehaviour; -/// Custom error that can be produced by the [`ConnectionHandler`] of the [`NetworkBehaviour`]. -pub type THandlerErr = as ConnectionHandler>::Error; - /// Event generated by the `Swarm`. #[derive(Debug)] -pub enum SwarmEvent { +#[non_exhaustive] +pub enum SwarmEvent { /// Event generated by the `NetworkBehaviour`. Behaviour(TBehaviourOutEvent), /// A connection to the given peer has been opened. @@ -216,7 +204,7 @@ pub enum SwarmEvent { num_established: u32, /// Reason for the disconnection, if it was not a successful /// active close. - cause: Option>, + cause: Option, }, /// A new connection arrived on a listener and is in the process of protocol negotiation. /// @@ -305,9 +293,17 @@ pub enum SwarmEvent { /// Identifier of the connection. connection_id: ConnectionId, }, + /// We have discovered a new candidate for an external address for us. + NewExternalAddrCandidate { address: Multiaddr }, + /// An external address of the local node was confirmed. 
+ ExternalAddrConfirmed { address: Multiaddr }, + /// An external address of the local node expired, i.e. is no-longer confirmed. + ExternalAddrExpired { address: Multiaddr }, + /// We have discovered a new address of a peer. + NewExternalAddrOfPeer { peer_id: PeerId, address: Multiaddr }, } -impl SwarmEvent { +impl SwarmEvent { /// Extract the `TBehaviourOutEvent` from this [`SwarmEvent`] in case it is the `Behaviour` variant, otherwise fail. #[allow(clippy::result_large_err)] pub fn try_into_behaviour_event(self) -> Result { @@ -350,7 +346,9 @@ where /// Pending event to be delivered to connection handlers /// (or dropped if the peer disconnected) before the `behaviour` /// can be polled again. - pending_event: Option<(PeerId, PendingNotifyHandler, THandlerInEvent)>, + pending_handler_event: Option<(PeerId, PendingNotifyHandler, THandlerInEvent)>, + + pending_swarm_events: VecDeque>, } impl Unpin for Swarm where TBehaviour: NetworkBehaviour {} @@ -359,6 +357,29 @@ impl Swarm where TBehaviour: NetworkBehaviour, { + /// Creates a new [`Swarm`] from the given [`Transport`], [`NetworkBehaviour`], [`PeerId`] and + /// [`Config`]. + pub fn new( + transport: transport::Boxed<(PeerId, StreamMuxerBox)>, + behaviour: TBehaviour, + local_peer_id: PeerId, + config: Config, + ) -> Self { + tracing::info!(%local_peer_id); + + Swarm { + local_peer_id, + transport, + pool: Pool::new(local_peer_id, config.pool_config), + behaviour, + supported_protocols: Default::default(), + confirmed_external_addr: Default::default(), + listened_addrs: HashMap::new(), + pending_handler_event: None, + pending_swarm_events: VecDeque::default(), + } + } + /// Returns information about the connections underlying the [`Swarm`]. pub fn network_info(&self) -> NetworkInfo { let num_peers = self.pool.num_peers(); @@ -394,24 +415,27 @@ where /// See also [`DialOpts`]. 
/// /// ``` - /// # use libp2p_swarm::SwarmBuilder; + /// # use libp2p_swarm::Swarm; /// # use libp2p_swarm::dial_opts::{DialOpts, PeerCondition}; /// # use libp2p_core::{Multiaddr, Transport}; /// # use libp2p_core::transport::dummy::DummyTransport; /// # use libp2p_swarm::dummy; /// # use libp2p_identity::PeerId; /// # - /// let mut swarm = SwarmBuilder::without_executor( - /// DummyTransport::new().boxed(), - /// dummy::Behaviour, - /// PeerId::random(), - /// ).build(); + /// # #[tokio::main] + /// # async fn main() { + /// let mut swarm = build_swarm(); /// /// // Dial a known peer. /// swarm.dial(PeerId::random()); /// /// // Dial an unknown peer. /// swarm.dial("/ip6/::1/tcp/12345".parse::().unwrap()); + /// # } + /// + /// # fn build_swarm() -> Swarm { + /// # Swarm::new(DummyTransport::new().boxed(), dummy::Behaviour, PeerId::random(), libp2p_swarm::Config::with_tokio_executor()) + /// # } /// ``` pub fn dial(&mut self, opts: impl Into) -> Result<(), DialError> { let dial_opts = opts.into(); @@ -421,11 +445,13 @@ where let connection_id = dial_opts.connection_id(); let should_dial = match (condition, peer_id) { + (_, None) => true, (PeerCondition::Always, _) => true, - (PeerCondition::Disconnected, None) => true, - (PeerCondition::NotDialing, None) => true, (PeerCondition::Disconnected, Some(peer_id)) => !self.pool.is_connected(peer_id), (PeerCondition::NotDialing, Some(peer_id)) => !self.pool.is_dialing(peer_id), + (PeerCondition::DisconnectedAndNotDialing, Some(peer_id)) => { + !self.pool.is_dialing(peer_id) && !self.pool.is_connected(peer_id) + } }; if !should_dial { @@ -457,7 +483,11 @@ where let num_addresses = addresses.len(); if num_addresses > 0 { - log::debug!("discarding {num_addresses} addresses from `NetworkBehaviour` because `DialOpts::extend_addresses_through_behaviour is `false` for connection {connection_id:?}") + tracing::debug!( + connection=%connection_id, + discarded_addresses_count=%num_addresses, + "discarding addresses from 
`NetworkBehaviour` because `DialOpts::extend_addresses_through_behaviour is `false` for connection" + ) } } } @@ -497,15 +527,24 @@ where let dials = addresses .into_iter() - .map(|a| match p2p_addr(peer_id, a) { + .map(|a| match peer_id.map_or(Ok(a.clone()), |p| a.with_p2p(p)) { Ok(address) => { - let dial = match dial_opts.role_override() { - Endpoint::Dialer => self.transport.dial(address.clone()), - Endpoint::Listener => self.transport.dial_as_listener(address.clone()), + let (dial, span) = match dial_opts.role_override() { + Endpoint::Dialer => ( + self.transport.dial(address.clone()), + tracing::debug_span!(parent: tracing::Span::none(), "Transport::dial", %address), + ), + Endpoint::Listener => ( + self.transport.dial_as_listener(address.clone()), + tracing::debug_span!(parent: tracing::Span::none(), "Transport::dial_as_listener", %address), + ), }; + span.follows_from(tracing::Span::current()); + match dial { Ok(fut) => fut .map(|r| (address, r.map_err(TransportError::Other))) + .instrument(span) .boxed(), Err(err) => futures::future::ready((address, Err(err))).boxed(), } @@ -587,16 +626,23 @@ where self.confirmed_external_addr.remove(addr); } + /// Add a new external address of a remote peer. + /// + /// The address is broadcast to all [`NetworkBehaviour`]s via [`FromSwarm::NewExternalAddrOfPeer`]. + pub fn add_peer_address(&mut self, peer_id: PeerId, addr: Multiaddr) { + self.behaviour + .on_swarm_event(FromSwarm::NewExternalAddrOfPeer(NewExternalAddrOfPeer { + peer_id, + addr: &addr, + })) + } + /// Disconnects a peer by its peer ID, closing all connections to said peer. /// /// Returns `Ok(())` if there was one or more established connections to the peer. /// - /// Note: Closing a connection via [`Swarm::disconnect_peer_id`] does - /// not inform the corresponding [`ConnectionHandler`]. 
- /// Closing a connection via a [`ConnectionHandler`] can be done either in a - /// collaborative manner across [`ConnectionHandler`]s - /// with [`ConnectionHandler::connection_keep_alive`] or directly with - /// [`ConnectionHandlerEvent::Close`]. + /// Closing a connection via [`Swarm::disconnect_peer_id`] will poll [`ConnectionHandler::poll_close`] to completion. + /// Use this function if you want to close a connection _despite_ it still being in use by one or more handlers. #[allow(clippy::result_unit_err)] pub fn disconnect_peer_id(&mut self, peer_id: PeerId) -> Result<(), ()> { let was_connected = self.pool.is_connected(peer_id); @@ -647,10 +693,7 @@ where &mut self.behaviour } - fn handle_pool_event( - &mut self, - event: PoolEvent>, - ) -> Option>> { + fn handle_pool_event(&mut self, event: PoolEvent>) { match event { PoolEvent::ConnectionEstablished { peer_id, @@ -682,11 +725,14 @@ where }, )); - return Some(SwarmEvent::OutgoingConnectionError { - peer_id: Some(peer_id), - connection_id: id, - error: dial_error, - }); + self.pending_swarm_events.push_back( + SwarmEvent::OutgoingConnectionError { + peer_id: Some(peer_id), + connection_id: id, + error: dial_error, + }, + ); + return; } } } @@ -712,12 +758,15 @@ where }, )); - return Some(SwarmEvent::IncomingConnectionError { - connection_id: id, - send_back_addr, - local_addr, - error: listen_error, - }); + self.pending_swarm_events.push_back( + SwarmEvent::IncomingConnectionError { + connection_id: id, + send_back_addr, + local_addr, + error: listen_error, + }, + ); + return; } } } @@ -741,11 +790,11 @@ where self.pool .spawn_connection(id, peer_id, &endpoint, connection, handler); - log::debug!( - "Connection established: {:?} {:?}; Total (peer): {}.", - peer_id, - endpoint, - num_established, + tracing::debug!( + peer=%peer_id, + ?endpoint, + total_peers=%num_established, + "Connection established" ); let failed_addresses = concurrent_dial_errors .as_ref() @@ -767,14 +816,15 @@ where }, )); 
self.supported_protocols = supported_protocols; - return Some(SwarmEvent::ConnectionEstablished { - peer_id, - connection_id: id, - num_established, - endpoint, - concurrent_dial_errors, - established_in, - }); + self.pending_swarm_events + .push_back(SwarmEvent::ConnectionEstablished { + peer_id, + connection_id: id, + num_established, + endpoint, + concurrent_dial_errors, + established_in, + }); } PoolEvent::PendingOutboundConnectionError { id: connection_id, @@ -791,16 +841,17 @@ where })); if let Some(peer) = peer { - log::debug!("Connection attempt to {:?} failed with {:?}.", peer, error,); + tracing::debug!(%peer, "Connection attempt to peer failed with {:?}.", error,); } else { - log::debug!("Connection attempt to unknown peer failed with {:?}", error); + tracing::debug!("Connection attempt to unknown peer failed with {:?}", error); } - return Some(SwarmEvent::OutgoingConnectionError { - peer_id: peer, - connection_id, - error, - }); + self.pending_swarm_events + .push_back(SwarmEvent::OutgoingConnectionError { + peer_id: peer, + connection_id, + error, + }); } PoolEvent::PendingInboundConnectionError { id, @@ -810,7 +861,7 @@ where } => { let error = error.into(); - log::debug!("Incoming connection failed: {:?}", error); + tracing::debug!("Incoming connection failed: {:?}", error); self.behaviour .on_swarm_event(FromSwarm::ListenFailure(ListenFailure { local_addr: &local_addr, @@ -818,33 +869,33 @@ where error: &error, connection_id: id, })); - return Some(SwarmEvent::IncomingConnectionError { - connection_id: id, - local_addr, - send_back_addr, - error, - }); + self.pending_swarm_events + .push_back(SwarmEvent::IncomingConnectionError { + connection_id: id, + local_addr, + send_back_addr, + error, + }); } PoolEvent::ConnectionClosed { id, connected, error, remaining_established_connection_ids, - handler, .. 
} => { if let Some(error) = error.as_ref() { - log::debug!( - "Connection closed with error {:?}: {:?}; Total (peer): {}.", + tracing::debug!( + total_peers=%remaining_established_connection_ids.len(), + "Connection closed with error {:?}: {:?}", error, connected, - remaining_established_connection_ids.len() ); } else { - log::debug!( - "Connection closed: {:?}; Total (peer): {}.", - connected, - remaining_established_connection_ids.len() + tracing::debug!( + total_peers=%remaining_established_connection_ids.len(), + "Connection closed: {:?}", + connected ); } let peer_id = connected.peer_id; @@ -857,16 +908,16 @@ where peer_id, connection_id: id, endpoint: &endpoint, - handler, remaining_established: num_established as usize, })); - return Some(SwarmEvent::ConnectionClosed { - peer_id, - connection_id: id, - endpoint, - cause: error, - num_established, - }); + self.pending_swarm_events + .push_back(SwarmEvent::ConnectionClosed { + peer_id, + connection_id: id, + endpoint, + cause: error, + num_established, + }); } PoolEvent::ConnectionEvent { peer_id, id, event } => { self.behaviour @@ -887,8 +938,6 @@ where })); } } - - None } fn handle_transport_event( @@ -897,7 +946,7 @@ where as Transport>::ListenerUpgrade, io::Error, >, - ) -> Option>> { + ) { match event { TransportEvent::Incoming { listener_id: _, @@ -924,12 +973,14 @@ where connection_id, })); - return Some(SwarmEvent::IncomingConnectionError { - connection_id, - local_addr, - send_back_addr, - error: listen_error, - }); + self.pending_swarm_events + .push_back(SwarmEvent::IncomingConnectionError { + connection_id, + local_addr, + send_back_addr, + error: listen_error, + }); + return; } } @@ -942,17 +993,22 @@ where connection_id, ); - Some(SwarmEvent::IncomingConnection { - connection_id, - local_addr, - send_back_addr, - }) + self.pending_swarm_events + .push_back(SwarmEvent::IncomingConnection { + connection_id, + local_addr, + send_back_addr, + }) } TransportEvent::NewAddress { listener_id, 
listen_addr, } => { - log::debug!("Listener {:?}; New address: {:?}", listener_id, listen_addr); + tracing::debug!( + listener=?listener_id, + address=%listen_addr, + "New listener address" + ); let addrs = self.listened_addrs.entry(listener_id).or_default(); if !addrs.contains(&listen_addr) { addrs.push(listen_addr.clone()) @@ -962,19 +1018,20 @@ where listener_id, addr: &listen_addr, })); - Some(SwarmEvent::NewListenAddr { - listener_id, - address: listen_addr, - }) + self.pending_swarm_events + .push_back(SwarmEvent::NewListenAddr { + listener_id, + address: listen_addr, + }) } TransportEvent::AddressExpired { listener_id, listen_addr, } => { - log::debug!( - "Listener {:?}; Expired address {:?}.", - listener_id, - listen_addr + tracing::debug!( + listener=?listener_id, + address=%listen_addr, + "Expired listener address" ); if let Some(addrs) = self.listened_addrs.get_mut(&listener_id) { addrs.retain(|a| a != &listen_addr); @@ -984,16 +1041,21 @@ where listener_id, addr: &listen_addr, })); - Some(SwarmEvent::ExpiredListenAddr { - listener_id, - address: listen_addr, - }) + self.pending_swarm_events + .push_back(SwarmEvent::ExpiredListenAddr { + listener_id, + address: listen_addr, + }) } TransportEvent::ListenerClosed { listener_id, reason, } => { - log::debug!("Listener {:?}; Closed by {:?}.", listener_id, reason); + tracing::debug!( + listener=?listener_id, + ?reason, + "Listener closed" + ); let addrs = self.listened_addrs.remove(&listener_id).unwrap_or_default(); for addr in addrs.iter() { self.behaviour.on_swarm_event(FromSwarm::ExpiredListenAddr( @@ -1005,11 +1067,12 @@ where listener_id, reason: reason.as_ref().copied(), })); - Some(SwarmEvent::ListenerClosed { - listener_id, - addresses: addrs.to_vec(), - reason, - }) + self.pending_swarm_events + .push_back(SwarmEvent::ListenerClosed { + listener_id, + addresses: addrs.to_vec(), + reason, + }) } TransportEvent::ListenerError { listener_id, error } => { self.behaviour @@ -1017,7 +1080,8 @@ where 
listener_id, err: &error, })); - Some(SwarmEvent::ListenerError { listener_id, error }) + self.pending_swarm_events + .push_back(SwarmEvent::ListenerError { listener_id, error }) } } } @@ -1025,14 +1089,17 @@ where fn handle_behaviour_event( &mut self, event: ToSwarm>, - ) -> Option>> { + ) { match event { - ToSwarm::GenerateEvent(event) => return Some(SwarmEvent::Behaviour(event)), + ToSwarm::GenerateEvent(event) => { + self.pending_swarm_events + .push_back(SwarmEvent::Behaviour(event)); + } ToSwarm::Dial { opts } => { let peer_id = opts.get_peer_id(); let connection_id = opts.connection_id(); if let Ok(()) = self.dial(opts) { - return Some(SwarmEvent::Dialing { + self.pending_swarm_events.push_back(SwarmEvent::Dialing { peer_id, connection_id, }); @@ -1050,7 +1117,7 @@ where handler, event, } => { - assert!(self.pending_event.is_none()); + assert!(self.pending_handler_event.is_none()); let handler = match handler { NotifyHandler::One(connection) => PendingNotifyHandler::One(connection), NotifyHandler::Any => { @@ -1062,7 +1129,7 @@ where } }; - self.pending_event = Some((peer_id, handler, event)); + self.pending_handler_event = Some((peer_id, handler, event)); } ToSwarm::NewExternalAddrCandidate(addr) => { // Apply address translation to the candidate address. 
@@ -1087,20 +1154,28 @@ where .on_swarm_event(FromSwarm::NewExternalAddrCandidate( NewExternalAddrCandidate { addr: &addr }, )); + self.pending_swarm_events + .push_back(SwarmEvent::NewExternalAddrCandidate { address: addr }); } else { for addr in translated_addresses { self.behaviour .on_swarm_event(FromSwarm::NewExternalAddrCandidate( NewExternalAddrCandidate { addr: &addr }, )); + self.pending_swarm_events + .push_back(SwarmEvent::NewExternalAddrCandidate { address: addr }); } } } ToSwarm::ExternalAddrConfirmed(addr) => { - self.add_external_address(addr); + self.add_external_address(addr.clone()); + self.pending_swarm_events + .push_back(SwarmEvent::ExternalAddrConfirmed { address: addr }); } ToSwarm::ExternalAddrExpired(addr) => { self.remove_external_address(&addr); + self.pending_swarm_events + .push_back(SwarmEvent::ExternalAddrExpired { address: addr }); } ToSwarm::CloseConnection { peer_id, @@ -1115,18 +1190,26 @@ where self.pool.disconnect(peer_id); } }, + ToSwarm::NewExternalAddrOfPeer { peer_id, address } => { + self.behaviour + .on_swarm_event(FromSwarm::NewExternalAddrOfPeer(NewExternalAddrOfPeer { + peer_id, + addr: &address, + })); + self.pending_swarm_events + .push_back(SwarmEvent::NewExternalAddrOfPeer { peer_id, address }); + } } - - None } /// Internal function used by everything event-related. /// /// Polls the `Swarm` for the next event. + #[tracing::instrument(level = "debug", name = "Swarm::poll", skip(self, cx))] fn poll_next_event( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll> { // We use a `this` variable because the compiler can't mutably borrow multiple times // across a `Deref`. let this = &mut *self; @@ -1141,7 +1224,11 @@ where // // (2) is polled before (3) to prioritize existing connections over upgrading new incoming connections. 
loop { - match this.pending_event.take() { + if let Some(swarm_event) = this.pending_swarm_events.pop_front() { + return Poll::Ready(swarm_event); + } + + match this.pending_handler_event.take() { // Try to deliver the pending event emitted by the [`NetworkBehaviour`] in the previous // iteration to the connection handler(s). Some((peer_id, handler, event)) => match handler { @@ -1150,7 +1237,7 @@ where Some(conn) => match notify_one(conn, event, cx) { None => continue, Some(event) => { - this.pending_event = Some((peer_id, handler, event)); + this.pending_handler_event = Some((peer_id, handler, event)); } }, None => continue, @@ -1161,54 +1248,36 @@ where None => continue, Some((event, ids)) => { let handler = PendingNotifyHandler::Any(ids); - this.pending_event = Some((peer_id, handler, event)); + this.pending_handler_event = Some((peer_id, handler, event)); } } } }, // No pending event. Allow the [`NetworkBehaviour`] to make progress. - None => { - let behaviour_poll = { - let mut parameters = SwarmPollParameters { - supported_protocols: &this.supported_protocols, - }; - this.behaviour.poll(cx, &mut parameters) - }; - - match behaviour_poll { - Poll::Pending => {} - Poll::Ready(behaviour_event) => { - if let Some(swarm_event) = this.handle_behaviour_event(behaviour_event) - { - return Poll::Ready(swarm_event); - } + None => match this.behaviour.poll(cx) { + Poll::Pending => {} + Poll::Ready(behaviour_event) => { + this.handle_behaviour_event(behaviour_event); - continue; - } + continue; } - } + }, } // Poll the known peers. match this.pool.poll(cx) { Poll::Pending => {} Poll::Ready(pool_event) => { - if let Some(swarm_event) = this.handle_pool_event(pool_event) { - return Poll::Ready(swarm_event); - } - + this.handle_pool_event(pool_event); continue; } - }; + } // Poll the listener(s) for new connections. 
match Pin::new(&mut this.transport).poll(cx) { Poll::Pending => {} Poll::Ready(transport_event) => { - if let Some(swarm_event) = this.handle_transport_event(transport_event) { - return Poll::Ready(swarm_event); - } - + this.handle_transport_event(transport_event); continue; } } @@ -1315,7 +1384,7 @@ impl futures::Stream for Swarm where TBehaviour: NetworkBehaviour, { - type Item = SwarmEvent, THandlerErr>; + type Item = SwarmEvent>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.as_mut().poll_next_event(cx).map(Some) @@ -1332,45 +1401,15 @@ where } } -/// Parameters passed to `poll()`, that the `NetworkBehaviour` has access to. -// TODO: #[derive(Debug)] -pub struct SwarmPollParameters<'a> { - supported_protocols: &'a [Vec], -} - -impl<'a> PollParameters for SwarmPollParameters<'a> { - type SupportedProtocolsIter = std::iter::Cloned>>; - - fn supported_protocols(&self) -> Self::SupportedProtocolsIter { - self.supported_protocols.iter().cloned() - } -} - -/// A [`SwarmBuilder`] provides an API for configuring and constructing a [`Swarm`]. -pub struct SwarmBuilder { - local_peer_id: PeerId, - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, +pub struct Config { pool_config: PoolConfig, } -impl SwarmBuilder -where - TBehaviour: NetworkBehaviour, -{ - /// Creates a new [`SwarmBuilder`] from the given transport, behaviour, local peer ID and - /// executor. The `Swarm` with its underlying `Network` is obtained via - /// [`SwarmBuilder::build`]. - pub fn with_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - executor: impl Executor + Send + 'static, - ) -> Self { +impl Config { + /// Creates a new [`Config`] from the given executor. The [`Swarm`] is obtained via + /// [`Swarm::new`]. 
+ pub fn with_executor(executor: impl Executor + Send + 'static) -> Self { Self { - local_peer_id, - transport, - behaviour, pool_config: PoolConfig::new(Some(Box::new(executor))), } } @@ -1385,75 +1424,26 @@ where /// } /// ``` #[cfg(feature = "wasm-bindgen")] - pub fn with_wasm_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - ) -> Self { - Self::with_executor( - transport, - behaviour, - local_peer_id, - crate::executor::WasmBindgenExecutor, - ) + pub fn with_wasm_executor() -> Self { + Self::with_executor(crate::executor::WasmBindgenExecutor) } - /// Builds a new [`SwarmBuilder`] from the given transport, behaviour, local peer ID and a - /// `tokio` executor. + /// Builds a new [`Config`] from the given `tokio` executor. #[cfg(all( feature = "tokio", not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")) ))] - pub fn with_tokio_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - ) -> Self { - Self::with_executor( - transport, - behaviour, - local_peer_id, - crate::executor::TokioExecutor, - ) + pub fn with_tokio_executor() -> Self { + Self::with_executor(crate::executor::TokioExecutor) } - /// Builds a new [`SwarmBuilder`] from the given transport, behaviour, local peer ID and a - /// `async-std` executor. + /// Builds a new [`Config`] from the given `async-std` executor. #[cfg(all( feature = "async-std", not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")) ))] - pub fn with_async_std_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - ) -> Self { - Self::with_executor( - transport, - behaviour, - local_peer_id, - crate::executor::AsyncStdExecutor, - ) - } - - /// Creates a new [`SwarmBuilder`] from the given transport, behaviour and local peer ID. 
The - /// `Swarm` with its underlying `Network` is obtained via [`SwarmBuilder::build`]. - /// - /// ## ⚠️ Performance warning - /// All connections will be polled on the current task, thus quite bad performance - /// characteristics should be expected. Whenever possible use an executor and - /// [`SwarmBuilder::with_executor`]. - pub fn without_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - ) -> Self { - Self { - local_peer_id, - transport, - behaviour, - pool_config: PoolConfig::new(None), - } + pub fn with_async_std_executor() -> Self { + Self::with_executor(crate::executor::AsyncStdExecutor) } /// Configures the number of events from the [`NetworkBehaviour`] in @@ -1465,7 +1455,7 @@ where /// volume of events. If this value is too low, then the [`Swarm`] will /// be sleeping more often than necessary. Increasing this value increases /// the overall memory usage. - pub fn notify_handler_buffer_size(mut self, n: NonZeroUsize) -> Self { + pub fn with_notify_handler_buffer_size(mut self, n: NonZeroUsize) -> Self { self.pool_config = self.pool_config.with_notify_handler_buffer_size(n); self } @@ -1481,13 +1471,13 @@ where /// usage, and more importantly the latency between the moment when an /// event is emitted and the moment when it is received by the /// [`NetworkBehaviour`]. - pub fn per_connection_event_buffer_size(mut self, n: usize) -> Self { + pub fn with_per_connection_event_buffer_size(mut self, n: usize) -> Self { self.pool_config = self.pool_config.with_per_connection_event_buffer_size(n); self } /// Number of addresses concurrently dialed for a single outbound connection attempt. 
- pub fn dial_concurrency_factor(mut self, factor: NonZeroU8) -> Self { + pub fn with_dial_concurrency_factor(mut self, factor: NonZeroU8) -> Self { self.pool_config = self.pool_config.with_dial_concurrency_factor(factor); self } @@ -1502,7 +1492,10 @@ where /// > **Note**: If configured, specific upgrade protocols for /// > individual [`SubstreamProtocol`]s emitted by the `NetworkBehaviour` /// > are ignored. - pub fn substream_upgrade_protocol_override(mut self, v: libp2p_core::upgrade::Version) -> Self { + pub fn with_substream_upgrade_protocol_override( + mut self, + v: libp2p_core::upgrade::Version, + ) -> Self { self.pool_config = self.pool_config.with_substream_upgrade_protocol_override(v); self } @@ -1514,8 +1507,9 @@ where /// Note: This only enforces a limit on the number of concurrently /// negotiating inbound streams. The total number of inbound streams on a /// connection is the sum of negotiating and negotiated streams. A limit on - /// the total number of streams can be enforced at the [`StreamMuxerBox`] level. - pub fn max_negotiating_inbound_streams(mut self, v: usize) -> Self { + /// the total number of streams can be enforced at the + /// [`StreamMuxerBox`] level. + pub fn with_max_negotiating_inbound_streams(mut self, v: usize) -> Self { self.pool_config = self.pool_config.with_max_negotiating_inbound_streams(v); self } @@ -1523,25 +1517,10 @@ where /// How long to keep a connection alive once it is idling. /// /// Defaults to 0. - pub fn idle_connection_timeout(mut self, timeout: Duration) -> Self { + pub fn with_idle_connection_timeout(mut self, timeout: Duration) -> Self { self.pool_config.idle_connection_timeout = timeout; self } - - /// Builds a `Swarm` with the current configuration. 
- pub fn build(self) -> Swarm { - log::info!("Local peer id: {}", self.local_peer_id); - Swarm { - local_peer_id: self.local_peer_id, - transport: self.transport, - pool: Pool::new(self.local_peer_id, self.pool_config), - behaviour: self.behaviour, - supported_protocols: Default::default(), - confirmed_external_addr: Default::default(), - listened_addrs: HashMap::new(), - pending_event: None, - } - } } /// Possible errors when trying to establish or upgrade an outbound connection. @@ -1593,6 +1572,7 @@ impl fmt::Display for DialError { ), DialError::DialPeerConditionFalse(PeerCondition::Disconnected) => write!(f, "Dial error: dial condition was configured to only happen when disconnected (`PeerCondition::Disconnected`), but node is already connected, thus cancelling new dial."), DialError::DialPeerConditionFalse(PeerCondition::NotDialing) => write!(f, "Dial error: dial condition was configured to only happen if there is currently no ongoing dialing attempt (`PeerCondition::NotDialing`), but a dial is in progress, thus cancelling new dial."), + DialError::DialPeerConditionFalse(PeerCondition::DisconnectedAndNotDialing) => write!(f, "Dial error: dial condition was configured to only happen when both disconnected (`PeerCondition::Disconnected`) and there is currently no ongoing dialing attempt (`PeerCondition::NotDialing`), but node is already connected or dial is in progress, thus cancelling new dial."), DialError::DialPeerConditionFalse(PeerCondition::Always) => unreachable!("Dial peer condition is by definition true."), DialError::Aborted => write!( f, @@ -1788,41 +1768,12 @@ impl NetworkInfo { } } -/// Ensures a given `Multiaddr` is a `/p2p/...` address for the given peer. -/// -/// If the given address is already a `p2p` address for the given peer, -/// i.e. the last encapsulated protocol is `/p2p/`, this is a no-op. 
-/// -/// If the given address is already a `p2p` address for a different peer -/// than the one given, the given `Multiaddr` is returned as an `Err`. -/// -/// If the given address is not yet a `p2p` address for the given peer, -/// the `/p2p/` protocol is appended to the returned address. -fn p2p_addr(peer: Option, addr: Multiaddr) -> Result { - let peer = match peer { - Some(p) => p, - None => return Ok(addr), - }; - - if let Some(multiaddr::Protocol::P2p(peer_id)) = addr.iter().last() { - if peer_id != peer { - return Err(addr); - } - - return Ok(addr); - } - - Ok(addr.with(multiaddr::Protocol::P2p(peer))) -} - #[cfg(test)] mod tests { use super::*; use crate::dummy; use crate::test::{CallTraceBehaviour, MockBehaviour}; - use futures::executor::block_on; - use futures::executor::ThreadPool; - use futures::{executor, future}; + use futures::future; use libp2p_core::multiaddr::multiaddr; use libp2p_core::transport::memory::MemoryTransportError; use libp2p_core::transport::TransportEvent; @@ -1841,7 +1792,8 @@ mod tests { } fn new_test_swarm( - ) -> SwarmBuilder>> { + config: Config, + ) -> Swarm>> { let id_keys = identity::Keypair::generate_ed25519(); let local_public_key = id_keys.public(); let transport = transport::MemoryTransport::default() @@ -1850,14 +1802,13 @@ mod tests { .multiplex(yamux::Config::default()) .boxed(); let behaviour = CallTraceBehaviour::new(MockBehaviour::new(dummy::ConnectionHandler)); - let builder = match ThreadPool::new().ok() { - Some(tp) => { - SwarmBuilder::with_executor(transport, behaviour, local_public_key.into(), tp) - } - None => SwarmBuilder::without_executor(transport, behaviour, local_public_key.into()), - }; - builder.idle_connection_timeout(Duration::from_secs(5)) + Swarm::new( + transport, + behaviour, + local_public_key.into(), + config.with_idle_connection_timeout(Duration::from_secs(5)), + ) } fn swarms_connected( @@ -1906,10 +1857,10 @@ mod tests { /// /// The test expects both behaviours to be notified via calls to 
[`NetworkBehaviour::on_swarm_event`] /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] - #[test] - fn test_swarm_disconnect() { - let mut swarm1 = new_test_swarm().build(); - let mut swarm2 = new_test_swarm().build(); + #[tokio::test] + async fn test_swarm_disconnect() { + let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); + let mut swarm2 = new_test_swarm(Config::with_tokio_executor()); let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); @@ -1927,7 +1878,7 @@ mod tests { } let mut state = State::Connecting; - executor::block_on(future::poll_fn(move |cx| loop { + future::poll_fn(move |cx| loop { let poll1 = Swarm::poll_next_event(Pin::new(&mut swarm1), cx); let poll2 = Swarm::poll_next_event(Pin::new(&mut swarm2), cx); match state { @@ -1959,7 +1910,8 @@ mod tests { if poll1.is_pending() && poll2.is_pending() { return Poll::Pending; } - })) + }) + .await } /// Establishes multiple connections between two peers, @@ -1968,10 +1920,10 @@ mod tests { /// /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] - #[test] - fn test_behaviour_disconnect_all() { - let mut swarm1 = new_test_swarm().build(); - let mut swarm2 = new_test_swarm().build(); + #[tokio::test] + async fn test_behaviour_disconnect_all() { + let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); + let mut swarm2 = new_test_swarm(Config::with_tokio_executor()); let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); @@ -1989,7 +1941,7 @@ mod tests { } let mut state = State::Connecting; - executor::block_on(future::poll_fn(move |cx| loop { + future::poll_fn(move |cx| loop { let poll1 = 
Swarm::poll_next_event(Pin::new(&mut swarm1), cx); let poll2 = Swarm::poll_next_event(Pin::new(&mut swarm2), cx); match state { @@ -2025,7 +1977,8 @@ mod tests { if poll1.is_pending() && poll2.is_pending() { return Poll::Pending; } - })) + }) + .await } /// Establishes multiple connections between two peers, @@ -2034,10 +1987,10 @@ mod tests { /// /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] - #[test] - fn test_behaviour_disconnect_one() { - let mut swarm1 = new_test_swarm().build(); - let mut swarm2 = new_test_swarm().build(); + #[tokio::test] + async fn test_behaviour_disconnect_one() { + let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); + let mut swarm2 = new_test_swarm(Config::with_tokio_executor()); let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); @@ -2055,7 +2008,7 @@ mod tests { let mut state = State::Connecting; let mut disconnected_conn_id = None; - executor::block_on(future::poll_fn(move |cx| loop { + future::poll_fn(move |cx| loop { let poll1 = Swarm::poll_next_event(Pin::new(&mut swarm1), cx); let poll2 = Swarm::poll_next_event(Pin::new(&mut swarm2), cx); match state { @@ -2099,7 +2052,8 @@ mod tests { if poll1.is_pending() && poll2.is_pending() { return Poll::Pending; } - })) + }) + .await } #[test] @@ -2114,10 +2068,11 @@ mod tests { } fn prop(concurrency_factor: DialConcurrencyFactor) { - block_on(async { - let mut swarm = new_test_swarm() - .dial_concurrency_factor(concurrency_factor.0) - .build(); + tokio::runtime::Runtime::new().unwrap().block_on(async { + let mut swarm = new_test_swarm( + Config::with_tokio_executor() + .with_dial_concurrency_factor(concurrency_factor.0), + ); // Listen on `concurrency_factor + 1` addresses. 
// @@ -2173,31 +2128,29 @@ mod tests { QuickCheck::new().tests(10).quickcheck(prop as fn(_) -> _); } - #[test] - fn invalid_peer_id() { + #[tokio::test] + async fn invalid_peer_id() { // Checks whether dialing an address containing the wrong peer id raises an error // for the expected peer id instead of the obtained peer id. - let mut swarm1 = new_test_swarm().build(); - let mut swarm2 = new_test_swarm().build(); + let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); + let mut swarm2 = new_test_swarm(Config::with_tokio_executor()); swarm1.listen_on("/memory/0".parse().unwrap()).unwrap(); - let address = - futures::executor::block_on(future::poll_fn(|cx| match swarm1.poll_next_unpin(cx) { - Poll::Ready(Some(SwarmEvent::NewListenAddr { address, .. })) => { - Poll::Ready(address) - } - Poll::Pending => Poll::Pending, - _ => panic!("Was expecting the listen address to be reported"), - })); + let address = future::poll_fn(|cx| match swarm1.poll_next_unpin(cx) { + Poll::Ready(Some(SwarmEvent::NewListenAddr { address, .. })) => Poll::Ready(address), + Poll::Pending => Poll::Pending, + _ => panic!("Was expecting the listen address to be reported"), + }) + .await; let other_id = PeerId::random(); let other_addr = address.with(multiaddr::Protocol::P2p(other_id)); swarm2.dial(other_addr.clone()).unwrap(); - let (peer_id, error) = futures::executor::block_on(future::poll_fn(|cx| { + let (peer_id, error) = future::poll_fn(|cx| { if let Poll::Ready(Some(SwarmEvent::IncomingConnection { .. })) = swarm1.poll_next_unpin(cx) {} @@ -2209,7 +2162,8 @@ mod tests { Poll::Ready(x) => panic!("unexpected {x:?}"), Poll::Pending => Poll::Pending, } - })); + }) + .await; assert_eq!(peer_id.unwrap(), other_id); match error { DialError::WrongPeerId { obtained, endpoint } => { @@ -2226,8 +2180,8 @@ mod tests { } } - #[test] - fn dial_self() { + #[tokio::test] + async fn dial_self() { // Check whether dialing ourselves correctly fails. 
// // Dialing the same address we're listening should result in three events: @@ -2238,17 +2192,15 @@ mod tests { // // The last two can happen in any order. - let mut swarm = new_test_swarm().build(); + let mut swarm = new_test_swarm(Config::with_tokio_executor()); swarm.listen_on("/memory/0".parse().unwrap()).unwrap(); - let local_address = - futures::executor::block_on(future::poll_fn(|cx| match swarm.poll_next_unpin(cx) { - Poll::Ready(Some(SwarmEvent::NewListenAddr { address, .. })) => { - Poll::Ready(address) - } - Poll::Pending => Poll::Pending, - _ => panic!("Was expecting the listen address to be reported"), - })); + let local_address = future::poll_fn(|cx| match swarm.poll_next_unpin(cx) { + Poll::Ready(Some(SwarmEvent::NewListenAddr { address, .. })) => Poll::Ready(address), + Poll::Pending => Poll::Pending, + _ => panic!("Was expecting the listen address to be reported"), + }) + .await; swarm.listened_addrs.clear(); // This is a hack to actually execute the dial to ourselves which would otherwise be filtered. @@ -2256,7 +2208,7 @@ mod tests { let mut got_dial_err = false; let mut got_inc_err = false; - futures::executor::block_on(future::poll_fn(|cx| -> Poll> { + future::poll_fn(|cx| -> Poll> { loop { match swarm.poll_next_unpin(cx) { Poll::Ready(Some(SwarmEvent::OutgoingConnectionError { @@ -2290,26 +2242,27 @@ mod tests { Poll::Pending => break Poll::Pending, } } - })) + }) + .await .unwrap(); } - #[test] - fn dial_self_by_id() { + #[tokio::test] + async fn dial_self_by_id() { // Trying to dial self by passing the same `PeerId` shouldn't even be possible in the first // place. - let swarm = new_test_swarm().build(); + let swarm = new_test_swarm(Config::with_tokio_executor()); let peer_id = *swarm.local_peer_id(); assert!(!swarm.is_connected(&peer_id)); } - #[async_std::test] + #[tokio::test] async fn multiple_addresses_err() { // Tries dialing multiple addresses, and makes sure there's one dialing error per address. 
let target = PeerId::random(); - let mut swarm = new_test_swarm().build(); + let mut swarm = new_test_swarm(Config::with_tokio_executor()); let addresses = HashSet::from([ multiaddr![Ip4([0, 0, 0, 0]), Tcp(rand::random::())], @@ -2351,16 +2304,18 @@ mod tests { } } - #[test] - fn aborting_pending_connection_surfaces_error() { - let _ = env_logger::try_init(); + #[tokio::test] + async fn aborting_pending_connection_surfaces_error() { + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); - let mut dialer = new_test_swarm().build(); - let mut listener = new_test_swarm().build(); + let mut dialer = new_test_swarm(Config::with_tokio_executor()); + let mut listener = new_test_swarm(Config::with_tokio_executor()); let listener_peer_id = *listener.local_peer_id(); listener.listen_on(multiaddr![Memory(0u64)]).unwrap(); - let listener_address = match block_on(listener.next()).unwrap() { + let listener_address = match listener.next().await.unwrap() { SwarmEvent::NewListenAddr { address, .. } => address, e => panic!("Unexpected network event: {e:?}"), }; @@ -2377,7 +2332,7 @@ mod tests { .disconnect_peer_id(listener_peer_id) .expect_err("Expect peer to not yet be connected."); - match block_on(dialer.next()).unwrap() { + match dialer.next().await.unwrap() { SwarmEvent::OutgoingConnectionError { error: DialError::Aborted, .. diff --git a/swarm/src/stream.rs b/swarm/src/stream.rs index 3c4c52afc33e..871352f3c6a9 100644 --- a/swarm/src/stream.rs +++ b/swarm/src/stream.rs @@ -1,16 +1,55 @@ use futures::{AsyncRead, AsyncWrite}; use libp2p_core::muxing::SubstreamBox; use libp2p_core::Negotiated; -use std::io::{IoSlice, IoSliceMut}; -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + io::{IoSlice, IoSliceMut}, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + +/// Counter for the number of active streams on a connection. 
+#[derive(Debug, Clone)] +pub(crate) struct ActiveStreamCounter(Arc<()>); + +impl ActiveStreamCounter { + pub(crate) fn default() -> Self { + Self(Arc::new(())) + } + + pub(crate) fn has_no_active_streams(&self) -> bool { + self.num_alive_streams() == 1 + } + + fn num_alive_streams(&self) -> usize { + Arc::strong_count(&self.0) + } +} #[derive(Debug)] -pub struct Stream(Negotiated); +pub struct Stream { + stream: Negotiated, + counter: Option, +} impl Stream { - pub(crate) fn new(stream: Negotiated) -> Self { - Self(stream) + pub(crate) fn new(stream: Negotiated, counter: ActiveStreamCounter) -> Self { + Self { + stream, + counter: Some(counter), + } + } + + /// Ignore this stream in the [Swarm](crate::Swarm)'s connection-keep-alive algorithm. + /// + /// By default, any active stream keeps a connection alive. For most protocols, + /// this is a good default as it ensures that the protocol is completed before + /// a connection is shut down. + /// Some protocols like libp2p's [ping](https://github.com/libp2p/specs/blob/master/ping/ping.md) + /// for example never complete and are of an auxiliary nature. + /// These protocols should opt-out of the keep alive algorithm using this method. 
+ pub fn ignore_for_keep_alive(&mut self) { + self.counter.take(); } } @@ -20,7 +59,7 @@ impl AsyncRead for Stream { cx: &mut Context<'_>, buf: &mut [u8], ) -> Poll> { - Pin::new(&mut self.get_mut().0).poll_read(cx, buf) + Pin::new(&mut self.get_mut().stream).poll_read(cx, buf) } fn poll_read_vectored( @@ -28,7 +67,7 @@ impl AsyncRead for Stream { cx: &mut Context<'_>, bufs: &mut [IoSliceMut<'_>], ) -> Poll> { - Pin::new(&mut self.get_mut().0).poll_read_vectored(cx, bufs) + Pin::new(&mut self.get_mut().stream).poll_read_vectored(cx, bufs) } } @@ -38,7 +77,7 @@ impl AsyncWrite for Stream { cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { - Pin::new(&mut self.get_mut().0).poll_write(cx, buf) + Pin::new(&mut self.get_mut().stream).poll_write(cx, buf) } fn poll_write_vectored( @@ -46,14 +85,14 @@ impl AsyncWrite for Stream { cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll> { - Pin::new(&mut self.get_mut().0).poll_write_vectored(cx, bufs) + Pin::new(&mut self.get_mut().stream).poll_write_vectored(cx, bufs) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.get_mut().0).poll_flush(cx) + Pin::new(&mut self.get_mut().stream).poll_flush(cx) } fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.get_mut().0).poll_close(cx) + Pin::new(&mut self.get_mut().stream).poll_close(cx) } } diff --git a/swarm/src/stream_protocol.rs b/swarm/src/stream_protocol.rs index bce0ec51279d..f746429a3d70 100644 --- a/swarm/src/stream_protocol.rs +++ b/swarm/src/stream_protocol.rs @@ -7,7 +7,7 @@ use std::sync::Arc; /// /// libp2p nodes use stream protocols to negotiate what to do with a newly opened stream. /// Stream protocols are string-based and must start with a forward slash: `/`. 
-#[derive(Debug, Clone, Eq)] +#[derive(Clone, Eq)] pub struct StreamProtocol { inner: Either<&'static str, Arc>, } @@ -50,6 +50,12 @@ impl AsRef for StreamProtocol { } } +impl fmt::Debug for StreamProtocol { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + either::for_both!(&self.inner, s => s.fmt(f)) + } +} + impl fmt::Display for StreamProtocol { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.inner.fmt(f) @@ -102,3 +108,25 @@ impl fmt::Display for InvalidProtocol { } impl std::error::Error for InvalidProtocol {} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn stream_protocol_print() { + let protocol = StreamProtocol::new("/foo/bar/1.0.0"); + + let debug = format!("{protocol:?}"); + let display = format!("{protocol}"); + + assert_eq!( + debug, r#""/foo/bar/1.0.0""#, + "protocol to debug print as string with quotes" + ); + assert_eq!( + display, "/foo/bar/1.0.0", + "protocol to display print as string without quotes" + ); + } +} diff --git a/swarm/src/test.rs b/swarm/src/test.rs index 6f39d56da910..547277550bb3 100644 --- a/swarm/src/test.rs +++ b/swarm/src/test.rs @@ -23,8 +23,8 @@ use crate::behaviour::{ FromSwarm, ListenerClosed, ListenerError, NewExternalAddrCandidate, NewListenAddr, NewListener, }; use crate::{ - ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour, PollParameters, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, }; use libp2p_core::{multiaddr::Multiaddr, transport::ListenerId, ConnectedPoint, Endpoint}; use libp2p_identity::PeerId; @@ -110,31 +110,11 @@ where Ok(self.addresses.get(&p).map_or(Vec::new(), |v| v.clone())) } - fn poll( - &mut self, - _: &mut Context, - _: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { self.next_action.take().map_or(Poll::Pending, Poll::Ready) } - fn on_swarm_event(&mut self, 
event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} fn on_connection_handler_event( &mut self, @@ -320,9 +300,8 @@ where peer_id, connection_id, endpoint, - handler, remaining_established, - }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { let mut other_closed_connections = self .on_connection_established @@ -370,7 +349,6 @@ where peer_id, connection_id, endpoint, - handler, remaining_established, })); } @@ -458,7 +436,9 @@ where .handle_established_outbound_connection(connection_id, peer, addr, role_override) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { + self.inner.on_swarm_event(event); + match event { FromSwarm::ConnectionEstablished(connection_established) => { self.on_connection_established(connection_established) @@ -466,68 +446,33 @@ where FromSwarm::ConnectionClosed(connection_closed) => { self.on_connection_closed(connection_closed) } - FromSwarm::DialFailure(DialFailure { - peer_id, - connection_id, - error, - }) => { + FromSwarm::DialFailure(DialFailure { peer_id, .. 
}) => { self.on_dial_failure.push(peer_id); - self.inner - .on_swarm_event(FromSwarm::DialFailure(DialFailure { - peer_id, - connection_id, - error, - })); } FromSwarm::NewListener(NewListener { listener_id }) => { self.on_new_listener.push(listener_id); - self.inner - .on_swarm_event(FromSwarm::NewListener(NewListener { listener_id })); } FromSwarm::NewListenAddr(NewListenAddr { listener_id, addr }) => { self.on_new_listen_addr.push((listener_id, addr.clone())); - self.inner - .on_swarm_event(FromSwarm::NewListenAddr(NewListenAddr { - listener_id, - addr, - })); } FromSwarm::ExpiredListenAddr(ExpiredListenAddr { listener_id, addr }) => { self.on_expired_listen_addr .push((listener_id, addr.clone())); - self.inner - .on_swarm_event(FromSwarm::ExpiredListenAddr(ExpiredListenAddr { - listener_id, - addr, - })); } FromSwarm::NewExternalAddrCandidate(NewExternalAddrCandidate { addr }) => { self.on_new_external_addr.push(addr.clone()); - self.inner - .on_swarm_event(FromSwarm::NewExternalAddrCandidate( - NewExternalAddrCandidate { addr }, - )); } FromSwarm::ExternalAddrExpired(ExternalAddrExpired { addr }) => { self.on_expired_external_addr.push(addr.clone()); - self.inner - .on_swarm_event(FromSwarm::ExternalAddrExpired(ExternalAddrExpired { addr })); } - FromSwarm::ListenerError(ListenerError { listener_id, err }) => { + FromSwarm::ListenerError(ListenerError { listener_id, .. 
}) => { self.on_listener_error.push(listener_id); - self.inner - .on_swarm_event(FromSwarm::ListenerError(ListenerError { listener_id, err })); } FromSwarm::ListenerClosed(ListenerClosed { listener_id, reason, }) => { self.on_listener_closed.push((listener_id, reason.is_ok())); - self.inner - .on_swarm_event(FromSwarm::ListenerClosed(ListenerClosed { - listener_id, - reason, - })); } _ => {} } @@ -559,10 +504,9 @@ where fn poll( &mut self, - cx: &mut Context, - args: &mut impl PollParameters, + cx: &mut Context<'_>, ) -> Poll>> { self.poll += 1; - self.inner.poll(cx, args) + self.inner.poll(cx) } } diff --git a/swarm/tests/connection_close.rs b/swarm/tests/connection_close.rs new file mode 100644 index 000000000000..4efe8d17e49e --- /dev/null +++ b/swarm/tests/connection_close.rs @@ -0,0 +1,146 @@ +use libp2p_core::upgrade::DeniedUpgrade; +use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_identity::PeerId; +use libp2p_swarm::handler::ConnectionEvent; +use libp2p_swarm::{ + ConnectionDenied, ConnectionHandler, ConnectionHandlerEvent, ConnectionId, FromSwarm, + NetworkBehaviour, SubstreamProtocol, Swarm, SwarmEvent, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, +}; +use libp2p_swarm_test::SwarmExt; +use std::task::{Context, Poll}; +use void::Void; + +#[async_std::test] +async fn sends_remaining_events_to_behaviour_on_connection_close() { + let mut swarm1 = Swarm::new_ephemeral(|_| Behaviour::new(3)); + let mut swarm2 = Swarm::new_ephemeral(|_| Behaviour::new(3)); + + swarm2.listen().with_memory_addr_external().await; + swarm1.connect(&mut swarm2).await; + + swarm1.disconnect_peer_id(*swarm2.local_peer_id()).unwrap(); + + match libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await { + ([SwarmEvent::ConnectionClosed { .. }], [SwarmEvent::ConnectionClosed { .. 
}]) => { + assert_eq!(swarm1.behaviour().state, 0); + assert_eq!(swarm2.behaviour().state, 0); + } + (e1, e2) => panic!("Unexpected events: {:?} {:?}", e1, e2), + } +} + +struct HandlerWithState { + precious_state: u64, +} + +struct Behaviour { + state: u64, +} + +impl Behaviour { + fn new(state: u64) -> Self { + Behaviour { state } + } +} + +impl NetworkBehaviour for Behaviour { + type ConnectionHandler = HandlerWithState; + type ToSwarm = (); + + fn handle_established_inbound_connection( + &mut self, + _: ConnectionId, + _: PeerId, + _: &Multiaddr, + _: &Multiaddr, + ) -> Result, ConnectionDenied> { + Ok(HandlerWithState { + precious_state: self.state, + }) + } + + fn handle_established_outbound_connection( + &mut self, + _: ConnectionId, + _: PeerId, + _: &Multiaddr, + _: Endpoint, + ) -> Result, ConnectionDenied> { + Ok(HandlerWithState { + precious_state: self.state, + }) + } + + fn on_swarm_event(&mut self, event: FromSwarm) { + if let FromSwarm::ConnectionClosed(_) = event { + assert_eq!(self.state, 0); + } + } + + fn on_connection_handler_event( + &mut self, + _peer_id: PeerId, + _connection_id: ConnectionId, + event: THandlerOutEvent, + ) { + assert_eq!(self.state, event); + self.state -= 1; + } + + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { + Poll::Pending + } +} + +impl ConnectionHandler for HandlerWithState { + type FromBehaviour = Void; + type ToBehaviour = u64; + type InboundProtocol = DeniedUpgrade; + type OutboundProtocol = DeniedUpgrade; + type InboundOpenInfo = (); + type OutboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(DeniedUpgrade, ()) + } + + fn connection_keep_alive(&self) -> bool { + true + } + + fn poll( + &mut self, + _: &mut Context<'_>, + ) -> Poll< + ConnectionHandlerEvent, + > { + Poll::Pending + } + + fn poll_close(&mut self, _: &mut Context<'_>) -> Poll> { + if self.precious_state > 0 { + let state = self.precious_state; + self.precious_state -= 1; + + return 
Poll::Ready(Some(state)); + } + + Poll::Ready(None) + } + + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { + void::unreachable(event) + } + + fn on_connection_event( + &mut self, + _: ConnectionEvent< + Self::InboundProtocol, + Self::OutboundProtocol, + Self::InboundOpenInfo, + Self::OutboundOpenInfo, + >, + ) { + } +} diff --git a/swarm/tests/listener.rs b/swarm/tests/listener.rs index 71d92cb0e1f0..8d22acc90e29 100644 --- a/swarm/tests/listener.rs +++ b/swarm/tests/listener.rs @@ -7,8 +7,8 @@ use libp2p_core::{multiaddr::Protocol, transport::ListenerId, Endpoint, Multiadd use libp2p_identity::PeerId; use libp2p_swarm::{ derive_prelude::NewListener, dummy, ConnectionDenied, ConnectionId, FromSwarm, ListenOpts, - ListenerClosed, ListenerError, NetworkBehaviour, NewListenAddr, PollParameters, Swarm, - SwarmEvent, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + ListenerClosed, ListenerError, NetworkBehaviour, NewListenAddr, Swarm, SwarmEvent, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }; use libp2p_swarm_test::SwarmExt; @@ -105,7 +105,7 @@ impl NetworkBehaviour for Behaviour { ) { } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::NewListener(NewListener { listener_id }) => { assert!(self.listeners.contains(&listener_id)); @@ -129,11 +129,7 @@ impl NetworkBehaviour for Behaviour { } } - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event); } diff --git a/swarm/tests/swarm_derive.rs b/swarm/tests/swarm_derive.rs index d06805916215..707abb03d6eb 100644 --- a/swarm/tests/swarm_derive.rs +++ b/swarm/tests/swarm_derive.rs @@ -98,7 +98,7 @@ fn three_fields() { struct Foo { ping: ping::Behaviour, identify: identify::Behaviour, - kad: libp2p_kad::Behaviour, + kad: libp2p_kad::Behaviour, } #[allow( 
@@ -327,7 +327,7 @@ fn with_either() { #[derive(NetworkBehaviour)] #[behaviour(prelude = "libp2p_swarm::derive_prelude")] struct Foo { - kad: libp2p_kad::Behaviour, + kad: libp2p_kad::Behaviour, ping_or_identify: Either, } @@ -350,10 +350,7 @@ fn with_generics() { #[allow(dead_code)] fn foo() { require_net_behaviour::< - Foo< - libp2p_kad::Behaviour, - libp2p_ping::Behaviour, - >, + Foo, libp2p_ping::Behaviour>, >(); } } @@ -370,8 +367,92 @@ fn with_generics_mixed() { #[allow(dead_code)] fn foo() { - require_net_behaviour::>>( - ); + require_net_behaviour::>>(); + } +} + +#[test] +fn with_generics_constrained() { + use std::task::{Context, Poll}; + trait Mark {} + struct Marked; + impl Mark for Marked {} + + /// A struct with a generic constraint, for which we manually implement `NetworkBehaviour`. + #[allow(dead_code)] + struct Bar { + a: A, + } + + impl NetworkBehaviour for Bar { + type ConnectionHandler = dummy::ConnectionHandler; + type ToSwarm = void::Void; + + fn handle_established_inbound_connection( + &mut self, + _: libp2p_swarm::ConnectionId, + _: libp2p_identity::PeerId, + _: &Multiaddr, + _: &Multiaddr, + ) -> Result, ConnectionDenied> { + Ok(dummy::ConnectionHandler) + } + + fn handle_established_outbound_connection( + &mut self, + _: libp2p_swarm::ConnectionId, + _: libp2p_identity::PeerId, + _: &Multiaddr, + _: Endpoint, + ) -> Result, ConnectionDenied> { + Ok(dummy::ConnectionHandler) + } + + fn on_swarm_event(&mut self, _event: FromSwarm) {} + + fn on_connection_handler_event( + &mut self, + _: libp2p_identity::PeerId, + _: libp2p_swarm::ConnectionId, + _: THandlerOutEvent, + ) { + } + + fn poll( + &mut self, + _: &mut Context<'_>, + ) -> Poll>> { + Poll::Pending + } + } + + /// A struct which uses the above, inheriting the generic constraint, + /// for which we want to derive the `NetworkBehaviour`. 
+ #[allow(dead_code)] + #[derive(NetworkBehaviour)] + #[behaviour(prelude = "libp2p_swarm::derive_prelude")] + struct Foo1 { + bar: Bar, + } + + /// A struct which uses the above, inheriting the generic constraint, + /// for which we want to derive the `NetworkBehaviour`. + /// + /// Using a where clause instead of inline constraint. + #[allow(dead_code)] + #[derive(NetworkBehaviour)] + #[behaviour(prelude = "libp2p_swarm::derive_prelude")] + struct Foo2 + where + A: Mark, + { + bar: Bar, + } + + #[allow(dead_code)] + fn foo() { + require_net_behaviour::>(); + require_net_behaviour::>(); } } @@ -404,7 +485,7 @@ fn custom_event_with_either() { prelude = "libp2p_swarm::derive_prelude" )] struct Foo { - kad: libp2p_kad::Behaviour, + kad: libp2p_kad::Behaviour, ping_or_identify: Either, } @@ -457,7 +538,7 @@ fn multiple_behaviour_attributes() { #[test] fn custom_out_event_no_type_parameters() { use libp2p_identity::PeerId; - use libp2p_swarm::{ConnectionId, PollParameters, ToSwarm}; + use libp2p_swarm::{ConnectionId, ToSwarm}; use std::task::Context; use std::task::Poll; @@ -500,29 +581,12 @@ fn custom_out_event_no_type_parameters() { fn poll( &mut self, - _ctx: &mut Context, - _: &mut impl PollParameters, + _: &mut Context<'_>, ) -> Poll>> { Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} } #[derive(NetworkBehaviour)] diff --git a/swarm/tests/ui/fail/out_event_deprecation.rs b/swarm/tests/ui/fail/out_event_deprecation.rs 
deleted file mode 100644 index f552ca32d101..000000000000 --- a/swarm/tests/ui/fail/out_event_deprecation.rs +++ /dev/null @@ -1,21 +0,0 @@ -#![deny(warnings)] // Ensure the warnings we produce surface as errors. - -use libp2p_ping as ping; - -#[derive(libp2p_swarm::NetworkBehaviour)] -#[behaviour(out_event = "FooEvent", prelude = "libp2p_swarm::derive_prelude")] -struct Foo { - ping: ping::Behaviour, -} - -struct FooEvent; - -impl From for FooEvent { - fn from(_: ping::Event) -> Self { - unimplemented!() - } -} - -fn main() { - -} diff --git a/swarm/tests/ui/fail/out_event_deprecation.stderr b/swarm/tests/ui/fail/out_event_deprecation.stderr deleted file mode 100644 index e8c9c20a9950..000000000000 --- a/swarm/tests/ui/fail/out_event_deprecation.stderr +++ /dev/null @@ -1,12 +0,0 @@ -error: use of deprecated constant `out_event_renamed_to_to_swarm::_w`: The `out_event` attribute has been renamed to `to_swarm`. - --> tests/ui/fail/out_event_deprecation.rs:6:13 - | -6 | #[behaviour(out_event = "FooEvent", prelude = "libp2p_swarm::derive_prelude")] - | ^^^^^^^^^ - | -note: the lint level is defined here - --> tests/ui/fail/out_event_deprecation.rs:1:9 - | -1 | #![deny(warnings)] // Ensure the warnings we produce surface as errors. - | ^^^^^^^^ - = note: `#[deny(deprecated)]` implied by `#[deny(warnings)]` diff --git a/transports/deflate/CHANGELOG.md b/transports/deflate/CHANGELOG.md deleted file mode 100644 index bb1b85d64db1..000000000000 --- a/transports/deflate/CHANGELOG.md +++ /dev/null @@ -1,112 +0,0 @@ -## 0.40.1 - unreleased - -- Deprecate in preparation for removal from the workspace. - See [issue 4522](https://github.com/libp2p/rust-libp2p/issues/4522) for details. - See [PR 4540](https://github.com/libp2p/rust-libp2p/pull/4540). - -## 0.40.0 - -- Raise MSRV to 1.65. - See [PR 3715]. - -[PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 - -## 0.39.0 - -- Update to `libp2p-core` `v0.39.0`. - -## 0.38.0 - -- Update to `libp2p-core` `v0.38.0`. 
- -- Update `rust-version` to reflect the actual MSRV: 1.60.0. See [PR 3090]. - -[PR 3090]: https://github.com/libp2p/rust-libp2p/pull/3090 - -## 0.37.0 - -- Update to `libp2p-core` `v0.37.0`. - -## 0.36.0 - -- Update to `libp2p-core` `v0.36.0`. - -## 0.35.0 - -- Update to `libp2p-core` `v0.35.0`. - -## 0.34.0 - -- Update to `libp2p-core` `v0.34.0`. - -## 0.33.0 - -- Update to `libp2p-core` `v0.33.0`. - -## 0.32.0 [2022-02-22] - -- Update to `libp2p-core` `v0.32.0`. - -## 0.31.0 [2022-01-27] - -- Update dependencies. - -- Migrate to Rust edition 2021 (see [PR 2339]). - -[PR 2339]: https://github.com/libp2p/rust-libp2p/pull/2339 - -## 0.30.0 [2021-11-01] - -- Make default features of `libp2p-core` optional. - [PR 2181](https://github.com/libp2p/rust-libp2p/pull/2181) - -- Update dependencies. - -## 0.29.0 [2021-07-12] - -- Update dependencies. - -## 0.28.0 [2021-03-17] - -- Update `libp2p-core`. - -## 0.27.1 [2021-01-27] - -- Ensure read buffers are initialised. - [PR 1933](https://github.com/libp2p/rust-libp2p/pull/1933). - -## 0.27.0 [2021-01-12] - -- Update dependencies. - -## 0.26.0 [2020-12-17] - -- Update `libp2p-core`. - -## 0.25.0 [2020-11-25] - -- Update `libp2p-core`. - -## 0.24.0 [2020-11-09] - -- Update dependencies. - -## 0.23.0 [2020-10-16] - -- Bump `libp2p-core` dependency. - -## 0.22.0 [2020-09-09] - -- Bump `libp2p-core` dependency. - -## 0.21.0 [2020-08-18] - -- Bump `libp2p-core` dependency. - -## 0.20.0 [2020-07-01] - -- Updated dependencies. - -## 0.19.2 [2020-06-22] - -- Updated dependencies. 
diff --git a/transports/deflate/Cargo.toml b/transports/deflate/Cargo.toml deleted file mode 100644 index 9e2e1f4e0292..000000000000 --- a/transports/deflate/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "libp2p-deflate" -edition = "2021" -rust-version = { workspace = true } -description = "Deflate encryption protocol for libp2p" -version = "0.40.1" -authors = ["Parity Technologies "] -license = "MIT" -repository = "https://github.com/libp2p/rust-libp2p" -keywords = ["peer-to-peer", "libp2p", "networking"] -categories = ["network-programming", "asynchronous"] - -[dependencies] -futures = "0.3.28" -libp2p-core = { workspace = true } -flate2 = "1.0" - -[dev-dependencies] -async-std = "1.6.2" -libp2p-tcp = { workspace = true, features = ["async-io"] } -quickcheck = { workspace = true } -rand = "0.8" -futures_ringbuf = "0.4.0" - -# Passing arguments to the docsrs builder in order to properly document cfg's. -# More information: https://docs.rs/about/builds#cross-compiling -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] -rustc-args = ["--cfg", "docsrs"] - -[lints] -workspace = true diff --git a/transports/deflate/src/lib.rs b/transports/deflate/src/lib.rs deleted file mode 100644 index 54367ff2bff7..000000000000 --- a/transports/deflate/src/lib.rs +++ /dev/null @@ -1,278 +0,0 @@ -#![allow(deprecated)] -// Copyright 2019 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -use futures::{prelude::*, ready}; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; -use std::{io, iter, pin::Pin, task::Context, task::Poll}; - -#[deprecated( - note = "Will be removed in the next release, see https://github.com/libp2p/rust-libp2p/issues/4522 for details." 
-)] -#[derive(Debug, Copy, Clone)] -pub struct DeflateConfig { - compression: flate2::Compression, -} - -impl Default for DeflateConfig { - fn default() -> Self { - DeflateConfig { - compression: flate2::Compression::fast(), - } - } -} - -impl UpgradeInfo for DeflateConfig { - type Info = &'static str; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once("/deflate/1.0.0") - } -} - -impl InboundUpgrade for DeflateConfig -where - C: AsyncRead + AsyncWrite, -{ - type Output = DeflateOutput; - type Error = io::Error; - type Future = future::Ready>; - - fn upgrade_inbound(self, r: C, _: Self::Info) -> Self::Future { - future::ok(DeflateOutput::new(r, self.compression)) - } -} - -impl OutboundUpgrade for DeflateConfig -where - C: AsyncRead + AsyncWrite, -{ - type Output = DeflateOutput; - type Error = io::Error; - type Future = future::Ready>; - - fn upgrade_outbound(self, w: C, _: Self::Info) -> Self::Future { - future::ok(DeflateOutput::new(w, self.compression)) - } -} - -/// Decodes and encodes traffic using DEFLATE. -#[derive(Debug)] -pub struct DeflateOutput { - /// Inner stream where we read compressed data from and write compressed data to. - inner: S, - /// Internal object used to hold the state of the compression. - compress: flate2::Compress, - /// Internal object used to hold the state of the decompression. - decompress: flate2::Decompress, - /// Temporary buffer between `compress` and `inner`. Stores compressed bytes that need to be - /// sent out once `inner` is ready to accept more. - write_out: Vec, - /// Temporary buffer between `decompress` and `inner`. Stores compressed bytes that need to be - /// given to `decompress`. - read_interm: Vec, - /// When we read from `inner` and `Ok(0)` is returned, we set this to `true` so that we don't - /// read from it again. 
- inner_read_eof: bool, -} - -impl DeflateOutput { - fn new(inner: S, compression: flate2::Compression) -> Self { - DeflateOutput { - inner, - compress: flate2::Compress::new(compression, false), - decompress: flate2::Decompress::new(false), - write_out: Vec::with_capacity(256), - read_interm: Vec::with_capacity(256), - inner_read_eof: false, - } - } - - /// Tries to write the content of `self.write_out` to `self.inner`. - /// Returns `Ready(Ok(()))` if `self.write_out` is empty. - fn flush_write_out(&mut self, cx: &mut Context<'_>) -> Poll> - where - S: AsyncWrite + Unpin, - { - loop { - if self.write_out.is_empty() { - return Poll::Ready(Ok(())); - } - - match AsyncWrite::poll_write(Pin::new(&mut self.inner), cx, &self.write_out) { - Poll::Ready(Ok(0)) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Poll::Ready(Ok(n)) => self.write_out = self.write_out.split_off(n), - Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), - Poll::Pending => return Poll::Pending, - }; - } - } -} - -impl AsyncRead for DeflateOutput -where - S: AsyncRead + Unpin, -{ - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - // We use a `this` variable because the compiler doesn't allow multiple mutable borrows - // across a `Deref`. - let this = &mut *self; - - loop { - // Read from `self.inner` into `self.read_interm` if necessary. 
- if this.read_interm.is_empty() && !this.inner_read_eof { - this.read_interm - .resize(this.read_interm.capacity() + 256, 0); - - match AsyncRead::poll_read(Pin::new(&mut this.inner), cx, &mut this.read_interm) { - Poll::Ready(Ok(0)) => { - this.inner_read_eof = true; - this.read_interm.clear(); - } - Poll::Ready(Ok(n)) => this.read_interm.truncate(n), - Poll::Ready(Err(err)) => { - this.read_interm.clear(); - return Poll::Ready(Err(err)); - } - Poll::Pending => { - this.read_interm.clear(); - return Poll::Pending; - } - } - } - debug_assert!(!this.read_interm.is_empty() || this.inner_read_eof); - - let before_out = this.decompress.total_out(); - let before_in = this.decompress.total_in(); - let ret = this.decompress.decompress( - &this.read_interm, - buf, - if this.inner_read_eof { - flate2::FlushDecompress::Finish - } else { - flate2::FlushDecompress::None - }, - )?; - - // Remove from `self.read_interm` the bytes consumed by the decompressor. - let consumed = (this.decompress.total_in() - before_in) as usize; - this.read_interm = this.read_interm.split_off(consumed); - - let read = (this.decompress.total_out() - before_out) as usize; - if read != 0 || ret == flate2::Status::StreamEnd { - return Poll::Ready(Ok(read)); - } - } - } -} - -impl AsyncWrite for DeflateOutput -where - S: AsyncWrite + Unpin, -{ - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - // We use a `this` variable because the compiler doesn't allow multiple mutable borrows - // across a `Deref`. - let this = &mut *self; - - // We don't want to accumulate too much data in `self.write_out`, so we only proceed if it - // is empty. - ready!(this.flush_write_out(cx))?; - - // We special-case this, otherwise an empty buffer would make the loop below infinite. - if buf.is_empty() { - return Poll::Ready(Ok(0)); - } - - // Unfortunately, the compressor might be in a "flushing mode", not accepting any input - // data. 
We don't want to return `Ok(0)` in that situation, as that would be wrong. - // Instead, we invoke the compressor in a loop until it accepts some of our data. - loop { - let before_in = this.compress.total_in(); - this.write_out.reserve(256); // compress_vec uses the Vec's capacity - let ret = this.compress.compress_vec( - buf, - &mut this.write_out, - flate2::FlushCompress::None, - )?; - let written = (this.compress.total_in() - before_in) as usize; - - if written != 0 || ret == flate2::Status::StreamEnd { - return Poll::Ready(Ok(written)); - } - } - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // We use a `this` variable because the compiler doesn't allow multiple mutable borrows - // across a `Deref`. - let this = &mut *self; - - ready!(this.flush_write_out(cx))?; - this.compress - .compress_vec(&[], &mut this.write_out, flate2::FlushCompress::Sync)?; - - loop { - ready!(this.flush_write_out(cx))?; - - debug_assert!(this.write_out.is_empty()); - // We ask the compressor to flush everything into `self.write_out`. - this.write_out.reserve(256); // compress_vec uses the Vec's capacity - this.compress - .compress_vec(&[], &mut this.write_out, flate2::FlushCompress::None)?; - if this.write_out.is_empty() { - break; - } - } - - AsyncWrite::poll_flush(Pin::new(&mut this.inner), cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // We use a `this` variable because the compiler doesn't allow multiple mutable borrows - // across a `Deref`. - let this = &mut *self; - - loop { - ready!(this.flush_write_out(cx))?; - - // We ask the compressor to flush everything into `self.write_out`. 
- debug_assert!(this.write_out.is_empty()); - this.write_out.reserve(256); // compress_vec uses the Vec's capacity - this.compress - .compress_vec(&[], &mut this.write_out, flate2::FlushCompress::Finish)?; - if this.write_out.is_empty() { - break; - } - } - - AsyncWrite::poll_close(Pin::new(&mut this.inner), cx) - } -} diff --git a/transports/deflate/tests/test.rs b/transports/deflate/tests/test.rs deleted file mode 100644 index 4224dcf435be..000000000000 --- a/transports/deflate/tests/test.rs +++ /dev/null @@ -1,82 +0,0 @@ -#![allow(deprecated)] - -// Copyright 2019 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use futures::prelude::*; -use libp2p_core::OutboundUpgrade; -use libp2p_deflate::DeflateConfig; -use quickcheck::{QuickCheck, TestResult}; -use rand::RngCore; - -#[test] -fn deflate() { - fn prop(message: Vec) -> TestResult { - if message.is_empty() { - return TestResult::discard(); - } - futures::executor::block_on(run(message)); - TestResult::passed() - } - QuickCheck::new().quickcheck(prop as fn(Vec) -> TestResult) -} - -#[test] -fn lot_of_data() { - let mut v = vec![0; 2 * 1024 * 1024]; - rand::thread_rng().fill_bytes(&mut v); - futures::executor::block_on(run(v)); -} - -async fn run(message1: Vec) { - let (server, client) = futures_ringbuf::Endpoint::pair(100, 100); - - let message2 = message1.clone(); - - let client_task = async move { - let mut client = DeflateConfig::default() - .upgrade_outbound(client, "") - .await - .unwrap(); - - let mut buf = vec![0; message2.len()]; - client.read_exact(&mut buf).await.expect("read_exact"); - assert_eq!(&buf[..], &message2[..]); - - client.write_all(&message2).await.expect("write_all"); - client.close().await.expect("close") - }; - - let server_task = async move { - let mut server = DeflateConfig::default() - .upgrade_outbound(server, "") - .await - .unwrap(); - - server.write_all(&message1).await.expect("write_all"); - server.close().await.expect("close"); - - let mut buf = Vec::new(); - server.read_to_end(&mut buf).await.expect("read_to_end"); - assert_eq!(&buf[..], &message1[..]); - }; - - futures::future::join(server_task, client_task).await; -} diff --git a/transports/dns/CHANGELOG.md b/transports/dns/CHANGELOG.md index 29b5ac4403a1..91cfbc00883c 100644 --- a/transports/dns/CHANGELOG.md +++ b/transports/dns/CHANGELOG.md @@ -1,4 +1,20 @@ -## 0.40.1 - unreleased +## 0.41.1 + +- Add hidden API that removes unnecessary async for `async-std`. + See [PR 4808](https://github.com/libp2p/rust-libp2p/pull/4808). + +## 0.41.0 + +- Make `tokio::Transport::custom` and `async_std::Transport::custom` constructors infallible. 
+ See [PR 4464](https://github.com/libp2p/rust-libp2p/pull/4464). +- Remove deprecated type-aliases. + See [PR 4739](https://github.com/libp2p/rust-libp2p/pull/4739). +- Migrate to the `hickory-dns` project which has rebranded from `trust-dns`. + We also remove the `tokio-dns-over-rustls` and `tokio-dns-over-https-rustls` features. + Users should activate these features themselves on `hickory-resolver` if so desired. + See [PR 4780](https://github.com/libp2p/rust-libp2p/pull/4780). + +## 0.40.1 - Remove `Dns` prefix from types like `TokioDnsConfig` and `DnsConfig` in favor of modules that describe the different variants. Users are encouraged to import the `libp2p::dns` module and refer to types as `dns::tokio::Transport` and `dns::async_std::Transport`. @@ -6,7 +22,7 @@ [PR 4505]: https://github.com/libp2p/rust-libp2p/pull/4505 -## 0.40.0 +## 0.40.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/transports/dns/Cargo.toml b/transports/dns/Cargo.toml index 1d5fb1bc7f9a..9650893d5751 100644 --- a/transports/dns/Cargo.toml +++ b/transports/dns/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-dns" edition = "2021" rust-version = { workspace = true } description = "DNS transport implementation for libp2p" -version = "0.40.1" +version = "0.41.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,29 +11,25 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -async-trait = "0.1.72" +async-std-resolver = { version = "0.24", optional = true } +async-trait = "0.1.77" +futures = "0.3.30" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" -futures = "0.3.28" -async-std-resolver = { version = "0.23", optional = true } parking_lot = "0.12.0" -trust-dns-resolver = { version = "0.23", default-features = false, features = ["system-config"] } -smallvec = "1.11.1" +hickory-resolver = { version = "0.24.0", default-features 
= false, features = ["system-config"] } +smallvec = "1.12.0" +tracing = "0.1.37" [dev-dependencies] -env_logger = "0.10" +libp2p-identity = { workspace = true, features = ["rand"] } tokio-crate = { package = "tokio", version = "1.0", default-features = false, features = ["rt", "time"] } async-std-crate = { package = "async-std", version = "1.6" } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [features] async-std = ["async-std-resolver"] -tokio = ["trust-dns-resolver/tokio-runtime"] -# The `tokio-` prefix and feature dependency is just to be explicit, -# since these features of `trust-dns-resolver` are currently only -# available for `tokio`. -tokio-dns-over-rustls = ["tokio", "trust-dns-resolver/dns-over-rustls"] -tokio-dns-over-https-rustls = ["tokio", "trust-dns-resolver/dns-over-https-rustls"] +tokio = ["hickory-resolver/tokio-runtime"] # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs index b27b14a77c0b..be847e568ed1 100644 --- a/transports/dns/src/lib.rs +++ b/transports/dns/src/lib.rs @@ -60,12 +60,13 @@ #[cfg(feature = "async-std")] pub mod async_std { use async_std_resolver::AsyncStdResolver; - use parking_lot::Mutex; - use std::{io, sync::Arc}; - use trust_dns_resolver::{ + use futures::FutureExt; + use hickory_resolver::{ config::{ResolverConfig, ResolverOpts}, system_conf, }; + use parking_lot::Mutex; + use std::{io, sync::Arc}; /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses /// using `async-std` for all async I/O. @@ -75,32 +76,48 @@ pub mod async_std { /// Creates a new [`Transport`] from the OS's DNS configuration and defaults. 
pub async fn system(inner: T) -> Result, io::Error> { let (cfg, opts) = system_conf::read_system_conf()?; - Self::custom(inner, cfg, opts).await + Ok(Self::custom(inner, cfg, opts).await) } /// Creates a [`Transport`] with a custom resolver configuration and options. - pub async fn custom( - inner: T, - cfg: ResolverConfig, - opts: ResolverOpts, - ) -> Result, io::Error> { - Ok(Transport { + pub async fn custom(inner: T, cfg: ResolverConfig, opts: ResolverOpts) -> Transport { + Transport { inner: Arc::new(Mutex::new(inner)), resolver: async_std_resolver::resolver(cfg, opts).await, + } + } + + // TODO: Replace `system` implementation with this + #[doc(hidden)] + pub fn system2(inner: T) -> Result, io::Error> { + Ok(Transport { + inner: Arc::new(Mutex::new(inner)), + resolver: async_std_resolver::resolver_from_system_conf() + .now_or_never() + .expect( + "async_std_resolver::resolver_from_system_conf did not resolve immediately", + )?, }) } + + // TODO: Replace `custom` implementation with this + #[doc(hidden)] + pub fn custom2(inner: T, cfg: ResolverConfig, opts: ResolverOpts) -> Transport { + Transport { + inner: Arc::new(Mutex::new(inner)), + resolver: async_std_resolver::resolver(cfg, opts) + .now_or_never() + .expect("async_std_resolver::resolver did not resolve immediately"), + } + } } } -#[cfg(feature = "async-std")] -#[deprecated(note = "Use `async_std::Transport` instead.")] -pub type DnsConfig = async_std::Transport; - #[cfg(feature = "tokio")] pub mod tokio { + use hickory_resolver::{system_conf, TokioAsyncResolver}; use parking_lot::Mutex; use std::sync::Arc; - use trust_dns_resolver::{system_conf, TokioAsyncResolver}; /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses /// using `tokio` for all async I/O. @@ -108,31 +125,26 @@ pub mod tokio { impl Transport { /// Creates a new [`Transport`] from the OS's DNS configuration and defaults. 
- pub fn system(inner: T) -> Result, std::io::Error> { + pub fn system(inner: T) -> Result, std::io::Error> { let (cfg, opts) = system_conf::read_system_conf()?; - Self::custom(inner, cfg, opts) + Ok(Self::custom(inner, cfg, opts)) } /// Creates a [`Transport`] with a custom resolver configuration /// and options. pub fn custom( inner: T, - cfg: trust_dns_resolver::config::ResolverConfig, - opts: trust_dns_resolver::config::ResolverOpts, - ) -> Result, std::io::Error> { - // TODO: Make infallible in next breaking release. Or deprecation? - Ok(Transport { + cfg: hickory_resolver::config::ResolverConfig, + opts: hickory_resolver::config::ResolverOpts, + ) -> Transport { + Transport { inner: Arc::new(Mutex::new(inner)), resolver: TokioAsyncResolver::tokio(cfg, opts), - }) + } } } } -#[cfg(feature = "tokio")] -#[deprecated(note = "Use `tokio::Transport` instead.")] -pub type TokioDnsConfig = tokio::Transport; - use async_trait::async_trait; use futures::{future::BoxFuture, prelude::*}; use libp2p_core::{ @@ -154,12 +166,12 @@ use std::{ task::{Context, Poll}, }; -pub use trust_dns_resolver::config::{ResolverConfig, ResolverOpts}; -pub use trust_dns_resolver::error::{ResolveError, ResolveErrorKind}; -use trust_dns_resolver::lookup::{Ipv4Lookup, Ipv6Lookup, TxtLookup}; -use trust_dns_resolver::lookup_ip::LookupIp; -use trust_dns_resolver::name_server::ConnectionProvider; -use trust_dns_resolver::AsyncResolver; +pub use hickory_resolver::config::{ResolverConfig, ResolverOpts}; +pub use hickory_resolver::error::{ResolveError, ResolveErrorKind}; +use hickory_resolver::lookup::{Ipv4Lookup, Ipv6Lookup, TxtLookup}; +use hickory_resolver::lookup_ip::LookupIp; +use hickory_resolver::name_server::ConnectionProvider; +use hickory_resolver::AsyncResolver; /// The prefix for `dnsaddr` protocol TXT record lookups. 
const DNSADDR_PREFIX: &str = "_dnsaddr."; @@ -189,9 +201,6 @@ pub struct Transport { resolver: R, } -#[deprecated(note = "Use `async_std::Transport` or `tokio::Transport` instead.")] -pub type GenDnsConfig = Transport; - impl libp2p_core::Transport for Transport where T: libp2p_core::Transport + Send + Unpin + 'static, @@ -268,7 +277,7 @@ where let resolver = self.resolver.clone(); let inner = self.inner.clone(); - // Asynchronlously resolve all DNS names in the address before proceeding + // Asynchronously resolve all DNS names in the address before proceeding // with dialing on the underlying transport. Ok(async move { let mut last_err = None; @@ -293,7 +302,7 @@ where ) }) { if dns_lookups == MAX_DNS_LOOKUPS { - log::debug!("Too many DNS lookups. Dropping unresolved {}.", addr); + tracing::debug!(address=%addr, "Too many DNS lookups, dropping unresolved address"); last_err = Some(Error::TooManyLookups); // There may still be fully resolved addresses in `unresolved`, // so keep going until `unresolved` is empty. @@ -310,13 +319,13 @@ where last_err = Some(e); } Ok(Resolved::One(ip)) => { - log::trace!("Resolved {} -> {}", name, ip); + tracing::trace!(protocol=%name, resolved=%ip); let addr = addr.replace(i, |_| Some(ip)).expect("`i` is a valid index"); unresolved.push(addr); } Ok(Resolved::Many(ips)) => { for ip in ips { - log::trace!("Resolved {} -> {}", name, ip); + tracing::trace!(protocol=%name, resolved=%ip); let addr = addr.replace(i, |_| Some(ip)).expect("`i` is a valid index"); unresolved.push(addr); @@ -330,14 +339,14 @@ where if a.ends_with(&suffix) { if n < MAX_TXT_RECORDS { n += 1; - log::trace!("Resolved {} -> {}", name, a); + tracing::trace!(protocol=%name, resolved=%a); let addr = prefix.iter().chain(a.iter()).collect::(); unresolved.push(addr); } else { - log::debug!( - "Too many TXT records. 
Dropping resolved {}.", - a + tracing::debug!( + resolved=%a, + "Too many TXT records, dropping resolved" ); } } @@ -346,7 +355,7 @@ where } } else { // We have a fully resolved address, so try to dial it. - log::debug!("Dialing {}", addr); + tracing::debug!(address=%addr, "Dialing address"); let transport = inner.clone(); let dial = match role_override { @@ -370,12 +379,12 @@ where match result { Ok(out) => return Ok(out), Err(err) => { - log::debug!("Dial error: {:?}.", err); + tracing::debug!("Dial error: {:?}.", err); if unresolved.is_empty() { return Err(err); } if dial_attempts == MAX_DIAL_ATTEMPTS { - log::debug!( + tracing::debug!( "Aborting dialing after {} attempts.", MAX_DIAL_ATTEMPTS ); @@ -407,6 +416,7 @@ pub enum Error { /// The underlying transport encountered an error. Transport(TErr), /// DNS resolution failed. + #[allow(clippy::enum_variant_names)] ResolveError(ResolveError), /// DNS resolution was successful, but the underlying transport refused the resolved address. MultiaddrNotSupported(Multiaddr), @@ -419,9 +429,6 @@ pub enum Error { TooManyLookups, } -#[deprecated(note = "Use `Error` instead.")] -pub type DnsErr = Error; - impl fmt::Display for Error where TErr: fmt::Display, @@ -556,7 +563,7 @@ fn resolve<'a, E: 'a + Send, R: Resolver>( match parse_dnsaddr_txt(chars) { Err(e) => { // Skip over seemingly invalid entries. 
- log::debug!("Invalid TXT record: {:?}", e); + tracing::debug!("Invalid TXT record: {:?}", e); } Ok(a) => { addrs.push(a); @@ -631,7 +638,9 @@ mod tests { #[test] fn basic_resolve() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); #[derive(Clone)] struct CustomTransport; @@ -764,8 +773,7 @@ mod tests { let config = ResolverConfig::quad9(); let opts = ResolverOpts::default(); async_std_crate::task::block_on( - async_std::Transport::custom(CustomTransport, config, opts) - .then(|dns| run(dns.unwrap())), + async_std::Transport::custom(CustomTransport, config, opts).then(run), ); } @@ -781,9 +789,7 @@ mod tests { .build() .unwrap(); - rt.block_on(run( - tokio::Transport::custom(CustomTransport, config, opts).unwrap() - )); + rt.block_on(run(tokio::Transport::custom(CustomTransport, config, opts))); } } } diff --git a/transports/noise/CHANGELOG.md b/transports/noise/CHANGELOG.md index 2a0c31c96a89..78effb673d26 100644 --- a/transports/noise/CHANGELOG.md +++ b/transports/noise/CHANGELOG.md @@ -1,3 +1,12 @@ +## 0.44.0 + +- Migrate to `{In,Out}boundConnectionUpgrade` traits. + See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). + +## 0.43.2 + +- Update x25519-dalek to 2.0.0. + ## 0.43.1 - Update dependencies. diff --git a/transports/noise/Cargo.toml b/transports/noise/Cargo.toml index 876a3cd34d50..83bb6de8c0e8 100644 --- a/transports/noise/Cargo.toml +++ b/transports/noise/Cargo.toml @@ -3,39 +3,41 @@ name = "libp2p-noise" edition = "2021" rust-version = { workspace = true } description = "Cryptographic handshake protocol using the noise framework." 
-version = "0.43.1" +version = "0.44.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" [dependencies] +asynchronous-codec = { workspace = true } bytes = "1" -curve25519-dalek = "4.1.1" -futures = "0.3.28" +curve25519-dalek = "4.1.2" +futures = "0.3.30" libp2p-core = { workspace = true } libp2p-identity = { workspace = true, features = ["ed25519"] } -log = "0.4" multiaddr = { workspace = true } multihash = { workspace = true } -once_cell = "1.18.0" +once_cell = "1.19.0" quick-protobuf = "0.8" rand = "0.8.3" sha2 = "0.10.8" static_assertions = "1" -thiserror = "1.0.49" -x25519-dalek = "1.1.0" +thiserror = "1.0.57" +tracing = "0.1.37" +x25519-dalek = "2" zeroize = "1" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -snow = { version = "0.9.2", features = ["ring-resolver"], default-features = false } +snow = { version = "0.9.6", features = ["ring-resolver"], default-features = false } [target.'cfg(target_arch = "wasm32")'.dependencies] -snow = { version = "0.9.2", features = ["default-resolver"], default-features = false } +snow = { version = "0.9.5", features = ["default-resolver"], default-features = false } [dev-dependencies] -env_logger = "0.10.0" futures_ringbuf = "0.4.0" quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +libp2p-identity = { workspace = true, features = ["rand"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/noise/src/io.rs b/transports/noise/src/io.rs index ee1846956967..9cd4cfed52a9 100644 --- a/transports/noise/src/io.rs +++ b/transports/noise/src/io.rs @@ -22,11 +22,11 @@ mod framed; pub(crate) mod handshake; +use asynchronous_codec::Framed; use bytes::Bytes; -use framed::{NoiseFramed, MAX_FRAME_LEN}; +use framed::{Codec, MAX_FRAME_LEN}; use futures::prelude::*; use futures::ready; -use log::trace; use std::{ cmp::min, fmt, io, @@ -38,7 +38,7 @@ use std::{ /// /// `T` is the type of the underlying I/O resource. pub struct Output { - io: NoiseFramed, + io: Framed>, recv_buffer: Bytes, recv_offset: usize, send_buffer: Vec, @@ -47,12 +47,12 @@ pub struct Output { impl fmt::Debug for Output { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("NoiseOutput").field("io", &self.io).finish() + f.debug_struct("NoiseOutput").finish() } } impl Output { - fn new(io: NoiseFramed) -> Self { + fn new(io: Framed>) -> Self { Output { io, recv_buffer: Bytes::new(), @@ -75,10 +75,10 @@ impl AsyncRead for Output { if len > 0 { let n = min(len - off, buf.len()); buf[..n].copy_from_slice(&self.recv_buffer[off..off + n]); - trace!("read: copied {}/{} bytes", off + n, len); + tracing::trace!(copied_bytes=%(off + n), total_bytes=%len, "read: copied"); self.recv_offset += n; if len == self.recv_offset { - trace!("read: frame consumed"); + tracing::trace!("read: frame consumed"); // Drop the existing view so `NoiseFramed` can reuse // the buffer when polling for the next frame below. self.recv_buffer = Bytes::new(); @@ -111,7 +111,7 @@ impl AsyncWrite for Output { // The MAX_FRAME_LEN is the maximum buffer size before a frame must be sent. 
if this.send_offset == MAX_FRAME_LEN { - trace!("write: sending {} bytes", MAX_FRAME_LEN); + tracing::trace!(bytes=%MAX_FRAME_LEN, "write: sending"); ready!(io.as_mut().poll_ready(cx))?; io.as_mut().start_send(frame_buf)?; this.send_offset = 0; @@ -123,7 +123,7 @@ impl AsyncWrite for Output { let n = min(MAX_FRAME_LEN - off, buf.len()); this.send_buffer[off..off + n].copy_from_slice(&buf[..n]); this.send_offset += n; - trace!("write: buffered {} bytes", this.send_offset); + tracing::trace!(bytes=%this.send_offset, "write: buffered"); Poll::Ready(Ok(n)) } @@ -136,7 +136,7 @@ impl AsyncWrite for Output { // Check if there is still one more frame to send. if this.send_offset > 0 { ready!(io.as_mut().poll_ready(cx))?; - trace!("flush: sending {} bytes", this.send_offset); + tracing::trace!(bytes= %this.send_offset, "flush: sending"); io.as_mut().start_send(frame_buf)?; this.send_offset = 0; } diff --git a/transports/noise/src/io/framed.rs b/transports/noise/src/io/framed.rs index d7fa79fc815f..9ed6045cf38e 100644 --- a/transports/noise/src/io/framed.rs +++ b/transports/noise/src/io/framed.rs @@ -18,20 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! This module provides a `Sink` and `Stream` for length-delimited -//! Noise protocol messages in form of [`NoiseFramed`]. +//! Provides a [`Codec`] type implementing the [`Encoder`] and [`Decoder`] traits. +//! +//! Alongside a [`asynchronous_codec::Framed`] this provides a [Sink](futures::Sink) +//! and [Stream](futures::Stream) for length-delimited Noise protocol messages. 
-use crate::io::Output; +use super::handshake::proto; use crate::{protocol::PublicKey, Error}; -use bytes::{Bytes, BytesMut}; -use futures::prelude::*; -use futures::ready; -use log::{debug, trace}; -use std::{ - fmt, io, - pin::Pin, - task::{Context, Poll}, -}; +use asynchronous_codec::{Decoder, Encoder}; +use bytes::{Buf, Bytes, BytesMut}; +use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; +use std::io; +use std::mem::size_of; /// Max. size of a noise message. const MAX_NOISE_MSG_LEN: usize = 65535; @@ -43,61 +41,49 @@ static_assertions::const_assert! { MAX_FRAME_LEN + EXTRA_ENCRYPT_SPACE <= MAX_NOISE_MSG_LEN } -/// A `NoiseFramed` is a `Sink` and `Stream` for length-delimited -/// Noise protocol messages. -/// -/// `T` is the type of the underlying I/O resource and `S` the -/// type of the Noise session state. -pub(crate) struct NoiseFramed { - io: T, +/// Codec holds the noise session state `S` and acts as a medium for +/// encoding and decoding length-delimited session messages. +pub(crate) struct Codec { session: S, - read_state: ReadState, - write_state: WriteState, - read_buffer: Vec, - write_buffer: Vec, - decrypt_buffer: BytesMut, -} -impl fmt::Debug for NoiseFramed { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("NoiseFramed") - .field("read_state", &self.read_state) - .field("write_state", &self.write_state) - .finish() - } + // We reuse write and encryption buffers across multiple messages to avoid reallocations. + // We cannot reuse read and decryption buffers because we cannot return borrowed data. + write_buffer: BytesMut, + encrypt_buffer: BytesMut, } -impl NoiseFramed { - /// Creates a nwe `NoiseFramed` for beginning a Noise protocol handshake. 
- pub(crate) fn new(io: T, state: snow::HandshakeState) -> Self { - NoiseFramed { - io, - session: state, - read_state: ReadState::Ready, - write_state: WriteState::Ready, - read_buffer: Vec::new(), - write_buffer: Vec::new(), - decrypt_buffer: BytesMut::new(), +impl Codec { + pub(crate) fn new(session: S) -> Self { + Codec { + session, + write_buffer: BytesMut::default(), + encrypt_buffer: BytesMut::default(), } } +} +impl Codec { + /// Checks if the session was started in the `initiator` role. pub(crate) fn is_initiator(&self) -> bool { self.session.is_initiator() } + /// Checks if the session was started in the `responder` role. pub(crate) fn is_responder(&self) -> bool { !self.session.is_initiator() } - /// Converts the `NoiseFramed` into a `NoiseOutput` encrypted data stream - /// once the handshake is complete, including the static DH [`PublicKey`] - /// of the remote, if received. + /// Converts the underlying Noise session from the [`snow::HandshakeState`] to a + /// [`snow::TransportState`] once the handshake is complete, including the static + /// DH [`PublicKey`] of the remote if received. /// - /// If the underlying Noise protocol session state does not permit - /// transitioning to transport mode because the handshake is incomplete, - /// an error is returned. Similarly if the remote's static DH key, if - /// present, cannot be parsed. - pub(crate) fn into_transport(self) -> Result<(PublicKey, Output), Error> { + /// If the Noise protocol session state does not permit transitioning to + /// transport mode because the handshake is incomplete, an error is returned. + /// + /// An error is also returned if the remote's static DH key is not present or + /// cannot be parsed, as that indicates a fatal handshake error for the noise + /// `XX` pattern, which is the only handshake protocol libp2p currently supports. 
+ pub(crate) fn into_transport(self) -> Result<(PublicKey, Codec), Error> { let dh_remote_pubkey = self.session.get_remote_static().ok_or_else(|| { Error::Io(io::Error::new( io::ErrorKind::Other, @@ -106,355 +92,151 @@ impl NoiseFramed { })?; let dh_remote_pubkey = PublicKey::from_slice(dh_remote_pubkey)?; + let codec = Codec::new(self.session.into_transport_mode()?); - let io = NoiseFramed { - session: self.session.into_transport_mode()?, - io: self.io, - read_state: ReadState::Ready, - write_state: WriteState::Ready, - read_buffer: self.read_buffer, - write_buffer: self.write_buffer, - decrypt_buffer: self.decrypt_buffer, - }; - - Ok((dh_remote_pubkey, Output::new(io))) + Ok((dh_remote_pubkey, codec)) } } -/// The states for reading Noise protocol frames. -#[derive(Debug)] -enum ReadState { - /// Ready to read another frame. - Ready, - /// Reading frame length. - ReadLen { buf: [u8; 2], off: usize }, - /// Reading frame data. - ReadData { len: usize, off: usize }, - /// EOF has been reached (terminal state). - /// - /// The associated result signals if the EOF was unexpected or not. - Eof(Result<(), ()>), - /// A decryption error occurred (terminal state). - DecErr, -} +impl Encoder for Codec { + type Error = io::Error; + type Item<'a> = &'a proto::NoiseHandshakePayload; -/// The states for writing Noise protocol frames. -#[derive(Debug)] -enum WriteState { - /// Ready to write another frame. - Ready, - /// Writing the frame length. - WriteLen { - len: usize, - buf: [u8; 2], - off: usize, - }, - /// Writing the frame data. - WriteData { len: usize, off: usize }, - /// EOF has been reached unexpectedly (terminal state). - Eof, - /// An encryption error occurred (terminal state). 
- EncErr, -} + fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { + let item_size = item.get_size(); -impl WriteState { - fn is_ready(&self) -> bool { - if let WriteState::Ready = self { - return true; - } - false - } -} + self.write_buffer.resize(item_size, 0); + let mut writer = Writer::new(&mut self.write_buffer[..item_size]); + item.write_message(&mut writer) + .expect("Protobuf encoding to succeed"); -impl futures::stream::Stream for NoiseFramed -where - T: AsyncRead + Unpin, - S: SessionState + Unpin, -{ - type Item = io::Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = Pin::into_inner(self); - loop { - trace!("read state: {:?}", this.read_state); - match this.read_state { - ReadState::Ready => { - this.read_state = ReadState::ReadLen { - buf: [0, 0], - off: 0, - }; - } - ReadState::ReadLen { mut buf, mut off } => { - let n = match read_frame_len(&mut this.io, cx, &mut buf, &mut off) { - Poll::Ready(Ok(Some(n))) => n, - Poll::Ready(Ok(None)) => { - trace!("read: eof"); - this.read_state = ReadState::Eof(Ok(())); - return Poll::Ready(None); - } - Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e))), - Poll::Pending => { - this.read_state = ReadState::ReadLen { buf, off }; - return Poll::Pending; - } - }; - trace!("read: frame len = {}", n); - if n == 0 { - trace!("read: empty frame"); - this.read_state = ReadState::Ready; - continue; - } - this.read_buffer.resize(usize::from(n), 0u8); - this.read_state = ReadState::ReadData { - len: usize::from(n), - off: 0, - } - } - ReadState::ReadData { len, ref mut off } => { - let n = { - let f = - Pin::new(&mut this.io).poll_read(cx, &mut this.read_buffer[*off..len]); - match ready!(f) { - Ok(n) => n, - Err(e) => return Poll::Ready(Some(Err(e))), - } - }; - trace!("read: {}/{} bytes", *off + n, len); - if n == 0 { - trace!("read: eof"); - this.read_state = ReadState::Eof(Err(())); - return 
Poll::Ready(Some(Err(io::ErrorKind::UnexpectedEof.into()))); - } - *off += n; - if len == *off { - trace!("read: decrypting {} bytes", len); - this.decrypt_buffer.resize(len, 0); - if let Ok(n) = this - .session - .read_message(&this.read_buffer, &mut this.decrypt_buffer) - { - this.decrypt_buffer.truncate(n); - trace!("read: payload len = {} bytes", n); - this.read_state = ReadState::Ready; - // Return an immutable view into the current buffer. - // If the view is dropped before the next frame is - // read, the `BytesMut` will reuse the same buffer - // for the next frame. - let view = this.decrypt_buffer.split().freeze(); - return Poll::Ready(Some(Ok(view))); - } else { - debug!("read: decryption error"); - this.read_state = ReadState::DecErr; - return Poll::Ready(Some(Err(io::ErrorKind::InvalidData.into()))); - } - } - } - ReadState::Eof(Ok(())) => { - trace!("read: eof"); - return Poll::Ready(None); - } - ReadState::Eof(Err(())) => { - trace!("read: eof (unexpected)"); - return Poll::Ready(Some(Err(io::ErrorKind::UnexpectedEof.into()))); - } - ReadState::DecErr => { - return Poll::Ready(Some(Err(io::ErrorKind::InvalidData.into()))) - } - } - } + encrypt( + &self.write_buffer[..item_size], + dst, + &mut self.encrypt_buffer, + |item, buffer| self.session.write_message(item, buffer), + )?; + + Ok(()) } } -impl futures::sink::Sink<&Vec> for NoiseFramed -where - T: AsyncWrite + Unpin, - S: SessionState + Unpin, -{ +impl Decoder for Codec { type Error = io::Error; + type Item = proto::NoiseHandshakePayload; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + let cleartext = match decrypt(src, |ciphertext, decrypt_buffer| { + self.session.read_message(ciphertext, decrypt_buffer) + })? 
{ + None => return Ok(None), + Some(cleartext) => cleartext, + }; - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = Pin::into_inner(self); - loop { - trace!("write state {:?}", this.write_state); - match this.write_state { - WriteState::Ready => { - return Poll::Ready(Ok(())); - } - WriteState::WriteLen { len, buf, mut off } => { - trace!("write: frame len ({}, {:?}, {}/2)", len, buf, off); - match write_frame_len(&mut this.io, cx, &buf, &mut off) { - Poll::Ready(Ok(true)) => (), - Poll::Ready(Ok(false)) => { - trace!("write: eof"); - this.write_state = WriteState::Eof; - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), - Poll::Pending => { - this.write_state = WriteState::WriteLen { len, buf, off }; - return Poll::Pending; - } - } - this.write_state = WriteState::WriteData { len, off: 0 } - } - WriteState::WriteData { len, ref mut off } => { - let n = { - let f = - Pin::new(&mut this.io).poll_write(cx, &this.write_buffer[*off..len]); - match ready!(f) { - Ok(n) => n, - Err(e) => return Poll::Ready(Err(e)), - } - }; - if n == 0 { - trace!("write: eof"); - this.write_state = WriteState::Eof; - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - *off += n; - trace!("write: {}/{} bytes written", *off, len); - if len == *off { - trace!("write: finished with {} bytes", len); - this.write_state = WriteState::Ready; - } - } - WriteState::Eof => { - trace!("write: eof"); - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - WriteState::EncErr => return Poll::Ready(Err(io::ErrorKind::InvalidData.into())), - } - } - } + let mut reader = BytesReader::from_bytes(&cleartext[..]); + let pb = + proto::NoiseHandshakePayload::from_reader(&mut reader, &cleartext).map_err(|_| { + io::Error::new( + io::ErrorKind::InvalidData, + "Failed decoding handshake payload", + ) + })?; - fn start_send(self: Pin<&mut Self>, frame: &Vec) -> Result<(), Self::Error> { - 
assert!(frame.len() <= MAX_FRAME_LEN); - let this = Pin::into_inner(self); - assert!(this.write_state.is_ready()); - - this.write_buffer - .resize(frame.len() + EXTRA_ENCRYPT_SPACE, 0u8); - match this - .session - .write_message(frame, &mut this.write_buffer[..]) - { - Ok(n) => { - trace!("write: cipher text len = {} bytes", n); - this.write_buffer.truncate(n); - this.write_state = WriteState::WriteLen { - len: n, - buf: u16::to_be_bytes(n as u16), - off: 0, - }; - Ok(()) - } - Err(e) => { - log::error!("encryption error: {:?}", e); - this.write_state = WriteState::EncErr; - Err(io::ErrorKind::InvalidData.into()) - } - } + Ok(Some(pb)) } +} + +impl Encoder for Codec { + type Error = io::Error; + type Item<'a> = &'a [u8]; - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().poll_ready(cx))?; - Pin::new(&mut self.io).poll_flush(cx) + fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { + encrypt(item, dst, &mut self.encrypt_buffer, |item, buffer| { + self.session.write_message(item, buffer) + }) } +} + +impl Decoder for Codec { + type Error = io::Error; + type Item = Bytes; - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().poll_flush(cx))?; - Pin::new(&mut self.io).poll_close(cx) + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + decrypt(src, |ciphertext, decrypt_buffer| { + self.session.read_message(ciphertext, decrypt_buffer) + }) } } -/// A stateful context in which Noise protocol messages can be read and written. -pub(crate) trait SessionState { - fn read_message(&mut self, msg: &[u8], buf: &mut [u8]) -> Result; - fn write_message(&mut self, msg: &[u8], buf: &mut [u8]) -> Result; +/// Encrypts the given cleartext to `dst`. +/// +/// This is a standalone function to allow us reusing the `encrypt_buffer` and to use to across different session states of the noise protocol. 
+fn encrypt( + cleartext: &[u8], + dst: &mut BytesMut, + encrypt_buffer: &mut BytesMut, + encrypt_fn: impl FnOnce(&[u8], &mut [u8]) -> Result, +) -> io::Result<()> { + tracing::trace!("Encrypting {} bytes", cleartext.len()); + + encrypt_buffer.resize(cleartext.len() + EXTRA_ENCRYPT_SPACE, 0); + let n = encrypt_fn(cleartext, encrypt_buffer).map_err(into_io_error)?; + + tracing::trace!("Outgoing ciphertext has {n} bytes"); + + encode_length_prefixed(&encrypt_buffer[..n], dst); + + Ok(()) } -impl SessionState for snow::HandshakeState { - fn read_message(&mut self, msg: &[u8], buf: &mut [u8]) -> Result { - self.read_message(msg, buf) - } +/// Encrypts the given ciphertext. +/// +/// This is a standalone function so we can use it across different session states of the noise protocol. +/// In case `ciphertext` does not contain enough bytes to decrypt the entire frame, `Ok(None)` is returned. +fn decrypt( + ciphertext: &mut BytesMut, + decrypt_fn: impl FnOnce(&[u8], &mut [u8]) -> Result, +) -> io::Result> { + let Some(ciphertext) = decode_length_prefixed(ciphertext)? 
else { + return Ok(None); + }; - fn write_message(&mut self, msg: &[u8], buf: &mut [u8]) -> Result { - self.write_message(msg, buf) - } + tracing::trace!("Incoming ciphertext has {} bytes", ciphertext.len()); + + let mut decrypt_buffer = BytesMut::zeroed(ciphertext.len()); + let n = decrypt_fn(&ciphertext, &mut decrypt_buffer).map_err(into_io_error)?; + + tracing::trace!("Decrypted cleartext has {n} bytes"); + + Ok(Some(decrypt_buffer.split_to(n).freeze())) } -impl SessionState for snow::TransportState { - fn read_message(&mut self, msg: &[u8], buf: &mut [u8]) -> Result { - self.read_message(msg, buf) - } +fn into_io_error(err: snow::Error) -> io::Error { + io::Error::new(io::ErrorKind::InvalidData, err) +} - fn write_message(&mut self, msg: &[u8], buf: &mut [u8]) -> Result { - self.write_message(msg, buf) - } +const U16_LENGTH: usize = size_of::(); + +fn encode_length_prefixed(src: &[u8], dst: &mut BytesMut) { + dst.reserve(U16_LENGTH + src.len()); + dst.extend_from_slice(&(src.len() as u16).to_be_bytes()); + dst.extend_from_slice(src); } -/// Read 2 bytes as frame length from the given source into the given buffer. -/// -/// Panics if `off >= 2`. -/// -/// When [`Poll::Pending`] is returned, the given buffer and offset -/// may have been updated (i.e. a byte may have been read) and must be preserved -/// for the next invocation. -/// -/// Returns `None` if EOF has been encountered. 
-fn read_frame_len( - mut io: &mut R, - cx: &mut Context<'_>, - buf: &mut [u8; 2], - off: &mut usize, -) -> Poll>> { - loop { - match ready!(Pin::new(&mut io).poll_read(cx, &mut buf[*off..])) { - Ok(n) => { - if n == 0 { - return Poll::Ready(Ok(None)); - } - *off += n; - if *off == 2 { - return Poll::Ready(Ok(Some(u16::from_be_bytes(*buf)))); - } - } - Err(e) => { - return Poll::Ready(Err(e)); - } - } +fn decode_length_prefixed(src: &mut BytesMut) -> Result, io::Error> { + if src.len() < size_of::() { + return Ok(None); } -} -/// Write 2 bytes as frame length from the given buffer into the given sink. -/// -/// Panics if `off >= 2`. -/// -/// When [`Poll::Pending`] is returned, the given offset -/// may have been updated (i.e. a byte may have been written) and must -/// be preserved for the next invocation. -/// -/// Returns `false` if EOF has been encountered. -fn write_frame_len( - mut io: &mut W, - cx: &mut Context<'_>, - buf: &[u8; 2], - off: &mut usize, -) -> Poll> { - loop { - match ready!(Pin::new(&mut io).poll_write(cx, &buf[*off..])) { - Ok(n) => { - if n == 0 { - return Poll::Ready(Ok(false)); - } - *off += n; - if *off == 2 { - return Poll::Ready(Ok(true)); - } - } - Err(e) => { - return Poll::Ready(Err(e)); - } - } + let mut len_bytes = [0u8; U16_LENGTH]; + len_bytes.copy_from_slice(&src[..U16_LENGTH]); + let len = u16::from_be_bytes(len_bytes) as usize; + + if src.len() - U16_LENGTH >= len { + // Skip the length header we already read. + src.advance(U16_LENGTH); + Ok(Some(src.split_to(len).freeze())) + } else { + Ok(None) } } diff --git a/transports/noise/src/io/handshake.rs b/transports/noise/src/io/handshake.rs index c853af7b1893..7cc0f859e6e6 100644 --- a/transports/noise/src/io/handshake.rs +++ b/transports/noise/src/io/handshake.rs @@ -20,23 +20,24 @@ //! Noise protocol handshake I/O. 
-mod proto { +pub(super) mod proto { #![allow(unreachable_pub)] include!("../generated/mod.rs"); pub use self::payload::proto::NoiseExtensions; pub use self::payload::proto::NoiseHandshakePayload; } -use crate::io::{framed::NoiseFramed, Output}; -use crate::protocol::{KeypairIdentity, STATIC_KEY_DOMAIN}; -use crate::{DecodeError, Error}; -use bytes::Bytes; +use super::framed::Codec; +use crate::io::Output; +use crate::protocol::{KeypairIdentity, PublicKey, STATIC_KEY_DOMAIN}; +use crate::Error; +use asynchronous_codec::Framed; use futures::prelude::*; use libp2p_identity as identity; use multihash::Multihash; -use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; +use quick_protobuf::MessageWrite; use std::collections::HashSet; -use std::io; +use std::{io, mem}; ////////////////////////////////////////////////////////////////////////////// // Internal @@ -44,7 +45,7 @@ use std::io; /// Handshake state. pub(crate) struct State { /// The underlying I/O resource. - io: NoiseFramed, + io: Framed>, /// The associated public identity of the local node's static DH keypair, /// which can be sent to the remote as part of an authenticated handshake. identity: KeypairIdentity, @@ -63,7 +64,10 @@ struct Extensions { webtransport_certhashes: HashSet>, } -impl State { +impl State +where + T: AsyncRead + AsyncWrite, +{ /// Initializes the state for a new Noise handshake, using the given local /// identity keypair and local DH static public key. 
The handshake messages /// will be sent and received on the given I/O resource and using the @@ -79,7 +83,7 @@ impl State { ) -> Self { Self { identity, - io: NoiseFramed::new(io, session), + io: Framed::new(io, Codec::new(session)), dh_remote_pubkey_sig: None, id_remote_pubkey: expected_remote_key, responder_webtransport_certhashes, @@ -88,12 +92,16 @@ impl State { } } -impl State { +impl State +where + T: AsyncRead + AsyncWrite, +{ /// Finish a handshake, yielding the established remote identity and the /// [`Output`] for communicating on the encrypted channel. pub(crate) fn finish(self) -> Result<(identity::PublicKey, Output), Error> { - let is_initiator = self.io.is_initiator(); - let (pubkey, io) = self.io.into_transport()?; + let is_initiator = self.io.codec().is_initiator(); + + let (pubkey, framed) = map_into_transport(self.io)?; let id_pk = self .id_remote_pubkey @@ -131,10 +139,34 @@ impl State { } } - Ok((id_pk, io)) + Ok((id_pk, Output::new(framed))) } } +/// Maps the provided [`Framed`] from the [`snow::HandshakeState`] into the [`snow::TransportState`]. +/// +/// This is a bit tricky because [`Framed`] cannot just be de-composed but only into its [`FramedParts`](asynchronous_codec::FramedParts). +/// However, we need to retain the original [`FramedParts`](asynchronous_codec::FramedParts) because they contain the active read & write buffers. +/// +/// Those are likely **not** empty because the remote may directly write to the stream again after the noise handshake finishes. 
+fn map_into_transport( + framed: Framed>, +) -> Result<(PublicKey, Framed>), Error> +where + T: AsyncRead + AsyncWrite, +{ + let mut parts = framed.into_parts().map_codec(Some); + + let (pubkey, codec) = mem::take(&mut parts.codec) + .expect("We just set it to `Some`") + .into_transport()?; + + let parts = parts.map_codec(|_| codec); + let framed = Framed::from_parts(parts); + + Ok((pubkey, framed)) +} + impl From for Extensions { fn from(value: proto::NoiseExtensions) -> Self { Extensions { @@ -151,14 +183,14 @@ impl From for Extensions { // Handshake Message Futures /// A future for receiving a Noise handshake message. -async fn recv(state: &mut State) -> Result +async fn recv(state: &mut State) -> Result where T: AsyncRead + Unpin, { match state.io.next().await { None => Err(io::Error::new(io::ErrorKind::UnexpectedEof, "eof").into()), Some(Err(e)) => Err(e.into()), - Some(Ok(m)) => Ok(m), + Some(Ok(p)) => Ok(p), } } @@ -167,12 +199,11 @@ pub(crate) async fn recv_empty(state: &mut State) -> Result<(), Error> where T: AsyncRead + Unpin, { - let msg = recv(state).await?; - if !msg.is_empty() { - return Err( - io::Error::new(io::ErrorKind::InvalidData, "Unexpected handshake payload.").into(), - ); + let payload = recv(state).await?; + if payload.get_size() != 0 { + return Err(io::Error::new(io::ErrorKind::InvalidData, "Expected empty payload.").into()); } + Ok(()) } @@ -181,7 +212,10 @@ pub(crate) async fn send_empty(state: &mut State) -> Result<(), Error> where T: AsyncWrite + Unpin, { - state.io.send(&Vec::new()).await?; + state + .io + .send(&proto::NoiseHandshakePayload::default()) + .await?; Ok(()) } @@ -190,11 +224,7 @@ pub(crate) async fn recv_identity(state: &mut State) -> Result<(), Error> where T: AsyncRead + Unpin, { - let msg = recv(state).await?; - let mut reader = BytesReader::from_bytes(&msg[..]); - let pb = - proto::NoiseHandshakePayload::from_reader(&mut reader, &msg[..]).map_err(DecodeError)?; - + let pb = recv(state).await?; 
state.id_remote_pubkey = Some(identity::PublicKey::try_decode_protobuf(&pb.identity_key)?); if !pb.identity_sig.is_empty() { @@ -211,7 +241,7 @@ where /// Send a Noise handshake message with a payload identifying the local node to the remote. pub(crate) async fn send_identity(state: &mut State) -> Result<(), Error> where - T: AsyncWrite + Unpin, + T: AsyncRead + AsyncWrite + Unpin, { let mut pb = proto::NoiseHandshakePayload { identity_key: state.identity.public.encode_protobuf(), @@ -221,7 +251,7 @@ where pb.identity_sig = state.identity.signature.clone(); // If this is the responder then send WebTransport certhashes to initiator, if any. - if state.io.is_responder() { + if state.io.codec().is_responder() { if let Some(ref certhashes) = state.responder_webtransport_certhashes { let ext = pb .extensions @@ -231,11 +261,7 @@ where } } - let mut msg = Vec::with_capacity(pb.get_size()); - - let mut writer = Writer::new(&mut msg); - pb.write_message(&mut writer).expect("Encoding to succeed"); - state.io.send(&msg).await?; + state.io.send(&pb).await?; Ok(()) } diff --git a/transports/noise/src/lib.rs b/transports/noise/src/lib.rs index be73ea3f7f90..2557e76e2762 100644 --- a/transports/noise/src/lib.rs +++ b/transports/noise/src/lib.rs @@ -54,7 +54,6 @@ //! [noise]: http://noiseprotocol.org/ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -#![allow(deprecated)] // Temporarily until we remove deprecated items. 
mod io; mod protocol; @@ -65,7 +64,8 @@ use crate::handshake::State; use crate::io::handshake; use crate::protocol::{noise_params_into_builder, AuthenticKeypair, Keypair, PARAMS_XX}; use futures::prelude::*; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::UpgradeInfo; use libp2p_identity as identity; use libp2p_identity::PeerId; use multiaddr::Protocol; @@ -121,7 +121,7 @@ impl Config { self } - fn into_responder(self, socket: S) -> Result, Error> { + fn into_responder(self, socket: S) -> Result, Error> { let session = noise_params_into_builder( self.params, &self.prologue, @@ -141,7 +141,7 @@ impl Config { Ok(state) } - fn into_initiator(self, socket: S) -> Result, Error> { + fn into_initiator(self, socket: S) -> Result, Error> { let session = noise_params_into_builder( self.params, &self.prologue, @@ -171,7 +171,7 @@ impl UpgradeInfo for Config { } } -impl InboundUpgrade for Config +impl InboundConnectionUpgrade for Config where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { @@ -195,7 +195,7 @@ where } } -impl OutboundUpgrade for Config +impl OutboundConnectionUpgrade for Config where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { @@ -240,6 +240,7 @@ pub enum Error { #[error("failed to decode protobuf ")] InvalidPayload(#[from] DecodeError), #[error(transparent)] + #[allow(clippy::enum_variant_names)] SigningError(#[from] libp2p_identity::SigningError), #[error("Expected WebTransport certhashes ({}) are not a subset of received ones ({})", certhashes_to_string(.0), certhashes_to_string(.1))] UnknownWebTransportCerthashes(HashSet>, HashSet>), diff --git a/transports/noise/tests/smoke.rs b/transports/noise/tests/smoke.rs index 6d1723ec7d6a..7100e7c7a8d2 100644 --- a/transports/noise/tests/smoke.rs +++ b/transports/noise/tests/smoke.rs @@ -20,16 +20,17 @@ use futures::prelude::*; use libp2p_core::transport::{MemoryTransport, Transport}; 
-use libp2p_core::{upgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p_core::upgrade; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; use libp2p_identity as identity; use libp2p_noise as noise; -use log::info; use quickcheck::*; use std::{convert::TryInto, io}; +use tracing_subscriber::EnvFilter; #[allow(dead_code)] fn core_upgrade_compat() { - // Tests API compaibility with the libp2p-core upgrade API, + // Tests API compatibility with the libp2p-core upgrade API, // i.e. if it compiles, the "test" is considered a success. let id_keys = identity::Keypair::generate_ed25519(); let noise = noise::Config::new(&id_keys).unwrap(); @@ -40,7 +41,9 @@ fn core_upgrade_compat() { #[test] fn xx() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(mut messages: Vec) -> bool { messages.truncate(5); let server_id = identity::Keypair::generate_ed25519(); @@ -85,7 +88,7 @@ fn xx() { Err(e) => panic!("error reading len: {e}"), } }; - info!("server: reading message ({} bytes)", len); + tracing::info!(bytes=%len, "server: reading message"); let mut server_buffer = vec![0; len.try_into().unwrap()]; server_session .read_exact(&mut server_buffer) diff --git a/transports/noise/tests/webtransport_certhashes.rs b/transports/noise/tests/webtransport_certhashes.rs index 95ce0bf58db1..b3c924f8188e 100644 --- a/transports/noise/tests/webtransport_certhashes.rs +++ b/transports/noise/tests/webtransport_certhashes.rs @@ -1,4 +1,4 @@ -use libp2p_core::{InboundUpgrade, OutboundUpgrade}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; use libp2p_identity as identity; use libp2p_noise as noise; use multihash::Multihash; diff --git a/transports/plaintext/CHANGELOG.md b/transports/plaintext/CHANGELOG.md index dbbc04b50033..42b53d12a883 100644 --- a/transports/plaintext/CHANGELOG.md +++ b/transports/plaintext/CHANGELOG.md @@ -1,9 +1,16 @@ 
-## 0.40.1 - unreleased +## 0.41.0 + +- Migrate to `{In,Out}boundConnectionUpgrade` traits. + See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). +- Remove deprecated type-aliases and make `Config::local_public_key` private. + See [PR 4734](https://github.com/libp2p/rust-libp2p/pull/4734). + +## 0.40.1 - Rename `Plaintext2Config` to `Config` to follow naming conventions across repository. See [PR 4535](https://github.com/libp2p/rust-libp2p/pull/4535). -## 0.40.0 +## 0.40.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/transports/plaintext/Cargo.toml b/transports/plaintext/Cargo.toml index f58fefb44ca8..e3f1e280851a 100644 --- a/transports/plaintext/Cargo.toml +++ b/transports/plaintext/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-plaintext" edition = "2021" rust-version = { workspace = true } description = "Plaintext encryption dummy protocol for libp2p" -version = "0.40.1" +version = "0.41.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,21 +11,21 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -asynchronous-codec = "0.6" +asynchronous-codec = { workspace = true } bytes = "1" -futures = "0.3.28" +futures = "0.3.30" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" quick-protobuf = "0.8" -unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } +tracing = "0.1.37" +quick-protobuf-codec = { workspace = true } [dev-dependencies] -env_logger = "0.10.0" -libp2p-identity = { workspace = true, features = ["ed25519"] } +libp2p-identity = { workspace = true, features = ["ed25519", "rand"] } quickcheck = { workspace = true } rand = "0.8" futures_ringbuf = "0.4.0" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/plaintext/src/error.rs b/transports/plaintext/src/error.rs index a1e4d8660df6..7480874a85ef 100644 --- a/transports/plaintext/src/error.rs +++ b/transports/plaintext/src/error.rs @@ -41,7 +41,7 @@ pub enum Error { } #[derive(Debug)] -pub struct DecodeError(pub(crate) quick_protobuf::Error); +pub struct DecodeError(pub(crate) quick_protobuf_codec::Error); impl fmt::Display for DecodeError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/transports/plaintext/src/handshake.rs b/transports/plaintext/src/handshake.rs index 05e3b9085a05..ddd5f7f8a9b5 100644 --- a/transports/plaintext/src/handshake.rs +++ b/transports/plaintext/src/handshake.rs @@ -21,106 +21,53 @@ use crate::error::{DecodeError, Error}; use crate::proto::Exchange; use crate::Config; - use asynchronous_codec::{Framed, FramedParts}; -use bytes::{Bytes, BytesMut}; +use bytes::Bytes; use futures::prelude::*; use libp2p_identity::{PeerId, PublicKey}; -use log::{debug, trace}; -use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; use std::io::{Error as IoError, ErrorKind as IoErrorKind}; -use unsigned_varint::codec::UviBytes; - -struct HandshakeContext { - config: Config, - state: T, -} - -// HandshakeContext<()> --with_local-> HandshakeContext -struct Local { - // Our local exchange's raw bytes: - exchange_bytes: Vec, -} - -// HandshakeContext --with_remote-> HandshakeContext -pub(crate) struct Remote { - // The remote's peer ID: - pub(crate) peer_id: PeerId, // The remote's public key: - pub(crate) public_key: PublicKey, -} - -impl HandshakeContext { - fn new(config: Config) -> Self { - #[allow(deprecated)] - let exchange = Exchange { - id: Some(config.local_public_key.to_peer_id().to_bytes()), - pubkey: Some(config.local_public_key.encode_protobuf()), - }; - let mut buf = Vec::with_capacity(exchange.get_size()); - let mut writer = Writer::new(&mut buf); - exchange - 
.write_message(&mut writer) - .expect("Encoding to succeed"); - - Self { - config, - state: Local { - exchange_bytes: buf, - }, - } - } - - fn with_remote(self, exchange_bytes: BytesMut) -> Result, Error> { - let mut reader = BytesReader::from_bytes(&exchange_bytes); - let prop = Exchange::from_reader(&mut reader, &exchange_bytes).map_err(DecodeError)?; - let public_key = PublicKey::try_decode_protobuf(&prop.pubkey.unwrap_or_default())?; - let peer_id = PeerId::from_bytes(&prop.id.unwrap_or_default())?; - - // Check the validity of the remote's `Exchange`. - if peer_id != public_key.to_peer_id() { - return Err(Error::PeerIdMismatch); - } - - Ok(HandshakeContext { - config: self.config, - state: Remote { - peer_id, - public_key, - }, - }) - } -} - -pub(crate) async fn handshake(socket: S, config: Config) -> Result<(S, Remote, Bytes), Error> +pub(crate) async fn handshake(socket: S, config: Config) -> Result<(S, PublicKey, Bytes), Error> where S: AsyncRead + AsyncWrite + Send + Unpin, { // The handshake messages all start with a variable-length integer indicating the size. - let mut framed_socket = Framed::new(socket, UviBytes::default()); + let mut framed_socket = Framed::new(socket, quick_protobuf_codec::Codec::::new(100)); - trace!("starting handshake"); - let context = HandshakeContext::new(config); - - trace!("sending exchange to remote"); + tracing::trace!("sending exchange to remote"); framed_socket - .send(BytesMut::from(&context.state.exchange_bytes[..])) - .await?; - - trace!("receiving the remote's exchange"); - let context = match framed_socket.next().await { - Some(p) => context.with_remote(p?)?, + .send(Exchange { + id: Some(config.local_public_key.to_peer_id().to_bytes()), + pubkey: Some(config.local_public_key.encode_protobuf()), + }) + .await + .map_err(DecodeError)?; + + tracing::trace!("receiving the remote's exchange"); + let public_key = match framed_socket + .next() + .await + .transpose() + .map_err(DecodeError)? 
+ { + Some(remote) => { + let public_key = PublicKey::try_decode_protobuf(&remote.pubkey.unwrap_or_default())?; + let peer_id = PeerId::from_bytes(&remote.id.unwrap_or_default())?; + + if peer_id != public_key.to_peer_id() { + return Err(Error::PeerIdMismatch); + } + + public_key + } None => { - debug!("unexpected eof while waiting for remote's exchange"); + tracing::debug!("unexpected eof while waiting for remote's exchange"); let err = IoError::new(IoErrorKind::BrokenPipe, "unexpected eof"); return Err(err.into()); } }; - trace!( - "received exchange from remote; pubkey = {:?}", - context.state.public_key - ); + tracing::trace!(?public_key, "received exchange from remote"); let FramedParts { io, @@ -129,5 +76,5 @@ where .. } = framed_socket.into_parts(); assert!(write_buffer.is_empty()); - Ok((io, context.state, read_buffer.freeze())) + Ok((io, public_key, read_buffer.freeze())) } diff --git a/transports/plaintext/src/lib.rs b/transports/plaintext/src/lib.rs index fa7cba6b8ffd..4a322d63fab9 100644 --- a/transports/plaintext/src/lib.rs +++ b/transports/plaintext/src/lib.rs @@ -27,11 +27,11 @@ use crate::error::Error; use bytes::Bytes; use futures::future::BoxFuture; use futures::prelude::*; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::UpgradeInfo; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_identity::PublicKey; -use log::debug; use std::{ io, iter, pin::Pin, @@ -46,21 +46,13 @@ mod proto { pub(crate) use self::structs::Exchange; } -#[deprecated(note = "Has been renamed to `Config`.")] -pub type PlainText2Config = Config; - -#[deprecated(note = "Has been renamed to `Output`.")] -pub type PlainTextOutput = Output; - /// [`Config`] is an insecure connection handshake for testing purposes only. 
#[derive(Clone)] pub struct Config { - #[deprecated(note = "Will be made private in the future, please use `Config::new` instead!")] - pub local_public_key: identity::PublicKey, + local_public_key: identity::PublicKey, } impl Config { - #[allow(deprecated)] pub fn new(identity: &identity::Keypair) -> Self { Self { local_public_key: identity.public(), @@ -77,7 +69,7 @@ impl UpgradeInfo for Config { } } -impl InboundUpgrade for Config +impl InboundConnectionUpgrade for Config where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { @@ -90,7 +82,7 @@ where } } -impl OutboundUpgrade for Config +impl OutboundConnectionUpgrade for Config where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { @@ -108,15 +100,15 @@ impl Config { where T: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - debug!("Starting plaintext handshake."); - let (socket, remote, read_buffer) = handshake::handshake(socket, self).await?; - debug!("Finished plaintext handshake."); + tracing::debug!("Starting plaintext handshake."); + let (socket, remote_key, read_buffer) = handshake::handshake(socket, self).await?; + tracing::debug!("Finished plaintext handshake."); Ok(( - remote.peer_id, + remote_key.to_peer_id(), Output { socket, - remote_key: remote.public_key, + remote_key, read_buffer, }, )) diff --git a/transports/plaintext/tests/smoke.rs b/transports/plaintext/tests/smoke.rs index ed18fb44cbad..f77f23d3ad39 100644 --- a/transports/plaintext/tests/smoke.rs +++ b/transports/plaintext/tests/smoke.rs @@ -19,15 +19,17 @@ // DEALINGS IN THE SOFTWARE. 
use futures::io::{AsyncReadExt, AsyncWriteExt}; -use libp2p_core::InboundUpgrade; +use libp2p_core::upgrade::InboundConnectionUpgrade; use libp2p_identity as identity; use libp2p_plaintext as plaintext; -use log::debug; use quickcheck::QuickCheck; +use tracing_subscriber::EnvFilter; #[test] fn variable_msg_length() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(msg: Vec) { let msg_to_send = msg.clone(); @@ -53,18 +55,18 @@ fn variable_msg_length() { assert_eq!(received_client_id, client_id.public().to_peer_id()); let client_fut = async { - debug!("Client: writing message."); + tracing::debug!("Client: writing message."); client_channel .write_all(&msg_to_send) .await .expect("no error"); - debug!("Client: flushing channel."); + tracing::debug!("Client: flushing channel."); client_channel.flush().await.expect("no error"); }; let server_fut = async { let mut server_buffer = vec![0; msg_to_receive.len()]; - debug!("Server: reading message."); + tracing::debug!("Server: reading message."); server_channel .read_exact(&mut server_buffer) .await diff --git a/transports/pnet/CHANGELOG.md b/transports/pnet/CHANGELOG.md index 3b425d85dcdf..1fbc2d08807d 100644 --- a/transports/pnet/CHANGELOG.md +++ b/transports/pnet/CHANGELOG.md @@ -1,4 +1,15 @@ -## 0.23.0 +## 0.24.0 + + +## 0.23.1 + + + +## 0.23.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/transports/pnet/Cargo.toml b/transports/pnet/Cargo.toml index 1a3a1224fbbe..bdefa76bbd18 100644 --- a/transports/pnet/Cargo.toml +++ b/transports/pnet/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-pnet" edition = "2021" rust-version = { workspace = true } description = "Private swarm support for libp2p" -version = "0.23.0" +version = "0.24.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,23 +11,23 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -futures = "0.3.28" -log = "0.4.20" +futures = "0.3.30" salsa20 = "0.10" sha3 = "0.10" +tracing = "0.1.37" rand = "0.8" -pin-project = "1.1.3" +pin-project = "1.1.4" [dev-dependencies] libp2p-core = { workspace = true } -libp2p-identity = { workspace = true, features = ["ed25519", "rsa", "ecdsa","secp256k1"] } +libp2p-identity = { workspace = true, features = ["ed25519", "rsa", "ecdsa","secp256k1", "rand"] } libp2p-noise = { workspace = true } libp2p-swarm = { workspace = true, features = ["tokio"] } libp2p-tcp = { workspace = true, features = ["tokio"] } libp2p-websocket = { workspace = true } libp2p-yamux = { workspace = true } quickcheck = { workspace = true } -tokio = { version = "1.32.0", features = ["full"] } +tokio = { version = "1.36.0", features = ["full"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/pnet/src/crypt_writer.rs b/transports/pnet/src/crypt_writer.rs index c59935482392..06f932fbe719 100644 --- a/transports/pnet/src/crypt_writer.rs +++ b/transports/pnet/src/crypt_writer.rs @@ -23,7 +23,6 @@ use futures::{ ready, task::{Context, Poll}, }; -use log::trace; use pin_project::pin_project; use salsa20::{cipher::StreamCipher, XSalsa20}; use std::{fmt, pin::Pin}; @@ -120,7 +119,7 @@ impl AsyncWrite for CryptWriter { let res = Pin::new(&mut *this.buf).poll_write(cx, buf); if let Poll::Ready(Ok(count)) = res { this.cipher.apply_keystream(&mut this.buf[0..count]); - trace!("encrypted {} bytes", count); + tracing::trace!(bytes=%count, "encrypted bytes"); } else { debug_assert!(false); }; diff --git a/transports/pnet/src/lib.rs b/transports/pnet/src/lib.rs index 15f42556c628..083ffff36a36 100644 --- a/transports/pnet/src/lib.rs +++ b/transports/pnet/src/lib.rs @@ -29,7 +29,6 @@ mod crypt_writer; use crypt_writer::CryptWriter; use futures::prelude::*; -use log::trace; use pin_project::pin_project; use rand::RngCore; use salsa20::{ @@ -159,6 +158,7 @@ impl fmt::Display for Fingerprint { /// Error when parsing a PreSharedKey #[derive(Clone, Debug, PartialEq, Eq)] +#[allow(clippy::enum_variant_names)] // Maybe fix at some stage, not important now. 
pub enum KeyParseError { /// file does not have the expected structure InvalidKeyFile, @@ -209,7 +209,7 @@ impl PnetConfig { where TSocket: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - trace!("exchanging nonces"); + tracing::trace!("exchanging nonces"); let mut local_nonce = [0u8; NONCE_SIZE]; let mut remote_nonce = [0u8; NONCE_SIZE]; rand::thread_rng().fill_bytes(&mut local_nonce); @@ -222,7 +222,7 @@ impl PnetConfig { .read_exact(&mut remote_nonce) .await .map_err(PnetError::HandshakeError)?; - trace!("setting up ciphers"); + tracing::trace!("setting up ciphers"); let write_cipher = XSalsa20::new(&self.key.0.into(), &local_nonce.into()); let read_cipher = XSalsa20::new(&self.key.0.into(), &remote_nonce.into()); Ok(PnetOutput::new(socket, write_cipher, read_cipher)) @@ -256,9 +256,9 @@ impl AsyncRead for PnetOutput { let this = self.project(); let result = this.inner.get_pin_mut().poll_read(cx, buf); if let Poll::Ready(Ok(size)) = &result { - trace!("read {} bytes", size); + tracing::trace!(bytes=%size, "read bytes"); this.read_cipher.apply_keystream(&mut buf[..*size]); - trace!("decrypted {} bytes", size); + tracing::trace!(bytes=%size, "decrypted bytes"); } result } diff --git a/transports/pnet/tests/smoke.rs b/transports/pnet/tests/smoke.rs index 5e02ed856c6f..79ffaeab4471 100644 --- a/transports/pnet/tests/smoke.rs +++ b/transports/pnet/tests/smoke.rs @@ -6,7 +6,7 @@ use libp2p_core::upgrade::Version; use libp2p_core::Transport; use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_pnet::{PnetConfig, PreSharedKey}; -use libp2p_swarm::{dummy, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p_swarm::{dummy, Config, NetworkBehaviour, Swarm, SwarmEvent}; const TIMEOUT: Duration = Duration::from_secs(5); @@ -113,9 +113,12 @@ where .authenticate(libp2p_noise::Config::new(&identity).unwrap()) .multiplex(libp2p_yamux::Config::default()) .boxed(); - SwarmBuilder::with_tokio_executor(transport, dummy::Behaviour, 
identity.public().to_peer_id()) - .idle_connection_timeout(Duration::from_secs(5)) - .build() + Swarm::new( + transport, + dummy::Behaviour, + identity.public().to_peer_id(), + Config::with_tokio_executor(), + ) } async fn listen_on(swarm: &mut Swarm, addr: Multiaddr) -> Multiaddr { diff --git a/transports/quic/CHANGELOG.md b/transports/quic/CHANGELOG.md index a7ad810bdb61..3c34a1989f91 100644 --- a/transports/quic/CHANGELOG.md +++ b/transports/quic/CHANGELOG.md @@ -1,7 +1,29 @@ -## 0.9.3 - unreleased +## 0.10.2 + +- Change `max_idle_timeout` to 10s. + See [PR 4965](https://github.com/libp2p/rust-libp2p/pull/4965). + +## 0.10.1 + +- Allow disabling path MTU discovery. + See [PR 4823](https://github.com/libp2p/rust-libp2p/pull/4823). + +## 0.10.0 + +- Improve hole-punch timing. + This should improve success rates for hole-punching QUIC connections. + See [PR 4549](https://github.com/libp2p/rust-libp2p/pull/4549). +- Remove deprecated `Error::EndpointDriverCrashed` variant. + See [PR 4738](https://github.com/libp2p/rust-libp2p/pull/4738). + +## 0.9.3 + +- No longer report error when explicit closing of a QUIC endpoint succeeds. + See [PR 4621]. - Support QUIC stateless resets for supported `libp2p_identity::Keypair`s. See [PR 4554].
+[PR 4621]: https://github.com/libp2p/rust-libp2p/pull/4621 [PR 4554]: https://github.com/libp2p/rust-libp2p/pull/4554 ## 0.9.2 diff --git a/transports/quic/Cargo.toml b/transports/quic/Cargo.toml index 697c257fa52c..b4bf93d6e104 100644 --- a/transports/quic/Cargo.toml +++ b/transports/quic/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-quic" -version = "0.9.2" +version = "0.10.2" authors = ["Parity Technologies "] edition = "2021" rust-version = { workspace = true } @@ -11,20 +11,20 @@ license = "MIT" [dependencies] async-std = { version = "1.12.0", optional = true } bytes = "1.5.0" -futures = "0.3.28" +futures = "0.3.30" futures-timer = "3.0.2" -if-watch = "3.0.1" +if-watch = "3.2.0" libp2p-core = { workspace = true } libp2p-tls = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" parking_lot = "0.12.0" quinn = { version = "0.10.2", default-features = false, features = ["tls-rustls", "futures-io"] } rand = "0.8.5" -rustls = { version = "0.21.7", default-features = false } -thiserror = "1.0.49" -tokio = { version = "1.32.0", default-features = false, features = ["net", "rt", "time"], optional = true } -socket2 = "0.5.4" +rustls = { version = "0.21.9", default-features = false } +thiserror = "1.0.57" +tokio = { version = "1.36.0", default-features = false, features = ["net", "rt", "time"], optional = true } +tracing = "0.1.37" +socket2 = "0.5.5" ring = "0.16.20" [features] @@ -40,13 +40,14 @@ rustc-args = ["--cfg", "docsrs"] [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } -env_logger = "0.10.0" +libp2p-identity = { workspace = true, features = ["rand"] } libp2p-muxer-test-harness = { path = "../../muxers/test-harness" } libp2p-noise = { workspace = true } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } quickcheck = "1" -tokio = { version = "1.32.0", features = ["macros", "rt-multi-thread", "time"] } +tokio = { version = "1.36.0", features = ["macros", 
"rt-multi-thread", "time"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[test]] name = "stream_compliance" diff --git a/transports/quic/src/config.rs b/transports/quic/src/config.rs index 5351a537c763..540f13e726bc 100644 --- a/transports/quic/src/config.rs +++ b/transports/quic/src/config.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use quinn::VarInt; +use quinn::{MtuDiscoveryConfig, VarInt}; use std::{sync::Arc, time::Duration}; /// Config for the transport. @@ -63,6 +63,9 @@ pub struct Config { server_tls_config: Arc, /// Libp2p identity of the node. keypair: libp2p_identity::Keypair, + + /// Parameters governing MTU discovery. See [`MtuDiscoveryConfig`] for details. + mtu_discovery_config: Option, } impl Config { @@ -75,16 +78,23 @@ impl Config { server_tls_config, support_draft_29: false, handshake_timeout: Duration::from_secs(5), - max_idle_timeout: 30 * 1000, + max_idle_timeout: 10 * 1000, max_concurrent_stream_limit: 256, - keep_alive_interval: Duration::from_secs(15), + keep_alive_interval: Duration::from_secs(5), max_connection_data: 15_000_000, // Ensure that one stream is not consuming the whole connection. max_stream_data: 10_000_000, keypair: keypair.clone(), + mtu_discovery_config: Some(Default::default()), } } + + /// Disable MTU path discovery (it is enabled by default). + pub fn disable_path_mtu_discovery(mut self) -> Self { + self.mtu_discovery_config = None; + self + } } /// Represents the inner configuration for [`quinn`]. @@ -108,6 +118,7 @@ impl From for QuinnConfig { support_draft_29, handshake_timeout: _, keypair, + mtu_discovery_config, } = config; let mut transport = quinn::TransportConfig::default(); // Disable uni-directional streams. 
@@ -120,6 +131,7 @@ impl From for QuinnConfig { transport.allow_spin(false); transport.stream_receive_window(max_stream_data.into()); transport.receive_window(max_connection_data.into()); + transport.mtu_discovery_config(mtu_discovery_config); let transport = Arc::new(transport); let mut server_config = quinn::ServerConfig::with_crypto(server_tls_config); diff --git a/transports/quic/src/connection/connecting.rs b/transports/quic/src/connection/connecting.rs index b911eaa7dfe0..141f0b5542ef 100644 --- a/transports/quic/src/connection/connecting.rs +++ b/transports/quic/src/connection/connecting.rs @@ -58,7 +58,7 @@ impl Connecting { let certificates: Box> = identity.downcast().expect("we rely on rustls feature; qed"); let end_entity = certificates - .get(0) + .first() .expect("there should be exactly one certificate; qed"); let p2p_cert = libp2p_tls::certificate::parse(end_entity) .expect("the certificate was validated during TLS handshake; qed"); diff --git a/transports/quic/src/hole_punching.rs b/transports/quic/src/hole_punching.rs index 874bc659b2e2..605799af5e14 100644 --- a/transports/quic/src/hole_punching.rs +++ b/transports/quic/src/hole_punching.rs @@ -4,6 +4,7 @@ use futures::future::Either; use rand::{distributions, Rng}; +use std::convert::Infallible; use std::{ net::{SocketAddr, UdpSocket}, time::Duration, @@ -18,22 +19,26 @@ pub(crate) async fn hole_puncher( futures::pin_mut!(punch_holes_future); match futures::future::select(P::sleep(timeout_duration), punch_holes_future).await { Either::Left(_) => Error::HandshakeTimedOut, - Either::Right((hole_punch_err, _)) => hole_punch_err, + Either::Right((Err(hole_punch_err), _)) => hole_punch_err, + Either::Right((Ok(never), _)) => match never {}, } } -async fn punch_holes(socket: UdpSocket, remote_addr: SocketAddr) -> Error { +async fn punch_holes( + socket: UdpSocket, + remote_addr: SocketAddr, +) -> Result { loop { - let sleep_duration = Duration::from_millis(rand::thread_rng().gen_range(10..=200)); - 
P::sleep(sleep_duration).await; - let contents: Vec = rand::thread_rng() .sample_iter(distributions::Standard) .take(64) .collect(); - if let Err(e) = P::send_to(&socket, &contents, remote_addr).await { - return Error::Io(e); - } + tracing::trace!("Sending random UDP packet to {remote_addr}"); + + P::send_to(&socket, &contents, remote_addr).await?; + + let sleep_duration = Duration::from_millis(rand::thread_rng().gen_range(10..=200)); + P::sleep(sleep_duration).await; } } diff --git a/transports/quic/src/lib.rs b/transports/quic/src/lib.rs index 494ecfdcddb9..7ae649b6914b 100644 --- a/transports/quic/src/lib.rs +++ b/transports/quic/src/lib.rs @@ -90,10 +90,6 @@ pub enum Error { #[error(transparent)] Io(#[from] std::io::Error), - /// The task to drive a quic endpoint has crashed. - #[error("Endpoint driver crashed")] - EndpointDriverCrashed, - /// The [`Connecting`] future timed out. #[error("Handshake with the remote timed out.")] HandshakeTimedOut, diff --git a/transports/quic/src/transport.rs b/transports/quic/src/transport.rs index 16ffbc5a1636..aea3c91093f8 100644 --- a/transports/quic/src/transport.rs +++ b/transports/quic/src/transport.rs @@ -308,7 +308,7 @@ impl Transport for GenTransport